Compare commits
659 Commits
*The commit table for this comparison (author, SHA1, and date for each commit) did not survive the export: only abbreviated SHA1s were captured, with 56509a61b9 first and 3a43042089 last in the listing, so the per-commit rows are omitted here.*
**.gitignore** (vendored, 5 changes)

@@ -1,3 +1,6 @@
```
pkg/
tivan
.vagrant
/telegraf
.idea
*~
*#
```
**CHANGELOG.md** (new file, 354 lines)

@@ -0,0 +1,354 @@

## v0.10.0 [unreleased]

### Release Notes
- Linux packages have been taken out of `opt`, the binary is now in `/usr/bin`
  and configuration files are in `/etc/telegraf`
- **breaking change** `plugins` have been renamed to `inputs`. This was done because
  `plugins` is too generic, as there are now also "output plugins", and will likely
  be "aggregator plugins" and "filter plugins" in the future. Additionally,
  `inputs/` and `outputs/` directories have been placed in the root-level `plugins/`
  directory.
- **breaking change** the `io` plugin has been renamed `diskio`
- **breaking change** plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
  for configuration.
- **breaking change** `twemproxy` plugin: `prefix` option removed.
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_`
  instead of only `cpu_`
- **breaking change** some command-line flags have been renamed to separate words.
  `-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`,
  `-outputfilter` -> `-output-filter`
- The prometheus plugin schema has not been changed (measurements have not been
  aggregated).

### Packaging change note:

RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their
configurations overwritten by the upgrade. There is a backup stored at
/etc/telegraf/telegraf.conf.$(date +%s).backup.

### Features
- Plugin measurements aggregated into a single measurement.
- Added ability to specify per-plugin tags
- Added ability to specify per-plugin measurement suffix and prefix.
  (`name_prefix` and `name_suffix`)
- Added ability to override base plugin measurement name. (`name_override`)

### Bugfixes

## v0.2.5 [unreleased]

### Features
- [#427](https://github.com/influxdb/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdb/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdb/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff

### Bugfixes
- [#430](https://github.com/influxdb/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdb/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!

## v0.2.4 [2015-12-08]

### Features
- [#412](https://github.com/influxdb/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdb/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdb/telegraf/issues/414): Jolokia plugin auth parameters
- [#415](https://github.com/influxdb/telegraf/issues/415): memcached plugin: support unix sockets
- [#418](https://github.com/influxdb/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdb/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdb/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdb/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!

### Bugfixes
- [#405](https://github.com/influxdb/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdb/telegraf/issues/388): Fix collection hangup when cpu times decrement.

## v0.2.3 [2015-11-30]

### Release Notes
- **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`,
  and most of the config option names have changed.
  This only affects the kafka consumer _plugin_ (not the
  output). There were a number of problems with the kafka plugin that led to it
  only collecting data once at startup, so the kafka plugin was basically
  non-functional.
- Plugins can now be specified as a list, and multiple plugin instances of the
  same type can be specified, like this:

```
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
```

- Riemann output added
- Aerospike plugin: tag changed from `host` -> `aerospike_host`

### Features
- [#379](https://github.com/influxdb/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdb/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdb/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdb/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdb/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!

### Bugfixes
- [#371](https://github.com/influxdb/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdb/telegraf/issues/389): NaN value panic

## v0.2.2 [2015-11-18]

### Release Notes
- 0.2.1 has a bug where all lists within plugins get duplicated, this includes
  lists of servers/URLs. 0.2.2 is being released solely to fix that bug

### Bugfixes
- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in inputs.

## v0.2.1 [2015-11-16]

### Release Notes
- Telegraf will no longer use docker-compose for "long" unit test, it has been
  changed to just run docker commands in the Makefile. See `make docker-run` and
  `make docker-kill`. `make test` will still run all unit tests with docker.
- Long unit tests are now run in CircleCI, with docker & race detector
- Redis plugin tag has changed from `host` to `server`
- HAProxy plugin tag has changed from `host` to `server`
- UDP output now supported
- Telegraf will now compile on FreeBSD
- Users can now specify outputs as lists, specifying multiple outputs of the
  same type.

### Features
- [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdb/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdb/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdb/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdb/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdb/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!

### Bugfixes
- [#331](https://github.com/influxdb/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdb/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdb/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdb/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!

## v0.2.0 [2015-10-27]

### Release Notes
- The -test flag will now only output 2 collections for plugins that need it
- There is a new agent configuration option: `flush_interval`. This option tells
  Telegraf how often to flush data to InfluxDB and other output sinks. For example,
  users can set `interval = "2s"` and `flush_interval = "60s"` for Telegraf to
  collect data every 2 seconds, and flush every 60 seconds.
- `precision` and `utc` are no longer valid agent config values. `precision` has
  moved to the `influxdb` output config, where it will continue to default to "s"
- debug and test output will now print the raw line-protocol string
- Telegraf will now, by default, round the collection interval to the nearest
  even interval. This means that `interval="10s"` will collect every :00, :10, etc.
  To ease scale concerns, flushing will be "jittered" by a random amount so that
  all Telegraf instances do not flush at the same time. Both of these options can
  be controlled via the `round_interval` and `flush_jitter` config options
  (see the sketch after this list).
- Telegraf will now retry metric flushes twice
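The interval rounding and flush jitter described above can be pictured with a short sketch. This is illustrative only, not Telegraf's actual scheduler code; the function names are made up.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRoundedTick returns the next collection time aligned to the interval,
// e.g. :00, :10, :20 for interval="10s" (the round_interval behavior).
func nextRoundedTick(now time.Time, interval time.Duration) time.Time {
	return now.Truncate(interval).Add(interval)
}

// jitteredFlush returns the flush interval plus a random delay of at most
// maxJitter, so that many Telegraf instances do not all flush at the same
// moment (the flush_jitter behavior).
func jitteredFlush(flushInterval, maxJitter time.Duration) time.Duration {
	return flushInterval + time.Duration(rand.Int63n(int64(maxJitter)))
}

func main() {
	fmt.Println("next collection at:", nextRoundedTick(time.Now(), 10*time.Second))
	fmt.Println("next flush in:", jitteredFlush(60*time.Second, 5*time.Second))
}
```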
### Features
- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdb/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
  of metrics collected and from how many inputs.
- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham!

### Bugfixes
- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdb/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!

## v0.1.9 [2015-09-22]

### Release Notes
- InfluxDB output config change: `url` is now `urls`, and is a list. Config files
  will still be backwards compatible if only `url` is specified.
- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
  allow filtering of output sinks on the command-line using the `-outputfilter`
  flag, much like how the `-filter` flag works for inputs.
- Support for filtering on config-file creation -- Telegraf now supports
  filtering the -sample-config command. You can now run
  `telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config
  file with only the cpu plugin defined, and the influxdb output defined.
- **Breaking Change**: The CPU collection plugin has been refactored to fix some
  bugs and outdated dependency issues. At the same time, I also decided to fix
  a naming consistency issue, so cpu_percentageIdle will become cpu_usage_idle.
  Also, all CPU time measurements now have it indicated in their name, so cpu_idle
  will become cpu_time_idle. Additionally, cpu_time measurements are going to be
  dropped in the default config.
- **Breaking Change**: The memory plugin has been refactored and some measurements
  have been renamed for consistency. Some measurements have also been removed from
  being outputted. They are still being collected by gopsutil, and could easily be
  re-added in a "verbose" mode if there is demand for it.

### Features
- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdb/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdb/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdb/telegraf/pull/217): Add filtering for output sinks
  and filtering when specifying a config file.

### Bugfixes
- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdb/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdb/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdb/telegraf/issues/212): Fix makefile warning. Thanks @ekini!

## v0.1.8 [2015-09-04]

### Release Notes
- Telegraf will now write data in UTC at second precision by default
- Now using Go 1.5 to build telegraf

### Features
- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin

### Bugfixes

## v0.1.7 [2015-08-28]

### Features
- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability

### Bugfixes
- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!

## v0.1.6 [2015-08-20]

### Features
- [#112](https://github.com/influxdb/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdb/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdb/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!

### Bugfixes
- [#113](https://github.com/influxdb/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdb/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdb/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdb/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error

## v0.1.5 [2015-08-13]

### Features
- [#54](https://github.com/influxdb/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdb/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdb/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdb/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
- [#73](https://github.com/influxdb/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdb/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdb/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdb/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
- [#91](https://github.com/influxdb/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdb/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdb/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdb/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdb/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdb/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdb/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdb/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!

### Bugfixes
- [#85](https://github.com/influxdb/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdb/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdb/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdb/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdb/telegraf/issues/99): update integer output to new InfluxDB line protocol format

## v0.1.4 [2015-07-09]

### Features
- [#56](https://github.com/influxdb/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!

### Bugfixes
- [#50](https://github.com/influxdb/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdb/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!

## v0.1.3 [2015-07-05]

### Features
- [#35](https://github.com/influxdb/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdb/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!

### Bugfixes
- [#45](https://github.com/influxdb/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdb/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!

## v0.1.2 [2015-07-01]

### Features
- [#12](https://github.com/influxdb/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdb/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdb/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdb/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!

### Bugfixes
- [#13](https://github.com/influxdb/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdb/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdb/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdb/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdb/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!

## v0.1.1 [2015-06-19]

### Release Notes

This is the initial release of Telegraf.
**CONFIGURATION.md** (new file, 199 lines)

@@ -0,0 +1,199 @@

# Telegraf Configuration

## Generating a Configuration File

A default Telegraf config file can be generated using the `-sample-config` flag,
like this: `telegraf -sample-config`

To generate a file with specific inputs and outputs, you can use the
`-input-filter` and `-output-filter` flags, like this:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`

## Telegraf Agent Configuration

Telegraf has a few options you can configure under the `agent` section of the
config.

* **hostname**: The hostname is passed as a tag. By default this will be
  the value returned by `hostname` on the machine running Telegraf.
  You can override that value here.
* **interval**: How often to gather metrics. Uses a simple number +
  unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes
  (see the duration sketch after this list).
* **debug**: Set to true to gather and send metrics to STDOUT as well as
  InfluxDB.
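The `interval` strings follow the usual Go duration syntax. As a rough illustration (Telegraf's own parsing may add conveniences, so treat this only as an approximation), Go's standard `time.ParseDuration` accepts the same style of values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "10s" and "5m" are the forms used for the agent interval settings.
	for _, s := range []string{"10s", "5m", "1h30m"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Println("invalid interval:", s, err)
			continue
		}
		fmt.Printf("%s -> %v\n", s, d)
	}
}
```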
## Input Configuration

There are some configuration options that are configurable per input:

* **name_override**: Override the base name of the measurement.
  (Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
  global interval, but if one particular input should be run less or more often,
  you can configure that here.

### Input Filters

There are also filters that can be configured per input:

* **pass**: An array of strings that is used to filter metrics generated by the
  current input. Each string in the array is tested as a glob match against field
  names, and if it matches, the field is emitted (see the sketch after this list).
* **drop**: The inverse of pass; if a field name matches, it is not emitted.
* **tagpass**: A map of tag names to arrays of strings, used to filter
  measurements from the current input. Each string in the array is tested as a glob
  match against the tag value, and if it matches, the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
  emitted. This is tested on measurements that have passed the tagpass test.
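The pass/drop and tagpass/tagdrop strings are glob patterns with OR semantics: a field or measurement only needs to match one entry in the array. The sketch below approximates that behavior with Go's `path/filepath.Match`; Telegraf's real filter implementation may use a different matcher, so this is illustrative only.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// passes reports whether name matches any of the glob patterns,
// approximating how a pass/tagpass array is evaluated (OR semantics).
func passes(patterns []string, name string) bool {
	for _, p := range patterns {
		if ok, _ := filepath.Match(p, name); ok {
			return true
		}
	}
	return false
}

func main() {
	drop := []string{"time_*"}              // drop list from the cpu example below
	fmt.Println(passes(drop, "time_user"))  // true: field matches, so it is dropped
	fmt.Println(passes(drop, "usage_idle")) // false: field is kept

	fstype := []string{"ext4", "xfs"}       // tagpass values for the "fstype" tag
	fmt.Println(passes(fstype, "ext4"))     // true: the measurement is emitted
}
```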
### Input Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.

```toml
[tags]
  dc = "denver-1"

[agent]
  interval = "10s"

# OUTPUTS
[[outputs.influxdb]]
  url = "http://192.168.59.103:8086" # required.
  database = "telegraf" # required.
  precision = "s"

# INPUTS
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  # filter all fields beginning with 'time_'
  drop = ["time_*"]
```

### Input Config: tagpass and tagdrop

```toml
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
  # Don't collect CPU data for cpu6 & cpu7
  [inputs.cpu.tagdrop]
    cpu = [ "cpu6", "cpu7" ]

[[inputs.disk]]
  [inputs.disk.tagpass]
    # tagpass conditions are OR, not AND.
    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
    # then the metric passes
    fstype = [ "ext4", "xfs" ]
    # Globs can also be used on the tag values
    path = [ "/opt", "/home*" ]
```

### Input Config: pass and drop

```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  drop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[inputs.disk]]
  pass = ["inodes*"]
```

### Input config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`:

```toml
[[inputs.cpu]]
  name_suffix = "_total"
  percpu = false
  totalcpu = true
```

This will emit measurements with the name `foobar`:

```toml
[[inputs.cpu]]
  name_override = "foobar"
  percpu = false
  totalcpu = true
```

### Input config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`:

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  [inputs.cpu.tags]
    tag1 = "foo"
    tag2 = "bar"
```

### Multiple inputs of the same type

Additional inputs (or outputs) of the same type can be specified by
defining more instances in the config file. It is highly recommended that
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
to avoid measurement collisions:

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  name_override = "percpu_usage"
  drop = ["cpu_time*"]
```

## Output Configuration

Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable options as inputs
(pass, drop, tagpass, tagdrop).

```toml
[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf"
  precision = "s"
  # Drop all measurements that start with "aerospike"
  drop = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-aerospike-data"
  precision = "s"
  # Only accept aerospike data:
  pass = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-cpu0-data"
  precision = "s"
  # Only store measurements where the tag "cpu" matches the value "cpu0"
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
```
**CONTRIBUTING.md** (new file, 271 lines)

@@ -0,0 +1,271 @@

## Sign the CLA

Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)

## Input Plugins

This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows operators to
pick and choose what is gathered, and makes it easy for developers
to create new ways of generating metrics.

Plugin authorship is kept as simple as possible to encourage people to develop
and submit new inputs.

### Input Plugin Guidelines

* A plugin must conform to the `inputs.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
  See below for a quick example.
* Input Plugins must be added to the
  `github.com/influxdb/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
  plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

### Input interface

```go
type Input interface {
	SampleConfig() string
	Description() string
	Gather(Accumulator) error
}

type Accumulator interface {
	Add(measurement string,
		value interface{},
		tags map[string]string,
		timestamp ...time.Time)
	AddFields(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		timestamp ...time.Time)
}
```

### Accumulator

The way that a plugin emits metrics is by interacting with the Accumulator.

The `Add` function takes 3 arguments:
* **measurement**: A string description of the metric. For instance `bytes_read`
  or `faults`.
* **value**: A value for the metric. This accepts 5 different types of value:
  * **int**: The most common type. All int types are accepted, but favor using
    `int64`. Useful for counters, etc.
  * **float**: Favor `float64`, useful for gauges, percentages, etc.
  * **bool**: `true` or `false`, useful to indicate the presence of a state.
    `light_on`, etc.
  * **string**: Typically used to indicate a message, or some kind of freeform
    information.
  * **time.Time**: Useful for indicating when a state last occurred, for instance
    `light_on_since`.
* **tags**: This is a map of strings to strings describing the where and who
  of the metric. For instance, the `net` plugin adds a tag named `"interface"`
  set to the name of the network interface, like `"eth0"`.

Let's say you've written a plugin that emits metrics about processes on the current host.

### Input Plugin Example

```go
package simple

// simple.go

import "github.com/influxdb/telegraf/plugins/inputs"

type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
	return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc inputs.Accumulator) error {
	if s.Ok {
		acc.Add("state", "pretty good", nil)
	} else {
		acc.Add("state", "not great", nil)
	}

	return nil
}

func init() {
	inputs.Add("simple", func() inputs.Input { return &Simple{} })
}
```
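The example above emits one value per `Add` call. The `AddFields` method declared in the Accumulator interface lets a plugin emit several fields under a single measurement in one call. Below is a hedged variant of the example's `Gather` method (same package and imports as the example; the field and tag names here are invented purely for illustration):

```go
func (s *Simple) Gather(acc inputs.Accumulator) error {
	// Hypothetical fields, for illustration only.
	fields := map[string]interface{}{
		"ok":      s.Ok,
		"latency": 0.42,
	}
	tags := map[string]string{"source": "demo"}

	// Emits a single "simple" measurement carrying both fields.
	acc.AddFields("simple", fields, tags)
	return nil
}
```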
## Service Input Plugins

This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.

Service Input Plugins are substantially more complicated than a regular plugin, as they
will require threads and locks to verify data integrity. Service Input Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.

Their interface is quite similar to a regular plugin, with the addition of `Start()`
and `Stop()` methods.

### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
  `inputs.ServiceInput` interface.

### Service Plugin interface

```go
type ServiceInput interface {
	SampleConfig() string
	Description() string
	Gather(Accumulator) error
	Start() error
	Stop()
}
```
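To make the Start/Stop life cycle concrete, here is a self-contained sketch of a service input that collects lines in a background goroutine and hands them over on each `Gather` call. It is illustrative only, not a real Telegraf plugin: the "listener" is faked with a channel, and the Accumulator interface is copied locally from the earlier section so the snippet compiles on its own.

```go
package lineservice

import (
	"sync"
	"time"
)

// Local copy of the two-method Accumulator interface shown earlier, so this
// sketch is self-contained; a real plugin would use inputs.Accumulator.
type Accumulator interface {
	Add(measurement string, value interface{},
		tags map[string]string, timestamp ...time.Time)
	AddFields(measurement string, fields map[string]interface{},
		tags map[string]string, timestamp ...time.Time)
}

type LineService struct {
	sync.Mutex
	lines []string      // data gathered by the background goroutine
	stop  chan struct{} // closed by Stop to shut the goroutine down
	in    chan string   // stands in for a real network listener
}

func (l *LineService) Description() string  { return "demo service input" }
func (l *LineService) SampleConfig() string { return "" }

// Start launches the background collector, as required by the interface above.
func (l *LineService) Start() error {
	l.stop = make(chan struct{})
	l.in = make(chan string)
	go func() {
		for {
			select {
			case line := <-l.in:
				l.Lock()
				l.lines = append(l.lines, line)
				l.Unlock()
			case <-l.stop:
				return
			}
		}
	}()
	return nil
}

// Gather drains whatever the background service has collected so far.
func (l *LineService) Gather(acc Accumulator) error {
	l.Lock()
	defer l.Unlock()
	for _, line := range l.lines {
		acc.Add("line_length", len(line), nil, time.Now())
	}
	l.lines = nil
	return nil
}

// Stop shuts the background goroutine down.
func (l *LineService) Stop() { close(l.stop) }
```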
## Output Plugins

This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.

### Output Plugin Guidelines

* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
  See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
  `github.com/influxdb/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
  output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.

### Output interface

```go
type Output interface {
	Connect() error
	Close() error
	Description() string
	SampleConfig() string
	Write(points []*client.Point) error
}
```

### Output Example

```go
package simpleoutput

// simpleoutput.go

import "github.com/influxdb/telegraf/plugins/outputs"

type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demo output"
}

func (s *Simple) SampleConfig() string {
	return "url = localhost"
}

func (s *Simple) Connect() error {
	// Make a connection to the URL here
	return nil
}

func (s *Simple) Close() error {
	// Close connection to the URL here
	return nil
}

func (s *Simple) Write(points []*client.Point) error {
	for _, pt := range points {
		// write `pt` to the output sink here
	}
	return nil
}

func init() {
	outputs.Add("simpleoutput", func() outputs.Output { return &Simple{} })
}
```

## Service Output Plugins

This section is for developers who want to create a new "service" output. A
service output differs from a regular output in that it operates a background service
while Telegraf is running. One example would be the `prometheus_client` output,
which operates an HTTP server.

Their interface is quite similar to a regular output, with the addition of `Start()`
and `Stop()` methods.

### Service Output Guidelines

* Same as the `Output` guidelines, except that they must conform to the
  `output.ServiceOutput` interface.

### Service Output interface

```go
type ServiceOutput interface {
	Connect() error
	Close() error
	Description() string
	SampleConfig() string
	Write(points []*client.Point) error
	Start() error
	Stop()
}
```

## Unit Tests

### Execute short tests

Execute `make test-short`.

### Execute long tests

As Telegraf collects metrics from several third-party services, it becomes a
difficult task to mock each service, as some of them have complicated protocols
which would take some time to replicate.

To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go)
a simple mock will suffice.

To execute Telegraf tests follow these simple steps:

- Install docker following [these](https://docs.docker.com/installation/)
  instructions
- Execute `make test`

**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.

### Unit test troubleshooting

Try cleaning up your test environment by executing `make docker-kill` and
re-running.
**Godeps** (new file, 52 lines)

@@ -0,0 +1,52 @@
```
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757
github.com/aws/aws-sdk-go f09322ae1e6468fe828c862542389bc45baf3c00
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/boltdb/bolt 34a0fa5307f7562980fb8e7ff4723f7987edf49b
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 175e1df973274f04e9b459a62cffc49808f1a649
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-sql-driver/mysql 7a8740a6bd8feb6af5786ab9a9f1513970019d8c
github.com/gogo/protobuf 7b1331554dbe882cb3613ee8f1824a5583627963
github.com/golang/protobuf 2402d76f3d41f928c7902a765dfc872356dd3aad
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
github.com/influxdb/influxdb bd63489ef0faae2465ae5b1f0a28bd7e71e02e38
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 a3b15ae34567abb20a22992b989cd76f48d09c47
github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 0a3005bb37bc411040083a55372e77c405f6464c
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil ef151b7ff7fe76308f89a389447b7b78dfa02e0f
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify c92828f29518bc633893affbce12904ba41a7cfa
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
golang.org/x/crypto f23ba3a5ee43012fcb4b92e1a2a405a92554f4f2
golang.org/x/net 520af5de654dc4dd4f0f65aa40e66dbbd9043df1
gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
```
33 LICENSE_OF_DEPENDENCIES.md (new file)
@@ -0,0 +1,33 @@
|
||||
# List
|
||||
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
|
||||
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
|
||||
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
|
||||
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
|
||||
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
|
||||
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
|
||||
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
|
||||
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
|
||||
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
|
||||
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
|
||||
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
|
||||
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
|
||||
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
|
||||
- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
|
||||
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
|
||||
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
|
||||
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
|
||||
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
|
||||
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
|
||||
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
|
||||
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
|
||||
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
|
||||
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
|
||||
- github.com/stretchr/objx [MIT LICENSE](https://github.com/stretchr/objx)
|
||||
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
|
||||
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
|
||||
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
||||
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
||||
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
||||
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
||||
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)
|
||||
|
||||
98 Makefile (new file)
@@ -0,0 +1,98 @@
|
||||
UNAME := $(shell sh -c 'uname')
|
||||
VERSION := $(shell sh -c 'git describe --always --tags')
|
||||
ifdef GOBIN
|
||||
PATH := $(GOBIN):$(PATH)
|
||||
else
|
||||
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
|
||||
endif
|
||||
|
||||
# Standard Telegraf build
|
||||
default: prepare build
|
||||
|
||||
# Only run the build (no dependency grabbing)
|
||||
build:
|
||||
go build -o telegraf -ldflags \
|
||||
"-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
|
||||
# Build with race detector
|
||||
dev: prepare
|
||||
go build -race -o telegraf -ldflags \
|
||||
"-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
|
||||
# Build linux 64-bit, 32-bit and arm architectures
|
||||
build-linux-bins: prepare
|
||||
GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
|
||||
# Get dependencies and use gdm to checkout changesets
|
||||
prepare:
|
||||
go get ./...
|
||||
go get github.com/sparrc/gdm
|
||||
gdm restore
|
||||
|
||||
# Run all docker containers necessary for unit tests
|
||||
docker-run:
|
||||
ifeq ($(UNAME), Darwin)
|
||||
docker run --name kafka \
|
||||
-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
|
||||
-e ADVERTISED_PORT=9092 \
|
||||
-p "2181:2181" -p "9092:9092" \
|
||||
-d spotify/kafka
|
||||
endif
|
||||
ifeq ($(UNAME), Linux)
|
||||
docker run --name kafka \
|
||||
-e ADVERTISED_HOST=localhost \
|
||||
-e ADVERTISED_PORT=9092 \
|
||||
-p "2181:2181" -p "9092:9092" \
|
||||
-d spotify/kafka
|
||||
endif
|
||||
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
|
||||
docker run --name memcached -p "11211:11211" -d memcached
|
||||
docker run --name postgres -p "5432:5432" -d postgres
|
||||
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
|
||||
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
||||
docker run --name redis -p "6379:6379" -d redis
|
||||
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
||||
|
||||
# Run docker containers necessary for CircleCI unit tests
|
||||
docker-run-circle:
|
||||
docker run --name kafka \
|
||||
-e ADVERTISED_HOST=localhost \
|
||||
-e ADVERTISED_PORT=9092 \
|
||||
-p "2181:2181" -p "9092:9092" \
|
||||
-d spotify/kafka
|
||||
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
||||
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
||||
|
||||
# Kill all docker containers, ignore errors
|
||||
docker-kill:
|
||||
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||
|
||||
# Run full unit tests using docker containers (includes setup and teardown)
|
||||
test: docker-kill docker-run
|
||||
# Sleeping for kafka leadership election, TSDB setup, etc.
|
||||
sleep 60
|
||||
# SUCCESS, running tests
|
||||
go test -race ./...
|
||||
|
||||
# Run "short" unit tests
|
||||
test-short:
|
||||
go test -short ./...
|
||||
|
||||
.PHONY: test
|
||||
111 PLUGINS.md
@@ -1,111 +0,0 @@
|
||||
Telegraf is entirely plugin driven. This interface allows operators to pick
and choose what is gathered, and makes it easy for developers to create new
ways of generating metrics.
|
||||
|
||||
Plugin authorship is kept as simple as possible to encourage people to develop
and submit new plugins.
|
||||
|
||||
## Guidelines
|
||||
|
||||
* A plugin must conform to the `plugins.Plugin` interface.
|
||||
* Telegraf promises to run each plugin's Gather function serially. This means
|
||||
developers don't have to worry about thread safety within these functions.
|
||||
* Each generated metric automatically has the name of the plugin that generated
|
||||
it prepended. This is to keep plugins honest.
|
||||
* Plugins should call `plugins.Add` in their `init` function to register themselves.
|
||||
See below for a quick example.
|
||||
* To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdb/telegraf/plugins/all/all.go` file (a sketch of this registration follows the list below).
* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
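Registration in `all.go` amounts to a blank import of the plugin package, which runs its `init` function. A minimal sketch, assuming a hypothetical plugin package named `simple`:

```go
// plugins/all/all.go (sketch): each plugin is registered with a blank import,
// which triggers that package's init() and therefore its plugins.Add call.
package all

import (
	_ "github.com/influxdb/telegraf/plugins/simple" // hypothetical plugin package
)
```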
|
||||
|
||||
### Plugin interface
|
||||
|
||||
```go
type Plugin interface {
	SampleConfig() string
	Description() string
	Gather(Accumulator) error
}

type Accumulator interface {
	Add(measurement string, value interface{}, tags map[string]string)
	AddValuesWithTime(measurement string, values map[string]interface{}, tags map[string]string, timestamp time.Time)
}
```
|
||||
|
||||
### Accumulator
|
||||
|
||||
The way that a plugin emits metrics is by interacting with the Accumulator.
|
||||
|
||||
The `Add` function takes 3 arguments:
|
||||
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
|
||||
* **value**: A value for the metric. This accepts 5 different types of value:
  * **int**: The most common type. All int types are accepted, but favor using `int64`. Useful for counters, etc.
  * **float**: Favor `float64`, useful for gauges, percentages, etc.
  * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
  * **string**: Typically used to indicate a message, or some kind of freeform information.
  * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
* **tags**: This is a map of strings to strings that describes the where or who of the metric. For instance, the `net` plugin adds a tag named `"interface"` set to the name of the network interface, like `"eth0"`.
|
||||
|
||||
The `AddValuesWithTime` function allows multiple values for a point to be passed.
The values used are the same type profile as **value** above. The **timestamp**
argument allows a point to be registered as having occurred at an arbitrary time.
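To illustrate both calls, here is a minimal sketch of a Gather function that reports a single value and then a group of values with an explicit timestamp. The `Disk` type and the `readDiskStats` helper are hypothetical, invented only for this example:

```go
func (d *Disk) Gather(acc plugins.Accumulator) error {
	// readDiskStats is a hypothetical helper returning raw counters and the
	// time at which they were sampled.
	stats, ts := readDiskStats()

	tags := map[string]string{"device": "sda"}

	// A single value for one metric.
	acc.Add("bytes_read", stats.BytesRead, tags)

	// Several values for the same point, registered at the sample time.
	acc.AddValuesWithTime("io", map[string]interface{}{
		"reads":  stats.Reads,
		"writes": stats.Writes,
	}, tags, ts)

	return nil
}
```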
|
||||
|
||||
Let's say you've written a plugin that emits metrics about processes on the current host.
|
||||
|
||||
```go
type Process struct {
	CPUTime     float64
	MemoryBytes int64
	PID         int
}

func Gather(acc plugins.Accumulator) error {
	for _, process := range system.Processes() {
		tags := map[string]string{
			"pid": fmt.Sprintf("%d", process.Pid),
		}

		acc.Add("cpu", process.CPUTime, tags)
		acc.Add("memory", process.MemoryBytes, tags)
	}

	return nil
}
```
|
||||
|
||||
### Example
|
||||
|
||||
```go
// simple.go

import "github.com/influxdb/telegraf/plugins"

type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
	return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc plugins.Accumulator) error {
	if s.Ok {
		acc.Add("state", "pretty good", nil)
	} else {
		acc.Add("state", "not great", nil)
	}

	return nil
}

func init() {
	plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
}
```
|
||||
|
||||
205 README.md
@@ -1,25 +1,196 @@
|
||||
# Telegraf - A native agent for InfluxDB
|
||||
# Telegraf [](https://circleci.com/gh/influxdata/telegraf)
|
||||
|
||||
## Quickstart
|
||||
Telegraf is an agent written in Go for collecting metrics from the system it's
|
||||
running on, or from other services, and writing them into InfluxDB or other
|
||||
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
|
||||
|
||||
* Build from source or download telegraf (binaries forthcoming)
|
||||
* Run `telegraf -sample-config > telegraf.toml` to create an initial configuration
|
||||
* Edit the configuration to match your needs
|
||||
* Run `telegraf -config telegraf.toml -test` to output one full measurement sample to STDOUT
|
||||
* Run `telegraf -config telegraf.toml` to gather and send metrics to InfluxDB
|
||||
Design goals are to have a minimal memory footprint with a plugin system so
|
||||
that developers in the community can easily add support for collecting metrics
|
||||
from well known services (like Hadoop, Postgres, or Redis) and third party
|
||||
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
|
||||
|
||||
## Telegraf Options
|
||||
New input and output plugins are designed to be easy to contribute; we'll
eagerly accept pull requests and will manage the set of plugins that Telegraf
supports.
|
||||
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
|
||||
new plugins.
|
||||
|
||||
Telegraf has a few options you can configure under the `agent` section of the config. If you don't see an `agent` section, run `telegraf -sample-config > telegraf.toml` to create a valid initial configuration:
|
||||
## Installation:
|
||||
|
||||
* **hostname**: The hostname is passed as a tag. By default this will be the value returned by `hostname` on the machine running Telegraf. You can override that value here.
* **interval**: How often to gather metrics. Uses a simple number + unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
* **debug**: Set to true to gather and send metrics to STDOUT as well as InfluxDB. A sample of these options is sketched below.
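As a rough sketch, these options live under the `[agent]` table of the generated config; the values shown are illustrative, not prescriptive:

```toml
[agent]
  # Tag every metric with this hostname (defaults to the machine's hostname)
  hostname = "web-01"
  # Gather metrics every 10 seconds
  interval = "10s"
  # Also print gathered metrics to STDOUT
  debug = false
```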
|
||||
NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions of
|
||||
telegraf, both in the database layout and the configuration file. 0.2.x will
|
||||
continue to be supported, see below for download links.
|
||||
|
||||
## Plugin Options
|
||||
TODO: link to blog post about 0.10.x changes.
|
||||
|
||||
There are 3 configuration options that are configurable per plugin:
|
||||
### Linux deb and rpm packages:
|
||||
|
||||
* **pass**: An array of strings used to filter metrics generated by the current plugin. Each string in the array is tested as a prefix against metric names; if it matches, the metric is emitted.
* **drop**: The inverse of pass: if a metric matches, it is not emitted.
* **interval**: How often to gather this metric. Normal plugins use a single global interval, but if one particular plugin should be run less or more often, you can configure that here (see the sketch below).
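A sketch of how these per-plugin options might look in the config, using the cpu plugin as an illustrative example (the exact section names come from `telegraf -sample-config`):

```toml
[cpu]
  # Only emit metrics whose names start with one of these prefixes
  pass = ["cpu_usage"]
  # Drop any metrics matching these prefixes
  drop = ["cpu_time"]
  # Gather this plugin every 30 seconds instead of the global interval
  interval = "30s"
```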
|
||||
Latest:
|
||||
* http://get.influxdb.org/telegraf/telegraf_0.10.0_amd64.deb
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.10.0-1.x86_64.rpm
|
||||
|
||||
0.2.x:
|
||||
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
|
||||
|
||||
##### Package instructions:
|
||||
|
||||
* Telegraf binary is installed in `/usr/bin/telegraf`
|
||||
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
|
||||
* On sysv systems, the telegraf daemon can be controlled via
|
||||
`service telegraf [action]`
|
||||
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
|
||||
controlled via `systemctl [action] telegraf`
|
||||
|
||||
### Linux binaries:
|
||||
|
||||
Latest:
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.10.0.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.10.0.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.10.0.tar.gz
|
||||
|
||||
0.2.x:
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
|
||||
|
||||
##### Binary instructions:
|
||||
|
||||
These are standalone binaries that can be unpacked and executed on any linux
system. Place the binary in a location such as `/usr/local/bin` for
convenience. A config file will need to be generated, see "How to use it"
below.
|
||||
|
||||
### OSX via Homebrew:
|
||||
|
||||
```
|
||||
brew update
|
||||
brew install telegraf
|
||||
```
|
||||
|
||||
### From Source:
|
||||
|
||||
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
|
||||
which gets installed via the Makefile
|
||||
if you don't have it already. You also must build with golang version 1.4+.
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install)
|
||||
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
||||
3. Run `go get github.com/influxdb/telegraf`
|
||||
4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
|
||||
5. Run `make`
|
||||
|
||||
### How to use it:
|
||||
|
||||
```console
|
||||
$ telegraf -help
|
||||
Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB
|
||||
|
||||
Usage:
|
||||
|
||||
telegraf <flags>
|
||||
|
||||
The flags are:
|
||||
|
||||
-config <file> configuration file to load
|
||||
-test gather metrics once, print them to stdout, and exit
|
||||
-sample-config print out full sample configuration to stdout
|
||||
-config-directory directory containing additional *.conf files
|
||||
-input-filter filter the input plugins to enable, separator is :
|
||||
-output-filter filter the output plugins to enable, separator is :
|
||||
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
|
||||
-version print the version to stdout
|
||||
|
||||
Examples:
|
||||
|
||||
# generate a telegraf config file:
|
||||
telegraf -sample-config > telegraf.conf
|
||||
|
||||
# generate config with only cpu input & influxdb output plugins defined
|
||||
telegraf -sample-config -input-filter cpu -output-filter influxdb
|
||||
|
||||
# run a single telegraf collection, outputting metrics to stdout
|
||||
telegraf -config telegraf.conf -test
|
||||
|
||||
# run telegraf with all plugins defined in config file
|
||||
telegraf -config telegraf.conf
|
||||
|
||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced
|
||||
configuration options.
|
||||
|
||||
## Supported Input Plugins
|
||||
|
||||
Telegraf currently has support for collecting metrics from:
|
||||
|
||||
* aerospike
|
||||
* apache
|
||||
* bcache
|
||||
* disque
|
||||
* elasticsearch
|
||||
* exec (generic JSON-emitting executable plugin)
|
||||
* haproxy
|
||||
* httpjson (generic JSON-emitting http service plugin)
|
||||
* influxdb
|
||||
* jolokia
|
||||
* leofs
|
||||
* lustre2
|
||||
* mailchimp
|
||||
* memcached
|
||||
* mongodb
|
||||
* mysql
|
||||
* nginx
|
||||
* phpfpm
|
||||
* ping
|
||||
* postgresql
|
||||
* procstat
|
||||
* prometheus
|
||||
* puppetagent
|
||||
* rabbitmq
|
||||
* redis
|
||||
* rethinkdb
|
||||
* twemproxy
|
||||
* zfs
|
||||
* zookeeper
|
||||
* system
|
||||
* cpu
|
||||
* mem
|
||||
* net
|
||||
* netstat
|
||||
* disk
|
||||
* diskio
|
||||
* swap
|
||||
|
||||
Telegraf can also collect metrics via the following service plugins:
|
||||
|
||||
* statsd
|
||||
* kafka_consumer
|
||||
|
||||
We'll be adding support for many more over the coming months. Read on if you
|
||||
want to add support for another service or third-party API.
|
||||
|
||||
## Supported Output Plugins
|
||||
|
||||
* influxdb
|
||||
* amon
|
||||
* amqp
|
||||
* datadog
|
||||
* kafka
|
||||
* amazon kinesis
|
||||
* librato
|
||||
* mqtt
|
||||
* nsq
|
||||
* opentsdb
|
||||
* prometheus
|
||||
* riemann
|
||||
|
||||
## Contributing
|
||||
|
||||
Please see the
|
||||
[contributing guide](CONTRIBUTING.md)
|
||||
for details on contributing a plugin or output to Telegraf.
|
||||
|
||||
122 Vagrantfile (vendored)
@@ -1,122 +0,0 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
# All Vagrant configuration is done here. The most common configuration
|
||||
# options are documented and commented below. For a complete reference,
|
||||
# please see the online documentation at vagrantup.com.
|
||||
|
||||
# Every Vagrant virtual environment requires a box to build off of.
|
||||
config.vm.box = "ubuntu/trusty64"
|
||||
|
||||
# Disable automatic box update checking. If you disable this, then
|
||||
# boxes will only be checked for updates when the user runs
|
||||
# `vagrant box outdated`. This is not recommended.
|
||||
# config.vm.box_check_update = false
|
||||
|
||||
# Create a forwarded port mapping which allows access to a specific port
|
||||
# within the machine from a port on the host machine. In the example below,
|
||||
# accessing "localhost:8080" will access port 80 on the guest machine.
|
||||
# config.vm.network "forwarded_port", guest: 80, host: 8080
|
||||
|
||||
# Create a private network, which allows host-only access to the machine
|
||||
# using a specific IP.
|
||||
# config.vm.network "private_network", ip: "192.168.33.10"
|
||||
|
||||
# Create a public network, which generally matches a bridged network.
|
||||
# Bridged networks make the machine appear as another physical device on
|
||||
# your network.
|
||||
# config.vm.network "public_network"
|
||||
|
||||
# If true, then any SSH connections made will enable agent forwarding.
|
||||
# Default value: false
|
||||
# config.ssh.forward_agent = true
|
||||
|
||||
# Share an additional folder to the guest VM. The first argument is
|
||||
# the path on the host to the actual folder. The second argument is
|
||||
# the path on the guest to mount the folder. And the optional third
|
||||
# argument is a set of non-required options.
|
||||
config.vm.synced_folder "~/go", "/home/vagrant/go"
|
||||
|
||||
# Provider-specific configuration so you can fine-tune various
|
||||
# backing providers for Vagrant. These expose provider-specific options.
|
||||
# Example for VirtualBox:
|
||||
#
|
||||
config.vm.provider "virtualbox" do |vb|
|
||||
# # Don't boot with headless mode
|
||||
# vb.gui = true
|
||||
#
|
||||
# # Use VBoxManage to customize the VM. For example to change memory:
|
||||
vb.customize ["modifyvm", :id, "--memory", "1024"]
|
||||
end
|
||||
#
|
||||
# View the documentation for the provider you're using for more
|
||||
# information on available options.
|
||||
|
||||
# Enable provisioning with CFEngine. CFEngine Community packages are
|
||||
# automatically installed. For example, configure the host as a
|
||||
# policy server and optionally a policy file to run:
|
||||
#
|
||||
# config.vm.provision "cfengine" do |cf|
|
||||
# cf.am_policy_hub = true
|
||||
# # cf.run_file = "motd.cf"
|
||||
# end
|
||||
#
|
||||
# You can also configure and bootstrap a client to an existing
|
||||
# policy server:
|
||||
#
|
||||
# config.vm.provision "cfengine" do |cf|
|
||||
# cf.policy_server_address = "10.0.2.15"
|
||||
# end
|
||||
|
||||
# Enable provisioning with Puppet stand alone. Puppet manifests
|
||||
# are contained in a directory path relative to this Vagrantfile.
|
||||
# You will need to create the manifests directory and a manifest in
|
||||
# the file default.pp in the manifests_path directory.
|
||||
#
|
||||
# config.vm.provision "puppet" do |puppet|
|
||||
# puppet.manifests_path = "manifests"
|
||||
# puppet.manifest_file = "site.pp"
|
||||
# end
|
||||
|
||||
# Enable provisioning with chef solo, specifying a cookbooks path, roles
|
||||
# path, and data_bags path (all relative to this Vagrantfile), and adding
|
||||
# some recipes and/or roles.
|
||||
#
|
||||
# config.vm.provision "chef_solo" do |chef|
|
||||
# chef.cookbooks_path = "../my-recipes/cookbooks"
|
||||
# chef.roles_path = "../my-recipes/roles"
|
||||
# chef.data_bags_path = "../my-recipes/data_bags"
|
||||
# chef.add_recipe "mysql"
|
||||
# chef.add_role "web"
|
||||
#
|
||||
# # You may also specify custom JSON attributes:
|
||||
# chef.json = { mysql_password: "foo" }
|
||||
# end
|
||||
|
||||
# Enable provisioning with chef server, specifying the chef server URL,
|
||||
# and the path to the validation key (relative to this Vagrantfile).
|
||||
#
|
||||
# The Opscode Platform uses HTTPS. Substitute your organization for
|
||||
# ORGNAME in the URL and validation key.
|
||||
#
|
||||
# If you have your own Chef Server, use the appropriate URL, which may be
|
||||
# HTTP instead of HTTPS depending on your configuration. Also change the
|
||||
# validation key to validation.pem.
|
||||
#
|
||||
# config.vm.provision "chef_client" do |chef|
|
||||
# chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
|
||||
# chef.validation_key_path = "ORGNAME-validator.pem"
|
||||
# end
|
||||
#
|
||||
# If you're using the Opscode platform, your validator client is
|
||||
# ORGNAME-validator, replacing ORGNAME with your organization name.
|
||||
#
|
||||
# If you have your own Chef Server, the default validation client name is
|
||||
# chef-validator, unless you changed the configuration.
|
||||
#
|
||||
# chef.validation_client_name = "ORGNAME-validator"
|
||||
end
|
||||
241 accumulator.go
@@ -2,90 +2,187 @@ package telegraf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"log"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/client"
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
type BatchPoints struct {
|
||||
client.BatchPoints
|
||||
type Accumulator interface {
|
||||
Add(measurement string, value interface{},
|
||||
tags map[string]string, t ...time.Time)
|
||||
AddFields(measurement string, fields map[string]interface{},
|
||||
tags map[string]string, t ...time.Time)
|
||||
|
||||
Debug bool
|
||||
SetDefaultTags(tags map[string]string)
|
||||
AddDefaultTag(key, value string)
|
||||
|
||||
Prefix string
|
||||
Prefix() string
|
||||
SetPrefix(prefix string)
|
||||
|
||||
Config *ConfiguredPlugin
|
||||
Debug() bool
|
||||
SetDebug(enabled bool)
|
||||
}
|
||||
|
||||
func (bp *BatchPoints) Add(measurement string, val interface{}, tags map[string]string) {
|
||||
measurement = bp.Prefix + measurement
|
||||
|
||||
if bp.Config != nil {
|
||||
if !bp.Config.ShouldPass(measurement) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if bp.Debug {
|
||||
var tg []string
|
||||
|
||||
for k, v := range tags {
|
||||
tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
|
||||
}
|
||||
|
||||
sort.Strings(tg)
|
||||
|
||||
fmt.Printf("> [%s] %s value=%v\n", strings.Join(tg, " "), measurement, val)
|
||||
}
|
||||
|
||||
bp.Points = append(bp.Points, client.Point{
|
||||
Measurement: measurement,
|
||||
Tags: tags,
|
||||
Fields: map[string]interface{}{
|
||||
"value": val,
|
||||
},
|
||||
})
|
||||
func NewAccumulator(
|
||||
inputConfig *config.InputConfig,
|
||||
points chan *client.Point,
|
||||
) Accumulator {
|
||||
acc := accumulator{}
|
||||
acc.points = points
|
||||
acc.inputConfig = inputConfig
|
||||
return &acc
|
||||
}
|
||||
|
||||
func (bp *BatchPoints) AddValuesWithTime(
|
||||
type accumulator struct {
|
||||
sync.Mutex
|
||||
|
||||
points chan *client.Point
|
||||
|
||||
defaultTags map[string]string
|
||||
|
||||
debug bool
|
||||
|
||||
inputConfig *config.InputConfig
|
||||
|
||||
prefix string
|
||||
}
|
||||
|
||||
func (ac *accumulator) Add(
|
||||
measurement string,
|
||||
values map[string]interface{},
|
||||
value interface{},
|
||||
tags map[string]string,
|
||||
timestamp time.Time,
|
||||
t ...time.Time,
|
||||
) {
|
||||
measurement = bp.Prefix + measurement
|
||||
|
||||
if bp.Config != nil {
|
||||
if !bp.Config.ShouldPass(measurement) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if bp.Debug {
|
||||
var tg []string
|
||||
|
||||
for k, v := range tags {
|
||||
tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
|
||||
}
|
||||
|
||||
var vals []string
|
||||
|
||||
for k, v := range values {
|
||||
vals = append(vals, fmt.Sprintf("%s=%v", k, v))
|
||||
}
|
||||
|
||||
sort.Strings(tg)
|
||||
sort.Strings(vals)
|
||||
|
||||
fmt.Printf("> [%s] %s %s\n", strings.Join(tg, " "), measurement, strings.Join(vals, " "))
|
||||
}
|
||||
|
||||
bp.Points = append(bp.Points, client.Point{
|
||||
Measurement: measurement,
|
||||
Tags: tags,
|
||||
Fields: values,
|
||||
Time: timestamp,
|
||||
})
|
||||
fields := make(map[string]interface{})
|
||||
fields["value"] = value
|
||||
ac.AddFields(measurement, fields, tags, t...)
|
||||
}
|
||||
|
||||
func (ac *accumulator) AddFields(
|
||||
measurement string,
|
||||
fields map[string]interface{},
|
||||
tags map[string]string,
|
||||
t ...time.Time,
|
||||
) {
|
||||
if len(fields) == 0 || len(measurement) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
|
||||
return
|
||||
}
|
||||
|
||||
// Override measurement name if set
|
||||
if len(ac.inputConfig.NameOverride) != 0 {
|
||||
measurement = ac.inputConfig.NameOverride
|
||||
}
|
||||
// Apply measurement prefix and suffix if set
|
||||
if len(ac.inputConfig.MeasurementPrefix) != 0 {
|
||||
measurement = ac.inputConfig.MeasurementPrefix + measurement
|
||||
}
|
||||
if len(ac.inputConfig.MeasurementSuffix) != 0 {
|
||||
measurement = measurement + ac.inputConfig.MeasurementSuffix
|
||||
}
|
||||
|
||||
if tags == nil {
|
||||
tags = make(map[string]string)
|
||||
}
|
||||
// Apply plugin-wide tags if set
|
||||
for k, v := range ac.inputConfig.Tags {
|
||||
if _, ok := tags[k]; !ok {
|
||||
tags[k] = v
|
||||
}
|
||||
}
|
||||
// Apply daemon-wide tags if set
|
||||
for k, v := range ac.defaultTags {
|
||||
if _, ok := tags[k]; !ok {
|
||||
tags[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
result := make(map[string]interface{})
|
||||
for k, v := range fields {
|
||||
// Filter out any filtered fields
|
||||
if ac.inputConfig != nil {
|
||||
if !ac.inputConfig.Filter.ShouldPass(k) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
result[k] = v
|
||||
|
||||
// Validate uint64 and float64 fields
|
||||
switch val := v.(type) {
|
||||
case uint64:
|
||||
// InfluxDB does not support writing uint64
|
||||
if val < uint64(9223372036854775808) {
|
||||
result[k] = int64(val)
|
||||
} else {
|
||||
result[k] = int64(9223372036854775807)
|
||||
}
|
||||
case float64:
|
||||
// NaNs are invalid values in influxdb, skip measurement
|
||||
if math.IsNaN(val) || math.IsInf(val, 0) {
|
||||
if ac.debug {
|
||||
log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
|
||||
"field, skipping",
|
||||
measurement, k)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
fields = nil
|
||||
if len(result) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var timestamp time.Time
|
||||
if len(t) > 0 {
|
||||
timestamp = t[0]
|
||||
} else {
|
||||
timestamp = time.Now()
|
||||
}
|
||||
|
||||
if ac.prefix != "" {
|
||||
measurement = ac.prefix + measurement
|
||||
}
|
||||
|
||||
pt, err := client.NewPoint(measurement, tags, result, timestamp)
|
||||
if err != nil {
|
||||
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
|
||||
return
|
||||
}
|
||||
if ac.debug {
|
||||
fmt.Println("> " + pt.String())
|
||||
}
|
||||
ac.points <- pt
|
||||
}
|
||||
|
||||
func (ac *accumulator) SetDefaultTags(tags map[string]string) {
|
||||
ac.defaultTags = tags
|
||||
}
|
||||
|
||||
func (ac *accumulator) AddDefaultTag(key, value string) {
|
||||
ac.defaultTags[key] = value
|
||||
}
|
||||
|
||||
func (ac *accumulator) Prefix() string {
|
||||
return ac.prefix
|
||||
}
|
||||
|
||||
func (ac *accumulator) SetPrefix(prefix string) {
|
||||
ac.prefix = prefix
|
||||
}
|
||||
|
||||
func (ac *accumulator) Debug() bool {
|
||||
return ac.debug
|
||||
}
|
||||
|
||||
func (ac *accumulator) SetDebug(debug bool) {
|
||||
ac.debug = debug
|
||||
}
|
||||
|
||||
483 agent.go
@@ -1,195 +1,160 @@
|
||||
package telegraf
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"math/big"
|
||||
"os"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/client"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins/outputs"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
type runningPlugin struct {
|
||||
name string
|
||||
plugin plugins.Plugin
|
||||
config *ConfiguredPlugin
|
||||
}
|
||||
|
||||
// Agent runs telegraf and collects data based on the given config
|
||||
type Agent struct {
|
||||
Interval Duration
|
||||
Debug bool
|
||||
Hostname string
|
||||
|
||||
Config *Config
|
||||
|
||||
plugins []*runningPlugin
|
||||
|
||||
conn *client.Client
|
||||
Config *config.Config
|
||||
}
|
||||
|
||||
func NewAgent(config *Config) (*Agent, error) {
|
||||
agent := &Agent{Config: config, Interval: Duration{10 * time.Second}}
|
||||
|
||||
err := config.ApplyAgent(agent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// NewAgent returns an Agent struct based off the given Config
|
||||
func NewAgent(config *config.Config) (*Agent, error) {
|
||||
a := &Agent{
|
||||
Config: config,
|
||||
}
|
||||
|
||||
if agent.Hostname == "" {
|
||||
if a.Config.Agent.Hostname == "" {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
agent.Hostname = hostname
|
||||
a.Config.Agent.Hostname = hostname
|
||||
}
|
||||
|
||||
if config.Tags == nil {
|
||||
config.Tags = map[string]string{}
|
||||
}
|
||||
config.Tags["host"] = a.Config.Agent.Hostname
|
||||
|
||||
config.Tags["host"] = agent.Hostname
|
||||
|
||||
return agent, nil
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (agent *Agent) Connect() error {
|
||||
config := agent.Config
|
||||
// Connect connects to all configured outputs
|
||||
func (a *Agent) Connect() error {
|
||||
for _, o := range a.Config.Outputs {
|
||||
switch ot := o.Output.(type) {
|
||||
case outputs.ServiceOutput:
|
||||
if err := ot.Start(); err != nil {
|
||||
log.Printf("Service for output %s failed to start, exiting\n%s\n",
|
||||
o.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
u, err := url.Parse(config.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
if a.Config.Agent.Debug {
|
||||
log.Printf("Attempting connection to output: %s\n", o.Name)
|
||||
}
|
||||
err := o.Output.Connect()
|
||||
if err != nil {
|
||||
log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name)
|
||||
time.Sleep(15 * time.Second)
|
||||
err = o.Output.Connect()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if a.Config.Agent.Debug {
|
||||
log.Printf("Successfully connected to output: %s\n", o.Name)
|
||||
}
|
||||
}
|
||||
|
||||
c, err := client.NewClient(client.Config{
|
||||
URL: *u,
|
||||
Username: config.Username,
|
||||
Password: config.Password,
|
||||
UserAgent: config.UserAgent,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
agent.conn = c
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Agent) LoadPlugins() ([]string, error) {
|
||||
var names []string
|
||||
|
||||
for _, name := range a.Config.PluginsDeclared() {
|
||||
creator, ok := plugins.Plugins[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Undefined but requested plugin: %s", name)
|
||||
// Close closes the connection to all configured outputs
|
||||
func (a *Agent) Close() error {
|
||||
var err error
|
||||
for _, o := range a.Config.Outputs {
|
||||
err = o.Output.Close()
|
||||
switch ot := o.Output.(type) {
|
||||
case outputs.ServiceOutput:
|
||||
ot.Stop()
|
||||
}
|
||||
|
||||
plugin := creator()
|
||||
|
||||
config, err := a.Config.ApplyPlugin(name, plugin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a.plugins = append(a.plugins, &runningPlugin{name, plugin, config})
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
return names, nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *Agent) crankParallel() error {
|
||||
points := make(chan *BatchPoints, len(a.plugins))
|
||||
|
||||
// gatherParallel runs the inputs that are using the same reporting interval
|
||||
// as the telegraf agent.
|
||||
func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, plugin := range a.plugins {
|
||||
if plugin.config.Interval != 0 {
|
||||
start := time.Now()
|
||||
counter := 0
|
||||
for _, input := range a.Config.Inputs {
|
||||
if input.Config.Interval != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(plugin *runningPlugin) {
|
||||
counter++
|
||||
go func(input *config.RunningInput) {
|
||||
defer wg.Done()
|
||||
|
||||
var acc BatchPoints
|
||||
acc.Debug = a.Debug
|
||||
acc.Prefix = plugin.name + "_"
|
||||
acc.Config = plugin.config
|
||||
acc := NewAccumulator(input.Config, pointChan)
|
||||
acc.SetDebug(a.Config.Agent.Debug)
|
||||
// acc.SetPrefix(input.Name + "_")
|
||||
acc.SetDefaultTags(a.Config.Tags)
|
||||
|
||||
plugin.plugin.Gather(&acc)
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
log.Printf("Error in input [%s]: %s", input.Name, err)
|
||||
}
|
||||
|
||||
points <- &acc
|
||||
}(plugin)
|
||||
}(input)
|
||||
}
|
||||
|
||||
if counter == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
close(points)
|
||||
|
||||
var acc BatchPoints
|
||||
acc.Tags = a.Config.Tags
|
||||
acc.Time = time.Now()
|
||||
acc.Database = a.Config.Database
|
||||
|
||||
for sub := range points {
|
||||
acc.Points = append(acc.Points, sub.Points...)
|
||||
}
|
||||
|
||||
_, err := a.conn.Write(acc.BatchPoints)
|
||||
return err
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
|
||||
a.Config.Agent.Interval.Duration, counter, elapsed)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Agent) crank() error {
|
||||
var acc BatchPoints
|
||||
|
||||
acc.Debug = a.Debug
|
||||
|
||||
for _, plugin := range a.plugins {
|
||||
acc.Prefix = plugin.name + "_"
|
||||
acc.Config = plugin.config
|
||||
err := plugin.plugin.Gather(&acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
acc.Tags = a.Config.Tags
|
||||
acc.Time = time.Now()
|
||||
acc.Database = a.Config.Database
|
||||
|
||||
_, err := a.conn.Write(acc.BatchPoints)
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) error {
|
||||
ticker := time.NewTicker(plugin.config.Interval)
|
||||
// gatherSeparate runs the inputs that have been configured with their own
|
||||
// reporting interval.
|
||||
func (a *Agent) gatherSeparate(
|
||||
shutdown chan struct{},
|
||||
input *config.RunningInput,
|
||||
pointChan chan *client.Point,
|
||||
) error {
|
||||
ticker := time.NewTicker(input.Config.Interval)
|
||||
|
||||
for {
|
||||
var acc BatchPoints
|
||||
var outerr error
|
||||
start := time.Now()
|
||||
|
||||
acc.Debug = a.Debug
|
||||
acc := NewAccumulator(input.Config, pointChan)
|
||||
acc.SetDebug(a.Config.Agent.Debug)
|
||||
// acc.SetPrefix(input.Name + "_")
|
||||
acc.SetDefaultTags(a.Config.Tags)
|
||||
|
||||
acc.Prefix = plugin.name + "_"
|
||||
acc.Config = plugin.config
|
||||
err := plugin.plugin.Gather(&acc)
|
||||
if err != nil {
|
||||
return err
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
log.Printf("Error in input [%s]: %s", input.Name, err)
|
||||
}
|
||||
|
||||
acc.Tags = a.Config.Tags
|
||||
acc.Time = time.Now()
|
||||
acc.Database = a.Config.Database
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
|
||||
input.Config.Interval, input.Name, elapsed)
|
||||
|
||||
a.conn.Write(acc.BatchPoints)
|
||||
if outerr != nil {
|
||||
return outerr
|
||||
}
|
||||
|
||||
select {
|
||||
case <-shutdown:
|
||||
@@ -200,86 +165,226 @@ func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) err
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Agent) TestAllPlugins() error {
|
||||
var names []string
|
||||
|
||||
for name, _ := range plugins.Plugins {
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
var acc BatchPoints
|
||||
acc.Debug = true
|
||||
|
||||
fmt.Printf("* Testing all plugins with default configuration\n")
|
||||
|
||||
for _, name := range names {
|
||||
plugin := plugins.Plugins[name]()
|
||||
|
||||
fmt.Printf("* Plugin: %s\n", name)
|
||||
|
||||
acc.Prefix = name + "_"
|
||||
err := plugin.Gather(&acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Test verifies that we can 'Gather' from all inputs with their configured
|
||||
// Config struct
|
||||
func (a *Agent) Test() error {
|
||||
var acc BatchPoints
|
||||
shutdown := make(chan struct{})
|
||||
defer close(shutdown)
|
||||
pointChan := make(chan *client.Point)
|
||||
|
||||
acc.Debug = true
|
||||
// dummy receiver for the point channel
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-pointChan:
|
||||
// do nothing
|
||||
case <-shutdown:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for _, plugin := range a.plugins {
|
||||
acc.Prefix = plugin.name + "_"
|
||||
acc.Config = plugin.config
|
||||
for _, input := range a.Config.Inputs {
|
||||
acc := NewAccumulator(input.Config, pointChan)
|
||||
acc.SetDebug(true)
|
||||
// acc.SetPrefix(input.Name + "_")
|
||||
|
||||
fmt.Printf("* Plugin: %s\n", plugin.name)
|
||||
if plugin.config.Interval != 0 {
|
||||
fmt.Printf("* Internal: %s\n", plugin.config.Interval)
|
||||
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
||||
if input.Config.Interval != 0 {
|
||||
fmt.Printf("* Internal: %s\n", input.Config.Interval)
|
||||
}
|
||||
|
||||
err := plugin.plugin.Gather(&acc)
|
||||
if err != nil {
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Special instructions for some inputs. cpu, for example, needs to be
|
||||
// run twice in order to return cpu usage percentages.
|
||||
switch input.Name {
|
||||
case "cpu", "mongodb":
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Agent) Run(shutdown chan struct{}) error {
|
||||
if a.conn == nil {
|
||||
err := a.Connect()
|
||||
if err != nil {
|
||||
return err
|
||||
// writeOutput writes a list of points to a single output, with retries.
|
||||
// Optionally takes a `done` channel to indicate that it is done writing.
|
||||
func (a *Agent) writeOutput(
|
||||
points []*client.Point,
|
||||
ro *config.RunningOutput,
|
||||
shutdown chan struct{},
|
||||
wg *sync.WaitGroup,
|
||||
) {
|
||||
defer wg.Done()
|
||||
if len(points) == 0 {
|
||||
return
|
||||
}
|
||||
retry := 0
|
||||
retries := a.Config.Agent.FlushRetries
|
||||
start := time.Now()
|
||||
|
||||
for {
|
||||
filtered := ro.FilterPoints(points)
|
||||
err := ro.Output.Write(filtered)
|
||||
if err == nil {
|
||||
// Write successful
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("Flushed %d metrics to output %s in %s\n",
|
||||
len(filtered), ro.Name, elapsed)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-shutdown:
|
||||
return
|
||||
default:
|
||||
if retry >= retries {
|
||||
// No more retries
|
||||
msg := "FATAL: Write to output [%s] failed %d times, dropping" +
|
||||
" %d metrics\n"
|
||||
log.Printf(msg, ro.Name, retries+1, len(points))
|
||||
return
|
||||
} else if err != nil {
|
||||
// Sleep for a retry
|
||||
log.Printf("Error in output [%s]: %s, retrying in %s",
|
||||
ro.Name, err.Error(), a.Config.Agent.FlushInterval.Duration)
|
||||
time.Sleep(a.Config.Agent.FlushInterval.Duration)
|
||||
}
|
||||
}
|
||||
|
||||
retry++
|
||||
}
|
||||
}
|
||||
|
||||
// flush writes a list of points to all configured outputs
|
||||
func (a *Agent) flush(
|
||||
points []*client.Point,
|
||||
shutdown chan struct{},
|
||||
wait bool,
|
||||
) {
|
||||
var wg sync.WaitGroup
|
||||
for _, o := range a.Config.Outputs {
|
||||
wg.Add(1)
|
||||
go a.writeOutput(points, o, shutdown, &wg)
|
||||
}
|
||||
if wait {
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
// flusher monitors the points input channel and flushes on the minimum interval
|
||||
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
|
||||
// Inelegant, but this sleep is to allow the Gather threads to run, so that
|
||||
// the flusher will flush after metrics are collected.
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
|
||||
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
|
||||
points := make([]*client.Point, 0)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-shutdown:
|
||||
log.Println("Hang on, flushing any cached points before shutdown")
|
||||
a.flush(points, shutdown, true)
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
a.flush(points, shutdown, false)
|
||||
points = make([]*client.Point, 0)
|
||||
case pt := <-pointChan:
|
||||
points = append(points, pt)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// jitterInterval applies the interval jitter to the flush interval using the
// crypto/rand number generator
|
||||
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
|
||||
var jitter int64
|
||||
outinterval := ininterval
|
||||
if injitter.Nanoseconds() != 0 {
|
||||
maxjitter := big.NewInt(injitter.Nanoseconds())
|
||||
if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
|
||||
jitter = j.Int64()
|
||||
}
|
||||
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
|
||||
}
|
||||
|
||||
if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
|
||||
log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
|
||||
outinterval = time.Duration(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
return outinterval
|
||||
}
|
||||
|
||||
// Run runs the agent daemon, gathering every Interval
|
||||
func (a *Agent) Run(shutdown chan struct{}) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, plugin := range a.plugins {
|
||||
if plugin.config.Interval != 0 {
|
||||
a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration,
|
||||
a.Config.Agent.FlushJitter.Duration)
|
||||
|
||||
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
|
||||
"Flush Interval:%s\n",
|
||||
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug,
|
||||
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
|
||||
|
||||
// channel shared between all input threads for accumulating points
|
||||
pointChan := make(chan *client.Point, 1000)
|
||||
|
||||
// Round collection to nearest interval by sleeping
|
||||
if a.Config.Agent.RoundInterval {
|
||||
i := int64(a.Config.Agent.Interval.Duration)
|
||||
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
|
||||
}
|
||||
ticker := time.NewTicker(a.Config.Agent.Interval.Duration)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := a.flusher(shutdown, pointChan); err != nil {
|
||||
log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
|
||||
close(shutdown)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, input := range a.Config.Inputs {
|
||||
|
||||
// Start service of any ServicePlugins
|
||||
switch p := input.Input.(type) {
|
||||
case inputs.ServiceInput:
|
||||
if err := p.Start(); err != nil {
|
||||
log.Printf("Service for input %s failed to start, exiting\n%s\n",
|
||||
input.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
defer p.Stop()
|
||||
}
|
||||
|
||||
// Special handling for inputs that have their own collection interval
|
||||
// configured. Default intervals are handled below with gatherParallel
|
||||
if input.Config.Interval != 0 {
|
||||
wg.Add(1)
|
||||
go func(plugin *runningPlugin) {
|
||||
go func(input *config.RunningInput) {
|
||||
defer wg.Done()
|
||||
a.crankSeparate(shutdown, plugin)
|
||||
}(plugin)
|
||||
if err := a.gatherSeparate(shutdown, input, pointChan); err != nil {
|
||||
log.Printf(err.Error())
|
||||
}
|
||||
}(input)
|
||||
}
|
||||
}
|
||||
|
||||
defer wg.Wait()
|
||||
|
||||
ticker := time.NewTicker(a.Interval.Duration)
|
||||
|
||||
for {
|
||||
err := a.crankParallel()
|
||||
if err != nil {
|
||||
log.Printf("Error in plugins: %s", err)
|
||||
if err := a.gatherParallel(pointChan); err != nil {
|
||||
log.Printf(err.Error())
|
||||
}
|
||||
|
||||
select {
|
||||
|
||||
206 agent_test.go
@@ -1,61 +1,175 @@
|
||||
package telegraf
|
||||
|
||||
/*
|
||||
func TestAgent_DrivesMetrics(t *testing.T) {
|
||||
var (
|
||||
plugin plugins.MockPlugin
|
||||
)
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
defer plugin.AssertExpectations(t)
|
||||
defer metrics.AssertExpectations(t)
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
|
||||
a := &Agent{
|
||||
plugins: []plugins.Plugin{&plugin},
|
||||
Config: &Config{},
|
||||
}
|
||||
// needing to load the plugins
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/all"
|
||||
// needing to load the outputs
|
||||
_ "github.com/influxdb/telegraf/plugins/outputs/all"
|
||||
)
|
||||
|
||||
plugin.On("Add", "foo", 1.2, nil).Return(nil)
|
||||
plugin.On("Add", "bar", 888, nil).Return(nil)
|
||||
func TestAgent_LoadPlugin(t *testing.T) {
|
||||
c := config.NewConfig()
|
||||
c.InputFilters = []string{"mysql"}
|
||||
err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ := NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||
|
||||
err := a.crank()
|
||||
require.NoError(t, err)
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 0, len(a.Config.Inputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "redis"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||
}
|
||||
|
||||
func TestAgent_AppliesTags(t *testing.T) {
|
||||
var (
|
||||
plugin plugins.MockPlugin
|
||||
metrics MockMetrics
|
||||
)
|
||||
func TestAgent_LoadOutput(t *testing.T) {
|
||||
c := config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb"}
|
||||
err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ := NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||
|
||||
defer plugin.AssertExpectations(t)
|
||||
defer metrics.AssertExpectations(t)
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"kafka"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Outputs))
|
||||
|
||||
a := &Agent{
|
||||
plugins: []plugins.Plugin{&plugin},
|
||||
metrics: &metrics,
|
||||
Config: &Config{
|
||||
Tags: map[string]string{
|
||||
"dc": "us-west-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||
|
||||
m1 := cypress.Metric()
|
||||
m1.Add("name", "foo")
|
||||
m1.Add("value", 1.2)
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 0, len(a.Config.Outputs))
|
||||
|
||||
msgs := []*cypress.Message{m1}
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb", "foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||
|
||||
m2 := cypress.Metric()
|
||||
m2.Timestamp = m1.Timestamp
|
||||
m2.Add("name", "foo")
|
||||
m2.Add("value", 1.2)
|
||||
m2.AddTag("dc", "us-west-1")
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb", "kafka"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, len(c.Outputs))
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||
|
||||
plugin.On("Read").Return(msgs, nil)
|
||||
metrics.On("Receive", m2).Return(nil)
|
||||
|
||||
err := a.crank()
|
||||
require.NoError(t, err)
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||
}
|
||||
|
||||
func TestAgent_ZeroJitter(t *testing.T) {
|
||||
flushinterval := jitterInterval(time.Duration(10*time.Second),
|
||||
time.Duration(0*time.Second))
|
||||
|
||||
actual := flushinterval.Nanoseconds()
|
||||
exp := time.Duration(10 * time.Second).Nanoseconds()
|
||||
|
||||
if actual != exp {
|
||||
t.Errorf("Actual %v, expected %v", actual, exp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgent_ZeroInterval(t *testing.T) {
|
||||
min := time.Duration(500 * time.Millisecond).Nanoseconds()
|
||||
max := time.Duration(5 * time.Second).Nanoseconds()
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
flushinterval := jitterInterval(time.Duration(0*time.Second),
|
||||
time.Duration(5*time.Second))
|
||||
actual := flushinterval.Nanoseconds()
|
||||
|
||||
if actual > max {
|
||||
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
|
||||
break
|
||||
}
|
||||
if actual < min {
|
||||
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgent_ZeroBoth(t *testing.T) {
|
||||
flushinterval := jitterInterval(time.Duration(0*time.Second),
|
||||
time.Duration(0*time.Second))
|
||||
|
||||
actual := flushinterval
|
||||
exp := time.Duration(500 * time.Millisecond)
|
||||
|
||||
if actual != exp {
|
||||
t.Errorf("Actual %v, expected %v", actual, exp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgent_JitterMax(t *testing.T) {
|
||||
max := time.Duration(32 * time.Second).Nanoseconds()
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
flushinterval := jitterInterval(time.Duration(30*time.Second),
|
||||
time.Duration(2*time.Second))
|
||||
actual := flushinterval.Nanoseconds()
|
||||
if actual > max {
|
||||
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgent_JitterMin(t *testing.T) {
|
||||
min := time.Duration(30 * time.Second).Nanoseconds()
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
flushinterval := jitterInterval(time.Duration(30*time.Second),
|
||||
time.Duration(2*time.Second))
|
||||
actual := flushinterval.Nanoseconds()
|
||||
if actual < min {
|
||||
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
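The commented-out jitter tests above pin down the contract of jitterInterval: a zero jitter leaves the interval untouched, any result below 500ms is clamped up to 500ms, and a non-zero jitter adds at most that much on top of the interval. A minimal Go sketch consistent with those tests (not necessarily the implementation shipped in this commit, which may use crypto/rand instead of math/rand) could look like:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitterInterval returns interval plus a random jitter in [0, maxJitter),
// clamping the result to a 500ms floor so a zero interval never busy-loops.
func jitterInterval(interval, maxJitter time.Duration) time.Duration {
	out := interval
	if maxJitter > 0 {
		out += time.Duration(rand.Int63n(maxJitter.Nanoseconds()))
	}
	if out < 500*time.Millisecond {
		out = 500 * time.Millisecond
	}
	return out
}

func main() {
	fmt.Println(jitterInterval(10*time.Second, 0))             // always 10s
	fmt.Println(jitterInterval(0, 5*time.Second))              // between 500ms and 5s
	fmt.Println(jitterInterval(30*time.Second, 2*time.Second)) // between 30s and 32s
}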
|
||||
|
||||
670
build.py
Executable file
@@ -0,0 +1,670 @@
|
||||
#!/usr/bin/env python2.7
|
||||
#
|
||||
# This is the Telegraf build script.
|
||||
#
|
||||
# Current caveats:
|
||||
# - Does not check out the correct commit/branch (for now, you will need to do so manually)
|
||||
# - Has external dependencies for packaging (fpm) and uploading (boto)
|
||||
#
|
||||
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
import datetime
|
||||
import shutil
|
||||
import tempfile
|
||||
import hashlib
|
||||
import re
|
||||
|
||||
try:
|
||||
import boto
|
||||
from boto.s3.key import Key
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# PACKAGING VARIABLES
|
||||
INSTALL_ROOT_DIR = "/usr/bin"
|
||||
LOG_DIR = "/var/log/telegraf"
|
||||
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
|
||||
CONFIG_DIR = "/etc/telegraf"
|
||||
LOGROTATE_DIR = "/etc/logrotate.d"
|
||||
|
||||
INIT_SCRIPT = "scripts/init.sh"
|
||||
SYSTEMD_SCRIPT = "scripts/telegraf.service"
|
||||
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
|
||||
DEFAULT_CONFIG = "etc/telegraf.conf"
|
||||
POSTINST_SCRIPT = "scripts/post-install.sh"
|
||||
PREINST_SCRIPT = "scripts/pre-install.sh"
|
||||
|
||||
# META-PACKAGE VARIABLES
|
||||
PACKAGE_LICENSE = "MIT"
|
||||
PACKAGE_URL = "https://github.com/influxdata/telegraf"
|
||||
MAINTAINER = "support@influxdb.com"
|
||||
VENDOR = "InfluxData"
|
||||
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
|
||||
|
||||
# SCRIPT START
|
||||
prereqs = [ 'git', 'go' ]
|
||||
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
|
||||
|
||||
fpm_common_args = "-f -s dir --log error \
|
||||
--vendor {} \
|
||||
--url {} \
|
||||
--license {} \
|
||||
--maintainer {} \
|
||||
--config-files {} \
|
||||
--config-files {} \
|
||||
--after-install {} \
|
||||
--before-install {} \
|
||||
--description \"{}\"".format(
|
||||
VENDOR,
|
||||
PACKAGE_URL,
|
||||
PACKAGE_LICENSE,
|
||||
MAINTAINER,
|
||||
CONFIG_DIR + '/telegraf.conf',
|
||||
LOGROTATE_DIR + '/telegraf',
|
||||
POSTINST_SCRIPT,
|
||||
PREINST_SCRIPT,
|
||||
DESCRIPTION)
|
||||
|
||||
targets = {
|
||||
'telegraf' : './cmd/telegraf/telegraf.go',
|
||||
}
|
||||
|
||||
supported_builds = {
|
||||
# TODO(rossmcdonald): Add support for multiple GOARM values
|
||||
'darwin': [ "amd64", "386" ],
|
||||
# 'windows': [ "amd64", "386", "arm", "arm64" ],
|
||||
'linux': [ "amd64", "386", "arm" ]
|
||||
}
|
||||
supported_go = [ '1.5.1' ]
|
||||
supported_packages = {
|
||||
"darwin": [ "tar", "zip" ],
|
||||
"linux": [ "deb", "rpm", "tar", "zip" ],
|
||||
"windows": [ "tar", "zip" ],
|
||||
}
|
||||
|
||||
def run(command, allow_failure=False, shell=False):
|
||||
out = None
|
||||
try:
|
||||
if shell:
|
||||
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
|
||||
else:
|
||||
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as e:
|
||||
print ""
|
||||
print ""
|
||||
print "Executed command failed!"
|
||||
print "-- Command run was: {}".format(command)
|
||||
print "-- Failure was: {}".format(e.output)
|
||||
if allow_failure:
|
||||
print "Continuing..."
|
||||
return None
|
||||
else:
|
||||
print ""
|
||||
print "Stopping."
|
||||
sys.exit(1)
|
||||
except OSError as e:
|
||||
print ""
|
||||
print ""
|
||||
print "Invalid command!"
|
||||
print "-- Command run was: {}".format(command)
|
||||
print "-- Failure was: {}".format(e)
|
||||
if allow_failure:
|
||||
print "Continuing..."
|
||||
return out
|
||||
else:
|
||||
print ""
|
||||
print "Stopping."
|
||||
sys.exit(1)
|
||||
else:
|
||||
return out
|
||||
|
||||
def create_temp_dir():
|
||||
return tempfile.mkdtemp(prefix="telegraf-build.")
|
||||
|
||||
def get_current_version():
|
||||
command = "git describe --always --tags --abbrev=0"
|
||||
out = run(command)
|
||||
return out.strip()
|
||||
|
||||
def get_current_commit(short=False):
|
||||
command = None
|
||||
if short:
|
||||
command = "git log --pretty=format:'%h' -n 1"
|
||||
else:
|
||||
command = "git rev-parse HEAD"
|
||||
out = run(command)
|
||||
return out.strip('\'\n\r ')
|
||||
|
||||
def get_current_branch():
|
||||
command = "git rev-parse --abbrev-ref HEAD"
|
||||
out = run(command)
|
||||
return out.strip()
|
||||
|
||||
def get_system_arch():
|
||||
arch = os.uname()[4]
|
||||
if arch == "x86_64":
|
||||
arch = "amd64"
|
||||
return arch
|
||||
|
||||
def get_system_platform():
|
||||
if sys.platform.startswith("linux"):
|
||||
return "linux"
|
||||
else:
|
||||
return sys.platform
|
||||
|
||||
def get_go_version():
|
||||
out = run("go version")
|
||||
matches = re.search('go version go(\S+)', out)
|
||||
if matches is not None:
|
||||
return matches.groups()[0].strip()
|
||||
return None
|
||||
|
||||
def check_path_for(b):
|
||||
def is_exe(fpath):
|
||||
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
|
||||
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
path = path.strip('"')
|
||||
full_path = os.path.join(path, b)
|
||||
if is_exe(full_path):
|
||||
return full_path
|
||||
|
||||
def check_environ(build_dir = None):
|
||||
print "\nChecking environment:"
|
||||
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
|
||||
print "\t- {} -> {}".format(v, os.environ.get(v))
|
||||
|
||||
cwd = os.getcwd()
|
||||
if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
|
||||
print "\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures."
|
||||
|
||||
def check_prereqs():
|
||||
print "\nChecking for dependencies:"
|
||||
for req in prereqs:
|
||||
print "\t- {} ->".format(req),
|
||||
path = check_path_for(req)
|
||||
if path:
|
||||
print "{}".format(path)
|
||||
else:
|
||||
print "?"
|
||||
for req in optional_prereqs:
|
||||
print "\t- {} (optional) ->".format(req),
|
||||
path = check_path_for(req)
|
||||
if path:
|
||||
print "{}".format(path)
|
||||
else:
|
||||
print "?"
|
||||
print ""
|
||||
|
||||
def upload_packages(packages, nightly=False):
|
||||
print "Uploading packages to S3..."
|
||||
print ""
|
||||
c = boto.connect_s3()
|
||||
# TODO(rossmcdonald) - Set to different S3 bucket for release vs nightly
|
||||
bucket = c.get_bucket('telegraf-nightly')
|
||||
for p in packages:
|
||||
name = os.path.basename(p)
|
||||
if bucket.get_key(name) is None or nightly:
|
||||
print "\t - Uploading {}...".format(name),
|
||||
k = Key(bucket)
|
||||
k.key = name
|
||||
if nightly:
|
||||
n = k.set_contents_from_filename(p, replace=True)
|
||||
else:
|
||||
n = k.set_contents_from_filename(p, replace=False)
|
||||
k.make_public()
|
||||
print "[ DONE ]"
|
||||
else:
|
||||
print "\t - Not uploading {}, already exists.".format(p)
|
||||
print ""
|
||||
|
||||
def run_tests(race, parallel, timeout, no_vet):
|
||||
get_command = "go get -d -t ./..."
|
||||
print "Retrieving Go dependencies...",
|
||||
sys.stdout.flush()
|
||||
run(get_command)
|
||||
print "done."
|
||||
print "Running tests:"
|
||||
print "\tRace: ", race
|
||||
if parallel is not None:
|
||||
print "\tParallel:", parallel
|
||||
if timeout is not None:
|
||||
print "\tTimeout:", timeout
|
||||
sys.stdout.flush()
|
||||
p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
out, err = p.communicate()
|
||||
if len(out) > 0 or len(err) > 0:
|
||||
print "Code not formatted. Please use 'go fmt ./...' to fix formatting errors."
|
||||
print out
|
||||
print err
|
||||
return False
|
||||
if not no_vet:
|
||||
p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
out, err = p.communicate()
|
||||
if len(out) > 0 or len(err) > 0:
|
||||
print "Go vet failed. Please run 'go vet ./...' and fix any errors."
|
||||
print out
|
||||
print err
|
||||
return False
|
||||
else:
|
||||
print "Skipping go vet ..."
|
||||
sys.stdout.flush()
|
||||
test_command = "go test -v"
|
||||
if race:
|
||||
test_command += " -race"
|
||||
if parallel is not None:
|
||||
test_command += " -parallel {}".format(parallel)
|
||||
if timeout is not None:
|
||||
test_command += " -timeout {}".format(timeout)
|
||||
test_command += " ./..."
|
||||
code = os.system(test_command)
|
||||
if code != 0:
|
||||
print "Tests Failed"
|
||||
return False
|
||||
else:
|
||||
print "Tests Passed"
|
||||
return True
|
||||
|
||||
def build(version=None,
|
||||
branch=None,
|
||||
commit=None,
|
||||
platform=None,
|
||||
arch=None,
|
||||
nightly=False,
|
||||
rc=None,
|
||||
race=False,
|
||||
clean=False,
|
||||
outdir=".",
|
||||
goarm_version="6"):
|
||||
print "-------------------------"
|
||||
print ""
|
||||
print "Build plan:"
|
||||
print "\t- version: {}".format(version)
|
||||
if rc:
|
||||
print "\t- release candidate: {}".format(rc)
|
||||
print "\t- commit: {}".format(commit)
|
||||
print "\t- branch: {}".format(branch)
|
||||
print "\t- platform: {}".format(platform)
|
||||
print "\t- arch: {}".format(arch)
|
||||
if arch == 'arm' and goarm_version:
|
||||
print "\t- ARM version: {}".format(goarm_version)
|
||||
print "\t- nightly? {}".format(str(nightly).lower())
|
||||
print "\t- race enabled? {}".format(str(race).lower())
|
||||
print ""
|
||||
|
||||
if not os.path.exists(outdir):
|
||||
os.makedirs(outdir)
|
||||
elif clean and outdir != '/':
|
||||
print "Cleaning build directory..."
|
||||
shutil.rmtree(outdir)
|
||||
os.makedirs(outdir)
|
||||
|
||||
if rc:
|
||||
# If a release candidate, update the version information accordingly
|
||||
version = "{}rc{}".format(version, rc)
|
||||
|
||||
print "Starting build..."
|
||||
for b, c in targets.iteritems():
|
||||
print "\t- Building '{}'...".format(os.path.join(outdir, b)),
|
||||
build_command = ""
|
||||
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
|
||||
if arch == "arm" and goarm_version:
|
||||
if goarm_version not in ["5", "6", "7", "arm64"]:
|
||||
print "!! Invalid ARM build version: {}".format(goarm_version)
|
||||
build_command += "GOARM={} ".format(goarm_version)
|
||||
build_command += "go build -o {} ".format(os.path.join(outdir, b))
|
||||
if race:
|
||||
build_command += "-race "
|
||||
go_version = get_go_version()
|
||||
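# NOTE: Go 1.5 changed the ldflags syntax from the space-separated '-X name value'
# form to '-X name=value', hence the version check below.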
if "1.4" in go_version:
|
||||
build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat())
|
||||
build_command += "-X main.Version {} ".format(version)
|
||||
build_command += "-X main.Branch {} ".format(branch)
|
||||
build_command += "-X main.Commit {}\" ".format(get_current_commit())
|
||||
else:
|
||||
build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat())
|
||||
build_command += "-X main.Version={} ".format(version)
|
||||
build_command += "-X main.Branch={} ".format(branch)
|
||||
build_command += "-X main.Commit={}\" ".format(get_current_commit())
|
||||
build_command += c
|
||||
run(build_command, shell=True)
|
||||
print "[ DONE ]"
|
||||
print ""
|
||||
|
||||
def create_dir(path):
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as e:
|
||||
print e
|
||||
|
||||
def rename_file(fr, to):
|
||||
try:
|
||||
os.rename(fr, to)
|
||||
except OSError as e:
|
||||
print e
|
||||
# Return the original filename
|
||||
return fr
|
||||
else:
|
||||
# Return the new filename
|
||||
return to
|
||||
|
||||
def copy_file(fr, to):
|
||||
try:
|
||||
shutil.copy(fr, to)
|
||||
except OSError as e:
|
||||
print e
|
||||
|
||||
def create_package_fs(build_root):
|
||||
print "\t- Creating a filesystem hierarchy from directory: {}".format(build_root)
|
||||
# Using [1:] for the path names due to them being absolute
|
||||
# (will overwrite previous paths, per 'os.path.join' documentation)
|
||||
dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
|
||||
for d in dirs:
|
||||
create_dir(os.path.join(build_root, d))
|
||||
os.chmod(os.path.join(build_root, d), 0755)
|
||||
|
||||
def package_scripts(build_root):
|
||||
print "\t- Copying scripts and sample configuration to build directory"
|
||||
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
|
||||
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0644)
|
||||
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
|
||||
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0644)
|
||||
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
|
||||
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0644)
|
||||
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
|
||||
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0644)
|
||||
|
||||
def go_get(update=False):
|
||||
get_command = None
|
||||
if update:
|
||||
get_command = "go get -u -f -d ./..."
|
||||
else:
|
||||
get_command = "go get -d ./..."
|
||||
print "Retrieving Go dependencies...",
|
||||
run(get_command)
|
||||
print "done.\n"
|
||||
|
||||
def generate_md5_from_file(path):
|
||||
m = hashlib.md5()
|
||||
with open(path, 'rb') as f:
|
||||
for chunk in iter(lambda: f.read(4096), b""):
|
||||
m.update(chunk)
|
||||
return m.hexdigest()
|
||||
|
||||
def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
|
||||
outfiles = []
|
||||
tmp_build_dir = create_temp_dir()
|
||||
try:
|
||||
print "-------------------------"
|
||||
print ""
|
||||
print "Packaging..."
|
||||
for p in build_output:
|
||||
# Create top-level folder displaying which platform (linux, etc)
|
||||
create_dir(os.path.join(tmp_build_dir, p))
|
||||
for a in build_output[p]:
|
||||
current_location = build_output[p][a]
|
||||
# Create second-level directory displaying the architecture (amd64, etc)
|
||||
build_root = os.path.join(tmp_build_dir, p, a)
|
||||
# Create directory tree to mimic file system of package
|
||||
create_dir(build_root)
|
||||
create_package_fs(build_root)
|
||||
# Copy in packaging and miscellaneous scripts
|
||||
package_scripts(build_root)
|
||||
# Copy newly-built binaries to packaging directory
|
||||
for b in targets:
|
||||
if p == 'windows':
|
||||
b = b + '.exe'
|
||||
fr = os.path.join(current_location, b)
|
||||
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b)
|
||||
print "\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)
|
||||
copy_file(fr, to)
|
||||
# Package the directory structure
|
||||
for package_type in supported_packages[p]:
|
||||
print "\t- Packaging directory '{}' as '{}'...".format(build_root, package_type),
|
||||
name = "telegraf"
|
||||
package_version = version
|
||||
package_iteration = iteration
|
||||
if package_type in ['zip', 'tar']:
|
||||
if nightly:
|
||||
name = '{}-nightly_{}_{}'.format(name, p, a)
|
||||
else:
|
||||
name = '{}-{}_{}_{}'.format(name, version, p, a)
|
||||
if package_type == 'tar':
|
||||
# Add `tar.gz` to path to reduce package size
|
||||
current_location = os.path.join(current_location, name + '.tar.gz')
|
||||
if rc is not None:
|
||||
package_iteration = "0.rc{}".format(rc)
|
||||
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
|
||||
fpm_common_args,
|
||||
name,
|
||||
a,
|
||||
package_type,
|
||||
package_version,
|
||||
package_iteration,
|
||||
build_root,
|
||||
current_location)
|
||||
if package_type == "rpm":
|
||||
fpm_command += "--depends coreutils "
|
||||
out = run(fpm_command, shell=True)
|
||||
matches = re.search(':path=>"(.*)"', out)
|
||||
outfile = None
|
||||
if matches is not None:
|
||||
outfile = matches.groups()[0]
|
||||
if outfile is None:
|
||||
print "[ COULD NOT DETERMINE OUTPUT ]"
|
||||
else:
|
||||
# Strip nightly version (the unix epoch) from filename
|
||||
if nightly and package_type in ['deb', 'rpm']:
|
||||
outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
|
||||
outfiles.append(os.path.join(os.getcwd(), outfile))
|
||||
print "[ DONE ]"
|
||||
# Display MD5 hash for generated package
|
||||
print "\t\tMD5 = {}".format(generate_md5_from_file(outfile))
|
||||
print ""
|
||||
return outfiles
|
||||
finally:
|
||||
# Cleanup
|
||||
shutil.rmtree(tmp_build_dir)
|
||||
|
||||
def print_usage():
|
||||
print "Usage: ./build.py [options]"
|
||||
print ""
|
||||
print "Options:"
|
||||
print "\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build."
|
||||
print "\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all"
|
||||
print "\t --goarm=<arm version> \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6"
|
||||
print "\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all"
|
||||
print "\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag."
|
||||
print "\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP)."
|
||||
print "\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP)."
|
||||
print "\t --rc=<rc number> \n\t\t- Whether or not the build is a release candidate (affects version information)."
|
||||
print "\t --iteration=<iteration number> \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)."
|
||||
print "\t --race \n\t\t- Whether the produced build should have race detection enabled."
|
||||
print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)."
|
||||
print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)."
|
||||
print "\t --update \n\t\t- Whether dependencies should be updated prior to building."
|
||||
print "\t --test \n\t\t- Run Go tests. Will not produce a build."
|
||||
print "\t --parallel \n\t\t- Run Go tests in parallel up to the count specified."
|
||||
print "\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s."
|
||||
print "\t --clean \n\t\t- Clean the build output directory prior to creating build."
|
||||
print ""
|
||||
|
||||
def print_package_summary(packages):
|
||||
print packages
|
||||
|
||||
def main():
|
||||
# Command-line arguments
|
||||
outdir = "build"
|
||||
commit = None
|
||||
target_platform = None
|
||||
target_arch = None
|
||||
nightly = False
|
||||
race = False
|
||||
branch = None
|
||||
version = get_current_version()
|
||||
rc = None
|
||||
package = False
|
||||
update = False
|
||||
clean = False
|
||||
upload = False
|
||||
test = False
|
||||
parallel = None
|
||||
timeout = None
|
||||
iteration = 1
|
||||
no_vet = False
|
||||
goarm_version = "6"
|
||||
|
||||
for arg in sys.argv[1:]:
|
||||
if '--outdir' in arg:
|
||||
# Output directory. If none is specified, builds are placed in ./build.
outdir = arg.split("=")[1]
|
||||
elif '--commit' in arg:
|
||||
# Commit to build from. If none is specified, then it will build from the most recent commit.
|
||||
commit = arg.split("=")[1]
|
||||
elif '--branch' in arg:
|
||||
# Branch to build from. If none is specified, then it will build from the current branch.
|
||||
branch = arg.split("=")[1]
|
||||
elif '--arch' in arg:
|
||||
# Target architecture. If none is specified, then it will build for the current arch.
|
||||
target_arch = arg.split("=")[1]
|
||||
elif '--platform' in arg:
|
||||
# Target platform. If none is specified, then it will build for the current platform.
|
||||
target_platform = arg.split("=")[1]
|
||||
elif '--version' in arg:
|
||||
# Version to assign to this build (0.9.5, etc)
|
||||
version = arg.split("=")[1]
|
||||
elif '--rc' in arg:
|
||||
# Signifies that this is a release candidate build.
|
||||
rc = arg.split("=")[1]
|
||||
elif '--race' in arg:
|
||||
# Signifies that race detection should be enabled.
|
||||
race = True
|
||||
elif '--package' in arg:
|
||||
# Signifies that packages should be built.
|
||||
package = True
|
||||
elif '--nightly' in arg:
|
||||
# Signifies that this is a nightly build.
|
||||
nightly = True
|
||||
elif '--update' in arg:
|
||||
# Signifies that dependencies should be updated.
|
||||
update = True
|
||||
elif '--upload' in arg:
|
||||
# Signifies that the resulting packages should be uploaded to S3
|
||||
upload = True
|
||||
elif '--test' in arg:
|
||||
# Run tests and exit
|
||||
test = True
|
||||
elif '--parallel' in arg:
|
||||
# Set parallel for tests.
|
||||
parallel = int(arg.split("=")[1])
|
||||
elif '--timeout' in arg:
|
||||
# Set timeout for tests.
|
||||
timeout = arg.split("=")[1]
|
||||
elif '--clean' in arg:
|
||||
# Signifies that the outdir should be deleted before building
|
||||
clean = True
|
||||
elif '--iteration' in arg:
|
||||
iteration = arg.split("=")[1]
|
||||
elif '--no-vet' in arg:
|
||||
no_vet = True
|
||||
elif '--goarm' in arg:
|
||||
# Signifies GOARM flag to pass to build command when compiling for ARM
|
||||
goarm_version = arg.split("=")[1]
|
||||
elif '--help' in arg:
|
||||
print_usage()
|
||||
return 0
|
||||
else:
|
||||
print "!! Unknown argument: {}".format(arg)
|
||||
print_usage()
|
||||
return 1
|
||||
|
||||
if nightly:
|
||||
if rc:
|
||||
print "!! Cannot be both nightly and a release candidate! Stopping."
|
||||
return 1
|
||||
# In order to support nightly builds on the repository, we are adding the epoch timestamp
|
||||
# to the version so that version numbers are always greater than the previous nightly.
|
||||
version = "{}.n{}".format(version, int(time.time()))
|
||||
|
||||
# Pre-build checks
|
||||
check_environ()
|
||||
check_prereqs()
|
||||
|
||||
if not commit:
|
||||
commit = get_current_commit(short=True)
|
||||
if not branch:
|
||||
branch = get_current_branch()
|
||||
if not target_arch:
|
||||
if 'arm' in get_system_arch():
|
||||
# Prevent uname from reporting ARM arch (eg 'armv7l')
|
||||
target_arch = "arm"
|
||||
else:
|
||||
target_arch = get_system_arch()
|
||||
if not target_platform:
|
||||
target_platform = get_system_platform()
|
||||
if rc or nightly:
|
||||
# If a release candidate or nightly, set iteration to 0 (instead of 1)
|
||||
iteration = 0
|
||||
|
||||
build_output = {}
|
||||
# TODO(rossmcdonald): Prepare git repo for build (checking out correct branch/commit, etc.)
|
||||
# prepare(branch=branch, commit=commit)
|
||||
if test:
|
||||
if not run_tests(race, parallel, timeout, no_vet):
|
||||
return 1
|
||||
return 0
|
||||
|
||||
go_get(update=update)
|
||||
|
||||
platforms = []
|
||||
single_build = True
|
||||
if target_platform == 'all':
|
||||
platforms = supported_builds.keys()
|
||||
single_build = False
|
||||
else:
|
||||
platforms = [target_platform]
|
||||
|
||||
for platform in platforms:
|
||||
build_output.update( { platform : {} } )
|
||||
archs = []
|
||||
if target_arch == "all":
|
||||
single_build = False
|
||||
archs = supported_builds.get(platform)
|
||||
else:
|
||||
archs = [target_arch]
|
||||
for arch in archs:
|
||||
od = outdir
|
||||
if not single_build:
|
||||
od = os.path.join(outdir, platform, arch)
|
||||
build(version=version,
|
||||
branch=branch,
|
||||
commit=commit,
|
||||
platform=platform,
|
||||
arch=arch,
|
||||
nightly=nightly,
|
||||
rc=rc,
|
||||
race=race,
|
||||
clean=clean,
|
||||
outdir=od,
|
||||
goarm_version=goarm_version)
|
||||
build_output.get(platform).update( { arch : od } )
|
||||
|
||||
# Build packages
|
||||
if package:
|
||||
if not check_path_for("fpm"):
|
||||
print "!! Cannot package without command 'fpm'. Stopping."
|
||||
return 1
|
||||
packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration)
|
||||
# TODO(rossmcdonald): Add nice output for print_package_summary()
|
||||
# print_package_summary(packages)
|
||||
# Optionally upload to S3
|
||||
if upload:
|
||||
upload_packages(packages, nightly=nightly)
|
||||
return 0
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
|
||||
18
circle.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
machine:
|
||||
services:
|
||||
- docker
|
||||
post:
|
||||
- sudo service zookeeper stop
|
||||
- go version
|
||||
- go version | grep 1.5.2 || sudo rm -rf /usr/local/go
|
||||
- wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz
|
||||
- sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz
|
||||
- go version
|
||||
|
||||
dependencies:
|
||||
override:
|
||||
- docker info
|
||||
|
||||
test:
|
||||
override:
|
||||
- bash scripts/circle-test.sh
|
||||
@@ -9,69 +9,175 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/influxdb/telegraf"
|
||||
_ "github.com/influxdb/telegraf/plugins/all"
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/all"
|
||||
_ "github.com/influxdb/telegraf/plugins/outputs/all"
|
||||
)
|
||||
|
||||
var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout")
|
||||
var fDebug = flag.Bool("debug", false,
|
||||
"show metrics as they're generated to stdout")
|
||||
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
|
||||
var fConfig = flag.String("config", "", "configuration file to load")
|
||||
var fConfigDirectory = flag.String("config-directory", "",
|
||||
"directory containing additional *.conf files")
|
||||
var fVersion = flag.Bool("version", false, "display the version")
|
||||
var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration")
|
||||
var fSampleConfig = flag.Bool("sample-config", false,
|
||||
"print out full sample configuration")
|
||||
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
||||
var fInputFilters = flag.String("input-filter", "",
|
||||
"filter the plugins to enable, separator is :")
|
||||
var fOutputFilters = flag.String("output-filter", "",
|
||||
"filter the outputs to enable, separator is :")
|
||||
var fUsage = flag.String("usage", "",
|
||||
"print usage for a plugin, ie, 'telegraf -usage mysql'")
|
||||
|
||||
var Version = "unreleased"
|
||||
var fInputFiltersLegacy = flag.String("filter", "",
|
||||
"filter the plugins to enable, separator is :")
|
||||
var fOutputFiltersLegacy = flag.String("outputfilter", "",
|
||||
"filter the outputs to enable, separator is :")
|
||||
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
|
||||
"directory containing additional *.conf files")
|
||||
|
||||
// Telegraf version
|
||||
// -ldflags "-X main.Version=`git describe --always --tags`"
|
||||
var Version string
|
||||
|
||||
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||
|
||||
Usage:
|
||||
|
||||
telegraf <flags>
|
||||
|
||||
The flags are:
|
||||
|
||||
-config <file> configuration file to load
|
||||
-test gather metrics once, print them to stdout, and exit
|
||||
-sample-config print out full sample configuration to stdout
|
||||
-config-directory directory containing additional *.conf files
|
||||
-input-filter filter the input plugins to enable, separator is :
|
||||
-output-filter filter the output plugins to enable, separator is :
|
||||
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
|
||||
-version print the version to stdout
|
||||
|
||||
Examples:
|
||||
|
||||
# generate a telegraf config file:
|
||||
telegraf -sample-config > telegraf.conf
|
||||
|
||||
# generate config with only cpu input & influxdb output plugins defined
|
||||
telegraf -sample-config -input-filter cpu -output-filter influxdb
|
||||
|
||||
# run a single telegraf collection, outputting metrics to stdout
|
||||
telegraf -config telegraf.conf -test
|
||||
|
||||
# run telegraf with all plugins defined in config file
|
||||
telegraf -config telegraf.conf
|
||||
|
||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
||||
`
|
||||
|
||||
func main() {
|
||||
flag.Usage = usageExit
|
||||
flag.Parse()
|
||||
|
||||
if flag.NFlag() == 0 {
|
||||
usageExit()
|
||||
}
|
||||
|
||||
var inputFilters []string
|
||||
if *fInputFiltersLegacy != "" {
|
||||
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
|
||||
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
||||
}
|
||||
if *fInputFilters != "" {
|
||||
inputFilter := strings.TrimSpace(*fInputFilters)
|
||||
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
||||
}
|
||||
|
||||
var outputFilters []string
|
||||
if *fOutputFiltersLegacy != "" {
|
||||
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
|
||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||
}
|
||||
if *fOutputFilters != "" {
|
||||
outputFilter := strings.TrimSpace(*fOutputFilters)
|
||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||
}
|
||||
|
||||
if *fVersion {
|
||||
fmt.Printf("InfluxDB Telegraf agent - Version %s\n", Version)
|
||||
v := fmt.Sprintf("Telegraf - Version %s", Version)
|
||||
fmt.Println(v)
|
||||
return
|
||||
}
|
||||
|
||||
if *fSampleConfig {
|
||||
telegraf.PrintSampleConfig()
|
||||
config.PrintSampleConfig(inputFilters, outputFilters)
|
||||
return
|
||||
}
|
||||
|
||||
if *fUsage != "" {
|
||||
if err := config.PrintInputConfig(*fUsage); err != nil {
|
||||
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
|
||||
log.Fatalf("%s and %s", err, err2)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
config *telegraf.Config
|
||||
err error
|
||||
c *config.Config
|
||||
err error
|
||||
)
|
||||
|
||||
if *fConfig != "" {
|
||||
config, err = telegraf.LoadConfig(*fConfig)
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = outputFilters
|
||||
c.InputFilters = inputFilters
|
||||
err = c.LoadConfig(*fConfig)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
config = telegraf.DefaultConfig()
|
||||
fmt.Println("Usage: Telegraf")
|
||||
flag.PrintDefaults()
|
||||
return
|
||||
}
|
||||
|
||||
ag, err := telegraf.NewAgent(config)
|
||||
if *fConfigDirectoryLegacy != "" {
|
||||
err = c.LoadDirectory(*fConfigDirectoryLegacy)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if *fConfigDirectory != "" {
|
||||
err = c.LoadDirectory(*fConfigDirectory)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
if len(c.Outputs) == 0 {
|
||||
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
||||
}
|
||||
if len(c.Inputs) == 0 {
|
||||
log.Fatalf("Error: no plugins found, did you provide a valid config file?")
|
||||
}
|
||||
|
||||
ag, err := telegraf.NewAgent(c)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if *fDebug {
|
||||
ag.Debug = true
|
||||
}
|
||||
|
||||
plugins, err := ag.LoadPlugins()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
ag.Config.Agent.Debug = true
|
||||
}
|
||||
|
||||
if *fTest {
|
||||
if *fConfig != "" {
|
||||
err = ag.Test()
|
||||
} else {
|
||||
err = ag.TestAllPlugins()
|
||||
}
|
||||
|
||||
err = ag.Test()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -81,28 +187,33 @@ func main() {
|
||||
}
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
|
||||
signals := make(chan os.Signal)
|
||||
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
<-signals
|
||||
close(shutdown)
|
||||
}()
|
||||
|
||||
log.Print("InfluxDB Agent running")
|
||||
log.Printf("Loaded plugins: %s", strings.Join(plugins, " "))
|
||||
if ag.Debug {
|
||||
log.Printf("Debug: enabled")
|
||||
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v\n",
|
||||
ag.Interval, ag.Debug, ag.Hostname)
|
||||
}
|
||||
log.Printf("Starting Telegraf (version %s)\n", Version)
|
||||
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||
log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " "))
|
||||
log.Printf("Tags enabled: %s", c.ListTags())
|
||||
|
||||
if config.URL != "" {
|
||||
log.Printf("Sending metrics to: %s", config.URL)
|
||||
log.Printf("Tags enabled: %v", config.ListTags())
|
||||
if *fPidfile != "" {
|
||||
f, err := os.Create(*fPidfile)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to create pidfile: %s", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, "%d\n", os.Getpid())
|
||||
|
||||
f.Close()
|
||||
}
|
||||
|
||||
ag.Run(shutdown)
|
||||
}
|
||||
|
||||
func usageExit() {
|
||||
fmt.Println(usage)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
302
config.go
@@ -1,302 +0,0 @@
|
||||
package telegraf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
"github.com/naoina/toml"
|
||||
"github.com/naoina/toml/ast"
|
||||
)
|
||||
|
||||
type Duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *Duration) UnmarshalTOML(b []byte) error {
|
||||
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Duration = dur
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
URL string
|
||||
Username string
|
||||
Password string
|
||||
Database string
|
||||
UserAgent string
|
||||
Tags map[string]string
|
||||
|
||||
agent *ast.Table
|
||||
plugins map[string]*ast.Table
|
||||
}
|
||||
|
||||
func (c *Config) Plugins() map[string]*ast.Table {
|
||||
return c.plugins
|
||||
}
|
||||
|
||||
type ConfiguredPlugin struct {
|
||||
Name string
|
||||
|
||||
Drop []string
|
||||
Pass []string
|
||||
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
func (cp *ConfiguredPlugin) ShouldPass(measurement string) bool {
|
||||
if cp.Pass != nil {
|
||||
for _, pat := range cp.Pass {
|
||||
if strings.HasPrefix(measurement, pat) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
if cp.Drop != nil {
|
||||
for _, pat := range cp.Drop {
|
||||
if strings.HasPrefix(measurement, pat) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Config) ApplyAgent(v interface{}) error {
|
||||
if c.agent != nil {
|
||||
return toml.UnmarshalTable(c.agent, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) ApplyPlugin(name string, v interface{}) (*ConfiguredPlugin, error) {
|
||||
cp := &ConfiguredPlugin{Name: name}
|
||||
|
||||
if tbl, ok := c.plugins[name]; ok {
|
||||
|
||||
if node, ok := tbl.Fields["pass"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
cp.Pass = append(cp.Pass, str.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["drop"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
cp.Drop = append(cp.Drop, str.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["interval"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if str, ok := kv.Value.(*ast.String); ok {
|
||||
dur, err := time.ParseDuration(str.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cp.Interval = dur
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
delete(tbl.Fields, "drop")
|
||||
delete(tbl.Fields, "pass")
|
||||
delete(tbl.Fields, "interval")
|
||||
return cp, toml.UnmarshalTable(tbl, v)
|
||||
}
|
||||
|
||||
return cp, nil
|
||||
}
|
||||
|
||||
func (c *Config) PluginsDeclared() []string {
|
||||
var plugins []string
|
||||
|
||||
for name, _ := range c.plugins {
|
||||
plugins = append(plugins, name)
|
||||
}
|
||||
|
||||
sort.Strings(plugins)
|
||||
|
||||
return plugins
|
||||
}
|
||||
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{}
|
||||
}
|
||||
|
||||
var ErrInvalidConfig = errors.New("invalid configuration")
|
||||
|
||||
func LoadConfig(path string) (*Config, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tbl, err := toml.Parse(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &Config{
|
||||
plugins: make(map[string]*ast.Table),
|
||||
}
|
||||
|
||||
for name, val := range tbl.Fields {
|
||||
subtbl, ok := val.(*ast.Table)
|
||||
if !ok {
|
||||
return nil, ErrInvalidConfig
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "influxdb":
|
||||
err := toml.UnmarshalTable(subtbl, c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "agent":
|
||||
c.agent = subtbl
|
||||
default:
|
||||
c.plugins[name] = subtbl
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *Config) ListTags() string {
|
||||
var tags []string
|
||||
|
||||
for k, v := range c.Tags {
|
||||
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
|
||||
sort.Strings(tags)
|
||||
|
||||
return strings.Join(tags, " ")
|
||||
}
|
||||
|
||||
type hasConfig interface {
|
||||
BasicConfig() string
|
||||
}
|
||||
|
||||
type hasDescr interface {
|
||||
Description() string
|
||||
}
|
||||
|
||||
var header = `# Telegraf configuration
|
||||
|
||||
# If this file is missing an [agent] section, you must first generate a
|
||||
# valid config with 'telegraf -sample-config > telegraf.toml'
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared plugins.
|
||||
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
# as a section with no variables. To deactivate a plugin, comment
|
||||
# out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# One rule that plugins conform to is wherever a connection string
|
||||
# can be passed, the values '' and 'localhost' are treated specially.
|
||||
# They indicate to the plugin to use their own builtin configuration to
|
||||
# connect to the local system.
|
||||
|
||||
# NOTE: The configuration has a few required parameters. They are marked
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[influxdb]
|
||||
# The full HTTP endpoint URL for your InfluxDB instance
|
||||
url = "http://localhost:8086" # required.
|
||||
|
||||
# The target database for metrics. This database must already exist
|
||||
database = "telegraf" # required.
|
||||
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
|
||||
# Set the user agent for the POSTs (can be useful for log differentiation)
|
||||
# user_agent = "telegraf"
|
||||
# tags = { "dc": "us-east-1" }
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
|
||||
# [influxdb.tags]
|
||||
# dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf itself
|
||||
# [agent]
|
||||
# interval = "10s"
|
||||
# debug = false
|
||||
# hostname = "prod3241"
|
||||
|
||||
# PLUGINS
|
||||
|
||||
`
|
||||
|
||||
func PrintSampleConfig() {
|
||||
fmt.Printf(header)
|
||||
|
||||
var names []string
|
||||
|
||||
for name, _ := range plugins.Plugins {
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
for _, name := range names {
|
||||
creator := plugins.Plugins[name]
|
||||
|
||||
plugin := creator()
|
||||
|
||||
fmt.Printf("# %s\n[%s]\n", plugin.Description(), name)
|
||||
|
||||
var config string
|
||||
|
||||
config = strings.TrimSpace(plugin.SampleConfig())
|
||||
|
||||
if config == "" {
|
||||
fmt.Printf(" # no configuration\n\n")
|
||||
} else {
|
||||
fmt.Printf("\n")
|
||||
lines := strings.Split(config, "\n")
|
||||
for _, line := range lines {
|
||||
fmt.Printf("%s\n", line)
|
||||
}
|
||||
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,134 +0,0 @@
|
||||
# Telegraf configuration
|
||||
|
||||
# If this file is missing an [agent] section, you must first generate a
|
||||
# valid config with 'telegraf -sample-config > telegraf.toml'
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared plugins.
|
||||
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
# as a section with no variables. To deactivate a plugin, comment
|
||||
# out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# One rule that plugins conform to is wherever a connection string
|
||||
# can be passed, the values '' and 'localhost' are treated specially.
|
||||
# They indicate to the plugin to use their own builtin configuration to
|
||||
# connect to the local system.
|
||||
|
||||
# NOTE: The configuration has a few required parameters. They are marked
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[influxdb]
|
||||
# The full HTTP endpoint URL for your InfluxDB instance
|
||||
url = "http://localhost:8086" # required.
|
||||
|
||||
# The target database for metrics. This database must already exist
|
||||
database = "telegraf" # required.
|
||||
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
|
||||
# Set the user agent for the POSTs (can be useful for log differentiation)
|
||||
# user_agent = "telegraf"
|
||||
# tags = { "dc": "us-east-1" }
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
|
||||
# [influxdb.tags]
|
||||
# dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf itself
|
||||
# [agent]
|
||||
# interval = "10s"
|
||||
# debug = false
|
||||
# hostname = "prod3241"
|
||||
|
||||
# PLUGINS
|
||||
|
||||
# Read metrics about cpu usage
|
||||
[cpu]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
[disk]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about docker containers
|
||||
[docker]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
[io]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about memory usage
|
||||
[mem]
|
||||
# no configuration
|
||||
|
||||
# Read metrics from one or many mysql servers
|
||||
[mysql]
|
||||
|
||||
# specify servers via a url matching:
|
||||
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
# e.g. root:root@http://10.0.0.18/?tls=false
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics about network interface usage
|
||||
[net]
|
||||
|
||||
# By default, telegraf gathers stats from any up interface (excluding loopback)
|
||||
# Setting interfaces will tell it to gather these explicit interfaces,
|
||||
# regardless of status.
|
||||
#
|
||||
# interfaces = ["eth0", ... ]
|
||||
|
||||
# Read metrics from one or many postgresql servers
|
||||
[postgresql]
|
||||
|
||||
# specify servers via an array of tables
|
||||
[[postgresql.servers]]
|
||||
|
||||
# specify address via a url matching:
|
||||
# postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full]
|
||||
# or a simple string:
|
||||
# host=localhost user=pqotest password=... sslmode=...
|
||||
#
|
||||
# All connection parameters are optional. By default, the host is localhost
|
||||
# and the user is the currently running user. For localhost, we default
|
||||
# to sslmode=disable as well.
|
||||
#
|
||||
|
||||
address = "sslmode=disable"
|
||||
|
||||
# A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# databases are gathered.
|
||||
|
||||
# databases = ["app_production", "blah_testing"]
|
||||
|
||||
# [[postgresql.servers]]
|
||||
# address = "influx@remoteserver"
|
||||
|
||||
# Read metrics from one or many redis servers
|
||||
[redis]
|
||||
|
||||
# An array of addresses to gather stats about. Specify an ip or hostname
|
||||
# with optional port. ie localhost, 10.10.3.33:18832, etc.
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics about swap memory usage
|
||||
[swap]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about system load
|
||||
[system]
|
||||
# no configuration
|
||||
|
||||
11
etc/logrotate.d/telegraf
Normal file
@@ -0,0 +1,11 @@
|
||||
/var/log/telegraf/telegraf.log
|
||||
{
|
||||
rotate 6
|
||||
daily
|
||||
missingok
|
||||
dateext
|
||||
copytruncate
|
||||
notifempty
|
||||
compress
|
||||
}
|
||||
|
||||
119
etc/telegraf.conf
Normal file
@@ -0,0 +1,119 @@
|
||||
# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared inputs.
|
||||
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
# as a section with no variables. To deactivate a plugin, comment
|
||||
# out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# One rule that plugins conform to is wherever a connection string
|
||||
# can be passed, the values '' and 'localhost' are treated specially.
|
||||
# They indicate to the plugin to use their own builtin configuration to
|
||||
# connect to the local system.
|
||||
|
||||
# NOTE: The configuration has a few required parameters. They are marked
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
[tags]
|
||||
# dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
# Default data collection interval for all plugins
|
||||
interval = "10s"
|
||||
# Rounds collection interval to 'interval'
|
||||
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||
round_interval = true
|
||||
|
||||
# Default data flushing interval for all outputs. You should not set this below
|
||||
# interval. Maximum flush_interval will be flush_interval + flush_jitter
|
||||
flush_interval = "10s"
|
||||
# Jitter the flush interval by a random amount. This is primarily to avoid
|
||||
# large write spikes for users running a large number of telegraf instances.
|
||||
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||
flush_jitter = "0s"
|
||||
|
||||
# Run telegraf in debug mode
|
||||
debug = false
|
||||
# Override default hostname, if empty use os.Hostname()
|
||||
hostname = ""
|
||||
|
||||
|
||||
###############################################################################
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[[outputs.influxdb]]
|
||||
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
|
||||
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster; this means that only ONE of the urls will be written to each interval.
|
||||
# urls = ["udp://localhost:8089"] # UDP endpoint example
|
||||
urls = ["http://localhost:8086"] # required
|
||||
# The target database for metrics (telegraf will create it if not exists)
|
||||
database = "telegraf" # required
|
||||
# Precision of writes, valid values are n, u, ms, s, m, and h
|
||||
# note: using second precision greatly helps InfluxDB compression
|
||||
precision = "s"
|
||||
|
||||
# Connection timeout (for the connection with InfluxDB), formatted as a string.
|
||||
# If not provided, will default to 0 (no timeout)
|
||||
# timeout = "5s"
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
|
||||
# user_agent = "telegraf"
|
||||
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
|
||||
# udp_payload = 512
|
||||
|
||||
|
||||
###############################################################################
|
||||
# INPUTS #
|
||||
###############################################################################
|
||||
|
||||
# Read metrics about cpu usage
|
||||
[[inputs.cpu]]
|
||||
# Whether to report per-cpu stats or not
|
||||
percpu = true
|
||||
# Whether to report total system cpu stats or not
|
||||
totalcpu = true
|
||||
# Comment this line if you want the raw CPU time metrics
|
||||
drop = ["cpu_time"]
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
[[inputs.disk]]
|
||||
# By default, telegraf gathers stats for all mountpoints.
|
||||
# Setting mountpoints will restrict the stats to the specified mountpoints.
|
||||
# Mountpoints=["/"]
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
[[inputs.diskio]]
|
||||
# By default, telegraf will gather stats for all devices including
|
||||
# disk partitions.
|
||||
# Setting devices will restrict the stats to the specified devices.
|
||||
# Devices=["sda","sdb"]
|
||||
# Uncomment the following line if you do not need disk serial numbers.
|
||||
# SkipSerialNumber = true
|
||||
|
||||
# Read metrics about memory usage
|
||||
[[inputs.mem]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about swap memory usage
|
||||
[[inputs.swap]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about system load & uptime
|
||||
[[inputs.system]]
|
||||
# no configuration
|
||||
|
||||
|
||||
###############################################################################
|
||||
# SERVICE INPUTS #
|
||||
###############################################################################
|
||||
707
internal/config/config.go
Normal file
@@ -0,0 +1,707 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins/outputs"
|
||||
|
||||
"github.com/naoina/toml"
|
||||
"github.com/naoina/toml/ast"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
// Config specifies the URL/user/password for the database that telegraf
|
||||
// will be logging to, as well as all the plugins that the user has
|
||||
// specified
|
||||
type Config struct {
|
||||
Tags map[string]string
|
||||
InputFilters []string
|
||||
OutputFilters []string
|
||||
|
||||
Agent *AgentConfig
|
||||
Inputs []*RunningInput
|
||||
Outputs []*RunningOutput
|
||||
}
|
||||
|
||||
func NewConfig() *Config {
|
||||
c := &Config{
|
||||
// Agent defaults:
|
||||
Agent: &AgentConfig{
|
||||
Interval: internal.Duration{Duration: 10 * time.Second},
|
||||
RoundInterval: true,
|
||||
FlushInterval: internal.Duration{Duration: 10 * time.Second},
|
||||
FlushRetries: 2,
|
||||
FlushJitter: internal.Duration{Duration: 5 * time.Second},
|
||||
},
|
||||
|
||||
Tags: make(map[string]string),
|
||||
Inputs: make([]*RunningInput, 0),
|
||||
Outputs: make([]*RunningOutput, 0),
|
||||
InputFilters: make([]string, 0),
|
||||
OutputFilters: make([]string, 0),
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type AgentConfig struct {
|
||||
// Interval at which to gather information
|
||||
Interval internal.Duration
|
||||
|
||||
// RoundInterval rounds collection interval to 'interval'.
|
||||
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
|
||||
RoundInterval bool
|
||||
|
||||
// Interval at which to flush data
|
||||
FlushInterval internal.Duration
|
||||
|
||||
// FlushRetries is the number of times to retry each data flush
|
||||
FlushRetries int
|
||||
|
||||
// FlushJitter adds a random delay of up to this duration to each flush,
// to avoid large synchronized write spikes from many telegraf instances
|
||||
FlushJitter internal.Duration
|
||||
|
||||
// TODO(cam): Remove UTC and Precision parameters, they are no longer
|
||||
// valid for the agent config. Leaving them here for now for backwards-
|
||||
// compatibility
|
||||
UTC bool `toml:"utc"`
|
||||
Precision string
|
||||
|
||||
// Option for running in debug mode
|
||||
Debug bool
|
||||
Hostname string
|
||||
}
|
||||
|
||||
// TagFilter is the name of a tag, and the values on which to filter
|
||||
type TagFilter struct {
|
||||
Name string
|
||||
Filter []string
|
||||
}
|
||||
|
||||
type RunningOutput struct {
|
||||
Name string
|
||||
Output outputs.Output
|
||||
Config *OutputConfig
|
||||
}
|
||||
|
||||
type RunningInput struct {
|
||||
Name string
|
||||
Input inputs.Input
|
||||
Config *InputConfig
|
||||
}
|
||||
|
||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
||||
type Filter struct {
|
||||
Drop []string
|
||||
Pass []string
|
||||
|
||||
TagDrop []TagFilter
|
||||
TagPass []TagFilter
|
||||
|
||||
IsActive bool
|
||||
}
|
||||
|
||||
// InputConfig containing a name, interval, and filter
|
||||
type InputConfig struct {
|
||||
Name string
|
||||
NameOverride string
|
||||
MeasurementPrefix string
|
||||
MeasurementSuffix string
|
||||
Tags map[string]string
|
||||
Filter Filter
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
// OutputConfig containing name and filter
|
||||
type OutputConfig struct {
|
||||
Name string
|
||||
Filter Filter
|
||||
}
|
||||
|
||||
// FilterPoints returns a filtered slice of client.Points based on whether
// filters are active for this RunningOutput.
|
||||
func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point {
|
||||
if !ro.Config.Filter.IsActive {
|
||||
return points
|
||||
}
|
||||
|
||||
var filteredPoints []*client.Point
|
||||
for i := range points {
|
||||
if !ro.Config.Filter.ShouldPass(points[i].Name()) || !ro.Config.Filter.ShouldTagsPass(points[i].Tags()) {
|
||||
continue
|
||||
}
|
||||
filteredPoints = append(filteredPoints, points[i])
|
||||
}
|
||||
return filteredPoints
|
||||
}
|
||||
|
||||
// ShouldPass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f Filter) ShouldPass(fieldkey string) bool {
|
||||
if f.Pass != nil {
|
||||
for _, pat := range f.Pass {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.Drop != nil {
|
||||
for _, pat := range f.Drop {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ShouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// based on the tagdrop/tagpass filter parameters
|
||||
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
|
||||
if f.TagPass != nil {
|
||||
for _, pat := range f.TagPass {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.TagDrop != nil {
|
||||
for _, pat := range f.TagDrop {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
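A small hedged sketch of how the two checks above combine (the plugin, measurement, and tag values here are made up; FilterPoints consults both):

// Only pass cpu_* measurements tagged with cpu=cpu-total.
f := Filter{
	Pass:     []string{"cpu_*"},
	TagPass:  []TagFilter{{Name: "cpu", Filter: []string{"cpu-total"}}},
	IsActive: true,
}
f.ShouldPass("cpu_usage_idle")                          // true: matches the "cpu_*" glob
f.ShouldPass("mem_free")                                // false: no Pass pattern matches
f.ShouldTagsPass(map[string]string{"cpu": "cpu-total"}) // true: tagpass value matches
f.ShouldTagsPass(map[string]string{"cpu": "cpu0"})      // false: tag present, value does not match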
|
||||
|
||||
// InputNames returns a list of the names of the configured inputs.
|
||||
func (c *Config) InputNames() []string {
|
||||
var name []string
|
||||
for _, input := range c.Inputs {
|
||||
name = append(name, input.Name)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// OutputNames returns a list of the names of the configured outputs.
|
||||
func (c *Config) OutputNames() []string {
|
||||
var name []string
|
||||
for _, output := range c.Outputs {
|
||||
name = append(name, output.Name)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// ListTags returns a string of tags specified in the config,
|
||||
// line-protocol style
|
||||
func (c *Config) ListTags() string {
|
||||
var tags []string
|
||||
|
||||
for k, v := range c.Tags {
|
||||
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
|
||||
sort.Strings(tags)
|
||||
|
||||
return strings.Join(tags, " ")
|
||||
}
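For illustration (hypothetical tag values), ListTags renders the global tag map sorted and space-separated, line-protocol style:

c := &Config{Tags: map[string]string{"dc": "us-east-1", "rack": "4a"}}
fmt.Println(c.ListTags()) // "dc=us-east-1 rack=4a"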
|
||||
|
||||
var header = `# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared inputs.
|
||||
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
# as a section with no variables. To deactivate a plugin, comment
|
||||
# out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# One rule that plugins conform to is wherever a connection string
|
||||
# can be passed, the values '' and 'localhost' are treated specially.
|
||||
# They indicate to the plugin to use their own builtin configuration to
|
||||
# connect to the local system.
|
||||
|
||||
# NOTE: The configuration has a few required parameters. They are marked
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
[tags]
|
||||
# dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
# Default data collection interval for all inputs
|
||||
interval = "10s"
|
||||
# Rounds collection interval to 'interval'
|
||||
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||
round_interval = true
|
||||
|
||||
# Default data flushing interval for all outputs. You should not set this below
|
||||
# interval. Maximum flush_interval will be flush_interval + flush_jitter
|
||||
flush_interval = "10s"
|
||||
# Jitter the flush interval by a random amount. This is primarily to avoid
|
||||
# large write spikes for users running a large number of telegraf instances.
|
||||
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||
flush_jitter = "0s"
|
||||
|
||||
# Run telegraf in debug mode
|
||||
debug = false
|
||||
# Override default hostname, if empty use os.Hostname()
|
||||
hostname = ""
|
||||
|
||||
|
||||
###############################################################################
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
`
|
||||
|
||||
var pluginHeader = `
|
||||
|
||||
###############################################################################
|
||||
# INPUTS #
|
||||
###############################################################################
|
||||
|
||||
`
|
||||
|
||||
var serviceInputHeader = `
|
||||
|
||||
###############################################################################
|
||||
# SERVICE INPUTS #
|
||||
###############################################################################
|
||||
`
|
||||
|
||||
// PrintSampleConfig prints the sample config
|
||||
func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
|
||||
fmt.Print(header)
|
||||
|
||||
// Filter outputs
|
||||
var onames []string
|
||||
for oname := range outputs.Outputs {
|
||||
if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
|
||||
onames = append(onames, oname)
|
||||
}
|
||||
}
|
||||
sort.Strings(onames)
|
||||
|
||||
// Print Outputs
|
||||
for _, oname := range onames {
|
||||
creator := outputs.Outputs[oname]
|
||||
output := creator()
|
||||
printConfig(oname, output, "outputs")
|
||||
}
|
||||
|
||||
// Filter inputs
|
||||
var pnames []string
|
||||
for pname := range inputs.Inputs {
|
||||
if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
|
||||
pnames = append(pnames, pname)
|
||||
}
|
||||
}
|
||||
sort.Strings(pnames)
|
||||
|
||||
// Print Inputs
|
||||
fmt.Print(pluginHeader)
|
||||
servInputs := make(map[string]inputs.ServiceInput)
|
||||
for _, pname := range pnames {
|
||||
creator := inputs.Inputs[pname]
|
||||
input := creator()
|
||||
|
||||
switch p := input.(type) {
|
||||
case inputs.ServiceInput:
|
||||
servInputs[pname] = p
|
||||
continue
|
||||
}
|
||||
|
||||
printConfig(pname, input, "inputs")
|
||||
}
|
||||
|
||||
// Print Service Inputs
|
||||
fmt.Print(serviceInputHeader)
|
||||
for name, input := range servInputs {
|
||||
printConfig(name, input, "inputs")
|
||||
}
|
||||
}
|
||||
|
||||
type printer interface {
|
||||
Description() string
|
||||
SampleConfig() string
|
||||
}
|
||||
|
||||
func printConfig(name string, p printer, op string) {
|
||||
fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name)
|
||||
config := p.SampleConfig()
|
||||
if config == "" {
|
||||
fmt.Printf("\n # no configuration\n")
|
||||
} else {
|
||||
fmt.Print(config)
|
||||
}
|
||||
}
|
||||
|
||||
func sliceContains(name string, list []string) bool {
|
||||
for _, b := range list {
|
||||
if b == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PrintInputConfig prints the config usage of a single input.
|
||||
func PrintInputConfig(name string) error {
|
||||
if creator, ok := inputs.Inputs[name]; ok {
|
||||
printConfig(name, creator(), "inputs")
|
||||
} else {
|
||||
return fmt.Errorf("Input %s not found", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrintOutputConfig prints the config usage of a single output.
|
||||
func PrintOutputConfig(name string) error {
|
||||
if creator, ok := outputs.Outputs[name]; ok {
|
||||
printConfig(name, creator(), "outputs")
|
||||
} else {
|
||||
return fmt.Errorf("Output %s not found", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) LoadDirectory(path string) error {
|
||||
directoryEntries, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range directoryEntries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
if len(name) < 6 || name[len(name)-5:] != ".conf" {
|
||||
continue
|
||||
}
|
||||
err := c.LoadConfig(filepath.Join(path, name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
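A minimal usage sketch of the two loaders (the file paths are hypothetical; NewConfig comes from this package and is used the same way in the tests further down):

c := NewConfig()
if err := c.LoadConfig("/etc/telegraf/telegraf.conf"); err != nil {
	log.Fatal(err)
}
// Merge in any *.conf fragments; subdirectories and non-.conf files are skipped.
if err := c.LoadDirectory("/etc/telegraf/telegraf.d"); err != nil {
	log.Fatal(err)
}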
|
||||
|
||||
// LoadConfig loads the given config file and applies it to c
|
||||
func (c *Config) LoadConfig(path string) error {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tbl, err := toml.Parse(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for name, val := range tbl.Fields {
|
||||
subTable, ok := val.(*ast.Table)
|
||||
if !ok {
|
||||
return errors.New("invalid configuration")
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "agent":
|
||||
if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
|
||||
log.Printf("Could not parse [agent] config\n")
|
||||
return err
|
||||
}
|
||||
case "tags":
|
||||
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
|
||||
log.Printf("Could not parse [tags] config\n")
|
||||
return err
|
||||
}
|
||||
case "outputs":
|
||||
for pluginName, pluginVal := range subTable.Fields {
|
||||
switch pluginSubTable := pluginVal.(type) {
|
||||
case *ast.Table:
|
||||
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
|
||||
return err
|
||||
}
|
||||
case []*ast.Table:
|
||||
for _, t := range pluginSubTable {
|
||||
if err = c.addOutput(pluginName, t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("Unsupported config format: %s",
|
||||
pluginName)
|
||||
}
|
||||
}
|
||||
case "inputs", "plugins":
|
||||
for pluginName, pluginVal := range subTable.Fields {
|
||||
switch pluginSubTable := pluginVal.(type) {
|
||||
case *ast.Table:
|
||||
if err = c.addInput(pluginName, pluginSubTable); err != nil {
|
||||
return err
|
||||
}
|
||||
case []*ast.Table:
|
||||
for _, t := range pluginSubTable {
|
||||
if err = c.addInput(pluginName, t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("Unsupported config format: %s",
|
||||
pluginName)
|
||||
}
|
||||
}
|
||||
// Assume it's an input plugin for legacy config file support if no other
// identifiers are present
|
||||
default:
|
||||
if err = c.addInput(name, subTable); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) addOutput(name string, table *ast.Table) error {
|
||||
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
|
||||
return nil
|
||||
}
|
||||
creator, ok := outputs.Outputs[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("Undefined but requested output: %s", name)
|
||||
}
|
||||
output := creator()
|
||||
|
||||
outputConfig, err := buildOutput(name, table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := toml.UnmarshalTable(table, output); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ro := &RunningOutput{
|
||||
Name: name,
|
||||
Output: output,
|
||||
Config: outputConfig,
|
||||
}
|
||||
c.Outputs = append(c.Outputs, ro)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) addInput(name string, table *ast.Table) error {
|
||||
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
|
||||
return nil
|
||||
}
|
||||
// Legacy support renaming io input to diskio
|
||||
if name == "io" {
|
||||
name = "diskio"
|
||||
}
|
||||
|
||||
creator, ok := inputs.Inputs[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("Undefined but requested input: %s", name)
|
||||
}
|
||||
input := creator()
|
||||
|
||||
pluginConfig, err := buildInput(name, table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := toml.UnmarshalTable(table, input); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rp := &RunningInput{
|
||||
Name: name,
|
||||
Input: input,
|
||||
Config: pluginConfig,
|
||||
}
|
||||
c.Inputs = append(c.Inputs, rp)
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
|
||||
// be inserted into the OutputConfig/InputConfig to be used for prefix
|
||||
// filtering on tags and measurements
|
||||
func buildFilter(tbl *ast.Table) Filter {
|
||||
f := Filter{}
|
||||
|
||||
if node, ok := tbl.Fields["pass"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
f.Pass = append(f.Pass, str.Value)
|
||||
f.IsActive = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["drop"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
f.Drop = append(f.Drop, str.Value)
|
||||
f.IsActive = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["tagpass"]; ok {
|
||||
if subtbl, ok := node.(*ast.Table); ok {
|
||||
for name, val := range subtbl.Fields {
|
||||
if kv, ok := val.(*ast.KeyValue); ok {
|
||||
tagfilter := &TagFilter{Name: name}
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
tagfilter.Filter = append(tagfilter.Filter, str.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
f.TagPass = append(f.TagPass, *tagfilter)
|
||||
f.IsActive = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["tagdrop"]; ok {
|
||||
if subtbl, ok := node.(*ast.Table); ok {
|
||||
for name, val := range subtbl.Fields {
|
||||
if kv, ok := val.(*ast.KeyValue); ok {
|
||||
tagfilter := &TagFilter{Name: name}
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
tagfilter.Filter = append(tagfilter.Filter, str.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
f.TagDrop = append(f.TagDrop, *tagfilter)
|
||||
f.IsActive = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
delete(tbl.Fields, "drop")
|
||||
delete(tbl.Fields, "pass")
|
||||
delete(tbl.Fields, "tagdrop")
|
||||
delete(tbl.Fields, "tagpass")
|
||||
return f
|
||||
}
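For reference, this is the TOML shape that reaches buildFilter through a plugin's sub-table, shown as a hedged sketch inside a Go raw string (the plugin and tag names are illustrative; the format matches the testdata files below). Note that all four keys are deleted from the table afterwards so they are never unmarshaled into the plugin struct itself.

const filterExample = `
[[inputs.cpu]]
  # pass/drop match measurement names (globs allowed)
  pass = ["cpu_*"]
  drop = ["cpu_time*"]
  # tagpass/tagdrop match tag values; the keys are tag names
  [inputs.cpu.tagpass]
    cpu = ["cpu-total"]
  [inputs.cpu.tagdrop]
    host = ["test-*"]
`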
|
||||
|
||||
// buildInput parses input specific items from the ast.Table,
|
||||
// builds the filter and returns a
|
||||
// InputConfig to be inserted into RunningInput
|
||||
func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
|
||||
cp := &InputConfig{Name: name}
|
||||
if node, ok := tbl.Fields["interval"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if str, ok := kv.Value.(*ast.String); ok {
|
||||
dur, err := time.ParseDuration(str.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cp.Interval = dur
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["name_prefix"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if str, ok := kv.Value.(*ast.String); ok {
|
||||
cp.MeasurementPrefix = str.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["name_suffix"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if str, ok := kv.Value.(*ast.String); ok {
|
||||
cp.MeasurementSuffix = str.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["name_override"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if str, ok := kv.Value.(*ast.String); ok {
|
||||
cp.NameOverride = str.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cp.Tags = make(map[string]string)
|
||||
if node, ok := tbl.Fields["tags"]; ok {
|
||||
if subtbl, ok := node.(*ast.Table); ok {
|
||||
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
|
||||
log.Printf("Could not parse tags for input %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
delete(tbl.Fields, "name_prefix")
|
||||
delete(tbl.Fields, "name_suffix")
|
||||
delete(tbl.Fields, "name_override")
|
||||
delete(tbl.Fields, "interval")
|
||||
delete(tbl.Fields, "tags")
|
||||
cp.Filter = buildFilter(tbl)
|
||||
return cp, nil
|
||||
}
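As a rough sketch (values illustrative, loosely based on the exec testdata below), the per-plugin keys buildInput strips out end up on InputConfig roughly like this:

// [[inputs.exec]]
//   interval = "30s"
//   name_suffix = "_myothercollector"
// becomes approximately:
ic := &InputConfig{
	Name:              "exec",
	MeasurementSuffix: "_myothercollector",
	Interval:          30 * time.Second,
	Tags:              map[string]string{},
}
_ = ic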
|
||||
|
||||
// buildOutput parses output-specific items from the ast.Table, builds the
// filter and returns an OutputConfig to be inserted into a RunningOutput
|
||||
// Note: error exists in the return for future calls that might require error
|
||||
func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
|
||||
oc := &OutputConfig{
|
||||
Name: name,
|
||||
Filter: buildFilter(tbl),
|
||||
}
|
||||
return oc, nil
|
||||
}
|
||||
internal/config/config_test.go (new file, 292 lines)
@@ -0,0 +1,292 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins/inputs/exec"
|
||||
"github.com/influxdb/telegraf/plugins/inputs/memcached"
|
||||
"github.com/influxdb/telegraf/plugins/inputs/procstat"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConfig_LoadSingleInput(t *testing.T) {
|
||||
c := NewConfig()
|
||||
c.LoadConfig("./testdata/single_plugin.toml")
|
||||
|
||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
||||
memcached.Servers = []string{"localhost"}
|
||||
|
||||
mConfig := &InputConfig{
|
||||
Name: "memcached",
|
||||
Filter: Filter{
|
||||
Drop: []string{"other", "stuff"},
|
||||
Pass: []string{"some", "strings"},
|
||||
TagDrop: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "badtag",
|
||||
Filter: []string{"othertag"},
|
||||
},
|
||||
},
|
||||
TagPass: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "goodtag",
|
||||
Filter: []string{"mytag"},
|
||||
},
|
||||
},
|
||||
IsActive: true,
|
||||
},
|
||||
Interval: 5 * time.Second,
|
||||
}
|
||||
mConfig.Tags = make(map[string]string)
|
||||
|
||||
assert.Equal(t, memcached, c.Inputs[0].Input,
|
||||
"Testdata did not produce a correct memcached struct.")
|
||||
assert.Equal(t, mConfig, c.Inputs[0].Config,
|
||||
"Testdata did not produce correct memcached metadata.")
|
||||
}
|
||||
|
||||
func TestConfig_LoadDirectory(t *testing.T) {
|
||||
c := NewConfig()
|
||||
err := c.LoadConfig("./testdata/single_plugin.toml")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
err = c.LoadDirectory("./testdata/subconfig")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
||||
memcached.Servers = []string{"localhost"}
|
||||
|
||||
mConfig := &InputConfig{
|
||||
Name: "memcached",
|
||||
Filter: Filter{
|
||||
Drop: []string{"other", "stuff"},
|
||||
Pass: []string{"some", "strings"},
|
||||
TagDrop: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "badtag",
|
||||
Filter: []string{"othertag"},
|
||||
},
|
||||
},
|
||||
TagPass: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "goodtag",
|
||||
Filter: []string{"mytag"},
|
||||
},
|
||||
},
|
||||
IsActive: true,
|
||||
},
|
||||
Interval: 5 * time.Second,
|
||||
}
|
||||
mConfig.Tags = make(map[string]string)
|
||||
|
||||
assert.Equal(t, memcached, c.Inputs[0].Input,
|
||||
"Testdata did not produce a correct memcached struct.")
|
||||
assert.Equal(t, mConfig, c.Inputs[0].Config,
|
||||
"Testdata did not produce correct memcached metadata.")
|
||||
|
||||
ex := inputs.Inputs["exec"]().(*exec.Exec)
|
||||
ex.Command = "/usr/bin/myothercollector --foo=bar"
|
||||
eConfig := &InputConfig{
|
||||
Name: "exec",
|
||||
MeasurementSuffix: "_myothercollector",
|
||||
}
|
||||
eConfig.Tags = make(map[string]string)
|
||||
assert.Equal(t, ex, c.Inputs[1].Input,
|
||||
"Merged Testdata did not produce a correct exec struct.")
|
||||
assert.Equal(t, eConfig, c.Inputs[1].Config,
|
||||
"Merged Testdata did not produce correct exec metadata.")
|
||||
|
||||
memcached.Servers = []string{"192.168.1.1"}
|
||||
assert.Equal(t, memcached, c.Inputs[2].Input,
|
||||
"Testdata did not produce a correct memcached struct.")
|
||||
assert.Equal(t, mConfig, c.Inputs[2].Config,
|
||||
"Testdata did not produce correct memcached metadata.")
|
||||
|
||||
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
|
||||
pstat.PidFile = "/var/run/grafana-server.pid"
|
||||
|
||||
pConfig := &InputConfig{Name: "procstat"}
|
||||
pConfig.Tags = make(map[string]string)
|
||||
|
||||
assert.Equal(t, pstat, c.Inputs[3].Input,
|
||||
"Merged Testdata did not produce a correct procstat struct.")
|
||||
assert.Equal(t, pConfig, c.Inputs[3].Config,
|
||||
"Merged Testdata did not produce correct procstat metadata.")
|
||||
}
|
||||
|
||||
func TestFilter_Empty(t *testing.T) {
|
||||
f := Filter{}
|
||||
|
||||
measurements := []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"barfoo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"supercalifradjulisticexpialidocious",
|
||||
}
|
||||
|
||||
for _, measurement := range measurements {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_Pass(t *testing.T) {
|
||||
f := Filter{
|
||||
Pass: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_Drop(t *testing.T) {
|
||||
f := Filter{
|
||||
Drop: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagPass(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagPass: filters,
|
||||
}
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagDrop(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagDrop: filters,
|
||||
}
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
internal/config/testdata/single_plugin.toml (new file, vendored, 9 lines)
@@ -0,0 +1,9 @@
|
||||
[[inputs.memcached]]
|
||||
servers = ["localhost"]
|
||||
pass = ["some", "strings"]
|
||||
drop = ["other", "stuff"]
|
||||
interval = "5s"
|
||||
[inputs.memcached.tagpass]
|
||||
goodtag = ["mytag"]
|
||||
[inputs.memcached.tagdrop]
|
||||
badtag = ["othertag"]
|
||||
internal/config/testdata/subconfig/exec.conf (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
|
||||
[[inputs.exec]]
|
||||
# the command to run
|
||||
command = "/usr/bin/myothercollector --foo=bar"
|
||||
name_suffix = "_myothercollector"
|
||||
internal/config/testdata/subconfig/memcached.conf (new file, vendored, 9 lines)
@@ -0,0 +1,9 @@
|
||||
[[inputs.memcached]]
|
||||
servers = ["192.168.1.1"]
|
||||
pass = ["some", "strings"]
|
||||
drop = ["other", "stuff"]
|
||||
interval = "5s"
|
||||
[inputs.memcached.tagpass]
|
||||
goodtag = ["mytag"]
|
||||
[inputs.memcached.tagdrop]
|
||||
badtag = ["othertag"]
|
||||
internal/config/testdata/subconfig/procstat.conf (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
|
||||
[[inputs.procstat]]
|
||||
pid_file = "/var/run/grafana-server.pid"
|
||||
internal/config/testdata/telegraf-agent.toml (new file, vendored, 301 lines)
@@ -0,0 +1,301 @@
|
||||
# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared inputs.
|
||||
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
# as a section with no variables. To deactivate a plugin, comment
|
||||
# out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# One rule that plugins conform to is wherever a connection string
|
||||
# can be passed, the values '' and 'localhost' are treated specially.
|
||||
# They indicate to the plugin to use their own builtin configuration to
|
||||
# connect to the local system.
|
||||
|
||||
# NOTE: The configuration has a few required parameters. They are marked
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
[tags]
|
||||
dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
# Default data collection interval for all plugins
|
||||
interval = "10s"
|
||||
|
||||
# run telegraf in debug mode
|
||||
debug = false
|
||||
|
||||
# Override default hostname, if empty use os.Hostname()
|
||||
hostname = ""
|
||||
|
||||
|
||||
###############################################################################
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[[outputs.influxdb]]
|
||||
# The full HTTP endpoint URL for your InfluxDB instance
|
||||
# Multiple urls can be specified for InfluxDB cluster support. Server to
|
||||
# write to will be randomly chosen each interval.
|
||||
urls = ["http://localhost:8086"] # required.
|
||||
|
||||
# The target database for metrics. This database must already exist
|
||||
database = "telegraf" # required.
|
||||
|
||||
[[outputs.influxdb]]
|
||||
urls = ["udp://localhost:8089"]
|
||||
database = "udp-telegraf"
|
||||
|
||||
# Configuration for the Kafka server to send metrics to
|
||||
[[outputs.kafka]]
|
||||
# URLs of kafka brokers
|
||||
brokers = ["localhost:9092"]
|
||||
# Kafka topic for producer messages
|
||||
topic = "telegraf"
|
||||
# Telegraf tag to use as a routing key
|
||||
# ie, if this tag exists, its value will be used as the routing key
|
||||
routing_tag = "host"
|
||||
|
||||
|
||||
###############################################################################
|
||||
# PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
# Read Apache status information (mod_status)
|
||||
[[inputs.apache]]
|
||||
# An array of Apache status URI to gather stats.
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
|
||||
# Read metrics about cpu usage
|
||||
[[inputs.cpu]]
|
||||
# Whether to report per-cpu stats or not
|
||||
percpu = true
|
||||
# Whether to report total system cpu stats or not
|
||||
totalcpu = true
|
||||
# Comment this line if you want the raw CPU time metrics
|
||||
drop = ["cpu_time"]
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
[[inputs.disk]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics from one or many disque servers
|
||||
[[inputs.disque]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
|
||||
# 10.0.0.1:10000, etc.
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read stats from one or more Elasticsearch servers or clusters
|
||||
[[inputs.elasticsearch]]
|
||||
# specify a list of one or more Elasticsearch servers
|
||||
servers = ["http://localhost:9200"]
|
||||
|
||||
# set local to false when you want to read the indices stats from all nodes
|
||||
# within the cluster
|
||||
local = true
|
||||
|
||||
# Read flattened metrics from one or more commands that output JSON to stdout
|
||||
[[inputs.exec]]
|
||||
# the command to run
|
||||
command = "/usr/bin/mycollector --foo=bar"
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
# Read metrics of haproxy, via socket or csv stats page
|
||||
[[inputs.haproxy]]
|
||||
# An array of addresses to gather stats about. Specify an ip or hostname
|
||||
# with optional port. ie localhost, 10.10.3.33:1936, etc.
|
||||
#
|
||||
# If no servers are specified, then default to 127.0.0.1:1936
|
||||
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
|
||||
# Or you can also use a local socket (not working yet)
|
||||
# servers = ["socket:/run/haproxy/admin.sock"]
|
||||
|
||||
# Read flattened metrics from one or more JSON HTTP endpoints
|
||||
[[inputs.httpjson]]
|
||||
# a name for the service being polled
|
||||
name = "webserver_stats"
|
||||
|
||||
# URL of each server in the service's cluster
|
||||
servers = [
|
||||
"http://localhost:9999/stats/",
|
||||
"http://localhost:9998/stats/",
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
# HTTP parameters (all values must be strings)
|
||||
[httpjson.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
[[inputs.diskio]]
|
||||
# no configuration
|
||||
|
||||
# read metrics from a Kafka topic
|
||||
[[inputs.kafka_consumer]]
|
||||
# topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
# an array of Zookeeper connection strings
|
||||
zookeeper_peers = ["localhost:2181"]
|
||||
# the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
# Maximum number of points to buffer between collection intervals
|
||||
point_buffer = 100000
|
||||
# Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
|
||||
# Read metrics from a LeoFS Server via SNMP
|
||||
[[inputs.leofs]]
|
||||
# An array of URI to gather stats about LeoFS.
|
||||
# Specify an ip or hostname with port. ie 127.0.0.1:4020
|
||||
#
|
||||
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
|
||||
servers = ["127.0.0.1:4021"]
|
||||
|
||||
# Read metrics from local Lustre service on OST, MDS
|
||||
[[inputs.lustre2]]
|
||||
# An array of /proc globs to search for Lustre stats
|
||||
# If not specified, the default will work on Lustre 2.5.x
|
||||
#
|
||||
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
|
||||
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
|
||||
|
||||
# Read metrics about memory usage
|
||||
[[inputs.mem]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics from one or many memcached servers
|
||||
[[inputs.memcached]]
|
||||
# An array of addresses to gather stats about. Specify an ip or hostname
|
||||
# with optional port. ie localhost, 10.0.0.1:11211, etc.
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics from one or many MongoDB servers
|
||||
[[inputs.mongodb]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
|
||||
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||
#
|
||||
# If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
|
||||
servers = ["127.0.0.1:27017"]
|
||||
|
||||
# Read metrics from one or many mysql servers
|
||||
[[inputs.mysql]]
|
||||
# specify servers via a url matching:
|
||||
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
# e.g.
|
||||
# servers = ["root:root@http://10.0.0.18/?tls=false"]
|
||||
# servers = ["root:passwd@tcp(127.0.0.1:3306)/"]
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics about network interface usage
|
||||
[[inputs.net]]
|
||||
# By default, telegraf gathers stats from any up interface (excluding loopback)
|
||||
# Setting interfaces will tell it to gather these explicit interfaces,
|
||||
# regardless of status.
|
||||
#
|
||||
# interfaces = ["eth0", ... ]
|
||||
|
||||
# Read Nginx's basic status information (ngx_http_stub_status_module)
|
||||
[[inputs.nginx]]
|
||||
# An array of Nginx stub_status URI to gather stats.
|
||||
urls = ["http://localhost/status"]
|
||||
|
||||
# Ping given url(s) and return statistics
|
||||
[[inputs.ping]]
|
||||
# urls to ping
|
||||
urls = ["www.google.com"] # required
|
||||
# number of pings to send (ping -c <COUNT>)
|
||||
count = 1 # required
|
||||
# interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
|
||||
ping_interval = 0.0
|
||||
# ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
|
||||
timeout = 0.0
|
||||
# interface to send ping from (ping -I <INTERFACE>)
|
||||
interface = ""
|
||||
|
||||
# Read metrics from one or many postgresql servers
|
||||
[[inputs.postgresql]]
|
||||
# specify address via a url matching:
|
||||
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
|
||||
# or a simple string:
|
||||
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
|
||||
#
|
||||
# All connection parameters are optional. By default, the host is localhost
|
||||
# and the user is the currently running user. For localhost, we default
|
||||
# to sslmode=disable as well.
|
||||
#
|
||||
# Without the dbname parameter, the driver will default to a database
|
||||
# with the same name as the user. This dbname is just for instantiating a
|
||||
# connection with the server and doesn't restrict the databases we are trying
|
||||
# to grab metrics for.
|
||||
#
|
||||
|
||||
address = "sslmode=disable"
|
||||
|
||||
# A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# databases are gathered.
|
||||
|
||||
# databases = ["app_production", "blah_testing"]
|
||||
|
||||
# [[postgresql.servers]]
|
||||
# address = "influx@remoteserver"
|
||||
|
||||
# Read metrics from one or many prometheus clients
|
||||
[[inputs.prometheus]]
|
||||
# An array of urls to scrape metrics from.
|
||||
urls = ["http://localhost:9100/metrics"]
|
||||
|
||||
# Read metrics from one or many RabbitMQ servers via the management API
|
||||
[[inputs.rabbitmq]]
|
||||
# Specify servers via an array of tables
|
||||
# name = "rmq-server-1" # optional tag
|
||||
# url = "http://localhost:15672"
|
||||
# username = "guest"
|
||||
# password = "guest"
|
||||
|
||||
# A list of nodes to pull metrics about. If not specified, metrics for
|
||||
# all nodes are gathered.
|
||||
# nodes = ["rabbit@node1", "rabbit@node2"]
|
||||
|
||||
# Read metrics from one or many redis servers
|
||||
[[inputs.redis]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie redis://localhost, redis://10.10.3.33:18832,
|
||||
# 10.0.0.1:10000, etc.
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics from one or many RethinkDB servers
|
||||
[[inputs.rethinkdb]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
|
||||
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||
#
|
||||
# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
|
||||
servers = ["127.0.0.1:28015"]
|
||||
|
||||
# Read metrics about swap memory usage
|
||||
[[inputs.swap]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about system load & uptime
|
||||
[[inputs.system]]
|
||||
# no configuration
|
||||
internal/internal.go (new file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Duration just wraps time.Duration
|
||||
type Duration struct {
|
||||
Duration time.Duration
|
||||
}
|
||||
|
||||
// UnmarshalTOML parses the duration from the TOML config file
|
||||
func (d *Duration) UnmarshalTOML(b []byte) error {
|
||||
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Duration = dur
|
||||
|
||||
return nil
|
||||
}
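A minimal sketch of what UnmarshalTOML receives: the TOML string value arrives with its surrounding quotes, which is why one byte is trimmed from each end before parsing.

var d Duration
// The raw TOML value `interval = "10s"` reaches this method as the quoted bytes `"10s"`.
if err := d.UnmarshalTOML([]byte(`"10s"`)); err != nil {
	panic(err)
}
// d.Duration == 10 * time.Second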
|
||||
|
||||
var NotImplementedError = errors.New("not implemented yet")
|
||||
|
||||
type JSONFlattener struct {
|
||||
Fields map[string]interface{}
|
||||
}
|
||||
|
||||
// FlattenJSON flattens nested maps/interfaces into a fields map
|
||||
func (f *JSONFlattener) FlattenJSON(
|
||||
fieldname string,
|
||||
v interface{},
|
||||
) error {
|
||||
if f.Fields == nil {
|
||||
f.Fields = make(map[string]interface{})
|
||||
}
|
||||
fieldname = strings.Trim(fieldname, "_")
|
||||
switch t := v.(type) {
|
||||
case map[string]interface{}:
|
||||
for k, v := range t {
|
||||
err := f.FlattenJSON(fieldname+"_"+k+"_", v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case float64:
|
||||
f.Fields[fieldname] = t
|
||||
case bool, string, []interface{}, nil:
|
||||
// ignored types
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
|
||||
t, t, fieldname)
|
||||
}
|
||||
return nil
|
||||
}
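A hedged example of the flattening behavior (payload made up): nested keys are joined with underscores, float64 values are kept, and strings, bools, arrays, and nulls are silently dropped.

f := JSONFlattener{}
err := f.FlattenJSON("", map[string]interface{}{
	"requests": map[string]interface{}{"total": 1204.0, "errors": 3.0},
	"status":   "ok", // ignored: string
})
// err == nil
// f.Fields == map[string]interface{}{"requests_total": 1204.0, "requests_errors": 3.0}
_ = err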
|
||||
|
||||
// ReadLines reads contents from a file and splits them by new lines.
|
||||
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
||||
func ReadLines(filename string) ([]string, error) {
|
||||
return ReadLinesOffsetN(filename, 0, -1)
|
||||
}
|
||||
|
||||
// ReadLinesOffsetN reads contents from a file and splits them by new line.
|
||||
// The offset tells at which line number to start.
|
||||
// The count determines the number of lines to read (starting from offset):
|
||||
// n >= 0: at most n lines
|
||||
// n < 0: whole file
|
||||
func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return []string{""}, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var ret []string
|
||||
|
||||
r := bufio.NewReader(f)
|
||||
for i := 0; i < n+int(offset) || n < 0; i++ {
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if i < int(offset) {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, strings.Trim(line, "\n"))
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
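Usage sketch (the file path is hypothetical): skip the first line and read at most the next three.

lines, err := ReadLinesOffsetN("/proc/diskstats", 1, 3)
if err == nil {
	for _, line := range lines {
		fmt.Println(line)
	}
}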
|
||||
|
||||
// Glob will test a string pattern, potentially containing globs, against a
|
||||
// subject string. The result is a simple true/false, determining whether or
|
||||
// not the glob pattern matched the subject text.
|
||||
//
|
||||
// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
|
||||
// thanks Ryan Uber!
|
||||
func Glob(pattern, measurement string) bool {
|
||||
// Empty pattern can only match empty subject
|
||||
if pattern == "" {
|
||||
return measurement == pattern
|
||||
}
|
||||
|
||||
// If the pattern _is_ a glob, it matches everything
|
||||
if pattern == "*" {
|
||||
return true
|
||||
}
|
||||
|
||||
parts := strings.Split(pattern, "*")
|
||||
|
||||
if len(parts) == 1 {
|
||||
// No globs in pattern, so test for match
|
||||
return pattern == measurement
|
||||
}
|
||||
|
||||
leadingGlob := strings.HasPrefix(pattern, "*")
|
||||
trailingGlob := strings.HasSuffix(pattern, "*")
|
||||
end := len(parts) - 1
|
||||
|
||||
for i, part := range parts {
|
||||
switch i {
|
||||
case 0:
|
||||
if leadingGlob {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(measurement, part) {
|
||||
return false
|
||||
}
|
||||
case end:
|
||||
if len(measurement) > 0 {
|
||||
return trailingGlob || strings.HasSuffix(measurement, part)
|
||||
}
|
||||
default:
|
||||
if !strings.Contains(measurement, part) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Trim evaluated text from measurement as we loop over the pattern.
|
||||
idx := strings.Index(measurement, part) + len(part)
|
||||
measurement = measurement[idx:]
|
||||
}
|
||||
|
||||
// All parts of the pattern matched
|
||||
return true
|
||||
}
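A few quick illustrations of the matching semantics (see also the tests below):

Glob("cpu_*", "cpu_usage_idle")  // true: trailing glob
Glob("*_idle", "cpu_usage_idle") // true: leading glob
Glob("cpu", "cpu_usage_idle")    // false: no glob, so an exact match is required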
|
||||
internal/internal_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
package internal
|
||||
|
||||
import "testing"
|
||||
|
||||
func testGlobMatch(t *testing.T, pattern, subj string) {
|
||||
if !Glob(pattern, subj) {
|
||||
t.Errorf("%s should match %s", pattern, subj)
|
||||
}
|
||||
}
|
||||
|
||||
func testGlobNoMatch(t *testing.T, pattern, subj string) {
|
||||
if Glob(pattern, subj) {
|
||||
t.Errorf("%s should not match %s", pattern, subj)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyPattern(t *testing.T) {
|
||||
testGlobMatch(t, "", "")
|
||||
testGlobNoMatch(t, "", "test")
|
||||
}
|
||||
|
||||
func TestPatternWithoutGlobs(t *testing.T) {
|
||||
testGlobMatch(t, "test", "test")
|
||||
}
|
||||
|
||||
func TestGlob(t *testing.T) {
|
||||
for _, pattern := range []string{
|
||||
"*test", // Leading glob
|
||||
"this*", // Trailing glob
|
||||
"*is*a*", // Lots of globs
|
||||
"**test**", // Double glob characters
|
||||
"**is**a***test*", // Varying number of globs
|
||||
} {
|
||||
testGlobMatch(t, pattern, "this_is_a_test")
|
||||
}
|
||||
|
||||
for _, pattern := range []string{
|
||||
"test*", // Implicit substring match should fail
|
||||
"*is", // Partial match should fail
|
||||
"*no*", // Globs without a match between them should fail
|
||||
} {
|
||||
testGlobNoMatch(t, pattern, "this_is_a_test")
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
package all
|
||||
|
||||
import (
|
||||
_ "github.com/influxdb/telegraf/plugins/mysql"
|
||||
_ "github.com/influxdb/telegraf/plugins/postgresql"
|
||||
_ "github.com/influxdb/telegraf/plugins/redis"
|
||||
_ "github.com/influxdb/telegraf/plugins/system"
|
||||
)
|
||||
plugins/inputs/aerospike/README.md (new file, 265 lines)
@@ -0,0 +1,265 @@
|
||||
## Telegraf Plugin: Aerospike
|
||||
|
||||
#### Plugin arguments:
|
||||
- **servers** string array: List of aerospike servers to query (def: 127.0.0.1:3000)
|
||||
|
||||
#### Description
|
||||
|
||||
The aerospike plugin queries aerospike server(s) and gets node statistics. It also collects stats for
all the configured namespaces.
|
||||
|
||||
For what the measurements mean, please consult the [Aerospike Metrics Reference Docs](http://www.aerospike.com/docs/reference/metrics).
|
||||
|
||||
To make querying less complicated, all `-` characters in the metric names have been replaced with `_`, since Aerospike reports metrics in both forms.
|
||||
|
||||
# Measurements:
|
||||
#### Aerospike Statistics [values]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
|
||||
Measurement names:
|
||||
- batch_index_queue
|
||||
- batch_index_unused_buffers
|
||||
- batch_queue
|
||||
- batch_tree_count
|
||||
- client_connections
|
||||
- data_used_bytes_memory
|
||||
- index_used_bytes_memory
|
||||
- info_queue
|
||||
- migrate_progress_recv
|
||||
- migrate_progress_send
|
||||
- migrate_rx_objs
|
||||
- migrate_tx_objs
|
||||
- objects
|
||||
- ongoing_write_reqs
|
||||
- partition_absent
|
||||
- partition_actual
|
||||
- partition_desync
|
||||
- partition_object_count
|
||||
- partition_ref_count
|
||||
- partition_replica
|
||||
- proxy_in_progress
|
||||
- query_agg_avg_rec_count
|
||||
- query_avg_rec_count
|
||||
- query_lookup_avg_rec_count
|
||||
- queue
|
||||
- record_locks
|
||||
- record_refs
|
||||
- sindex_used_bytes_memory
|
||||
- sindex_gc_garbage_cleaned
|
||||
- system_free_mem_pct
|
||||
- total_bytes_disk
|
||||
- total_bytes_memory
|
||||
- tree_count
|
||||
- scans_active
|
||||
- uptime
|
||||
- used_bytes_disk
|
||||
- used_bytes_memory
|
||||
- cluster_size
|
||||
- waiting_transactions
|
||||
|
||||
#### Aerospike Statistics [cumulative]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
|
||||
Measurement names:
|
||||
- batch_errors
|
||||
- batch_index_complete
|
||||
- batch_index_errors
|
||||
- batch_index_initiate
|
||||
- batch_index_timeout
|
||||
- batch_initiate
|
||||
- batch_timeout
|
||||
- err_duplicate_proxy_request
|
||||
- err_out_of_space
|
||||
- err_replica_non_null_node
|
||||
- err_replica_null_node
|
||||
- err_rw_cant_put_unique
|
||||
- err_rw_pending_limit
|
||||
- err_rw_request_not_found
|
||||
- err_storage_queue_full
|
||||
- err_sync_copy_null_master
|
||||
- err_sync_copy_null_node
|
||||
- err_tsvc_requests
|
||||
- err_write_fail_bin_exists
|
||||
- err_write_fail_generation
|
||||
- err_write_fail_generation_xdr
|
||||
- err_write_fail_incompatible_type
|
||||
- err_write_fail_key_exists
|
||||
- err_write_fail_key_mismatch
|
||||
- err_write_fail_not_found
|
||||
- err_write_fail_noxdr
|
||||
- err_write_fail_parameter
|
||||
- err_write_fail_prole_delete
|
||||
- err_write_fail_prole_generation
|
||||
- err_write_fail_prole_unknown
|
||||
- err_write_fail_unknown
|
||||
- fabric_msgs_rcvd
|
||||
- fabric_msgs_sent
|
||||
- heartbeat_received_foreign
|
||||
- heartbeat_received_self
|
||||
- migrate_msgs_recv
|
||||
- migrate_msgs_sent
|
||||
- migrate_num_incoming_accepted
|
||||
- migrate_num_incoming_refused
|
||||
- proxy_action
|
||||
- proxy_initiate
|
||||
- proxy_retry
|
||||
- proxy_retry_new_dest
|
||||
- proxy_retry_q_full
|
||||
- proxy_retry_same_dest
|
||||
- proxy_unproxy
|
||||
- query_abort
|
||||
- query_agg
|
||||
- query_agg_abort
|
||||
- query_agg_err
|
||||
- query_agg_success
|
||||
- query_bad_records
|
||||
- query_fail
|
||||
- query_long_queue_full
|
||||
- query_long_running
|
||||
- query_lookup_abort
|
||||
- query_lookup_err
|
||||
- query_lookups
|
||||
- query_lookup_success
|
||||
- query_reqs
|
||||
- query_short_queue_full
|
||||
- query_short_running
|
||||
- query_success
|
||||
- query_tracked
|
||||
- read_dup_prole
|
||||
- reaped_fds
|
||||
- rw_err_ack_badnode
|
||||
- rw_err_ack_internal
|
||||
- rw_err_ack_nomatch
|
||||
- rw_err_dup_cluster_key
|
||||
- rw_err_dup_internal
|
||||
- rw_err_dup_send
|
||||
- rw_err_write_cluster_key
|
||||
- rw_err_write_internal
|
||||
- rw_err_write_send
|
||||
- sindex_ucgarbage_found
|
||||
- sindex_gc_locktimedout
|
||||
- sindex_gc_inactivity_dur
|
||||
- sindex_gc_activity_dur
|
||||
- sindex_gc_list_creation_time
|
||||
- sindex_gc_list_deletion_time
|
||||
- sindex_gc_objects_validated
|
||||
- sindex_gc_garbage_found
|
||||
- stat_cluster_key_err_ack_dup_trans_reenqueue
|
||||
- stat_cluster_key_err_ack_rw_trans_reenqueue
|
||||
- stat_cluster_key_prole_retry
|
||||
- stat_cluster_key_regular_processed
|
||||
- stat_cluster_key_trans_to_proxy_retry
|
||||
- stat_deleted_set_object
|
||||
- stat_delete_success
|
||||
- stat_duplicate_operation
|
||||
- stat_evicted_objects
|
||||
- stat_evicted_objects_time
|
||||
- stat_evicted_set_objects
|
||||
- stat_expired_objects
|
||||
- stat_nsup_deletes_not_shipped
|
||||
- stat_proxy_errs
|
||||
- stat_proxy_reqs
|
||||
- stat_proxy_reqs_xdr
|
||||
- stat_proxy_success
|
||||
- stat_read_errs_notfound
|
||||
- stat_read_errs_other
|
||||
- stat_read_reqs
|
||||
- stat_read_reqs_xdr
|
||||
- stat_read_success
|
||||
- stat_rw_timeout
|
||||
- stat_slow_trans_queue_batch_pop
|
||||
- stat_slow_trans_queue_pop
|
||||
- stat_slow_trans_queue_push
|
||||
- stat_write_errs
|
||||
- stat_write_errs_notfound
|
||||
- stat_write_errs_other
|
||||
- stat_write_reqs
|
||||
- stat_write_reqs_xdr
|
||||
- stat_write_success
|
||||
- stat_xdr_pipe_miss
|
||||
- stat_xdr_pipe_writes
|
||||
- stat_zero_bin_records
|
||||
- storage_defrag_corrupt_record
|
||||
- storage_defrag_wait
|
||||
- transactions
|
||||
- basic_scans_succeeded
|
||||
- basic_scans_failed
|
||||
- aggr_scans_succeeded
|
||||
- aggr_scans_failed
|
||||
- udf_bg_scans_succeeded
|
||||
- udf_bg_scans_failed
|
||||
- udf_delete_err_others
|
||||
- udf_delete_reqs
|
||||
- udf_delete_success
|
||||
- udf_lua_errs
|
||||
- udf_query_rec_reqs
|
||||
- udf_read_errs_other
|
||||
- udf_read_reqs
|
||||
- udf_read_success
|
||||
- udf_replica_writes
|
||||
- udf_scan_rec_reqs
|
||||
- udf_write_err_others
|
||||
- udf_write_reqs
|
||||
- udf_write_success
|
||||
- write_master
|
||||
- write_prole
|
||||
|
||||
#### Aerospike Statistics [percentage]:
|
||||
|
||||
Meta:
|
||||
- units: percent (out of 100)
|
||||
|
||||
Measurement names:
|
||||
- free_pct_disk
|
||||
- free_pct_memory
|
||||
|
||||
# Measurements:
|
||||
#### Aerospike Namespace Statistics [values]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
- tags: `namespace=<namespace>`
|
||||
|
||||
Measurement names:
|
||||
- available_bin_names
|
||||
- available_pct
|
||||
- current_time
|
||||
- data_used_bytes_memory
|
||||
- index_used_bytes_memory
|
||||
- master_objects
|
||||
- max_evicted_ttl
|
||||
- max_void_time
|
||||
- non_expirable_objects
|
||||
- objects
|
||||
- prole_objects
|
||||
- sindex_used_bytes_memory
|
||||
- total_bytes_disk
|
||||
- total_bytes_memory
|
||||
- used_bytes_disk
|
||||
- used_bytes_memory
|
||||
|
||||
#### Aerospike Namespace Statistics [cumulative]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
- tags: `namespace=<namespace>`
|
||||
|
||||
Measurement names:
|
||||
- evicted_objects
|
||||
- expired_objects
|
||||
- set_deleted_objects
|
||||
- set_evicted_objects
|
||||
|
||||
#### Aerospike Namespace Statistics [percentage]:
|
||||
|
||||
Meta:
|
||||
- units: percent (out of 100)
|
||||
- tags: `namespace=<namespace>`
|
||||
|
||||
Measurement names:
|
||||
- free_pct_disk
|
||||
- free_pct_memory
|
||||
plugins/inputs/aerospike/aerospike.go (new file, 342 lines)
@@ -0,0 +1,342 @@
|
||||
package aerospike
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
MSG_HEADER_SIZE = 8
|
||||
MSG_TYPE = 1 // Info is 1
|
||||
MSG_VERSION = 2
|
||||
)
|
||||
|
||||
var (
|
||||
STATISTICS_COMMAND = []byte("statistics\n")
|
||||
NAMESPACES_COMMAND = []byte("namespaces\n")
|
||||
)
|
||||
|
||||
type aerospikeMessageHeader struct {
|
||||
Version uint8
|
||||
Type uint8
|
||||
DataLen [6]byte
|
||||
}
|
||||
|
||||
type aerospikeMessage struct {
|
||||
aerospikeMessageHeader
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/types/message.go
|
||||
func (msg *aerospikeMessage) Serialize() []byte {
|
||||
msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
|
||||
binary.Write(buf, binary.BigEndian, msg.Data[:])
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
type aerospikeInfoCommand struct {
|
||||
msg *aerospikeMessage
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/info.go
|
||||
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
|
||||
responses := make(map[string]string)
|
||||
offset := int64(0)
|
||||
begin := int64(0)
|
||||
|
||||
dataLen := int64(len(nfo.msg.Data))
|
||||
|
||||
// Create reusable StringBuilder for performance.
|
||||
for offset < dataLen {
|
||||
b := nfo.msg.Data[offset]
|
||||
|
||||
if b == '\t' {
|
||||
name := nfo.msg.Data[begin:offset]
|
||||
offset++
|
||||
begin = offset
|
||||
|
||||
// Parse field value.
|
||||
for offset < dataLen {
|
||||
if nfo.msg.Data[offset] == '\n' {
|
||||
break
|
||||
}
|
||||
offset++
|
||||
}
|
||||
|
||||
if offset > begin {
|
||||
value := nfo.msg.Data[begin:offset]
|
||||
responses[string(name)] = string(value)
|
||||
} else {
|
||||
responses[string(name)] = ""
|
||||
}
|
||||
offset++
|
||||
begin = offset
|
||||
} else if b == '\n' {
|
||||
if offset > begin {
|
||||
name := nfo.msg.Data[begin:offset]
|
||||
responses[string(name)] = ""
|
||||
}
|
||||
offset++
|
||||
begin = offset
|
||||
} else {
|
||||
offset++
|
||||
}
|
||||
}
|
||||
|
||||
if offset > begin {
|
||||
name := nfo.msg.Data[begin:offset]
|
||||
responses[string(name)] = ""
|
||||
}
|
||||
return responses, nil
|
||||
}
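A rough sketch of the wire format this parser handles (payload made up): each entry is `<name>\t<value>\n`, and a name with no tab maps to an empty string.

nfo := &aerospikeInfoCommand{msg: &aerospikeMessage{
	Data: []byte("statistics\tcluster_size=2;objects=100\nnamespaces\ttest;bar\n"),
}}
res, _ := nfo.parseMultiResponse()
// res["statistics"] == "cluster_size=2;objects=100"
// res["namespaces"] == "test;bar"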
|
||||
|
||||
type Aerospike struct {
|
||||
Servers []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# Aerospike servers to connect to (with port)
|
||||
# Default: servers = ["localhost:3000"]
|
||||
#
|
||||
# This plugin will query all namespaces the aerospike
|
||||
# server has configured and get stats for them.
|
||||
servers = ["localhost:3000"]
|
||||
`
|
||||
|
||||
func (a *Aerospike) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (a *Aerospike) Description() string {
|
||||
return "Read stats from an aerospike server"
|
||||
}
|
||||
|
||||
func (a *Aerospike) Gather(acc inputs.Accumulator) error {
|
||||
if len(a.Servers) == 0 {
|
||||
return a.gatherServer("127.0.0.1:3000", acc)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var outerr error
|
||||
|
||||
for _, server := range a.Servers {
|
||||
wg.Add(1)
|
||||
go func(server string) {
|
||||
defer wg.Done()
|
||||
outerr = a.gatherServer(server, acc)
|
||||
}(server)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (a *Aerospike) gatherServer(host string, acc inputs.Accumulator) error {
|
||||
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Aerospike info failed: %s", err)
|
||||
}
|
||||
readAerospikeStats(aerospikeInfo, acc, host, "")
|
||||
namespaces, err := getList(NAMESPACES_COMMAND, host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Aerospike namespace list failed: %s", err)
|
||||
}
|
||||
for ix := range namespaces {
|
||||
nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
|
||||
}
|
||||
readAerospikeStats(nsInfo, acc, host, namespaces[ix])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getMap(key []byte, host string) (map[string]string, error) {
|
||||
data, err := get(key, host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get data: %s", err)
|
||||
}
|
||||
parsed, err := unmarshalMapInfo(data, string(key))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
|
||||
}
|
||||
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func getList(key []byte, host string) ([]string, error) {
|
||||
data, err := get(key, host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get data: %s", err)
|
||||
}
|
||||
parsed, err := unmarshalListInfo(data, string(key))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
|
||||
}
|
||||
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func get(key []byte, host string) (map[string]string, error) {
|
||||
var err error
|
||||
var data map[string]string
|
||||
|
||||
asInfo := &aerospikeInfoCommand{
|
||||
msg: &aerospikeMessage{
|
||||
aerospikeMessageHeader: aerospikeMessageHeader{
|
||||
Version: uint8(MSG_VERSION),
|
||||
Type: uint8(MSG_TYPE),
|
||||
DataLen: msgLenToBytes(int64(len(key))),
|
||||
},
|
||||
Data: key,
|
||||
},
|
||||
}
|
||||
|
||||
cmd := asInfo.msg.Serialize()
|
||||
addr, err := net.ResolveTCPAddr("tcp", host)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
|
||||
}
|
||||
|
||||
conn, err := net.DialTCP("tcp", nil, addr)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
_, err = conn.Write(cmd)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
|
||||
}
|
||||
|
||||
msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
|
||||
_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to read header: %s", err)
|
||||
}
|
||||
err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to unmarshal header: %s", err)
|
||||
}
|
||||
|
||||
msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)
|
||||
|
||||
if int64(len(asInfo.msg.Data)) != msgLen {
|
||||
asInfo.msg.Data = make([]byte, msgLen)
|
||||
}
|
||||
|
||||
_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
|
||||
}
|
||||
|
||||
data, err = asInfo.parseMultiResponse()
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
|
||||
}
|
||||
|
||||
return data, err
|
||||
}
|
||||
|
||||
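// readAerospikeStats converts integer-valued stats into fields, tagged with the
// aerospike host and the namespace ("_service" when no namespace is given).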
func readAerospikeStats(
|
||||
stats map[string]string,
|
||||
acc inputs.Accumulator,
|
||||
host string,
|
||||
namespace string,
|
||||
) {
|
||||
fields := make(map[string]interface{})
|
||||
tags := map[string]string{
|
||||
"aerospike_host": host,
|
||||
"namespace": "_service",
|
||||
}
|
||||
|
||||
if namespace != "" {
|
||||
tags["namespace"] = namespace
|
||||
}
|
||||
for key, value := range stats {
|
||||
// Only integer-valued stats are collected; string-valued stats are ignored
|
||||
val, err := strconv.ParseInt(value, 10, 64)
|
||||
if err == nil {
|
||||
if strings.Contains(key, "-") {
|
||||
key = strings.Replace(key, "-", "_", -1)
|
||||
}
|
||||
fields[key] = val
|
||||
}
|
||||
}
|
||||
acc.AddFields("aerospike", fields, tags)
|
||||
}
|
||||
|
||||
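// unmarshalMapInfo parses the semicolon-separated "key=value" pairs stored
// under the given info key into a map.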
func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
|
||||
key = strings.TrimSuffix(key, "\n")
|
||||
res := map[string]string{}
|
||||
|
||||
v, exists := infoMap[key]
|
||||
if !exists {
|
||||
return res, fmt.Errorf("Key '%s' missing from info", key)
|
||||
}
|
||||
|
||||
values := strings.Split(v, ";")
|
||||
for i := range values {
|
||||
kv := strings.Split(values[i], "=")
|
||||
if len(kv) > 1 {
|
||||
res[kv[0]] = kv[1]
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
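// unmarshalListInfo splits the semicolon-separated list stored under the
// given info key.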
func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
|
||||
key = strings.TrimSuffix(key, "\n")
|
||||
|
||||
v, exists := infoMap[key]
|
||||
if !exists {
|
||||
return []string{}, fmt.Errorf("Key '%s' missing from info", key)
|
||||
}
|
||||
|
||||
values := strings.Split(v, ";")
|
||||
return values, nil
|
||||
}
|
||||
|
||||
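// readLenFromConn reads exactly length bytes from the connection into buffer,
// looping until the requested amount has been read or an error occurs.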
func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
|
||||
var r int
|
||||
for total < length {
|
||||
r, err = c.Read(buffer[total:length])
|
||||
total += r
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/types/message.go
|
||||
func msgLenToBytes(DataLen int64) [6]byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, uint64(DataLen))
|
||||
res := [6]byte{}
|
||||
copy(res[:], b[2:])
|
||||
return res
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/types/message.go
|
||||
func msgLenFromBytes(buf [6]byte) int64 {
|
||||
nbytes := append([]byte{0, 0}, buf[:]...)
|
||||
DataLen := binary.BigEndian.Uint64(nbytes)
|
||||
return int64(DataLen)
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("aerospike", func() inputs.Input {
|
||||
return &Aerospike{}
|
||||
})
|
||||
}
|
||||
118
plugins/inputs/aerospike/aerospike_test.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package aerospike
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAerospikeStatistics(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
a := &Aerospike{
|
||||
Servers: []string{testutil.GetLocalHost() + ":3000"},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Only use a few of the metrics
|
||||
asMetrics := []string{
|
||||
"transactions",
|
||||
"stat_write_errs",
|
||||
"stat_read_reqs",
|
||||
"stat_write_reqs",
|
||||
}
|
||||
|
||||
for _, metric := range asMetrics {
|
||||
assert.True(t, acc.HasIntField("aerospike", metric), metric)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAerospikeMsgLenFromToBytes(t *testing.T) {
|
||||
var i int64 = 8
|
||||
assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
|
||||
}
|
||||
|
||||
func TestReadAerospikeStatsNoNamespace(t *testing.T) {
|
||||
// Also tests that '-' in stat keys is rewritten to '_'
|
||||
var acc testutil.Accumulator
|
||||
stats := map[string]string{
|
||||
"stat-write-errs": "12345",
|
||||
"stat_read_reqs": "12345",
|
||||
}
|
||||
readAerospikeStats(stats, &acc, "host1", "")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"stat_write_errs": int64(12345),
|
||||
"stat_read_reqs": int64(12345),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"aerospike_host": "host1",
|
||||
"namespace": "_service",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
|
||||
}
|
||||
|
||||
func TestReadAerospikeStatsNamespace(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
stats := map[string]string{
|
||||
"stat_write_errs": "12345",
|
||||
"stat_read_reqs": "12345",
|
||||
}
|
||||
readAerospikeStats(stats, &acc, "host1", "test")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"stat_write_errs": int64(12345),
|
||||
"stat_read_reqs": int64(12345),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"aerospike_host": "host1",
|
||||
"namespace": "test",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
|
||||
}
|
||||
|
||||
func TestAerospikeUnmarshalList(t *testing.T) {
|
||||
i := map[string]string{
|
||||
"test": "one;two;three",
|
||||
}
|
||||
|
||||
expected := []string{"one", "two", "three"}
|
||||
|
||||
list, err := unmarshalListInfo(i, "test2")
|
||||
assert.True(t, err != nil)
|
||||
|
||||
list, err = unmarshalListInfo(i, "test")
|
||||
assert.True(t, err == nil)
|
||||
equal := true
|
||||
for ix := range expected {
|
||||
if list[ix] != expected[ix] {
|
||||
equal = false
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, equal)
|
||||
}
|
||||
|
||||
func TestAerospikeUnmarshalMap(t *testing.T) {
|
||||
i := map[string]string{
|
||||
"test": "key1=value1;key2=value2",
|
||||
}
|
||||
|
||||
expected := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
}
|
||||
m, err := unmarshalMapInfo(i, "test")
|
||||
assert.True(t, err == nil)
|
||||
assert.True(t, reflect.DeepEqual(m, expected))
|
||||
}
|
||||
37
plugins/inputs/all/all.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package all
|
||||
|
||||
import (
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/aerospike"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/apache"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/bcache"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/disque"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/elasticsearch"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/exec"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/haproxy"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/httpjson"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/influxdb"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/jolokia"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/kafka_consumer"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/leofs"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/lustre2"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/mailchimp"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/memcached"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/mongodb"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/mysql"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/nginx"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/phpfpm"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/ping"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/postgresql"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/procstat"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/prometheus"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/puppetagent"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/rabbitmq"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/redis"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/rethinkdb"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/statsd"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/system"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/trig"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/twemproxy"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/zfs"
|
||||
_ "github.com/influxdb/telegraf/plugins/inputs/zookeeper"
|
||||
)
|
||||
45
plugins/inputs/apache/README.md
Normal file
@@ -0,0 +1,45 @@
|
||||
# Telegraf plugin: Apache
|
||||
|
||||
#### Plugin arguments:
|
||||
- **urls** []string: List of apache-status URLs to collect from.
|
||||
|
||||
#### Description
|
||||
|
||||
The Apache plugin collects metrics from the /server-status?auto URL. See
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
example of the output, and the
[mod_status documentation](http://httpd.apache.org/docs/2.2/mod/mod_status.html)
for details. A minimal configuration is shown below.
|
||||
|
||||
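A minimal configuration sketch (the section name follows the convention used by the other plugin READMEs in this repo; the URL mirrors the plugin's sample config):

```
[apache]
  # An array of Apache status URIs to gather stats from.
  urls = ["http://localhost/server-status?auto"]
```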
# Measurements:
|
||||
|
||||
Meta:
|
||||
- tags: `port=<port>`, `server=url`
|
||||
|
||||
- apache_TotalAccesses
|
||||
- apache_TotalkBytes
|
||||
- apache_CPULoad
|
||||
- apache_Uptime
|
||||
- apache_ReqPerSec
|
||||
- apache_BytesPerSec
|
||||
- apache_BytesPerReq
|
||||
- apache_BusyWorkers
|
||||
- apache_IdleWorkers
|
||||
- apache_ConnsTotal
|
||||
- apache_ConnsAsyncWriting
|
||||
- apache_ConnsAsyncKeepAlive
|
||||
- apache_ConnsAsyncClosing
|
||||
|
||||
### Scoreboard measurements
|
||||
|
||||
- apache_scboard_waiting
|
||||
- apache_scboard_starting
|
||||
- apache_scboard_reading
|
||||
- apache_scboard_sending
|
||||
- apache_scboard_keepalive
|
||||
- apache_scboard_dnslookup
|
||||
- apache_scboard_closing
|
||||
- apache_scboard_logging
|
||||
- apache_scboard_finishing
|
||||
- apache_scboard_idle_cleanup
|
||||
- apache_scboard_open
|
||||
170
plugins/inputs/apache/apache.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package apache
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Apache struct {
|
||||
Urls []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# An array of Apache status URIs to gather stats from.
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
`
|
||||
|
||||
func (n *Apache) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (n *Apache) Description() string {
|
||||
return "Read Apache status information (mod_status)"
|
||||
}
|
||||
|
||||
func (n *Apache) Gather(acc inputs.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
var outerr error
|
||||
|
||||
for _, u := range n.Urls {
|
||||
addr, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(addr *url.URL) {
|
||||
defer wg.Done()
|
||||
outerr = n.gatherUrl(addr, acc)
|
||||
}(addr)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return outerr
|
||||
}
|
||||
|
||||
var tr = &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
}
|
||||
|
||||
var client = &http.Client{Transport: tr}
|
||||
|
||||
func (n *Apache) gatherUrl(addr *url.URL, acc inputs.Accumulator) error {
|
||||
resp, err := client.Get(addr.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
|
||||
}
|
||||
|
||||
tags := getTags(addr)
|
||||
|
||||
sc := bufio.NewScanner(resp.Body)
|
||||
fields := make(map[string]interface{})
|
||||
for sc.Scan() {
|
||||
line := sc.Text()
|
||||
if strings.Contains(line, ":") {
|
||||
parts := strings.SplitN(line, ":", 2)
|
||||
key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])
|
||||
|
||||
switch key {
|
||||
case "Scoreboard":
|
||||
for field, value := range n.gatherScores(part) {
|
||||
fields[field] = value
|
||||
}
|
||||
default:
|
||||
value, err := strconv.ParseFloat(part, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fields[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
acc.AddFields("apache", fields, tags)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
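// gatherScores counts each worker state in the mod_status scoreboard string
// and returns the counts as scboard_* fields.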
func (n *Apache) gatherScores(data string) map[string]interface{} {
|
||||
var waiting, open int = 0, 0
|
||||
var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0
|
||||
|
||||
for _, s := range strings.Split(data, "") {
|
||||
|
||||
switch s {
|
||||
case "_":
|
||||
waiting++
|
||||
case "S":
|
||||
S++
|
||||
case "R":
|
||||
R++
|
||||
case "W":
|
||||
W++
|
||||
case "K":
|
||||
K++
|
||||
case "D":
|
||||
D++
|
||||
case "C":
|
||||
C++
|
||||
case "L":
|
||||
L++
|
||||
case "G":
|
||||
G++
|
||||
case "I":
|
||||
I++
|
||||
case ".":
|
||||
open++
|
||||
}
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"scboard_waiting": float64(waiting),
|
||||
"scboard_starting": float64(S),
|
||||
"scboard_reading": float64(R),
|
||||
"scboard_sending": float64(W),
|
||||
"scboard_keepalive": float64(K),
|
||||
"scboard_dnslookup": float64(D),
|
||||
"scboard_closing": float64(C),
|
||||
"scboard_logging": float64(L),
|
||||
"scboard_finishing": float64(G),
|
||||
"scboard_idle_cleanup": float64(I),
|
||||
"scboard_open": float64(open),
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// Get tag(s) for the apache plugin
|
||||
func getTags(addr *url.URL) map[string]string {
|
||||
h := addr.Host
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
host = addr.Host
|
||||
if addr.Scheme == "http" {
|
||||
port = "80"
|
||||
} else if addr.Scheme == "https" {
|
||||
port = "443"
|
||||
} else {
|
||||
port = ""
|
||||
}
|
||||
}
|
||||
return map[string]string{"server": host, "port": port}
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("apache", func() inputs.Input {
|
||||
return &Apache{}
|
||||
})
|
||||
}
|
||||
73
plugins/inputs/apache/apache_test.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package apache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var apacheStatus = `
|
||||
Total Accesses: 129811861
|
||||
Total kBytes: 5213701865
|
||||
CPULoad: 6.51929
|
||||
Uptime: 941553
|
||||
ReqPerSec: 137.87
|
||||
BytesPerSec: 5670240
|
||||
BytesPerReq: 41127.4
|
||||
BusyWorkers: 270
|
||||
IdleWorkers: 630
|
||||
ConnsTotal: 1451
|
||||
ConnsAsyncWriting: 32
|
||||
ConnsAsyncKeepAlive: 945
|
||||
ConnsAsyncClosing: 205
|
||||
Scoreboard: WW_____W_RW_R_W__RRR____WR_W___WW________W_WW_W_____R__R_WR__WRWR_RRRW___R_RWW__WWWRW__R_RW___RR_RW_R__W__WR_WWW______WWR__R___R_WR_W___RW______RR________________W______R__RR______W________________R____R__________________________RW_W____R_____W_R_________________R____RR__W___R_R____RW______R____W______W_W_R_R______R__R_R__________R____W_______WW____W____RR__W_____W_R_______W__________W___W____________W_______WRR_R_W____W_____R____W_WW_R____RRW__W............................................................................................................................................................................................................................................................................................................WRRWR____WR__RR_R___RWR_________W_R____RWRRR____R_R__RW_R___WWW_RW__WR_RRR____W___R____WW_R__R___RR_W_W_RRRRWR__RRWR__RRW_W_RRRW_R_RR_W__RR_RWRR_R__R___RR_RR______R__RR____R_____W_R_R_R__R__R__________W____WW_R___R_R___R_________RR__RR____RWWWW___W_R________R_R____R_W___W___R___W_WRRWW_______R__W_RW_______R________RR__R________W_______________________W_W______________RW_________WR__R___R__R_______________WR_R_________W___RW_____R____________W____.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
...............................................................................................................................................................................................................
|
||||
`
|
||||
|
||||
func TestHTTPApache(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, apacheStatus)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := Apache{
|
||||
Urls: []string{ts.URL},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"TotalAccesses": float64(1.29811861e+08),
|
||||
"TotalkBytes": float64(5.213701865e+09),
|
||||
"CPULoad": float64(6.51929),
|
||||
"Uptime": float64(941553),
|
||||
"ReqPerSec": float64(137.87),
|
||||
"BytesPerSec": float64(5.67024e+06),
|
||||
"BytesPerReq": float64(41127.4),
|
||||
"BusyWorkers": float64(270),
|
||||
"IdleWorkers": float64(630),
|
||||
"ConnsTotal": float64(1451),
|
||||
"ConnsAsyncWriting": float64(32),
|
||||
"ConnsAsyncKeepAlive": float64(945),
|
||||
"ConnsAsyncClosing": float64(205),
|
||||
"scboard_waiting": float64(630),
|
||||
"scboard_starting": float64(0),
|
||||
"scboard_reading": float64(157),
|
||||
"scboard_sending": float64(113),
|
||||
"scboard_keepalive": float64(0),
|
||||
"scboard_dnslookup": float64(0),
|
||||
"scboard_closing": float64(0),
|
||||
"scboard_logging": float64(0),
|
||||
"scboard_finishing": float64(0),
|
||||
"scboard_idle_cleanup": float64(0),
|
||||
"scboard_open": float64(2850),
|
||||
}
|
||||
acc.AssertContainsFields(t, "apache", fields)
|
||||
}
|
||||
89
plugins/inputs/bcache/README.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# Telegraf plugin: bcache
|
||||
|
||||
Get bcache stats from the stats_total directory and the dirty_data file.
|
||||
|
||||
# Measurements
|
||||
|
||||
Meta:
|
||||
|
||||
- tags: `backing_dev=dev bcache_dev=dev`
|
||||
|
||||
Measurement names:
|
||||
|
||||
- dirty_data
|
||||
- bypassed
|
||||
- cache_bypass_hits
|
||||
- cache_bypass_misses
|
||||
- cache_hit_ratio
|
||||
- cache_hits
|
||||
- cache_miss_collisions
|
||||
- cache_misses
|
||||
- cache_readaheads
|
||||
|
||||
### Description
|
||||
|
||||
```
|
||||
dirty_data
|
||||
Amount of dirty data for this backing device in the cache. Continuously
|
||||
updated unlike the cache set's version, but may be slightly off.
|
||||
|
||||
bypassed
|
||||
Amount of IO (both reads and writes) that has bypassed the cache
|
||||
|
||||
|
||||
cache_bypass_hits
|
||||
cache_bypass_misses
|
||||
Hits and misses for IO that is intended to skip the cache are still counted,
|
||||
but broken out here.
|
||||
|
||||
cache_hits
|
||||
cache_misses
|
||||
cache_hit_ratio
|
||||
Hits and misses are counted per individual IO as bcache sees them; a
|
||||
partial hit is counted as a miss.
|
||||
|
||||
cache_miss_collisions
|
||||
Counts instances where data was going to be inserted into the cache from a
|
||||
cache miss, but raced with a write and data was already present (usually 0
|
||||
since the synchronization for cache misses was rewritten)
|
||||
|
||||
cache_readaheads
|
||||
Count of times readahead occurred.
|
||||
```
|
||||
|
||||
# Example output
|
||||
|
||||
Using this configuration:
|
||||
|
||||
```
|
||||
[bcache]
|
||||
# Bcache sets path
|
||||
# If not specified, then default is:
|
||||
# bcachePath = "/sys/fs/bcache"
|
||||
#
|
||||
# By default, telegraf gathers stats for all bcache devices
|
||||
# Setting devices will restrict the stats to the specified
|
||||
# bcache devices.
|
||||
# bcacheDevs = ["bcache0", ...]
|
||||
```
|
||||
|
||||
When run with:
|
||||
|
||||
```
|
||||
./telegraf -config telegraf.conf -input-filter bcache -test
|
||||
```
|
||||
|
||||
It produces:
|
||||
|
||||
```
|
||||
* Plugin: bcache, Collection 1
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_hits value=146270986
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_misses value=0
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hit_ratio value=90
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hits value=511941651
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_miss_collisions value=157678
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_misses value=50647396
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_readaheads value=0
|
||||
```
|
||||
141
plugins/inputs/bcache/bcache.go
Normal file
@@ -0,0 +1,141 @@
|
||||
package bcache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Bcache struct {
|
||||
BcachePath string
|
||||
BcacheDevs []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# Bcache sets path
|
||||
# If not specified, then default is:
|
||||
# bcachePath = "/sys/fs/bcache"
|
||||
#
|
||||
# By default, telegraf gathers stats for all bcache devices
|
||||
# Setting devices will restrict the stats to the specified
|
||||
# bcache devices.
|
||||
# bcacheDevs = ["bcache0", ...]
|
||||
`
|
||||
|
||||
func (b *Bcache) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (b *Bcache) Description() string {
|
||||
return "Read metrics of bcache from stats_total and dirty_data"
|
||||
}
|
||||
|
||||
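// getTags resolves the backing device and bcache device names by following
// the bdev and bdev/dev sysfs symlinks.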
func getTags(bdev string) map[string]string {
|
||||
backingDevFile, _ := os.Readlink(bdev)
|
||||
backingDevPath := strings.Split(backingDevFile, "/")
|
||||
backingDev := backingDevPath[len(backingDevPath)-2]
|
||||
|
||||
bcacheDevFile, _ := os.Readlink(bdev + "/dev")
|
||||
bcacheDevPath := strings.Split(bcacheDevFile, "/")
|
||||
bcacheDev := bcacheDevPath[len(bcacheDevPath)-1]
|
||||
|
||||
return map[string]string{"backing_dev": backingDev, "bcache_dev": bcacheDev}
|
||||
}
|
||||
|
||||
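// prettyToBytes converts a human-readable size such as "1.5G" into a byte count.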
func prettyToBytes(v string) uint64 {
|
||||
var factors = map[string]uint64{
|
||||
"k": 1 << 10,
|
||||
"M": 1 << 20,
|
||||
"G": 1 << 30,
|
||||
"T": 1 << 40,
|
||||
"P": 1 << 50,
|
||||
"E": 1 << 60,
|
||||
}
|
||||
var factor uint64
|
||||
factor = 1
|
||||
prefix := v[len(v)-1 : len(v)]
|
||||
if factors[prefix] != 0 {
|
||||
v = v[:len(v)-1]
|
||||
factor = factors[prefix]
|
||||
}
|
||||
result, _ := strconv.ParseFloat(v, 32)
|
||||
result = result * float64(factor)
|
||||
|
||||
return uint64(result)
|
||||
}
|
||||
|
||||
func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error {
|
||||
tags := getTags(bdev)
|
||||
metrics, err := filepath.Glob(bdev + "/stats_total/*")
|
||||
if err != nil || len(metrics) == 0 {
|
||||
return errors.New("Can't read any stats file")
|
||||
}
|
||||
file, err := ioutil.ReadFile(bdev + "/dirty_data")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rawValue := strings.TrimSpace(string(file))
|
||||
value := prettyToBytes(rawValue)
|
||||
|
||||
fields := make(map[string]interface{})
|
||||
fields["dirty_data"] = value
|
||||
|
||||
for _, path := range metrics {
|
||||
key := filepath.Base(path)
|
||||
file, err := ioutil.ReadFile(path)
|
||||
rawValue := strings.TrimSpace(string(file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if key == "bypassed" {
|
||||
value := prettyToBytes(rawValue)
|
||||
fields[key] = value
|
||||
} else {
|
||||
value, _ := strconv.ParseUint(rawValue, 10, 64)
|
||||
fields[key] = value
|
||||
}
|
||||
}
|
||||
acc.AddFields("bcache", fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bcache) Gather(acc inputs.Accumulator) error {
|
||||
bcacheDevsChecked := make(map[string]bool)
|
||||
var restrictDevs bool
|
||||
if len(b.BcacheDevs) != 0 {
|
||||
restrictDevs = true
|
||||
for _, bcacheDev := range b.BcacheDevs {
|
||||
bcacheDevsChecked[bcacheDev] = true
|
||||
}
|
||||
}
|
||||
|
||||
bcachePath := b.BcachePath
|
||||
if len(bcachePath) == 0 {
|
||||
bcachePath = "/sys/fs/bcache"
|
||||
}
|
||||
bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
|
||||
if len(bdevs) < 1 {
|
||||
return errors.New("Can't find any bcache device")
|
||||
}
|
||||
for _, bdev := range bdevs {
|
||||
if restrictDevs {
|
||||
bcacheDev := getTags(bdev)["bcache_dev"]
|
||||
if !bcacheDevsChecked[bcacheDev] {
|
||||
continue
|
||||
}
|
||||
}
|
||||
b.gatherBcache(bdev, acc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("bcache", func() inputs.Input {
|
||||
return &Bcache{}
|
||||
})
|
||||
}
|
||||
121
plugins/inputs/bcache/bcache_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package bcache
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
dirty_data = "1.5G"
|
||||
bypassed = "4.7T"
|
||||
cache_bypass_hits = "146155333"
|
||||
cache_bypass_misses = "0"
|
||||
cache_hit_ratio = "90"
|
||||
cache_hits = "511469583"
|
||||
cache_miss_collisions = "157567"
|
||||
cache_misses = "50616331"
|
||||
cache_readaheads = "2"
|
||||
)
|
||||
|
||||
var (
|
||||
testBcachePath = os.TempDir() + "/telegraf/sys/fs/bcache"
|
||||
testBcacheUuidPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411"
|
||||
testBcacheDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0"
|
||||
testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
|
||||
)
|
||||
|
||||
func TestBcacheGeneratesMetrics(t *testing.T) {
|
||||
err := os.MkdirAll(testBcacheUuidPath, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.MkdirAll(testBcacheDevPath, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
|
||||
[]byte(dirty_data), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
|
||||
[]byte(bypassed), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
|
||||
[]byte(cache_bypass_hits), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
|
||||
[]byte(cache_bypass_misses), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
|
||||
[]byte(cache_hit_ratio), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
|
||||
[]byte(cache_hits), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
|
||||
[]byte(cache_miss_collisions), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
|
||||
[]byte(cache_misses), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
|
||||
[]byte(cache_readaheads), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"dirty_data": uint64(1610612736),
|
||||
"bypassed": uint64(5167704440832),
|
||||
"cache_bypass_hits": uint64(146155333),
|
||||
"cache_bypass_misses": uint64(0),
|
||||
"cache_hit_ratio": uint64(90),
|
||||
"cache_hits": uint64(511469583),
|
||||
"cache_miss_collisions": uint64(157567),
|
||||
"cache_misses": uint64(50616331),
|
||||
"cache_readaheads": uint64(2),
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"backing_dev": "md10",
|
||||
"bcache_dev": "bcache0",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
// all devs
|
||||
b := &Bcache{BcachePath: testBcachePath}
|
||||
|
||||
err = b.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
acc.AssertContainsTaggedFields(t, "bcache", fields, tags)
|
||||
|
||||
// one exist dev
|
||||
b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}}
|
||||
|
||||
err = b.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
acc.AssertContainsTaggedFields(t, "bcache", fields, tags)
|
||||
|
||||
err = os.RemoveAll(os.TempDir() + "/telegraf")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
204
plugins/inputs/disque/disque.go
Normal file
@@ -0,0 +1,204 @@
|
||||
package disque
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Disque struct {
|
||||
Servers []string
|
||||
|
||||
c net.Conn
|
||||
buf []byte
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# An array of URIs to gather stats about. Specify an IP or hostname
|
||||
# with optional port and password. e.g. disque://localhost, disque://10.10.3.33:18832,
|
||||
# 10.0.0.1:10000, etc.
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
`
|
||||
|
||||
func (r *Disque) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (r *Disque) Description() string {
|
||||
return "Read metrics from one or many disque servers"
|
||||
}
|
||||
|
||||
var Tracking = map[string]string{
|
||||
"uptime_in_seconds": "uptime",
|
||||
"connected_clients": "clients",
|
||||
"blocked_clients": "blocked_clients",
|
||||
"used_memory": "used_memory",
|
||||
"used_memory_rss": "used_memory_rss",
|
||||
"used_memory_peak": "used_memory_peak",
|
||||
"total_connections_received": "total_connections_received",
|
||||
"total_commands_processed": "total_commands_processed",
|
||||
"instantaneous_ops_per_sec": "instantaneous_ops_per_sec",
|
||||
"latest_fork_usec": "latest_fork_usec",
|
||||
"mem_fragmentation_ratio": "mem_fragmentation_ratio",
|
||||
"used_cpu_sys": "used_cpu_sys",
|
||||
"used_cpu_user": "used_cpu_user",
|
||||
"used_cpu_sys_children": "used_cpu_sys_children",
|
||||
"used_cpu_user_children": "used_cpu_user_children",
|
||||
"registered_jobs": "registered_jobs",
|
||||
"registered_queues": "registered_queues",
|
||||
}
|
||||
|
||||
var ErrProtocolError = errors.New("disque protocol error")
|
||||
|
||||
// Reads stats from all configured servers and accumulates stats.
|
||||
// Returns one of the errors encountered while gathering stats (if any).
|
||||
func (g *Disque) Gather(acc inputs.Accumulator) error {
|
||||
if len(g.Servers) == 0 {
|
||||
url := &url.URL{
|
||||
Host: ":7711",
|
||||
}
|
||||
g.gatherServer(url, acc)
|
||||
return nil
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var outerr error
|
||||
|
||||
for _, serv := range g.Servers {
|
||||
u, err := url.Parse(serv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
|
||||
} else if u.Scheme == "" {
|
||||
// fallback to simple string based address (i.e. "10.0.0.1:10000")
|
||||
u.Scheme = "tcp"
|
||||
u.Host = serv
|
||||
u.Path = ""
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(serv string) {
|
||||
defer wg.Done()
|
||||
outerr = g.gatherServer(u, acc)
|
||||
}(serv)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return outerr
|
||||
}
|
||||
|
||||
const defaultPort = "7711"
|
||||
|
||||
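// gatherServer dials the disque server on first use (authenticating if a
// password is set), issues the info command, and accumulates the tracked fields.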
func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error {
|
||||
if g.c == nil {
|
||||
|
||||
_, _, err := net.SplitHostPort(addr.Host)
|
||||
if err != nil {
|
||||
addr.Host = addr.Host + ":" + defaultPort
|
||||
}
|
||||
|
||||
c, err := net.Dial("tcp", addr.Host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err)
|
||||
}
|
||||
|
||||
if addr.User != nil {
|
||||
pwd, set := addr.User.Password()
|
||||
if set && pwd != "" {
|
||||
c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd)))
|
||||
|
||||
r := bufio.NewReader(c)
|
||||
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if line[0] != '+' {
|
||||
return fmt.Errorf("%s", strings.TrimSpace(line)[1:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
g.c = c
|
||||
}
|
||||
|
||||
g.c.Write([]byte("info\r\n"))
|
||||
|
||||
r := bufio.NewReader(g.c)
|
||||
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if line[0] != '$' {
|
||||
return fmt.Errorf("bad line start: %s", ErrProtocolError)
|
||||
}
|
||||
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
szStr := line[1:]
|
||||
|
||||
sz, err := strconv.Atoi(szStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad size string <<%s>>: %s", szStr, ErrProtocolError)
|
||||
}
|
||||
|
||||
var read int
|
||||
|
||||
fields := make(map[string]interface{})
|
||||
tags := map[string]string{"host": addr.String()}
|
||||
for read < sz {
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
read += len(line)
|
||||
|
||||
if len(line) == 1 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(line, ":", 2)
|
||||
|
||||
name := string(parts[0])
|
||||
|
||||
metric, ok := Tracking[name]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
val := strings.TrimSpace(parts[1])
|
||||
|
||||
ival, err := strconv.ParseUint(val, 10, 64)
|
||||
if err == nil {
|
||||
fields[metric] = ival
|
||||
continue
|
||||
}
|
||||
|
||||
fval, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields[metric] = fval
|
||||
}
|
||||
acc.AddFields("disque", fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("disque", func() inputs.Input {
|
||||
return &Disque{}
|
||||
})
|
||||
}
|
||||
217
plugins/inputs/disque/disque_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
package disque
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDisqueGeneratesMetrics(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer l.Close()
|
||||
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf := bufio.NewReader(c)
|
||||
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if line != "info\r\n" {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(c, "$%d\n", len(testOutput))
|
||||
c.Write([]byte(testOutput))
|
||||
}
|
||||
}()
|
||||
|
||||
addr := fmt.Sprintf("disque://%s", l.Addr().String())
|
||||
|
||||
r := &Disque{
|
||||
Servers: []string{addr},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"uptime": uint64(1452705),
|
||||
"clients": uint64(31),
|
||||
"blocked_clients": uint64(13),
|
||||
"used_memory": uint64(1840104),
|
||||
"used_memory_rss": uint64(3227648),
|
||||
"used_memory_peak": uint64(89603656),
|
||||
"total_connections_received": uint64(5062777),
|
||||
"total_commands_processed": uint64(12308396),
|
||||
"instantaneous_ops_per_sec": uint64(18),
|
||||
"latest_fork_usec": uint64(1644),
|
||||
"registered_jobs": uint64(360),
|
||||
"registered_queues": uint64(12),
|
||||
"mem_fragmentation_ratio": float64(1.75),
|
||||
"used_cpu_sys": float64(19585.73),
|
||||
"used_cpu_user": float64(11255.96),
|
||||
"used_cpu_sys_children": float64(1.75),
|
||||
"used_cpu_user_children": float64(1.91),
|
||||
}
|
||||
acc.AssertContainsFields(t, "disque", fields)
|
||||
}
|
||||
|
||||
func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer l.Close()
|
||||
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf := bufio.NewReader(c)
|
||||
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if line != "info\r\n" {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(c, "$%d\n", len(testOutput))
|
||||
c.Write([]byte(testOutput))
|
||||
}
|
||||
}()
|
||||
|
||||
addr := fmt.Sprintf("disque://%s", l.Addr().String())
|
||||
|
||||
r := &Disque{
|
||||
Servers: []string{addr},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"uptime": uint64(1452705),
|
||||
"clients": uint64(31),
|
||||
"blocked_clients": uint64(13),
|
||||
"used_memory": uint64(1840104),
|
||||
"used_memory_rss": uint64(3227648),
|
||||
"used_memory_peak": uint64(89603656),
|
||||
"total_connections_received": uint64(5062777),
|
||||
"total_commands_processed": uint64(12308396),
|
||||
"instantaneous_ops_per_sec": uint64(18),
|
||||
"latest_fork_usec": uint64(1644),
|
||||
"registered_jobs": uint64(360),
|
||||
"registered_queues": uint64(12),
|
||||
"mem_fragmentation_ratio": float64(1.75),
|
||||
"used_cpu_sys": float64(19585.73),
|
||||
"used_cpu_user": float64(11255.96),
|
||||
"used_cpu_sys_children": float64(1.75),
|
||||
"used_cpu_user_children": float64(1.91),
|
||||
}
|
||||
acc.AssertContainsFields(t, "disque", fields)
|
||||
}
|
||||
|
||||
const testOutput = `# Server
|
||||
disque_version:0.0.1
|
||||
disque_git_sha1:b5247598
|
||||
disque_git_dirty:0
|
||||
disque_build_id:379fda78983a60c6
|
||||
os:Linux 3.13.0-44-generic x86_64
|
||||
arch_bits:64
|
||||
multiplexing_api:epoll
|
||||
gcc_version:4.8.2
|
||||
process_id:32420
|
||||
run_id:1cfdfa4c6bc3f285182db5427522a8a4c16e42e4
|
||||
tcp_port:7711
|
||||
uptime_in_seconds:1452705
|
||||
uptime_in_days:16
|
||||
hz:10
|
||||
config_file:/usr/local/etc/disque/disque.conf
|
||||
|
||||
# Clients
|
||||
connected_clients:31
|
||||
client_longest_output_list:0
|
||||
client_biggest_input_buf:0
|
||||
blocked_clients:13
|
||||
|
||||
# Memory
|
||||
used_memory:1840104
|
||||
used_memory_human:1.75M
|
||||
used_memory_rss:3227648
|
||||
used_memory_peak:89603656
|
||||
used_memory_peak_human:85.45M
|
||||
mem_fragmentation_ratio:1.75
|
||||
mem_allocator:jemalloc-3.6.0
|
||||
|
||||
# Jobs
|
||||
registered_jobs:360
|
||||
|
||||
# Queues
|
||||
registered_queues:12
|
||||
|
||||
# Persistence
|
||||
loading:0
|
||||
aof_enabled:1
|
||||
aof_state:on
|
||||
aof_rewrite_in_progress:0
|
||||
aof_rewrite_scheduled:0
|
||||
aof_last_rewrite_time_sec:0
|
||||
aof_current_rewrite_time_sec:-1
|
||||
aof_last_bgrewrite_status:ok
|
||||
aof_last_write_status:ok
|
||||
aof_current_size:41952430
|
||||
aof_base_size:9808
|
||||
aof_pending_rewrite:0
|
||||
aof_buffer_length:0
|
||||
aof_rewrite_buffer_length:0
|
||||
aof_pending_bio_fsync:0
|
||||
aof_delayed_fsync:1
|
||||
|
||||
# Stats
|
||||
total_connections_received:5062777
|
||||
total_commands_processed:12308396
|
||||
instantaneous_ops_per_sec:18
|
||||
total_net_input_bytes:1346996528
|
||||
total_net_output_bytes:1967551763
|
||||
instantaneous_input_kbps:1.38
|
||||
instantaneous_output_kbps:1.78
|
||||
rejected_connections:0
|
||||
latest_fork_usec:1644
|
||||
|
||||
# CPU
|
||||
used_cpu_sys:19585.73
|
||||
used_cpu_user:11255.96
|
||||
used_cpu_sys_children:1.75
|
||||
used_cpu_user_children:1.91
|
||||
`
|
||||
320
plugins/inputs/elasticsearch/README.md
Normal file
@@ -0,0 +1,320 @@
|
||||
# Elasticsearch plugin
|
||||
|
||||
#### Plugin arguments:
|
||||
- **servers** []string: list of one or more Elasticsearch servers
|
||||
- **local** boolean: If false, it will read the indices stats from all nodes
|
||||
- **cluster_health** boolean: If true, it will also obtain cluster level stats
|
||||
|
||||
#### Description
|
||||
|
||||
The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
|
||||
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
|
||||
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
[elasticsearch]
|
||||
|
||||
servers = ["http://localhost:9200"]
|
||||
|
||||
local = true
|
||||
|
||||
cluster_health = true
|
||||
```
|
||||
|
||||
# Measurements
|
||||
#### cluster measurements (uses fields instead of single values):
|
||||
|
||||
contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`,
|
||||
`active_primary_shards`, `active_shards`, `relocating_shards`,
|
||||
`initializing_shards`, `unassigned_shards` fields
|
||||
- elasticsearch_cluster_health
|
||||
|
||||
contains `status`, `number_of_shards`, `number_of_replicas`,
|
||||
`active_primary_shards`, `active_shards`, `relocating_shards`,
|
||||
`initializing_shards`, `unassigned_shards` fields
|
||||
- elasticsearch_indices
|
||||
|
||||
#### node measurements:
|
||||
|
||||
field data circuit breaker measurement names:
|
||||
- elasticsearch_breakers_fielddata_estimated_size_in_bytes value=0
|
||||
- elasticsearch_breakers_fielddata_overhead value=1.03
|
||||
- elasticsearch_breakers_fielddata_tripped value=0
|
||||
- elasticsearch_breakers_fielddata_limit_size_in_bytes value=623326003
|
||||
- elasticsearch_breakers_request_estimated_size_in_bytes value=0
|
||||
- elasticsearch_breakers_request_overhead value=1.0
|
||||
- elasticsearch_breakers_request_tripped value=0
|
||||
- elasticsearch_breakers_request_limit_size_in_bytes value=415550668
|
||||
- elasticsearch_breakers_parent_overhead value=1.0
|
||||
- elasticsearch_breakers_parent_tripped value=0
|
||||
- elasticsearch_breakers_parent_limit_size_in_bytes value=727213670
|
||||
- elasticsearch_breakers_parent_estimated_size_in_bytes value=0
|
||||
|
||||
File system information, data path, free disk space, read/write measurement names:
|
||||
- elasticsearch_fs_timestamp value=1436460392946
|
||||
- elasticsearch_fs_total_free_in_bytes value=16909316096
|
||||
- elasticsearch_fs_total_available_in_bytes value=15894814720
|
||||
- elasticsearch_fs_total_total_in_bytes value=19507089408
|
||||
|
||||
indices size, document count, indexing and deletion times, search times,
|
||||
field cache size, merges and flushes measurement names:
|
||||
- elasticsearch_indices_id_cache_memory_size_in_bytes value=0
|
||||
- elasticsearch_indices_completion_size_in_bytes value=0
|
||||
- elasticsearch_indices_suggest_total value=0
|
||||
- elasticsearch_indices_suggest_time_in_millis value=0
|
||||
- elasticsearch_indices_suggest_current value=0
|
||||
- elasticsearch_indices_query_cache_memory_size_in_bytes value=0
|
||||
- elasticsearch_indices_query_cache_evictions value=0
|
||||
- elasticsearch_indices_query_cache_hit_count value=0
|
||||
- elasticsearch_indices_query_cache_miss_count value=0
|
||||
- elasticsearch_indices_store_size_in_bytes value=37715234
|
||||
- elasticsearch_indices_store_throttle_time_in_millis value=215
|
||||
- elasticsearch_indices_merges_current_docs value=0
|
||||
- elasticsearch_indices_merges_current_size_in_bytes value=0
|
||||
- elasticsearch_indices_merges_total value=133
|
||||
- elasticsearch_indices_merges_total_time_in_millis value=21060
|
||||
- elasticsearch_indices_merges_total_docs value=203672
|
||||
- elasticsearch_indices_merges_total_size_in_bytes value=142900226
|
||||
- elasticsearch_indices_merges_current value=0
|
||||
- elasticsearch_indices_filter_cache_memory_size_in_bytes value=7384
|
||||
- elasticsearch_indices_filter_cache_evictions value=0
|
||||
- elasticsearch_indices_indexing_index_total value=84790
|
||||
- elasticsearch_indices_indexing_index_time_in_millis value=29680
|
||||
- elasticsearch_indices_indexing_index_current value=0
|
||||
- elasticsearch_indices_indexing_noop_update_total value=0
|
||||
- elasticsearch_indices_indexing_throttle_time_in_millis value=0
|
||||
- elasticsearch_indices_indexing_delete_tota value=13879
|
||||
- elasticsearch_indices_indexing_delete_time_in_millis value=1139
|
||||
- elasticsearch_indices_indexing_delete_current value=0
|
||||
- elasticsearch_indices_get_exists_time_in_millis value=0
|
||||
- elasticsearch_indices_get_missing_total value=1
|
||||
- elasticsearch_indices_get_missing_time_in_millis value=2
|
||||
- elasticsearch_indices_get_current value=0
|
||||
- elasticsearch_indices_get_total value=1
|
||||
- elasticsearch_indices_get_time_in_millis value=2
|
||||
- elasticsearch_indices_get_exists_total value=0
|
||||
- elasticsearch_indices_refresh_total value=1076
|
||||
- elasticsearch_indices_refresh_total_time_in_millis value=20078
|
||||
- elasticsearch_indices_percolate_current value=0
|
||||
- elasticsearch_indices_percolate_memory_size_in_bytes value=-1
|
||||
- elasticsearch_indices_percolate_queries value=0
|
||||
- elasticsearch_indices_percolate_total value=0
|
||||
- elasticsearch_indices_percolate_time_in_millis value=0
|
||||
- elasticsearch_indices_translog_operations value=17702
|
||||
- elasticsearch_indices_translog_size_in_bytes value=17
|
||||
- elasticsearch_indices_recovery_current_as_source value=0
|
||||
- elasticsearch_indices_recovery_current_as_target value=0
|
||||
- elasticsearch_indices_recovery_throttle_time_in_millis value=0
|
||||
- elasticsearch_indices_docs_count value=29652
|
||||
- elasticsearch_indices_docs_deleted value=5229
|
||||
- elasticsearch_indices_flush_total_time_in_millis value=2401
|
||||
- elasticsearch_indices_flush_total value=115
|
||||
- elasticsearch_indices_fielddata_memory_size_in_bytes value=12996
|
||||
- elasticsearch_indices_fielddata_evictions value=0
|
||||
- elasticsearch_indices_search_fetch_current value=0
|
||||
- elasticsearch_indices_search_open_contexts value=0
|
||||
- elasticsearch_indices_search_query_total value=1452
|
||||
- elasticsearch_indices_search_query_time_in_millis value=5695
|
||||
- elasticsearch_indices_search_query_current value=0
|
||||
- elasticsearch_indices_search_fetch_total value=414
|
||||
- elasticsearch_indices_search_fetch_time_in_millis value=146
|
||||
- elasticsearch_indices_warmer_current value=0
|
||||
- elasticsearch_indices_warmer_total value=2319
|
||||
- elasticsearch_indices_warmer_total_time_in_millis value=448
|
||||
- elasticsearch_indices_segments_count value=134
|
||||
- elasticsearch_indices_segments_memory_in_bytes value=1285212
|
||||
- elasticsearch_indices_segments_index_writer_memory_in_bytes value=0
|
||||
- elasticsearch_indices_segments_index_writer_max_memory_in_bytes value=172368955
|
||||
- elasticsearch_indices_segments_version_map_memory_in_bytes value=611844
|
||||
- elasticsearch_indices_segments_fixed_bit_set_memory_in_bytes value=0
|
||||
|
||||
HTTP connection measurement names:
|
||||
- elasticsearch_http_current_open value=3
|
||||
- elasticsearch_http_total_opened value=3
|
||||
|
||||
JVM stats, memory pool information, garbage collection, buffer pools measurement names:
|
||||
- elasticsearch_jvm_timestamp value=1436460392945
|
||||
- elasticsearch_jvm_uptime_in_millis value=202245
|
||||
- elasticsearch_jvm_mem_non_heap_used_in_bytes value=39634576
|
||||
- elasticsearch_jvm_mem_non_heap_committed_in_bytes value=40841216
|
||||
- elasticsearch_jvm_mem_pools_young_max_in_bytes value=279183360
|
||||
- elasticsearch_jvm_mem_pools_young_peak_used_in_bytes value=71630848
|
||||
- elasticsearch_jvm_mem_pools_young_peak_max_in_bytes value=279183360
|
||||
- elasticsearch_jvm_mem_pools_young_used_in_bytes value=32685760
|
||||
- elasticsearch_jvm_mem_pools_survivor_peak_used_in_bytes value=8912888
|
||||
- elasticsearch_jvm_mem_pools_survivor_peak_max_in_bytes value=34865152
|
||||
- elasticsearch_jvm_mem_pools_survivor_used_in_bytes value=8912880
|
||||
- elasticsearch_jvm_mem_pools_survivor_max_in_bytes value=34865152
|
||||
- elasticsearch_jvm_mem_pools_old_peak_max_in_bytes value=724828160
|
||||
- elasticsearch_jvm_mem_pools_old_used_in_bytes value=11110928
|
||||
- elasticsearch_jvm_mem_pools_old_max_in_bytes value=724828160
|
||||
- elasticsearch_jvm_mem_pools_old_peak_used_in_bytes value=14354608
|
||||
- elasticsearch_jvm_mem_heap_used_in_bytes value=52709568
|
||||
- elasticsearch_jvm_mem_heap_used_percent value=5
|
||||
- elasticsearch_jvm_mem_heap_committed_in_bytes value=259522560
|
||||
- elasticsearch_jvm_mem_heap_max_in_bytes value=1038876672
|
||||
- elasticsearch_jvm_threads_peak_count value=45
|
||||
- elasticsearch_jvm_threads_count value=44
|
||||
- elasticsearch_jvm_gc_collectors_young_collection_count value=2
|
||||
- elasticsearch_jvm_gc_collectors_young_collection_time_in_millis value=98
|
||||
- elasticsearch_jvm_gc_collectors_old_collection_count value=1
|
||||
- elasticsearch_jvm_gc_collectors_old_collection_time_in_millis value=24
|
||||
- elasticsearch_jvm_buffer_pools_direct_count value=40
|
||||
- elasticsearch_jvm_buffer_pools_direct_used_in_bytes value=6304239
|
||||
- elasticsearch_jvm_buffer_pools_direct_total_capacity_in_bytes value=6304239
|
||||
- elasticsearch_jvm_buffer_pools_mapped_count value=0
|
||||
- elasticsearch_jvm_buffer_pools_mapped_used_in_bytes value=0
|
||||
- elasticsearch_jvm_buffer_pools_mapped_total_capacity_in_bytes value=0
|
||||
|
||||
TCP information measurement names:
|
||||
- elasticsearch_network_tcp_in_errs value=0
|
||||
- elasticsearch_network_tcp_passive_opens value=16
|
||||
- elasticsearch_network_tcp_curr_estab value=29
|
||||
- elasticsearch_network_tcp_in_segs value=113
|
||||
- elasticsearch_network_tcp_out_segs value=97
|
||||
- elasticsearch_network_tcp_retrans_segs value=0
|
||||
- elasticsearch_network_tcp_attempt_fails value=0
|
||||
- elasticsearch_network_tcp_active_opens value=13
|
||||
- elasticsearch_network_tcp_estab_resets value=0
|
||||
- elasticsearch_network_tcp_out_rsts value=0
|
||||
|
||||
Operating system stats, load average, cpu, mem, swap measurement names:
|
||||
- elasticsearch_os_swap_used_in_bytes value=0
|
||||
- elasticsearch_os_swap_free_in_bytes value=487997440
|
||||
- elasticsearch_os_timestamp value=1436460392944
|
||||
- elasticsearch_os_uptime_in_millis value=25092
|
||||
- elasticsearch_os_cpu_sys value=0
|
||||
- elasticsearch_os_cpu_user value=0
|
||||
- elasticsearch_os_cpu_idle value=99
|
||||
- elasticsearch_os_cpu_usage value=0
|
||||
- elasticsearch_os_cpu_stolen value=0
|
||||
- elasticsearch_os_mem_free_percent value=74
|
||||
- elasticsearch_os_mem_used_percent value=25
|
||||
- elasticsearch_os_mem_actual_free_in_bytes value=1565470720
|
||||
- elasticsearch_os_mem_actual_used_in_bytes value=534159360
|
||||
- elasticsearch_os_mem_free_in_bytes value=477761536
|
||||
- elasticsearch_os_mem_used_in_bytes value=1621868544
|
||||
|
||||
Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
|
||||
- elasticsearch_process_mem_resident_in_bytes value=246382592
|
||||
- elasticsearch_process_mem_share_in_bytes value=18747392
|
||||
- elasticsearch_process_mem_total_virtual_in_bytes value=4747890688
|
||||
- elasticsearch_process_timestamp value=1436460392945
|
||||
- elasticsearch_process_open_file_descriptors value=160
|
||||
- elasticsearch_process_cpu_total_in_millis value=15480
|
||||
- elasticsearch_process_cpu_percent value=2
|
||||
- elasticsearch_process_cpu_sys_in_millis value=1870
|
||||
- elasticsearch_process_cpu_user_in_millis value=13610
|
||||
|
||||
Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
|
||||
- elasticsearch_thread_pool_merge_threads value=6
|
||||
- elasticsearch_thread_pool_merge_queue value=4
|
||||
- elasticsearch_thread_pool_merge_active value=5
|
||||
- elasticsearch_thread_pool_merge_rejected value=2
|
||||
- elasticsearch_thread_pool_merge_largest value=5
|
||||
- elasticsearch_thread_pool_merge_completed value=1
|
||||
- elasticsearch_thread_pool_bulk_threads value=4
|
||||
- elasticsearch_thread_pool_bulk_queue value=5
|
||||
- elasticsearch_thread_pool_bulk_active value=7
|
||||
- elasticsearch_thread_pool_bulk_rejected value=3
|
||||
- elasticsearch_thread_pool_bulk_largest value=1
|
||||
- elasticsearch_thread_pool_bulk_completed value=4
|
||||
- elasticsearch_thread_pool_warmer_threads value=2
|
||||
- elasticsearch_thread_pool_warmer_queue value=7
|
||||
- elasticsearch_thread_pool_warmer_active value=3
|
||||
- elasticsearch_thread_pool_warmer_rejected value=2
|
||||
- elasticsearch_thread_pool_warmer_largest value=3
|
||||
- elasticsearch_thread_pool_warmer_completed value=1
|
||||
- elasticsearch_thread_pool_get_largest value=2
|
||||
- elasticsearch_thread_pool_get_completed value=1
|
||||
- elasticsearch_thread_pool_get_threads value=1
|
||||
- elasticsearch_thread_pool_get_queue value=8
|
||||
- elasticsearch_thread_pool_get_active value=4
|
||||
- elasticsearch_thread_pool_get_rejected value=3
|
||||
- elasticsearch_thread_pool_index_threads value=6
|
||||
- elasticsearch_thread_pool_index_queue value=8
|
||||
- elasticsearch_thread_pool_index_active value=4
|
||||
- elasticsearch_thread_pool_index_rejected value=2
|
||||
- elasticsearch_thread_pool_index_largest value=3
|
||||
- elasticsearch_thread_pool_index_completed value=6
|
||||
- elasticsearch_thread_pool_suggest_threads value=2
|
||||
- elasticsearch_thread_pool_suggest_queue value=7
|
||||
- elasticsearch_thread_pool_suggest_active value=2
|
||||
- elasticsearch_thread_pool_suggest_rejected value=1
|
||||
- elasticsearch_thread_pool_suggest_largest value=8
|
||||
- elasticsearch_thread_pool_suggest_completed value=3
|
||||
- elasticsearch_thread_pool_fetch_shard_store_queue value=7
|
||||
- elasticsearch_thread_pool_fetch_shard_store_active value=4
|
||||
- elasticsearch_thread_pool_fetch_shard_store_rejected value=2
|
||||
- elasticsearch_thread_pool_fetch_shard_store_largest value=4
|
||||
- elasticsearch_thread_pool_fetch_shard_store_completed value=1
|
||||
- elasticsearch_thread_pool_fetch_shard_store_threads value=1
|
||||
- elasticsearch_thread_pool_management_threads value=2
|
||||
- elasticsearch_thread_pool_management_queue value=3
|
||||
- elasticsearch_thread_pool_management_active value=1
|
||||
- elasticsearch_thread_pool_management_rejected value=6
|
||||
- elasticsearch_thread_pool_management_largest value=2
|
||||
- elasticsearch_thread_pool_management_completed value=22
|
||||
- elasticsearch_thread_pool_percolate_queue value=23
|
||||
- elasticsearch_thread_pool_percolate_active value=13
|
||||
- elasticsearch_thread_pool_percolate_rejected value=235
|
||||
- elasticsearch_thread_pool_percolate_largest value=23
|
||||
- elasticsearch_thread_pool_percolate_completed value=33
|
||||
- elasticsearch_thread_pool_percolate_threads value=123
|
||||
- elasticsearch_thread_pool_listener_active value=4
|
||||
- elasticsearch_thread_pool_listener_rejected value=8
|
||||
- elasticsearch_thread_pool_listener_largest value=1
|
||||
- elasticsearch_thread_pool_listener_completed value=1
|
||||
- elasticsearch_thread_pool_listener_threads value=1
|
||||
- elasticsearch_thread_pool_listener_queue value=2
|
||||
- elasticsearch_thread_pool_search_rejected value=7
|
||||
- elasticsearch_thread_pool_search_largest value=2
|
||||
- elasticsearch_thread_pool_search_completed value=4
|
||||
- elasticsearch_thread_pool_search_threads value=5
|
||||
- elasticsearch_thread_pool_search_queue value=7
|
||||
- elasticsearch_thread_pool_search_active value=2
|
||||
- elasticsearch_thread_pool_fetch_shard_started_threads value=3
|
||||
- elasticsearch_thread_pool_fetch_shard_started_queue value=1
|
||||
- elasticsearch_thread_pool_fetch_shard_started_active value=5
|
||||
- elasticsearch_thread_pool_fetch_shard_started_rejected value=6
|
||||
- elasticsearch_thread_pool_fetch_shard_started_largest value=4
|
||||
- elasticsearch_thread_pool_fetch_shard_started_completed value=54
|
||||
- elasticsearch_thread_pool_refresh_rejected value=4
|
||||
- elasticsearch_thread_pool_refresh_largest value=8
|
||||
- elasticsearch_thread_pool_refresh_completed value=3
|
||||
- elasticsearch_thread_pool_refresh_threads value=23
|
||||
- elasticsearch_thread_pool_refresh_queue value=7
|
||||
- elasticsearch_thread_pool_refresh_active value=3
|
||||
- elasticsearch_thread_pool_optimize_threads value=3
|
||||
- elasticsearch_thread_pool_optimize_queue value=4
|
||||
- elasticsearch_thread_pool_optimize_active value=1
|
||||
- elasticsearch_thread_pool_optimize_rejected value=2
|
||||
- elasticsearch_thread_pool_optimize_largest value=7
|
||||
- elasticsearch_thread_pool_optimize_completed value=3
|
||||
- elasticsearch_thread_pool_snapshot_largest value=1
|
||||
- elasticsearch_thread_pool_snapshot_completed value=0
|
||||
- elasticsearch_thread_pool_snapshot_threads value=8
|
||||
- elasticsearch_thread_pool_snapshot_queue value=5
|
||||
- elasticsearch_thread_pool_snapshot_active value=6
|
||||
- elasticsearch_thread_pool_snapshot_rejected value=2
|
||||
- elasticsearch_thread_pool_generic_threads value=1
|
||||
- elasticsearch_thread_pool_generic_queue value=4
|
||||
- elasticsearch_thread_pool_generic_active value=6
|
||||
- elasticsearch_thread_pool_generic_rejected value=3
|
||||
- elasticsearch_thread_pool_generic_largest value=2
|
||||
- elasticsearch_thread_pool_generic_completed value=27
|
||||
- elasticsearch_thread_pool_flush_threads value=3
|
||||
- elasticsearch_thread_pool_flush_queue value=8
|
||||
- elasticsearch_thread_pool_flush_active value=0
|
||||
- elasticsearch_thread_pool_flush_rejected value=1
|
||||
- elasticsearch_thread_pool_flush_largest value=5
|
||||
- elasticsearch_thread_pool_flush_completed value=3
|
||||
|
||||
Transport statistics (bytes sent and received in cluster communication) measurement names:
|
||||
- elasticsearch_transport_server_open value=13
|
||||
- elasticsearch_transport_rx_count value=6
|
||||
- elasticsearch_transport_rx_size_in_bytes value=1380
|
||||
- elasticsearch_transport_tx_count value=6
|
||||
- elasticsearch_transport_tx_size_in_bytes value=1380
|
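Each of the measurements above is reported per node and tagged with the node and cluster metadata the plugin attaches (`node_id`, `node_host`, `node_name`, `cluster_name`, plus any `node_attribute_*` tags). As a rough sketch of what one written point looks like, with tag values borrowed from the test fixture further down:

```
elasticsearch_transport_tx_count,node_id=SDFsfSDFsdfFSDSDfSFDSDF,node_host=test,node_name=test.host.com,cluster_name=es-testcluster value=6
```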
||||
226
plugins/inputs/elasticsearch/elasticsearch.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
const statsPath = "/_nodes/stats"
|
||||
const statsPathLocal = "/_nodes/_local/stats"
|
||||
const healthPath = "/_cluster/health"
|
||||
|
||||
type node struct {
|
||||
Host string `json:"host"`
|
||||
Name string `json:"name"`
|
||||
Attributes map[string]string `json:"attributes"`
|
||||
Indices interface{} `json:"indices"`
|
||||
OS interface{} `json:"os"`
|
||||
Process interface{} `json:"process"`
|
||||
JVM interface{} `json:"jvm"`
|
||||
ThreadPool interface{} `json:"thread_pool"`
|
||||
FS interface{} `json:"fs"`
|
||||
Transport interface{} `json:"transport"`
|
||||
HTTP interface{} `json:"http"`
|
||||
Breakers interface{} `json:"breakers"`
|
||||
}
|
||||
|
||||
type clusterHealth struct {
|
||||
ClusterName string `json:"cluster_name"`
|
||||
Status string `json:"status"`
|
||||
TimedOut bool `json:"timed_out"`
|
||||
NumberOfNodes int `json:"number_of_nodes"`
|
||||
NumberOfDataNodes int `json:"number_of_data_nodes"`
|
||||
ActivePrimaryShards int `json:"active_primary_shards"`
|
||||
ActiveShards int `json:"active_shards"`
|
||||
RelocatingShards int `json:"relocating_shards"`
|
||||
InitializingShards int `json:"initializing_shards"`
|
||||
UnassignedShards int `json:"unassigned_shards"`
|
||||
Indices map[string]indexHealth `json:"indices"`
|
||||
}
|
||||
|
||||
type indexHealth struct {
|
||||
Status string `json:"status"`
|
||||
NumberOfShards int `json:"number_of_shards"`
|
||||
NumberOfReplicas int `json:"number_of_replicas"`
|
||||
ActivePrimaryShards int `json:"active_primary_shards"`
|
||||
ActiveShards int `json:"active_shards"`
|
||||
RelocatingShards int `json:"relocating_shards"`
|
||||
InitializingShards int `json:"initializing_shards"`
|
||||
UnassignedShards int `json:"unassigned_shards"`
|
||||
}
|
||||
|
||||
const sampleConfig = `
|
||||
# specify a list of one or more Elasticsearch servers
|
||||
servers = ["http://localhost:9200"]
|
||||
|
||||
# set local to false when you want to read the indices stats from all nodes
|
||||
# within the cluster
|
||||
local = true
|
||||
|
||||
# set cluster_health to true when you want to also obtain cluster level stats
|
||||
cluster_health = false
|
||||
`
|
||||
|
||||
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
|
||||
// servers.
|
||||
type Elasticsearch struct {
|
||||
Local bool
|
||||
Servers []string
|
||||
ClusterHealth bool
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// NewElasticsearch returns a new instance of Elasticsearch
|
||||
func NewElasticsearch() *Elasticsearch {
|
||||
return &Elasticsearch{client: http.DefaultClient}
|
||||
}
|
||||
|
||||
// SampleConfig returns sample configuration for this plugin.
|
||||
func (e *Elasticsearch) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Description returns the plugin description.
|
||||
func (e *Elasticsearch) Description() string {
|
||||
return "Read stats from one or more Elasticsearch servers or clusters"
|
||||
}
|
||||
|
||||
// Gather reads the stats from Elasticsearch and writes it to the
|
||||
// Accumulator.
|
||||
func (e *Elasticsearch) Gather(acc inputs.Accumulator) error {
|
||||
for _, serv := range e.Servers {
|
||||
var url string
|
||||
if e.Local {
|
||||
url = serv + statsPathLocal
|
||||
} else {
|
||||
url = serv + statsPath
|
||||
}
|
||||
if err := e.gatherNodeStats(url, acc); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.ClusterHealth {
|
||||
e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", serv), acc)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error {
|
||||
nodeStats := &struct {
|
||||
ClusterName string `json:"cluster_name"`
|
||||
Nodes map[string]*node `json:"nodes"`
|
||||
}{}
|
||||
if err := e.gatherData(url, nodeStats); err != nil {
|
||||
return err
|
||||
}
|
||||
for id, n := range nodeStats.Nodes {
|
||||
tags := map[string]string{
|
||||
"node_id": id,
|
||||
"node_host": n.Host,
|
||||
"node_name": n.Name,
|
||||
"cluster_name": nodeStats.ClusterName,
|
||||
}
|
||||
|
||||
for k, v := range n.Attributes {
|
||||
tags["node_attribute_"+k] = v
|
||||
}
|
||||
|
||||
stats := map[string]interface{}{
|
||||
"indices": n.Indices,
|
||||
"os": n.OS,
|
||||
"process": n.Process,
|
||||
"jvm": n.JVM,
|
||||
"thread_pool": n.ThreadPool,
|
||||
"fs": n.FS,
|
||||
"transport": n.Transport,
|
||||
"http": n.HTTP,
|
||||
"breakers": n.Breakers,
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
for p, s := range stats {
|
||||
f := internal.JSONFlattener{}
|
||||
err := f.FlattenJSON("", s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Elasticsearch) gatherClusterStats(url string, acc inputs.Accumulator) error {
|
||||
clusterStats := &clusterHealth{}
|
||||
if err := e.gatherData(url, clusterStats); err != nil {
|
||||
return err
|
||||
}
|
||||
measurementTime := time.Now()
|
||||
clusterFields := map[string]interface{}{
|
||||
"status": clusterStats.Status,
|
||||
"timed_out": clusterStats.TimedOut,
|
||||
"number_of_nodes": clusterStats.NumberOfNodes,
|
||||
"number_of_data_nodes": clusterStats.NumberOfDataNodes,
|
||||
"active_primary_shards": clusterStats.ActivePrimaryShards,
|
||||
"active_shards": clusterStats.ActiveShards,
|
||||
"relocating_shards": clusterStats.RelocatingShards,
|
||||
"initializing_shards": clusterStats.InitializingShards,
|
||||
"unassigned_shards": clusterStats.UnassignedShards,
|
||||
}
|
||||
acc.AddFields(
|
||||
"elasticsearch_cluster_health",
|
||||
clusterFields,
|
||||
map[string]string{"name": clusterStats.ClusterName},
|
||||
measurementTime,
|
||||
)
|
||||
|
||||
for name, health := range clusterStats.Indices {
|
||||
indexFields := map[string]interface{}{
|
||||
"status": health.Status,
|
||||
"number_of_shards": health.NumberOfShards,
|
||||
"number_of_replicas": health.NumberOfReplicas,
|
||||
"active_primary_shards": health.ActivePrimaryShards,
|
||||
"active_shards": health.ActiveShards,
|
||||
"relocating_shards": health.RelocatingShards,
|
||||
"initializing_shards": health.InitializingShards,
|
||||
"unassigned_shards": health.UnassignedShards,
|
||||
}
|
||||
acc.AddFields(
|
||||
"elasticsearch_indices",
|
||||
indexFields,
|
||||
map[string]string{"index": name},
|
||||
measurementTime,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Elasticsearch) gatherData(url string, v interface{}) error {
|
||||
r, err := e.client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Body.Close()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
|
||||
// to let the underlying transport close the connection and re-establish a new one for
|
||||
// future calls.
|
||||
return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
|
||||
r.StatusCode, http.StatusOK)
|
||||
}
|
||||
if err = json.NewDecoder(r.Body).Decode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("elasticsearch", func() inputs.Input {
|
||||
return NewElasticsearch()
|
||||
})
|
||||
}
|
||||
86
plugins/inputs/elasticsearch/elasticsearch_test.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type transportMock struct {
|
||||
statusCode int
|
||||
body string
|
||||
}
|
||||
|
||||
func newTransportMock(statusCode int, body string) http.RoundTripper {
|
||||
return &transportMock{
|
||||
statusCode: statusCode,
|
||||
body: body,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{
|
||||
Header: make(http.Header),
|
||||
Request: r,
|
||||
StatusCode: t.statusCode,
|
||||
}
|
||||
res.Header.Set("Content-Type", "application/json")
|
||||
res.Body = ioutil.NopCloser(strings.NewReader(t.body))
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func TestElasticsearch(t *testing.T) {
|
||||
es := NewElasticsearch()
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.client.Transport = newTransportMock(http.StatusOK, statsResponse)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
if err := es.Gather(&acc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"cluster_name": "es-testcluster",
|
||||
"node_attribute_master": "true",
|
||||
"node_id": "SDFsfSDFsdfFSDSDfSFDSDF",
|
||||
"node_name": "test.host.com",
|
||||
"node_host": "test",
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
|
||||
}
|
||||
|
||||
func TestGatherClusterStats(t *testing.T) {
|
||||
es := NewElasticsearch()
|
||||
es.Servers = []string{"http://example.com:9200"}
|
||||
es.ClusterHealth = true
|
||||
es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, es.Gather(&acc))
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
|
||||
clusterHealthExpected,
|
||||
map[string]string{"name": "elasticsearch_telegraf"})
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
|
||||
v1IndexExpected,
|
||||
map[string]string{"index": "v1"})
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
|
||||
v2IndexExpected,
|
||||
map[string]string{"index": "v2"})
|
||||
}
|
||||
759
plugins/inputs/elasticsearch/testdata_test.go
Normal file
@@ -0,0 +1,759 @@
|
||||
package elasticsearch
|
||||
|
||||
const clusterResponse = `
|
||||
{
|
||||
"cluster_name": "elasticsearch_telegraf",
|
||||
"status": "green",
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
"indices": {
|
||||
"v1": {
|
||||
"status": "green",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 10,
|
||||
"active_shards": 20,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0
|
||||
},
|
||||
"v2": {
|
||||
"status": "red",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 0,
|
||||
"active_shards": 0,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 20
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var clusterHealthExpected = map[string]interface{}{
|
||||
"status": "green",
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
}
|
||||
|
||||
var v1IndexExpected = map[string]interface{}{
|
||||
"status": "green",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 10,
|
||||
"active_shards": 20,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
}
|
||||
|
||||
var v2IndexExpected = map[string]interface{}{
|
||||
"status": "red",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 0,
|
||||
"active_shards": 0,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 20,
|
||||
}
|
||||
|
||||
const statsResponse = `
|
||||
{
|
||||
"cluster_name": "es-testcluster",
|
||||
"nodes": {
|
||||
"SDFsfSDFsdfFSDSDfSFDSDF": {
|
||||
"timestamp": 1436365550135,
|
||||
"name": "test.host.com",
|
||||
"transport_address": "inet[/127.0.0.1:9300]",
|
||||
"host": "test",
|
||||
"ip": [
|
||||
"inet[/127.0.0.1:9300]",
|
||||
"NONE"
|
||||
],
|
||||
"attributes": {
|
||||
"master": "true"
|
||||
},
|
||||
"indices": {
|
||||
"docs": {
|
||||
"count": 29652,
|
||||
"deleted": 5229
|
||||
},
|
||||
"store": {
|
||||
"size_in_bytes": 37715234,
|
||||
"throttle_time_in_millis": 215
|
||||
},
|
||||
"indexing": {
|
||||
"index_total": 84790,
|
||||
"index_time_in_millis": 29680,
|
||||
"index_current": 0,
|
||||
"delete_total": 13879,
|
||||
"delete_time_in_millis": 1139,
|
||||
"delete_current": 0,
|
||||
"noop_update_total": 0,
|
||||
"is_throttled": false,
|
||||
"throttle_time_in_millis": 0
|
||||
},
|
||||
"get": {
|
||||
"total": 1,
|
||||
"time_in_millis": 2,
|
||||
"exists_total": 0,
|
||||
"exists_time_in_millis": 0,
|
||||
"missing_total": 1,
|
||||
"missing_time_in_millis": 2,
|
||||
"current": 0
|
||||
},
|
||||
"search": {
|
||||
"open_contexts": 0,
|
||||
"query_total": 1452,
|
||||
"query_time_in_millis": 5695,
|
||||
"query_current": 0,
|
||||
"fetch_total": 414,
|
||||
"fetch_time_in_millis": 146,
|
||||
"fetch_current": 0
|
||||
},
|
||||
"merges": {
|
||||
"current": 0,
|
||||
"current_docs": 0,
|
||||
"current_size_in_bytes": 0,
|
||||
"total": 133,
|
||||
"total_time_in_millis": 21060,
|
||||
"total_docs": 203672,
|
||||
"total_size_in_bytes": 142900226
|
||||
},
|
||||
"refresh": {
|
||||
"total": 1076,
|
||||
"total_time_in_millis": 20078
|
||||
},
|
||||
"flush": {
|
||||
"total": 115,
|
||||
"total_time_in_millis": 2401
|
||||
},
|
||||
"warmer": {
|
||||
"current": 0,
|
||||
"total": 2319,
|
||||
"total_time_in_millis": 448
|
||||
},
|
||||
"filter_cache": {
|
||||
"memory_size_in_bytes": 7384,
|
||||
"evictions": 0
|
||||
},
|
||||
"id_cache": {
|
||||
"memory_size_in_bytes": 0
|
||||
},
|
||||
"fielddata": {
|
||||
"memory_size_in_bytes": 12996,
|
||||
"evictions": 0
|
||||
},
|
||||
"percolate": {
|
||||
"total": 0,
|
||||
"time_in_millis": 0,
|
||||
"current": 0,
|
||||
"memory_size_in_bytes": -1,
|
||||
"memory_size": "-1b",
|
||||
"queries": 0
|
||||
},
|
||||
"completion": {
|
||||
"size_in_bytes": 0
|
||||
},
|
||||
"segments": {
|
||||
"count": 134,
|
||||
"memory_in_bytes": 1285212,
|
||||
"index_writer_memory_in_bytes": 0,
|
||||
"index_writer_max_memory_in_bytes": 172368955,
|
||||
"version_map_memory_in_bytes": 611844,
|
||||
"fixed_bit_set_memory_in_bytes": 0
|
||||
},
|
||||
"translog": {
|
||||
"operations": 17702,
|
||||
"size_in_bytes": 17
|
||||
},
|
||||
"suggest": {
|
||||
"total": 0,
|
||||
"time_in_millis": 0,
|
||||
"current": 0
|
||||
},
|
||||
"query_cache": {
|
||||
"memory_size_in_bytes": 0,
|
||||
"evictions": 0,
|
||||
"hit_count": 0,
|
||||
"miss_count": 0
|
||||
},
|
||||
"recovery": {
|
||||
"current_as_source": 0,
|
||||
"current_as_target": 0,
|
||||
"throttle_time_in_millis": 0
|
||||
}
|
||||
},
|
||||
"os": {
|
||||
"timestamp": 1436460392944,
|
||||
"load_average": [
|
||||
0.01,
|
||||
0.04,
|
||||
0.05
|
||||
],
|
||||
"mem": {
|
||||
"free_in_bytes": 477761536,
|
||||
"used_in_bytes": 1621868544,
|
||||
"free_percent": 74,
|
||||
"used_percent": 25,
|
||||
"actual_free_in_bytes": 1565470720,
|
||||
"actual_used_in_bytes": 534159360
|
||||
},
|
||||
"swap": {
|
||||
"used_in_bytes": 0,
|
||||
"free_in_bytes": 487997440
|
||||
}
|
||||
},
|
||||
"process": {
|
||||
"timestamp": 1436460392945,
|
||||
"open_file_descriptors": 160,
|
||||
"cpu": {
|
||||
"percent": 2,
|
||||
"sys_in_millis": 1870,
|
||||
"user_in_millis": 13610,
|
||||
"total_in_millis": 15480
|
||||
},
|
||||
"mem": {
|
||||
"total_virtual_in_bytes": 4747890688
|
||||
}
|
||||
},
|
||||
"jvm": {
|
||||
"timestamp": 1436460392945,
|
||||
"uptime_in_millis": 202245,
|
||||
"mem": {
|
||||
"heap_used_in_bytes": 52709568,
|
||||
"heap_used_percent": 5,
|
||||
"heap_committed_in_bytes": 259522560,
|
||||
"heap_max_in_bytes": 1038876672,
|
||||
"non_heap_used_in_bytes": 39634576,
|
||||
"non_heap_committed_in_bytes": 40841216,
|
||||
"pools": {
|
||||
"young": {
|
||||
"used_in_bytes": 32685760,
|
||||
"max_in_bytes": 279183360,
|
||||
"peak_used_in_bytes": 71630848,
|
||||
"peak_max_in_bytes": 279183360
|
||||
},
|
||||
"survivor": {
|
||||
"used_in_bytes": 8912880,
|
||||
"max_in_bytes": 34865152,
|
||||
"peak_used_in_bytes": 8912888,
|
||||
"peak_max_in_bytes": 34865152
|
||||
},
|
||||
"old": {
|
||||
"used_in_bytes": 11110928,
|
||||
"max_in_bytes": 724828160,
|
||||
"peak_used_in_bytes": 14354608,
|
||||
"peak_max_in_bytes": 724828160
|
||||
}
|
||||
}
|
||||
},
|
||||
"threads": {
|
||||
"count": 44,
|
||||
"peak_count": 45
|
||||
},
|
||||
"gc": {
|
||||
"collectors": {
|
||||
"young": {
|
||||
"collection_count": 2,
|
||||
"collection_time_in_millis": 98
|
||||
},
|
||||
"old": {
|
||||
"collection_count": 1,
|
||||
"collection_time_in_millis": 24
|
||||
}
|
||||
}
|
||||
},
|
||||
"buffer_pools": {
|
||||
"direct": {
|
||||
"count": 40,
|
||||
"used_in_bytes": 6304239,
|
||||
"total_capacity_in_bytes": 6304239
|
||||
},
|
||||
"mapped": {
|
||||
"count": 0,
|
||||
"used_in_bytes": 0,
|
||||
"total_capacity_in_bytes": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"thread_pool": {
|
||||
"percolate": {
|
||||
"threads": 123,
|
||||
"queue": 23,
|
||||
"active": 13,
|
||||
"rejected": 235,
|
||||
"largest": 23,
|
||||
"completed": 33
|
||||
},
|
||||
"fetch_shard_started": {
|
||||
"threads": 3,
|
||||
"queue": 1,
|
||||
"active": 5,
|
||||
"rejected": 6,
|
||||
"largest": 4,
|
||||
"completed": 54
|
||||
},
|
||||
"listener": {
|
||||
"threads": 1,
|
||||
"queue": 2,
|
||||
"active": 4,
|
||||
"rejected": 8,
|
||||
"largest": 1,
|
||||
"completed": 1
|
||||
},
|
||||
"index": {
|
||||
"threads": 6,
|
||||
"queue": 8,
|
||||
"active": 4,
|
||||
"rejected": 2,
|
||||
"largest": 3,
|
||||
"completed": 6
|
||||
},
|
||||
"refresh": {
|
||||
"threads": 23,
|
||||
"queue": 7,
|
||||
"active": 3,
|
||||
"rejected": 4,
|
||||
"largest": 8,
|
||||
"completed": 3
|
||||
},
|
||||
"suggest": {
|
||||
"threads": 2,
|
||||
"queue": 7,
|
||||
"active": 2,
|
||||
"rejected": 1,
|
||||
"largest": 8,
|
||||
"completed": 3
|
||||
},
|
||||
"generic": {
|
||||
"threads": 1,
|
||||
"queue": 4,
|
||||
"active": 6,
|
||||
"rejected": 3,
|
||||
"largest": 2,
|
||||
"completed": 27
|
||||
},
|
||||
"warmer": {
|
||||
"threads": 2,
|
||||
"queue": 7,
|
||||
"active": 3,
|
||||
"rejected": 2,
|
||||
"largest": 3,
|
||||
"completed": 1
|
||||
},
|
||||
"search": {
|
||||
"threads": 5,
|
||||
"queue": 7,
|
||||
"active": 2,
|
||||
"rejected": 7,
|
||||
"largest": 2,
|
||||
"completed": 4
|
||||
},
|
||||
"flush": {
|
||||
"threads": 3,
|
||||
"queue": 8,
|
||||
"active": 0,
|
||||
"rejected": 1,
|
||||
"largest": 5,
|
||||
"completed": 3
|
||||
},
|
||||
"optimize": {
|
||||
"threads": 3,
|
||||
"queue": 4,
|
||||
"active": 1,
|
||||
"rejected": 2,
|
||||
"largest": 7,
|
||||
"completed": 3
|
||||
},
|
||||
"fetch_shard_store": {
|
||||
"threads": 1,
|
||||
"queue": 7,
|
||||
"active": 4,
|
||||
"rejected": 2,
|
||||
"largest": 4,
|
||||
"completed": 1
|
||||
},
|
||||
"management": {
|
||||
"threads": 2,
|
||||
"queue": 3,
|
||||
"active": 1,
|
||||
"rejected": 6,
|
||||
"largest": 2,
|
||||
"completed": 22
|
||||
},
|
||||
"get": {
|
||||
"threads": 1,
|
||||
"queue": 8,
|
||||
"active": 4,
|
||||
"rejected": 3,
|
||||
"largest": 2,
|
||||
"completed": 1
|
||||
},
|
||||
"merge": {
|
||||
"threads": 6,
|
||||
"queue": 4,
|
||||
"active": 5,
|
||||
"rejected": 2,
|
||||
"largest": 5,
|
||||
"completed": 1
|
||||
},
|
||||
"bulk": {
|
||||
"threads": 4,
|
||||
"queue": 5,
|
||||
"active": 7,
|
||||
"rejected": 3,
|
||||
"largest": 1,
|
||||
"completed": 4
|
||||
},
|
||||
"snapshot": {
|
||||
"threads": 8,
|
||||
"queue": 5,
|
||||
"active": 6,
|
||||
"rejected": 2,
|
||||
"largest": 1,
|
||||
"completed": 0
|
||||
}
|
||||
},
|
||||
"fs": {
|
||||
"timestamp": 1436460392946,
|
||||
"total": {
|
||||
"total_in_bytes": 19507089408,
|
||||
"free_in_bytes": 16909316096,
|
||||
"available_in_bytes": 15894814720
|
||||
},
|
||||
"data": [
|
||||
{
|
||||
"path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
|
||||
"mount": "/usr/share/elasticsearch/data",
|
||||
"type": "ext4",
|
||||
"total_in_bytes": 19507089408,
|
||||
"free_in_bytes": 16909316096,
|
||||
"available_in_bytes": 15894814720
|
||||
}
|
||||
]
|
||||
},
|
||||
"transport": {
|
||||
"server_open": 13,
|
||||
"rx_count": 6,
|
||||
"rx_size_in_bytes": 1380,
|
||||
"tx_count": 6,
|
||||
"tx_size_in_bytes": 1380
|
||||
},
|
||||
"http": {
|
||||
"current_open": 3,
|
||||
"total_opened": 3
|
||||
},
|
||||
"breakers": {
|
||||
"fielddata": {
|
||||
"limit_size_in_bytes": 623326003,
|
||||
"limit_size": "594.4mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.03,
|
||||
"tripped": 0
|
||||
},
|
||||
"request": {
|
||||
"limit_size_in_bytes": 415550668,
|
||||
"limit_size": "396.2mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
},
|
||||
"parent": {
|
||||
"limit_size_in_bytes": 727213670,
|
||||
"limit_size": "693.5mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var indicesExpected = map[string]interface{}{
|
||||
"id_cache_memory_size_in_bytes": float64(0),
|
||||
"completion_size_in_bytes": float64(0),
|
||||
"suggest_total": float64(0),
|
||||
"suggest_time_in_millis": float64(0),
|
||||
"suggest_current": float64(0),
|
||||
"query_cache_memory_size_in_bytes": float64(0),
|
||||
"query_cache_evictions": float64(0),
|
||||
"query_cache_hit_count": float64(0),
|
||||
"query_cache_miss_count": float64(0),
|
||||
"store_size_in_bytes": float64(37715234),
|
||||
"store_throttle_time_in_millis": float64(215),
|
||||
"merges_current_docs": float64(0),
|
||||
"merges_current_size_in_bytes": float64(0),
|
||||
"merges_total": float64(133),
|
||||
"merges_total_time_in_millis": float64(21060),
|
||||
"merges_total_docs": float64(203672),
|
||||
"merges_total_size_in_bytes": float64(142900226),
|
||||
"merges_current": float64(0),
|
||||
"filter_cache_memory_size_in_bytes": float64(7384),
|
||||
"filter_cache_evictions": float64(0),
|
||||
"indexing_index_total": float64(84790),
|
||||
"indexing_index_time_in_millis": float64(29680),
|
||||
"indexing_index_current": float64(0),
|
||||
"indexing_noop_update_total": float64(0),
|
||||
"indexing_throttle_time_in_millis": float64(0),
|
||||
"indexing_delete_total": float64(13879),
|
||||
"indexing_delete_time_in_millis": float64(1139),
|
||||
"indexing_delete_current": float64(0),
|
||||
"get_exists_time_in_millis": float64(0),
|
||||
"get_missing_total": float64(1),
|
||||
"get_missing_time_in_millis": float64(2),
|
||||
"get_current": float64(0),
|
||||
"get_total": float64(1),
|
||||
"get_time_in_millis": float64(2),
|
||||
"get_exists_total": float64(0),
|
||||
"refresh_total": float64(1076),
|
||||
"refresh_total_time_in_millis": float64(20078),
|
||||
"percolate_current": float64(0),
|
||||
"percolate_memory_size_in_bytes": float64(-1),
|
||||
"percolate_queries": float64(0),
|
||||
"percolate_total": float64(0),
|
||||
"percolate_time_in_millis": float64(0),
|
||||
"translog_operations": float64(17702),
|
||||
"translog_size_in_bytes": float64(17),
|
||||
"recovery_current_as_source": float64(0),
|
||||
"recovery_current_as_target": float64(0),
|
||||
"recovery_throttle_time_in_millis": float64(0),
|
||||
"docs_count": float64(29652),
|
||||
"docs_deleted": float64(5229),
|
||||
"flush_total_time_in_millis": float64(2401),
|
||||
"flush_total": float64(115),
|
||||
"fielddata_memory_size_in_bytes": float64(12996),
|
||||
"fielddata_evictions": float64(0),
|
||||
"search_fetch_current": float64(0),
|
||||
"search_open_contexts": float64(0),
|
||||
"search_query_total": float64(1452),
|
||||
"search_query_time_in_millis": float64(5695),
|
||||
"search_query_current": float64(0),
|
||||
"search_fetch_total": float64(414),
|
||||
"search_fetch_time_in_millis": float64(146),
|
||||
"warmer_current": float64(0),
|
||||
"warmer_total": float64(2319),
|
||||
"warmer_total_time_in_millis": float64(448),
|
||||
"segments_count": float64(134),
|
||||
"segments_memory_in_bytes": float64(1285212),
|
||||
"segments_index_writer_memory_in_bytes": float64(0),
|
||||
"segments_index_writer_max_memory_in_bytes": float64(172368955),
|
||||
"segments_version_map_memory_in_bytes": float64(611844),
|
||||
"segments_fixed_bit_set_memory_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var osExpected = map[string]interface{}{
|
||||
"swap_used_in_bytes": float64(0),
|
||||
"swap_free_in_bytes": float64(487997440),
|
||||
"timestamp": float64(1436460392944),
|
||||
"mem_free_percent": float64(74),
|
||||
"mem_used_percent": float64(25),
|
||||
"mem_actual_free_in_bytes": float64(1565470720),
|
||||
"mem_actual_used_in_bytes": float64(534159360),
|
||||
"mem_free_in_bytes": float64(477761536),
|
||||
"mem_used_in_bytes": float64(1621868544),
|
||||
}
|
||||
|
||||
var processExpected = map[string]interface{}{
|
||||
"mem_total_virtual_in_bytes": float64(4747890688),
|
||||
"timestamp": float64(1436460392945),
|
||||
"open_file_descriptors": float64(160),
|
||||
"cpu_total_in_millis": float64(15480),
|
||||
"cpu_percent": float64(2),
|
||||
"cpu_sys_in_millis": float64(1870),
|
||||
"cpu_user_in_millis": float64(13610),
|
||||
}
|
||||
|
||||
var jvmExpected = map[string]interface{}{
|
||||
"timestamp": float64(1436460392945),
|
||||
"uptime_in_millis": float64(202245),
|
||||
"mem_non_heap_used_in_bytes": float64(39634576),
|
||||
"mem_non_heap_committed_in_bytes": float64(40841216),
|
||||
"mem_pools_young_max_in_bytes": float64(279183360),
|
||||
"mem_pools_young_peak_used_in_bytes": float64(71630848),
|
||||
"mem_pools_young_peak_max_in_bytes": float64(279183360),
|
||||
"mem_pools_young_used_in_bytes": float64(32685760),
|
||||
"mem_pools_survivor_peak_used_in_bytes": float64(8912888),
|
||||
"mem_pools_survivor_peak_max_in_bytes": float64(34865152),
|
||||
"mem_pools_survivor_used_in_bytes": float64(8912880),
|
||||
"mem_pools_survivor_max_in_bytes": float64(34865152),
|
||||
"mem_pools_old_peak_max_in_bytes": float64(724828160),
|
||||
"mem_pools_old_used_in_bytes": float64(11110928),
|
||||
"mem_pools_old_max_in_bytes": float64(724828160),
|
||||
"mem_pools_old_peak_used_in_bytes": float64(14354608),
|
||||
"mem_heap_used_in_bytes": float64(52709568),
|
||||
"mem_heap_used_percent": float64(5),
|
||||
"mem_heap_committed_in_bytes": float64(259522560),
|
||||
"mem_heap_max_in_bytes": float64(1038876672),
|
||||
"threads_peak_count": float64(45),
|
||||
"threads_count": float64(44),
|
||||
"gc_collectors_young_collection_count": float64(2),
|
||||
"gc_collectors_young_collection_time_in_millis": float64(98),
|
||||
"gc_collectors_old_collection_count": float64(1),
|
||||
"gc_collectors_old_collection_time_in_millis": float64(24),
|
||||
"buffer_pools_direct_count": float64(40),
|
||||
"buffer_pools_direct_used_in_bytes": float64(6304239),
|
||||
"buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
|
||||
"buffer_pools_mapped_count": float64(0),
|
||||
"buffer_pools_mapped_used_in_bytes": float64(0),
|
||||
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var threadPoolExpected = map[string]interface{}{
|
||||
"merge_threads": float64(6),
|
||||
"merge_queue": float64(4),
|
||||
"merge_active": float64(5),
|
||||
"merge_rejected": float64(2),
|
||||
"merge_largest": float64(5),
|
||||
"merge_completed": float64(1),
|
||||
"bulk_threads": float64(4),
|
||||
"bulk_queue": float64(5),
|
||||
"bulk_active": float64(7),
|
||||
"bulk_rejected": float64(3),
|
||||
"bulk_largest": float64(1),
|
||||
"bulk_completed": float64(4),
|
||||
"warmer_threads": float64(2),
|
||||
"warmer_queue": float64(7),
|
||||
"warmer_active": float64(3),
|
||||
"warmer_rejected": float64(2),
|
||||
"warmer_largest": float64(3),
|
||||
"warmer_completed": float64(1),
|
||||
"get_largest": float64(2),
|
||||
"get_completed": float64(1),
|
||||
"get_threads": float64(1),
|
||||
"get_queue": float64(8),
|
||||
"get_active": float64(4),
|
||||
"get_rejected": float64(3),
|
||||
"index_threads": float64(6),
|
||||
"index_queue": float64(8),
|
||||
"index_active": float64(4),
|
||||
"index_rejected": float64(2),
|
||||
"index_largest": float64(3),
|
||||
"index_completed": float64(6),
|
||||
"suggest_threads": float64(2),
|
||||
"suggest_queue": float64(7),
|
||||
"suggest_active": float64(2),
|
||||
"suggest_rejected": float64(1),
|
||||
"suggest_largest": float64(8),
|
||||
"suggest_completed": float64(3),
|
||||
"fetch_shard_store_queue": float64(7),
|
||||
"fetch_shard_store_active": float64(4),
|
||||
"fetch_shard_store_rejected": float64(2),
|
||||
"fetch_shard_store_largest": float64(4),
|
||||
"fetch_shard_store_completed": float64(1),
|
||||
"fetch_shard_store_threads": float64(1),
|
||||
"management_threads": float64(2),
|
||||
"management_queue": float64(3),
|
||||
"management_active": float64(1),
|
||||
"management_rejected": float64(6),
|
||||
"management_largest": float64(2),
|
||||
"management_completed": float64(22),
|
||||
"percolate_queue": float64(23),
|
||||
"percolate_active": float64(13),
|
||||
"percolate_rejected": float64(235),
|
||||
"percolate_largest": float64(23),
|
||||
"percolate_completed": float64(33),
|
||||
"percolate_threads": float64(123),
|
||||
"listener_active": float64(4),
|
||||
"listener_rejected": float64(8),
|
||||
"listener_largest": float64(1),
|
||||
"listener_completed": float64(1),
|
||||
"listener_threads": float64(1),
|
||||
"listener_queue": float64(2),
|
||||
"search_rejected": float64(7),
|
||||
"search_largest": float64(2),
|
||||
"search_completed": float64(4),
|
||||
"search_threads": float64(5),
|
||||
"search_queue": float64(7),
|
||||
"search_active": float64(2),
|
||||
"fetch_shard_started_threads": float64(3),
|
||||
"fetch_shard_started_queue": float64(1),
|
||||
"fetch_shard_started_active": float64(5),
|
||||
"fetch_shard_started_rejected": float64(6),
|
||||
"fetch_shard_started_largest": float64(4),
|
||||
"fetch_shard_started_completed": float64(54),
|
||||
"refresh_rejected": float64(4),
|
||||
"refresh_largest": float64(8),
|
||||
"refresh_completed": float64(3),
|
||||
"refresh_threads": float64(23),
|
||||
"refresh_queue": float64(7),
|
||||
"refresh_active": float64(3),
|
||||
"optimize_threads": float64(3),
|
||||
"optimize_queue": float64(4),
|
||||
"optimize_active": float64(1),
|
||||
"optimize_rejected": float64(2),
|
||||
"optimize_largest": float64(7),
|
||||
"optimize_completed": float64(3),
|
||||
"snapshot_largest": float64(1),
|
||||
"snapshot_completed": float64(0),
|
||||
"snapshot_threads": float64(8),
|
||||
"snapshot_queue": float64(5),
|
||||
"snapshot_active": float64(6),
|
||||
"snapshot_rejected": float64(2),
|
||||
"generic_threads": float64(1),
|
||||
"generic_queue": float64(4),
|
||||
"generic_active": float64(6),
|
||||
"generic_rejected": float64(3),
|
||||
"generic_largest": float64(2),
|
||||
"generic_completed": float64(27),
|
||||
"flush_threads": float64(3),
|
||||
"flush_queue": float64(8),
|
||||
"flush_active": float64(0),
|
||||
"flush_rejected": float64(1),
|
||||
"flush_largest": float64(5),
|
||||
"flush_completed": float64(3),
|
||||
}
|
||||
|
||||
var fsExpected = map[string]interface{}{
|
||||
"timestamp": float64(1436460392946),
|
||||
"total_free_in_bytes": float64(16909316096),
|
||||
"total_available_in_bytes": float64(15894814720),
|
||||
"total_total_in_bytes": float64(19507089408),
|
||||
}
|
||||
|
||||
var transportExpected = map[string]interface{}{
|
||||
"server_open": float64(13),
|
||||
"rx_count": float64(6),
|
||||
"rx_size_in_bytes": float64(1380),
|
||||
"tx_count": float64(6),
|
||||
"tx_size_in_bytes": float64(1380),
|
||||
}
|
||||
|
||||
var httpExpected = map[string]interface{}{
|
||||
"current_open": float64(3),
|
||||
"total_opened": float64(3),
|
||||
}
|
||||
|
||||
var breakersExpected = map[string]interface{}{
|
||||
"fielddata_estimated_size_in_bytes": float64(0),
|
||||
"fielddata_overhead": float64(1.03),
|
||||
"fielddata_tripped": float64(0),
|
||||
"fielddata_limit_size_in_bytes": float64(623326003),
|
||||
"request_estimated_size_in_bytes": float64(0),
|
||||
"request_overhead": float64(1.0),
|
||||
"request_tripped": float64(0),
|
||||
"request_limit_size_in_bytes": float64(415550668),
|
||||
"parent_overhead": float64(1.0),
|
||||
"parent_tripped": float64(0),
|
||||
"parent_limit_size_in_bytes": float64(727213670),
|
||||
"parent_estimated_size_in_bytes": float64(0),
|
||||
}
|
||||
42
plugins/inputs/exec/README.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Exec Plugin
|
||||
|
||||
The exec plugin executes arbitrary commands that output JSON, flattens the JSON,
|
||||
and collects all numeric values, treating them as floats.
|
||||
|
||||
For example, if you have a JSON-returning command called mycollector, you could
|
||||
set up the exec plugin with:
|
||||
|
||||
```
|
||||
[[exec.commands]]
|
||||
command = "/usr/bin/mycollector --output=json"
|
||||
name = "mycollector"
|
||||
interval = 10
|
||||
```
|
||||
|
||||
The name is used as a prefix for the measurements.
|
||||
|
||||
The interval determines how often a particular command should be run. Each
|
||||
time the exec plugin runs, it executes a command only if at least `interval`
|
||||
seconds have passed since the exec plugin last ran that command.
|
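For instance, two commands can be polled at different rates by giving each its own `interval`. This follows the configuration format shown above; the second command and its path are made up for illustration:

```
[[exec.commands]]
command = "/usr/bin/mycollector --output=json"
name = "mycollector"
interval = 10

[[exec.commands]]
command = "/usr/bin/myothercollector --output=json"
name = "myothercollector"
interval = 60
```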
||||
|
||||
|
||||
# Sample
|
||||
|
||||
Let's say that we have a command named "mycollector", which gives the following output:
|
||||
```json
|
||||
{
|
||||
"a": 0.5,
|
||||
"b": {
|
||||
"c": "some text",
|
||||
"d": 0.1,
|
||||
"e": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
```
|
||||
exec_mycollector_a value=0.5
|
||||
exec_mycollector_b_d value=0.1
|
||||
exec_mycollector_b_e value=5
|
||||
```
|
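The flattening behaviour shown above is handled by telegraf's internal JSON flattener. As a minimal, self-contained sketch of the idea in Go — not the actual `internal.JSONFlattener` implementation — nested keys are joined with underscores and only numeric leaves are kept:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks a decoded JSON value, joining nested keys with underscores
// and collecting every numeric leaf (encoding/json decodes numbers as float64).
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	case float64:
		out[prefix] = t
	default:
		// strings, booleans, nulls and arrays are ignored in this sketch
	}
}

func main() {
	raw := []byte(`{"a": 0.5, "b": {"c": "some text", "d": 0.1, "e": 5}}`)

	var decoded interface{}
	if err := json.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}

	fields := map[string]float64{}
	flatten("", decoded, fields)
	fmt.Println(fields) // map[a:0.5 b_d:0.1 b_e:5]
}
```

The plugin then applies the configured name as a prefix, which is how `a` ends up reported as `exec_mycollector_a`.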
||||
91
plugins/inputs/exec/exec.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package exec
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"github.com/gonuts/go-shellquote"
|
||||
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
const sampleConfig = `
|
||||
# the command to run
|
||||
command = "/usr/bin/mycollector --foo=bar"
|
||||
|
||||
# measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
`
|
||||
|
||||
type Exec struct {
|
||||
Command string
|
||||
|
||||
runner Runner
|
||||
}
|
||||
|
||||
type Runner interface {
|
||||
Run(*Exec) ([]byte, error)
|
||||
}
|
||||
|
||||
type CommandRunner struct{}
|
||||
|
||||
func (c CommandRunner) Run(e *Exec) ([]byte, error) {
|
||||
split_cmd, err := shellquote.Split(e.Command)
|
||||
if err != nil || len(split_cmd) == 0 {
|
||||
return nil, fmt.Errorf("exec: unable to parse command, %s", err)
|
||||
}
|
||||
|
||||
cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("exec: %s for command '%s'", err, e.Command)
|
||||
}
|
||||
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
||||
func NewExec() *Exec {
|
||||
return &Exec{runner: CommandRunner{}}
|
||||
}
|
||||
|
||||
func (e *Exec) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (e *Exec) Description() string {
|
||||
return "Read flattened metrics from one or more commands that output JSON to stdout"
|
||||
}
|
||||
|
||||
func (e *Exec) Gather(acc inputs.Accumulator) error {
|
||||
out, err := e.runner.Run(e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var jsonOut interface{}
|
||||
err = json.Unmarshal(out, &jsonOut)
|
||||
if err != nil {
|
||||
return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
|
||||
e.Command, err)
|
||||
}
|
||||
|
||||
f := internal.JSONFlattener{}
|
||||
err = f.FlattenJSON("", jsonOut)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
acc.AddFields("exec", f.Fields, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("exec", func() inputs.Input {
|
||||
return NewExec()
|
||||
})
|
||||
}
|
||||
95
plugins/inputs/exec/exec_test.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package exec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Midnight 9/22/2015
|
||||
const baseTimeSeconds = 1442905200
|
||||
|
||||
const validJson = `
|
||||
{
|
||||
"status": "green",
|
||||
"num_processes": 82,
|
||||
"cpu": {
|
||||
"status": "red",
|
||||
"nil_status": null,
|
||||
"used": 8234,
|
||||
"free": 32
|
||||
},
|
||||
"percent": 0.81,
|
||||
"users": [0, 1, 2, 3]
|
||||
}`
|
||||
|
||||
const malformedJson = `
|
||||
{
|
||||
"status": "green",
|
||||
`
|
||||
|
||||
type runnerMock struct {
|
||||
out []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func newRunnerMock(out []byte, err error) Runner {
|
||||
return &runnerMock{
|
||||
out: out,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (r runnerMock) Run(e *Exec) ([]byte, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
return r.out, nil
|
||||
}
|
||||
|
||||
func TestExec(t *testing.T) {
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(validJson), nil),
|
||||
Command: "testcommand arg1",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, acc.NFields(), 4, "non-numeric measurements should be ignored")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"num_processes": float64(82),
|
||||
"cpu_used": float64(8234),
|
||||
"cpu_free": float64(32),
|
||||
"percent": float64(0.81),
|
||||
}
|
||||
acc.AssertContainsFields(t, "exec", fields)
|
||||
}
|
||||
|
||||
func TestExecMalformed(t *testing.T) {
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(malformedJson), nil),
|
||||
Command: "badcommand arg1",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
func TestCommandError(t *testing.T) {
|
||||
e := &Exec{
|
||||
runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")),
|
||||
Command: "badcommand",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
|
||||
}
|
||||
364
plugins/inputs/haproxy/haproxy.go
Normal file
@@ -0,0 +1,364 @@
|
||||
package haproxy
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
|
||||
const (
|
||||
HF_PXNAME = 0 // 0. pxname [LFBS]: proxy name
|
||||
HF_SVNAME = 1 // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
|
||||
HF_QCUR = 2 //2. qcur [..BS]: current queued requests. For the backend this reports the number queued without a server assigned.
|
||||
HF_QMAX = 3 //3. qmax [..BS]: max value of qcur
|
||||
HF_SCUR = 4 // 4. scur [LFBS]: current sessions
|
||||
HF_SMAX = 5 //5. smax [LFBS]: max sessions
|
||||
HF_SLIM = 6 //6. slim [LFBS]: configured session limit
|
||||
HF_STOT = 7 //7. stot [LFBS]: cumulative number of connections
|
||||
HF_BIN = 8 //8. bin [LFBS]: bytes in
|
||||
HF_BOUT = 9 //9. bout [LFBS]: bytes out
|
||||
HF_DREQ = 10 //10. dreq [LFB.]: requests denied because of security concerns.
|
||||
HF_DRESP = 11 //11. dresp [LFBS]: responses denied because of security concerns.
|
||||
HF_EREQ = 12 //12. ereq [LF..]: request errors. Some of the possible causes are:
|
||||
HF_ECON = 13 //13. econ [..BS]: number of requests that encountered an error trying to
|
||||
HF_ERESP = 14 //14. eresp [..BS]: response errors. srv_abrt will be counted here also. Some other errors are: - write error on the client socket (won't be counted for the server stat) - failure applying filters to the response.
|
||||
HF_WRETR = 15 //15. wretr [..BS]: number of times a connection to a server was retried.
|
||||
HF_WREDIS = 16 //16. wredis [..BS]: number of times a request was redispatched to another server. The server value counts the number of times that server was switched away from.
|
||||
HF_STATUS = 17 //17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...)
|
||||
HF_WEIGHT = 18 //18. weight [..BS]: total weight (backend), server weight (server)
|
||||
HF_ACT = 19 //19. act [..BS]: number of active servers (backend), server is active (server)
|
||||
HF_BCK = 20 //20. bck [..BS]: number of backup servers (backend), server is backup (server)
|
||||
HF_CHKFAIL = 21 //21. chkfail [...S]: number of failed checks. (Only counts checks failed when the server is up.)
|
||||
HF_CHKDOWN = 22 //22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts transitions to the whole backend being down, rather than the sum of the counters for each server.
|
||||
HF_LASTCHG = 23 //23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition
|
||||
HF_DOWNTIME = 24 //24. downtime [..BS]: total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime.
|
||||
HF_QLIMIT = 25 //25. qlimit [...S]: configured maxqueue for the server, or nothing in the value is 0 (default, meaning no limit)
|
||||
HF_PID = 26 //26. pid [LFBS]: process id (0 for first instance, 1 for second, ...)
|
||||
HF_IID = 27 //27. iid [LFBS]: unique proxy id
|
||||
HF_SID = 28 //28. sid [L..S]: server id (unique inside a proxy)
|
||||
HF_THROTTLE = 29 //29. throttle [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
|
||||
HF_LBTOT = 30 //30. lbtot [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected.
|
||||
HF_TRACKED = 31 //31. tracked [...S]: id of proxy/server if tracking is enabled.
|
||||
HF_TYPE = 32 //32. type [LFBS]: (0 = frontend, 1 = backend, 2 = server, 3 = socket/listener)
|
||||
HF_RATE = 33 //33. rate [.FBS]: number of sessions per second over last elapsed second
|
||||
HF_RATE_LIM = 34 //34. rate_lim [.F..]: configured limit on new sessions per second
|
||||
HF_RATE_MAX = 35 //35. rate_max [.FBS]: max number of new sessions per second
|
||||
HF_CHECK_STATUS = 36 //36. check_status [...S]: status of last health check, one of:
|
||||
HF_CHECK_CODE = 37 //37. check_code [...S]: layer5-7 code, if available
|
||||
HF_CHECK_DURATION = 38 //38. check_duration [...S]: time in ms took to finish last health check
|
||||
HF_HRSP_1xx = 39 //39. hrsp_1xx [.FBS]: http responses with 1xx code
|
||||
HF_HRSP_2xx = 40 //40. hrsp_2xx [.FBS]: http responses with 2xx code
|
||||
HF_HRSP_3xx = 41 //41. hrsp_3xx [.FBS]: http responses with 3xx code
|
||||
HF_HRSP_4xx = 42 //42. hrsp_4xx [.FBS]: http responses with 4xx code
|
||||
HF_HRSP_5xx = 43 //43. hrsp_5xx [.FBS]: http responses with 5xx code
|
||||
HF_HRSP_OTHER = 44 //44. hrsp_other [.FBS]: http responses with other codes (protocol error)
|
||||
HF_HANAFAIL = 45 //45. hanafail [...S]: failed health checks details
|
||||
HF_REQ_RATE = 46 //46. req_rate [.F..]: HTTP requests per second over last elapsed second
|
||||
HF_REQ_RATE_MAX = 47 //47. req_rate_max [.F..]: max number of HTTP requests per second observed
|
||||
HF_REQ_TOT = 48 //48. req_tot [.F..]: total number of HTTP requests received
|
||||
HF_CLI_ABRT = 49 //49. cli_abrt [..BS]: number of data transfers aborted by the client
|
||||
HF_SRV_ABRT = 50 //50. srv_abrt [..BS]: number of data transfers aborted by the server (inc. in eresp)
|
||||
HF_COMP_IN = 51 //51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor
|
||||
HF_COMP_OUT = 52 //52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor
|
||||
HF_COMP_BYP = 53 //53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor (CPU/BW limit)
|
||||
HF_COMP_RSP = 54 //54. comp_rsp [.FB.]: number of HTTP responses that were compressed
|
||||
HF_LASTSESS = 55 //55. lastsess [..BS]: number of seconds since last session assigned to server/backend
|
||||
HF_LAST_CHK = 56 //56. last_chk [...S]: last health check contents or textual error
|
||||
HF_LAST_AGT = 57 //57. last_agt [...S]: last agent check contents or textual error
|
||||
HF_QTIME = 58 //58. qtime [..BS]:
|
||||
HF_CTIME = 59 //59. ctime [..BS]:
|
||||
HF_RTIME = 60 //60. rtime [..BS]: (0 for TCP)
|
||||
HF_TTIME = 61 //61. ttime [..BS]: the average total session time in ms over the 1024 last requests
|
||||
)
|
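These indices mirror the column order of HAProxy's CSV stats export, whose first line is a comment row naming the columns; abbreviated, that header looks like:

```
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,...
```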
||||
|
||||
type haproxy struct {
|
||||
Servers []string
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# An array of addresses to gather stats from. Specify an ip or hostname
|
||||
# with optional port, e.g. localhost, 10.10.3.33:1936, etc.
|
||||
#
|
||||
# If no servers are specified, then default to 127.0.0.1:1936
|
||||
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
|
||||
# Or you can also use a local socket (not working yet)
|
||||
# servers = ["socket://run/haproxy/admin.sock"]
|
||||
`
|
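Because gatherServer (below) pulls user info out of each server URL and sends it as HTTP basic auth, a password-protected stats page can be configured by embedding the credentials in the URL. The host and credentials here are purely illustrative:

```
servers = ["http://stats_user:stats_password@myhaproxy.com:1936"]
```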
||||
|
||||
func (r *haproxy) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (r *haproxy) Description() string {
|
||||
return "Read metrics of haproxy, via socket or csv stats page"
|
||||
}
|
||||
|
||||
// Gather reads stats from all configured servers and accumulates them.
|
||||
// It returns one of the errors encountered while gathering stats (if any).
|
||||
func (g *haproxy) Gather(acc inputs.Accumulator) error {
|
||||
if len(g.Servers) == 0 {
|
||||
return g.gatherServer("http://127.0.0.1:1936", acc)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var outerr error
|
||||
|
||||
for _, serv := range g.Servers {
|
||||
wg.Add(1)
|
||||
go func(serv string) {
|
||||
defer wg.Done()
|
||||
outerr = g.gatherServer(serv, acc)
|
||||
}(serv)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error {
|
||||
if g.client == nil {
|
||||
|
||||
client := &http.Client{}
|
||||
g.client = client
|
||||
}
|
||||
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s/;csv", u.Scheme, u.Host, u.Path), nil)
|
||||
if u.User != nil {
|
||||
p, _ := u.User.Password()
|
||||
req.SetBasicAuth(u.User.Username(), p)
|
||||
}
|
||||
|
||||
res, err := g.client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to connect to haproxy server '%s': %s", addr, err)
|
||||
}
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
return fmt.Errorf("Unable to get valid stat result from '%s': %s", addr, err)
|
||||
}
|
||||
|
||||
return importCsvResult(res.Body, acc, u.Host)
|
||||
}
|
||||
|
||||
// fieldNames maps the CSV column constants defined above to the field names
// reported to the accumulator. Every mapped column is parsed as an unsigned integer.
var fieldNames = map[int]string{
	HF_QCUR:           "qcur",
	HF_QMAX:           "qmax",
	HF_SCUR:           "scur",
	HF_SMAX:           "smax",
	HF_STOT:           "stot",
	HF_BIN:            "bin",
	HF_BOUT:           "bout",
	HF_DREQ:           "dreq",
	HF_DRESP:          "dresp",
	HF_EREQ:           "ereq",
	HF_ECON:           "econ",
	HF_ERESP:          "eresp",
	HF_WRETR:          "wretr",
	HF_WREDIS:         "wredis",
	HF_ACT:            "active_servers",
	HF_BCK:            "backup_servers",
	HF_DOWNTIME:       "downtime",
	HF_THROTTLE:       "throttle",
	HF_LBTOT:          "lbtot",
	HF_RATE:           "rate",
	HF_RATE_MAX:       "rate_max",
	HF_CHECK_DURATION: "check_duration",
	HF_HRSP_1xx:       "http_response.1xx",
	HF_HRSP_2xx:       "http_response.2xx",
	HF_HRSP_3xx:       "http_response.3xx",
	HF_HRSP_4xx:       "http_response.4xx",
	HF_HRSP_5xx:       "http_response.5xx",
	HF_REQ_RATE:       "req_rate",
	HF_REQ_RATE_MAX:   "req_rate_max",
	HF_REQ_TOT:        "req_tot",
	HF_CLI_ABRT:       "cli_abort",
	HF_SRV_ABRT:       "srv_abort",
	HF_QTIME:          "qtime",
	HF_CTIME:          "ctime",
	HF_RTIME:          "rtime",
	HF_TTIME:          "ttime",
}

func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error {
	reader := csv.NewReader(r)
	result, err := reader.ReadAll()
	if err != nil {
		return err
	}
	now := time.Now()

	for _, row := range result {
		fields := make(map[string]interface{})
		tags := map[string]string{
			"server": host,
			"proxy":  row[HF_PXNAME],
			"sv":     row[HF_SVNAME],
		}
		for field, v := range row {
			// Only the mapped columns are collected; empty or non-numeric
			// values are skipped, as before.
			name, ok := fieldNames[field]
			if !ok {
				continue
			}
			ival, err := strconv.ParseUint(v, 10, 64)
			if err == nil {
				fields[name] = ival
			}
		}
		acc.AddFields("haproxy", fields, tags, now)
	}
	return nil
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("haproxy", func() inputs.Input {
|
||||
return &haproxy{}
|
||||
})
|
||||
}
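As a side note on the `gatherServer` flow above: the basic-auth branch only fires when credentials are embedded in the configured server URL, and the request itself always targets `<scheme>://<host><path>/;csv`. Here is a minimal, self-contained sketch of that parsing, using the same URL shape as the plugin's tests (the address and credentials are purely illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A haproxy stats URL with credentials embedded, as used in the plugin's tests.
	u, err := url.Parse("http://user:password@127.0.0.1:1936")
	if err != nil {
		panic(err)
	}
	if u.User != nil {
		pass, _ := u.User.Password()
		fmt.Println("user:", u.User.Username(), "pass:", pass)
	}
	// The request itself always targets <scheme>://<host><path>/;csv
	fmt.Printf("%s://%s%s/;csv\n", u.Scheme, u.Host, u.Path)
}
```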
|
||||
179
plugins/inputs/haproxy/haproxy_test.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package haproxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
)
|
||||
|
||||
func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
|
||||
//We create a fake server to return test data
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
username, password, ok := r.BasicAuth()
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
fmt.Fprint(w, "Unauthorized")
|
||||
return
|
||||
}
|
||||
|
||||
if username == "user" && password == "password" {
|
||||
fmt.Fprint(w, csvOutputSample)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
fmt.Fprint(w, "Unauthorized")
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
//Now we test against the above server, with our authentication data
|
||||
r := &haproxy{
|
||||
Servers: []string{strings.Replace(ts.URL, "http://", "http://user:password@", 1)},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
"server": ts.Listener.Addr().String(),
|
||||
"proxy": "be_app",
|
||||
"sv": "host0",
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"active_servers": uint64(1),
|
||||
"backup_servers": uint64(0),
|
||||
"bin": uint64(510913516),
|
||||
"bout": uint64(2193856571),
|
||||
"check_duration": uint64(10),
|
||||
"cli_abort": uint64(73),
|
||||
"ctime": uint64(2),
|
||||
"downtime": uint64(0),
|
||||
"dresp": uint64(0),
|
||||
"econ": uint64(0),
|
||||
"eresp": uint64(1),
|
||||
"http_response.1xx": uint64(0),
|
||||
"http_response.2xx": uint64(119534),
|
||||
"http_response.3xx": uint64(48051),
|
||||
"http_response.4xx": uint64(2345),
|
||||
"http_response.5xx": uint64(1056),
|
||||
"lbtot": uint64(171013),
|
||||
"qcur": uint64(0),
|
||||
"qmax": uint64(0),
|
||||
"qtime": uint64(0),
|
||||
"rate": uint64(3),
|
||||
"rate_max": uint64(12),
|
||||
"rtime": uint64(312),
|
||||
"scur": uint64(1),
|
||||
"smax": uint64(32),
|
||||
"srv_abort": uint64(1),
|
||||
"stot": uint64(171014),
|
||||
"ttime": uint64(2341),
|
||||
"wredis": uint64(0),
|
||||
"wretr": uint64(1),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
|
||||
|
||||
//Here, we should get an error because we don't pass authentication data
|
||||
r = &haproxy{
|
||||
Servers: []string{ts.URL},
|
||||
}
|
||||
|
||||
err = r.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprint(w, csvOutputSample)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
r := &haproxy{
|
||||
Servers: []string{ts.URL},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
"proxy": "be_app",
|
||||
"server": ts.Listener.Addr().String(),
|
||||
"sv": "host0",
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"active_servers": uint64(1),
|
||||
"backup_servers": uint64(0),
|
||||
"bin": uint64(510913516),
|
||||
"bout": uint64(2193856571),
|
||||
"check_duration": uint64(10),
|
||||
"cli_abort": uint64(73),
|
||||
"ctime": uint64(2),
|
||||
"downtime": uint64(0),
|
||||
"dresp": uint64(0),
|
||||
"econ": uint64(0),
|
||||
"eresp": uint64(1),
|
||||
"http_response.1xx": uint64(0),
|
||||
"http_response.2xx": uint64(119534),
|
||||
"http_response.3xx": uint64(48051),
|
||||
"http_response.4xx": uint64(2345),
|
||||
"http_response.5xx": uint64(1056),
|
||||
"lbtot": uint64(171013),
|
||||
"qcur": uint64(0),
|
||||
"qmax": uint64(0),
|
||||
"qtime": uint64(0),
|
||||
"rate": uint64(3),
|
||||
"rate_max": uint64(12),
|
||||
"rtime": uint64(312),
|
||||
"scur": uint64(1),
|
||||
"smax": uint64(32),
|
||||
"srv_abort": uint64(1),
|
||||
"stot": uint64(171014),
|
||||
"ttime": uint64(2341),
|
||||
"wredis": uint64(0),
|
||||
"wretr": uint64(1),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
|
||||
}
|
||||
|
||||
//When not passing server config, we default to localhost
|
||||
//We just want to make sure we did request stat from localhost
|
||||
func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
|
||||
r := &haproxy{}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := r.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "127.0.0.1:1936/;csv")
|
||||
}
|
||||
|
||||
const csvOutputSample = `
|
||||
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
|
||||
fe_app,FRONTEND,,81,288,713,2000,1094063,5557055817,24096715169,1102,80,95740,,,17,19,OPEN,,,,,,,,,2,16,113,13,114,,0,18,0,102,,,,0,1314093,537036,123452,11966,1360,,35,140,1987928,,,0,0,0,0,,,,,,,,
|
||||
be_static,host0,0,0,0,3,,3209,1141294,17389596,,0,,0,0,0,0,no check,1,1,0,,,,,,2,17,1,,3209,,2,0,,7,,,,0,218,1497,1494,0,0,0,,,,0,0,,,,,2,,,0,2,23,545,
|
||||
be_static,BACKEND,0,0,0,3,200,3209,1141294,17389596,0,0,,0,0,0,0,UP,1,1,0,,0,70698,0,,2,17,0,,3209,,1,0,,7,,,,0,218,1497,1494,0,0,,,,,0,0,0,0,0,0,2,,,0,2,23,545,
|
||||
be_static,host0,0,0,0,1,,28,17313,466003,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,1,,28,,2,0,,1,L4OK,,1,0,17,6,5,0,0,0,,,,0,0,,,,,2103,,,0,1,1,36,
|
||||
be_static,host4,0,0,0,1,,28,15358,1281073,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,2,,28,,2,0,,1,L4OK,,1,0,20,5,3,0,0,0,,,,0,0,,,,,2076,,,0,1,1,54,
|
||||
be_static,host5,0,0,0,1,,28,17547,1970404,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,3,,28,,2,0,,1,L4OK,,0,0,20,5,3,0,0,0,,,,0,0,,,,,1495,,,0,1,1,53,
|
||||
be_static,host6,0,0,0,1,,28,14105,1328679,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,4,,28,,2,0,,1,L4OK,,0,0,18,8,2,0,0,0,,,,0,0,,,,,1418,,,0,0,1,49,
|
||||
be_static,host7,0,0,0,1,,28,15258,1965185,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,5,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,935,,,0,0,1,28,
|
||||
be_static,host8,0,0,0,1,,28,12934,1034779,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,6,,28,,2,0,,1,L4OK,,0,0,17,9,2,0,0,0,,,,0,0,,,,,582,,,0,1,1,66,
|
||||
be_static,host9,0,0,0,1,,28,13434,134063,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,7,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,539,,,0,0,1,80,
|
||||
be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,8,,28,,2,0,,1,L4OK,,0,0,22,6,0,0,0,0,,,,0,0,,,,,487,,,0,0,1,36,
|
||||
be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38,
|
||||
be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17,
|
||||
be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381,
|
||||
be_app,host0,0,0,1,32,,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
|
||||
be_app,host4,0,0,2,29,,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
|
||||
`
|
||||
148
plugins/inputs/httpjson/README.md
Normal file
@@ -0,0 +1,148 @@
|
||||
# HTTP JSON Plugin
|
||||
|
||||
The httpjson plugin collects data from remote URLs which respond with JSON. It flattens the JSON and gathers all numeric values, treating them as floats.

For example, if you have a service called _mycollector_, which has an HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON
plugin like this:
|
||||
|
||||
```
|
||||
[[httpjson.services]]
|
||||
name = "mycollector"
|
||||
|
||||
servers = [
|
||||
"http://my.service.com/_stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
```
|
||||
|
||||
`name` is used as a prefix for the measurements.
|
||||
|
||||
`method` specifies HTTP method to use for requests.
|
||||
|
||||
You can also specify which keys from server response should be considered tags:
|
||||
|
||||
```
|
||||
[[httpjson.services]]
|
||||
...
|
||||
|
||||
tag_keys = [
|
||||
"role",
|
||||
"version"
|
||||
]
|
||||
```
|
||||
|
||||
You can also specify additional request parameters for the service:
|
||||
|
||||
```
|
||||
[[httpjson.services]]
|
||||
...
|
||||
|
||||
[httpjson.services.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
|
||||
```
|
||||
|
||||
|
||||
# Example:
|
||||
|
||||
Let's say that we have a service named "mycollector" configured like this:
|
||||
|
||||
```
|
||||
[httpjson]
|
||||
[[httpjson.services]]
|
||||
name = "mycollector"
|
||||
|
||||
servers = [
|
||||
"http://my.service.com/_stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
tag_keys = ["service"]
|
||||
```
|
||||
|
||||
which responds with the following JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"service": "service01",
|
||||
"a": 0.5,
|
||||
"b": {
|
||||
"c": "some text",
|
||||
"d": 0.1,
|
||||
"e": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
```
|
||||
httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
|
||||
httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
|
||||
httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
|
||||
```
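The flattening that produces `b_d` and `b_e` above can be pictured as a small recursive walk over the decoded JSON. This is only an illustrative sketch of the idea (the plugin itself delegates to telegraf's internal `JSONFlattener`), not the plugin's actual code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks a decoded JSON object and keeps only numeric leaves,
// joining nested keys with underscores, e.g. "b" + "d" -> "b_d".
func flatten(prefix string, in map[string]interface{}, out map[string]float64) {
	for k, v := range in {
		key := k
		if prefix != "" {
			key = prefix + "_" + k
		}
		switch val := v.(type) {
		case float64: // encoding/json decodes every JSON number as float64
			out[key] = val
		case map[string]interface{}:
			flatten(key, val, out)
		}
	}
}

func main() {
	raw := `{"a": 0.5, "b": {"c": "some text", "d": 0.1, "e": 5}}`
	var decoded map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &decoded); err != nil {
		panic(err)
	}
	fields := map[string]float64{}
	flatten("", decoded, fields)
	fmt.Println(fields) // map[a:0.5 b_d:0.1 b_e:5]
}
```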
|
||||
|
||||
# Example 2, Multiple Services:
|
||||
|
||||
There is also the option to collect JSON from multiple services; here is an
example of doing that.
|
||||
|
||||
```
|
||||
[httpjson]
|
||||
[[httpjson.services]]
|
||||
name = "mycollector1"
|
||||
|
||||
servers = [
|
||||
"http://my.service1.com/_stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
[[httpjson.services]]
|
||||
name = "mycollector2"
|
||||
|
||||
servers = [
|
||||
"http://service.net/json/stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "POST"
|
||||
```
|
||||
|
||||
The services respond with the following JSON:
|
||||
|
||||
mycollector1:
|
||||
```json
|
||||
{
|
||||
"a": 0.5,
|
||||
"b": {
|
||||
"c": "some text",
|
||||
"d": 0.1,
|
||||
"e": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
mycollector2:
|
||||
```json
|
||||
{
|
||||
"load": 100,
|
||||
"users": 1335
|
||||
}
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
|
||||
```
|
||||
httpjson_mycollector1_a,server='http://my.service.com/_stats' value=0.5
|
||||
httpjson_mycollector1_b_d,server='http://my.service.com/_stats' value=0.1
|
||||
httpjson_mycollector1_b_e,server='http://my.service.com/_stats' value=5
|
||||
|
||||
httpjson_mycollector2_load,server='http://service.net/json/stats' value=100
|
||||
httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335
|
||||
```
|
||||
216
plugins/inputs/httpjson/httpjson.go
Normal file
@@ -0,0 +1,216 @@
|
||||
package httpjson
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type HttpJson struct {
|
||||
Name string
|
||||
Servers []string
|
||||
Method string
|
||||
TagKeys []string
|
||||
Parameters map[string]string
|
||||
client HTTPClient
|
||||
}
|
||||
|
||||
type HTTPClient interface {
|
||||
// Returns the result of an http request
|
||||
//
|
||||
// Parameters:
|
||||
// req: HTTP request object
|
||||
//
|
||||
// Returns:
|
||||
// http.Response: HTTP response object
|
||||
// error : Any error that may have occurred
|
||||
MakeRequest(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
type RealHTTPClient struct {
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
return c.client.Do(req)
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# a name for the service being polled
|
||||
name = "webserver_stats"
|
||||
|
||||
# URL of each server in the service's cluster
|
||||
servers = [
|
||||
"http://localhost:9999/stats/",
|
||||
"http://localhost:9998/stats/",
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
# List of tag names to extract from top-level of JSON server response
|
||||
# tag_keys = [
|
||||
# "my_tag_1",
|
||||
# "my_tag_2"
|
||||
# ]
|
||||
|
||||
# HTTP parameters (all values must be strings)
|
||||
[inputs.httpjson.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
`
|
||||
|
||||
func (h *HttpJson) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (h *HttpJson) Description() string {
|
||||
return "Read flattened metrics from one or more JSON HTTP endpoints"
|
||||
}
|
||||
|
||||
// Gathers data for all servers.
|
||||
func (h *HttpJson) Gather(acc inputs.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
errorChannel := make(chan error, len(h.Servers))
|
||||
|
||||
for _, server := range h.Servers {
|
||||
wg.Add(1)
|
||||
go func(server string) {
|
||||
defer wg.Done()
|
||||
if err := h.gatherServer(acc, server); err != nil {
|
||||
errorChannel <- err
|
||||
}
|
||||
}(server)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errorChannel)
|
||||
|
||||
// Get all errors and return them as one giant error
|
||||
errorStrings := []string{}
|
||||
for err := range errorChannel {
|
||||
errorStrings = append(errorStrings, err.Error())
|
||||
}
|
||||
|
||||
if len(errorStrings) == 0 {
|
||||
return nil
|
||||
}
|
||||
return errors.New(strings.Join(errorStrings, "\n"))
|
||||
}
|
||||
|
||||
// Gathers data from a particular server
|
||||
// Parameters:
|
||||
// acc : The telegraf Accumulator to use
|
||||
// serverURL: endpoint to send request to
|
||||
// service : the service being queried
|
||||
//
|
||||
// Returns:
|
||||
// error: Any error that may have occurred
|
||||
func (h *HttpJson) gatherServer(
|
||||
acc inputs.Accumulator,
|
||||
serverURL string,
|
||||
) error {
|
||||
resp, err := h.sendRequest(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var jsonOut map[string]interface{}
|
||||
if err = json.Unmarshal([]byte(resp), &jsonOut); err != nil {
|
||||
return errors.New("Error decoding JSON response")
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"server": serverURL,
|
||||
}
|
||||
|
||||
for _, tag := range h.TagKeys {
|
||||
switch v := jsonOut[tag].(type) {
|
||||
case string:
|
||||
tags[tag] = v
|
||||
}
|
||||
delete(jsonOut, tag)
|
||||
}
|
||||
|
||||
f := internal.JSONFlattener{}
|
||||
err = f.FlattenJSON("", jsonOut)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var msrmnt_name string
|
||||
if h.Name == "" {
|
||||
msrmnt_name = "httpjson"
|
||||
} else {
|
||||
msrmnt_name = "httpjson_" + h.Name
|
||||
}
|
||||
acc.AddFields(msrmnt_name, f.Fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sends an HTTP request to the server using the HttpJson object's HTTPClient
|
||||
// Parameters:
|
||||
// serverURL: endpoint to send request to
|
||||
//
|
||||
// Returns:
|
||||
// string: body of the response
|
||||
// error : Any error that may have occurred
|
||||
func (h *HttpJson) sendRequest(serverURL string) (string, error) {
|
||||
// Prepare URL
|
||||
requestURL, err := url.Parse(serverURL)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Invalid server URL \"%s\"", serverURL)
|
||||
}
|
||||
|
||||
params := url.Values{}
|
||||
for k, v := range h.Parameters {
|
||||
params.Add(k, v)
|
||||
}
|
||||
requestURL.RawQuery = params.Encode()
|
||||
|
||||
// Create + send request
|
||||
req, err := http.NewRequest(h.Method, requestURL.String(), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
resp, err := h.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
	defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return string(body), err
|
||||
}
|
||||
|
||||
// Process response
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||
requestURL.String(),
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
return string(body), err
|
||||
}
|
||||
|
||||
return string(body), err
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("httpjson", func() inputs.Input {
|
||||
return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
|
||||
})
|
||||
}
|
||||
198
plugins/inputs/httpjson/httpjson_test.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package httpjson
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const validJSON = `
|
||||
{
|
||||
"parent": {
|
||||
"child": 3,
|
||||
"ignored_child": "hi"
|
||||
},
|
||||
"ignored_null": null,
|
||||
"integer": 4,
|
||||
"ignored_list": [3, 4],
|
||||
"ignored_parent": {
|
||||
"another_ignored_list": [4],
|
||||
"another_ignored_null": null,
|
||||
"ignored_string": "hello, world!"
|
||||
}
|
||||
}`
|
||||
|
||||
const validJSONTags = `
|
||||
{
|
||||
"value": 15,
|
||||
"role": "master",
|
||||
"build": "123"
|
||||
}`
|
||||
|
||||
var expectedFields = map[string]interface{}{
|
||||
"parent_child": float64(3),
|
||||
"integer": float64(4),
|
||||
}
|
||||
|
||||
const invalidJSON = "I don't think this is JSON"
|
||||
|
||||
const empty = ""
|
||||
|
||||
type mockHTTPClient struct {
|
||||
responseBody string
|
||||
statusCode int
|
||||
}
|
||||
|
||||
// Mock implementation of MakeRequest. Usually returns an http.Response with
|
||||
// hard-coded responseBody and statusCode. However, if the request uses a
|
||||
// nonstandard method, it uses status code 405 (method not allowed)
|
||||
func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
resp := http.Response{}
|
||||
resp.StatusCode = c.statusCode
|
||||
|
||||
// basic error checking on request method
|
||||
allowedMethods := []string{"GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"}
|
||||
methodValid := false
|
||||
for _, method := range allowedMethods {
|
||||
if req.Method == method {
|
||||
methodValid = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !methodValid {
|
||||
resp.StatusCode = 405 // Method not allowed
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Generates a pointer to an HttpJson object that uses a mock HTTP client.
|
||||
// Parameters:
|
||||
// response : Body of the response that the mock HTTP client should return
|
||||
// statusCode: HTTP status code the mock HTTP client should return
|
||||
//
|
||||
// Returns:
|
||||
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
|
||||
func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
||||
return []*HttpJson{
|
||||
&HttpJson{
|
||||
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server1.example.com/metrics/",
|
||||
"http://server2.example.com/metrics/",
|
||||
},
|
||||
Name: "my_webapp",
|
||||
Method: "GET",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
},
|
||||
&HttpJson{
|
||||
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server3.example.com/metrics/",
|
||||
"http://server4.example.com/metrics/",
|
||||
},
|
||||
Name: "other_webapp",
|
||||
Method: "POST",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
TagKeys: []string{
|
||||
"role",
|
||||
"build",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJson200(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
var acc testutil.Accumulator
|
||||
err := service.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 4, acc.NFields())
|
||||
for _, srv := range service.Servers {
|
||||
tags := map[string]string{"server": srv}
|
||||
mname := "httpjson_" + service.Name
|
||||
acc.AssertContainsTaggedFields(t, mname, expectedFields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test response to HTTP 500
|
||||
func TestHttpJson500(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 500)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test response to HTTP 405
|
||||
func TestHttpJsonBadMethod(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
httpjson[0].Method = "NOT_A_REAL_METHOD"
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test response to malformed JSON
|
||||
func TestHttpJsonBadJson(t *testing.T) {
|
||||
httpjson := genMockHttpJson(invalidJSON, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test response to empty string as response object
|
||||
func TestHttpJsonEmptyResponse(t *testing.T) {
|
||||
httpjson := genMockHttpJson(empty, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJson200Tags(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSONTags, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
if service.Name == "other_webapp" {
|
||||
var acc testutil.Accumulator
|
||||
err := service.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, acc.NFields())
|
||||
for _, srv := range service.Servers {
|
||||
tags := map[string]string{"server": srv, "role": "master", "build": "123"}
|
||||
fields := map[string]interface{}{"value": float64(15)}
|
||||
mname := "httpjson_" + service.Name
|
||||
acc.AssertContainsTaggedFields(t, mname, fields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
72
plugins/inputs/influxdb/README.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# influxdb plugin
|
||||
|
||||
The influxdb plugin collects InfluxDB-formatted data from JSON endpoints.
|
||||
|
||||
With a configuration of:
|
||||
|
||||
```toml
|
||||
[[inputs.influxdb]]
|
||||
urls = [
|
||||
"http://127.0.0.1:8086/debug/vars",
|
||||
"http://192.168.2.1:8086/debug/vars"
|
||||
]
|
||||
```
|
||||
|
||||
And if 127.0.0.1 responds with this JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"k1": {
|
||||
"name": "fruit",
|
||||
"tags": {
|
||||
"kind": "apple"
|
||||
},
|
||||
"values": {
|
||||
"inventory": 371,
|
||||
"sold": 112
|
||||
}
|
||||
},
|
||||
"k2": {
|
||||
"name": "fruit",
|
||||
"tags": {
|
||||
"kind": "banana"
|
||||
},
|
||||
"values": {
|
||||
"inventory": 1000,
|
||||
"sold": 403
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
And if 192.168.2.1 responds like so:
|
||||
|
||||
```json
|
||||
{
|
||||
"k3": {
|
||||
"name": "transactions",
|
||||
"tags": {},
|
||||
"values": {
|
||||
"total": 100,
|
||||
"balance": 184.75
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then the collected metrics will be:
|
||||
|
||||
```
|
||||
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='apple' inventory=371.0,sold=112.0
|
||||
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='banana' inventory=1000.0,sold=403.0
|
||||
|
||||
influxdb_transactions,url='http://192.168.2.1:8086/debug/vars' total=100.0,balance=184.75
|
||||
```
|
||||
|
||||
There are two important details to note about the collected metrics:
|
||||
|
||||
1. Even though the values in JSON are being displayed as integers, the metrics are reported as floats.
|
||||
JSON encoders usually don't print the fractional part for round floats.
|
||||
Because you cannot change the type of an existing field in InfluxDB, we assume all numbers are floats.
|
||||
|
||||
2. The top-level keys' names (in the example above, `"k1"`, `"k2"`, and `"k3"`) are not considered when recording the metrics.
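Point 1 follows directly from how Go's `encoding/json` decodes untyped numbers: any JSON number unmarshaled into an `interface{}` becomes a `float64`. A quick, self-contained illustration (values borrowed from the example above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var values map[string]interface{}
	// "inventory" and "sold" look like integers, but a decoder reading into
	// interface{} has no way to know that, so both come back as float64.
	if err := json.Unmarshal([]byte(`{"inventory": 371, "sold": 112}`), &values); err != nil {
		panic(err)
	}
	for k, v := range values {
		fmt.Printf("%s: %v (%T)\n", k, v, v) // e.g. inventory: 371 (float64)
	}
}
```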
|
||||
146
plugins/inputs/influxdb/influxdb.go
Normal file
@@ -0,0 +1,146 @@
|
||||
package influxdb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type InfluxDB struct {
|
||||
URLs []string `toml:"urls"`
|
||||
}
|
||||
|
||||
func (*InfluxDB) Description() string {
|
||||
return "Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints"
|
||||
}
|
||||
|
||||
func (*InfluxDB) SampleConfig() string {
|
||||
return `
|
||||
# Works with InfluxDB debug endpoints out of the box,
|
||||
# but other services can use this format too.
|
||||
# See the influxdb plugin's README for more details.
|
||||
|
||||
# Multiple URLs from which to read InfluxDB-formatted JSON
|
||||
urls = [
|
||||
"http://localhost:8086/debug/vars"
|
||||
]
|
||||
`
|
||||
}
|
||||
|
||||
func (i *InfluxDB) Gather(acc inputs.Accumulator) error {
|
||||
errorChannel := make(chan error, len(i.URLs))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, u := range i.URLs {
|
||||
wg.Add(1)
|
||||
go func(url string) {
|
||||
defer wg.Done()
|
||||
if err := i.gatherURL(acc, url); err != nil {
|
||||
errorChannel <- fmt.Errorf("[url=%s]: %s", url, err)
|
||||
}
|
||||
}(u)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errorChannel)
|
||||
|
||||
// If there weren't any errors, we can return nil now.
|
||||
if len(errorChannel) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// There were errors, so join them all together as one big error.
|
||||
errorStrings := make([]string, 0, len(errorChannel))
|
||||
for err := range errorChannel {
|
||||
errorStrings = append(errorStrings, err.Error())
|
||||
}
|
||||
|
||||
return errors.New(strings.Join(errorStrings, "\n"))
|
||||
}
|
||||
|
||||
type point struct {
|
||||
Name string `json:"name"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Values map[string]interface{} `json:"values"`
|
||||
}
|
||||
|
||||
// Gathers data from a particular URL
|
||||
// Parameters:
|
||||
// acc : The telegraf Accumulator to use
|
||||
// url : endpoint to send request to
|
||||
//
|
||||
// Returns:
|
||||
// error: Any error that may have occurred
|
||||
func (i *InfluxDB) gatherURL(
|
||||
acc inputs.Accumulator,
|
||||
url string,
|
||||
) error {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// It would be nice to be able to decode into a map[string]point, but
|
||||
// we'll get a decoder error like:
|
||||
// `json: cannot unmarshal array into Go value of type influxdb.point`
|
||||
// if any of the values aren't objects.
|
||||
// To avoid that error, we decode by hand.
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
|
||||
// Parse beginning of object
|
||||
if t, err := dec.Token(); err != nil {
|
||||
return err
|
||||
} else if t != json.Delim('{') {
|
||||
return errors.New("document root must be a JSON object")
|
||||
}
|
||||
|
||||
// Loop through rest of object
|
||||
for {
|
||||
// Nothing left in this object, we're done
|
||||
if !dec.More() {
|
||||
break
|
||||
}
|
||||
|
||||
// Read in a string key. We don't do anything with the top-level keys, so it's discarded.
|
||||
_, err := dec.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Attempt to parse a whole object into a point.
|
||||
// It might be a non-object, like a string or array.
|
||||
// If we fail to decode it into a point, ignore it and move on.
|
||||
var p point
|
||||
if err := dec.Decode(&p); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the object was a point, but was not fully initialized, ignore it and move on.
|
||||
if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add a tag to indicate the source of the data.
|
||||
p.Tags["url"] = url
|
||||
|
||||
acc.AddFields(
|
||||
p.Name,
|
||||
p.Values,
|
||||
p.Tags,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("influxdb", func() inputs.Input {
|
||||
return &InfluxDB{}
|
||||
})
|
||||
}
|
||||
97
plugins/inputs/influxdb/influxdb_test.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package influxdb_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs/influxdb"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBasic(t *testing.T) {
|
||||
js := `
|
||||
{
|
||||
"_1": {
|
||||
"name": "foo",
|
||||
"tags": {
|
||||
"id": "ex1"
|
||||
},
|
||||
"values": {
|
||||
"i": -1,
|
||||
"f": 0.5,
|
||||
"b": true,
|
||||
"s": "string"
|
||||
}
|
||||
},
|
||||
"ignored": {
|
||||
"willBeRecorded": false
|
||||
},
|
||||
"ignoredAndNested": {
|
||||
"hash": {
|
||||
"is": "nested"
|
||||
}
|
||||
},
|
||||
"array": [
|
||||
"makes parsing more difficult than necessary"
|
||||
],
|
||||
"string": "makes parsing more difficult than necessary",
|
||||
"_2": {
|
||||
"name": "bar",
|
||||
"tags": {
|
||||
"id": "ex2"
|
||||
},
|
||||
"values": {
|
||||
"x": "x"
|
||||
}
|
||||
},
|
||||
"pointWithoutFields_willNotBeIncluded": {
|
||||
"name": "asdf",
|
||||
"tags": {
|
||||
"id": "ex3"
|
||||
},
|
||||
"values": {}
|
||||
}
|
||||
}
|
||||
`
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/endpoint" {
|
||||
_, _ = w.Write([]byte(js))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
plugin := &influxdb.InfluxDB{
|
||||
URLs: []string{fakeServer.URL + "/endpoint"},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, plugin.Gather(&acc))
|
||||
|
||||
require.Len(t, acc.Points, 2)
|
||||
fields := map[string]interface{}{
|
||||
// JSON will truncate floats to integer representations.
|
||||
// Since there's no distinction in JSON, we can't assume it's an int.
|
||||
"i": -1.0,
|
||||
"f": 0.5,
|
||||
"b": true,
|
||||
"s": "string",
|
||||
}
|
||||
tags := map[string]string{
|
||||
"id": "ex1",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "foo", fields, tags)
|
||||
|
||||
fields = map[string]interface{}{
|
||||
"x": "x",
|
||||
}
|
||||
tags = map[string]string{
|
||||
"id": "ex2",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "bar", fields, tags)
|
||||
}
|
||||
51
plugins/inputs/jolokia/README.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Telegraf plugin: Jolokia

#### Plugin arguments:
- **context** string: Context root of the jolokia URL
- **servers** []Server: List of servers
  + **name** string: Server's logical name
  + **host** string: Server's IP address or hostname
  + **port** string: Server's listening port
- **metrics** []Metric
  + **name** string: Name of the measurement
  + **jmx** string: JMX path that identifies the MBean's attributes
  + **pass** []string: Attributes to retain when collecting values
  + **drop** []string: Attributes to drop when collecting values

#### Description

The Jolokia plugin collects JVM metrics exposed as MBean attributes through the Jolokia REST endpoint. All metrics
are collected for each configured server.

See: https://jolokia.org/

# Measurements:
The Jolokia plugin produces one measurement for each configured metric, adding the server's `name`, `host` and `port` as tags.
|
||||
|
||||
Given a configuration like:
|
||||
|
||||
```ini
|
||||
[jolokia]
|
||||
|
||||
[[jolokia.servers]]
|
||||
name = "as-service-1"
|
||||
host = "127.0.0.1"
|
||||
port = "8080"
|
||||
|
||||
[[jolokia.servers]]
|
||||
name = "as-service-2"
|
||||
host = "127.0.0.1"
|
||||
port = "8180"
|
||||
|
||||
[[jolokia.metrics]]
|
||||
name = "heap_memory_usage"
|
||||
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
|
||||
pass = ["used", "max"]
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
|
||||
```
|
||||
jolokia_heap_memory_usage name=as-service-1,host=127.0.0.1,port=8080 used=xxx,max=yyy
|
||||
jolokia_heap_memory_usage name=as-service-2,host=127.0.0.1,port=8180 used=vvv,max=zzz
|
||||
```
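Under the hood the plugin composes a Jolokia read URL from the server's `host` and `port`, the configured `context` and the metric's `jmx` path, and issues a GET against it. A minimal sketch of that composition, reusing the values from the example configuration above:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Same pieces the plugin concatenates: "http://" + host + ":" + port + context + jmx.
	host, port := "127.0.0.1", "8080"
	context := "/jolokia/read"
	jmx := "/java.lang:type=Memory/HeapMemoryUsage"

	requestURL, err := url.Parse("http://" + host + ":" + port + context + jmx)
	if err != nil {
		panic(err)
	}
	fmt.Println(requestURL.String())
	// -> http://127.0.0.1:8080/jolokia/read/java.lang:type=Memory/HeapMemoryUsage
}
```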
|
||||
163
plugins/inputs/jolokia/jolokia.go
Normal file
@@ -0,0 +1,163 @@
|
||||
package jolokia
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
Name string
|
||||
Host string
|
||||
Username string
|
||||
Password string
|
||||
Port string
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
Name string
|
||||
Jmx string
|
||||
}
|
||||
|
||||
type JolokiaClient interface {
|
||||
MakeRequest(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
type JolokiaClientImpl struct {
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
return c.client.Do(req)
|
||||
}
|
||||
|
||||
type Jolokia struct {
|
||||
jClient JolokiaClient
|
||||
Context string
|
||||
Servers []Server
|
||||
Metrics []Metric
|
||||
}
|
||||
|
||||
func (j *Jolokia) SampleConfig() string {
|
||||
return `
|
||||
# This is the context root used to compose the jolokia url
|
||||
context = "/jolokia/read"
|
||||
|
||||
# List of servers exposing jolokia read service
|
||||
[[inputs.jolokia.servers]]
|
||||
name = "stable"
|
||||
host = "192.168.103.2"
|
||||
port = "8180"
|
||||
# username = "myuser"
|
||||
# password = "mypassword"
|
||||
|
||||
# List of metrics collected on the above servers
# Each metric consists of a name, a jmx path and either a pass or a drop slice of attributes
# This collects all heap memory usage metrics
|
||||
[[inputs.jolokia.metrics]]
|
||||
name = "heap_memory_usage"
|
||||
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
|
||||
`
|
||||
}
|
||||
|
||||
func (j *Jolokia) Description() string {
|
||||
return "Read JMX metrics through Jolokia"
|
||||
}
|
||||
|
||||
func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
|
||||
// Create + send request
|
||||
req, err := http.NewRequest("GET", requestUrl.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := j.jClient.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Process response
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||
requestUrl,
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// read body
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal json
|
||||
var jsonOut map[string]interface{}
|
||||
if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
|
||||
return nil, errors.New("Error decoding JSON response")
|
||||
}
|
||||
|
||||
return jsonOut, nil
|
||||
}
|
||||
|
||||
func (j *Jolokia) Gather(acc inputs.Accumulator) error {
|
||||
context := j.Context //"/jolokia/read"
|
||||
servers := j.Servers
|
||||
metrics := j.Metrics
|
||||
tags := make(map[string]string)
|
||||
|
||||
for _, server := range servers {
|
||||
tags["server"] = server.Name
|
||||
tags["port"] = server.Port
|
||||
tags["host"] = server.Host
|
||||
fields := make(map[string]interface{})
|
||||
for _, metric := range metrics {
|
||||
|
||||
measurement := metric.Name
|
||||
jmxPath := metric.Jmx
|
||||
|
||||
// Prepare URL
|
||||
requestUrl, err := url.Parse("http://" + server.Host + ":" +
|
||||
server.Port + context + jmxPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if server.Username != "" || server.Password != "" {
|
||||
requestUrl.User = url.UserPassword(server.Username, server.Password)
|
||||
}
|
||||
|
||||
out, _ := j.getAttr(requestUrl)
|
||||
|
||||
if values, ok := out["value"]; ok {
|
||||
switch t := values.(type) {
|
||||
case map[string]interface{}:
|
||||
for k, v := range t {
|
||||
fields[measurement+"_"+k] = v
|
||||
}
|
||||
case interface{}:
|
||||
fields[measurement] = t
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Missing key 'value' in '%s' output response\n",
|
||||
requestUrl.String())
|
||||
}
|
||||
}
|
||||
acc.AddFields("jolokia", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("jolokia", func() inputs.Input {
|
||||
return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}}
|
||||
})
|
||||
}
|
||||
116
plugins/inputs/jolokia/jolokia_test.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package jolokia
|
||||
|
||||
import (
|
||||
_ "fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
_ "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const validMultiValueJSON = `
|
||||
{
|
||||
"request":{
|
||||
"mbean":"java.lang:type=Memory",
|
||||
"attribute":"HeapMemoryUsage",
|
||||
"type":"read"
|
||||
},
|
||||
"value":{
|
||||
"init":67108864,
|
||||
"committed":456130560,
|
||||
"max":477626368,
|
||||
"used":203288528
|
||||
},
|
||||
"timestamp":1446129191,
|
||||
"status":200
|
||||
}`
|
||||
|
||||
const validSingleValueJSON = `
|
||||
{
|
||||
"request":{
|
||||
"path":"used",
|
||||
"mbean":"java.lang:type=Memory",
|
||||
"attribute":"HeapMemoryUsage",
|
||||
"type":"read"
|
||||
},
|
||||
"value":209274376,
|
||||
"timestamp":1446129256,
|
||||
"status":200
|
||||
}`
|
||||
|
||||
const invalidJSON = "I don't think this is JSON"
|
||||
|
||||
const empty = ""
|
||||
|
||||
var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
|
||||
var HeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
|
||||
var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
|
||||
|
||||
type jolokiaClientStub struct {
|
||||
responseBody string
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
resp := http.Response{}
|
||||
resp.StatusCode = c.statusCode
|
||||
resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Generates a pointer to a Jolokia object that uses a stubbed Jolokia client.
// Parameters:
//     response  : Body of the response that the stub client should return
//     statusCode: HTTP status code the stub client should return
//
// Returns:
//     *Jolokia: Pointer to a Jolokia object that uses the generated stub client
|
||||
func genJolokiaClientStub(response string, statusCode int, servers []Server, metrics []Metric) *Jolokia {
|
||||
return &Jolokia{
|
||||
jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
|
||||
Servers: servers,
|
||||
Metrics: metrics,
|
||||
}
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJsonMultiValue(t *testing.T) {
|
||||
jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{HeapMetric})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := jolokia.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, len(acc.Points))
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"heap_memory_usage_init": 67108864.0,
|
||||
"heap_memory_usage_committed": 456130560.0,
|
||||
"heap_memory_usage_max": 477626368.0,
|
||||
"heap_memory_usage_used": 203288528.0,
|
||||
}
|
||||
tags := map[string]string{
|
||||
"host": "127.0.0.1",
|
||||
"port": "8080",
|
||||
"server": "as1",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJsonOn404(t *testing.T) {
|
||||
|
||||
jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers,
|
||||
[]Metric{UsedHeapMetric})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
acc.SetDebug(true)
|
||||
err := jolokia.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(acc.Points))
|
||||
}
|
||||
24
plugins/inputs/kafka_consumer/README.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Kafka Consumer

The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka
topic and adds messages to InfluxDB. The plugin assumes messages follow the
line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup)
is used to talk to the Kafka cluster so multiple instances of telegraf can read
from the same topic in parallel.
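Because messages are expected to be line protocol, each Kafka message body must parse with `models.ParsePoints`, the same parser the plugin uses internally. A minimal sketch (the message below is the one used in the plugin's own tests):

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdb/influxdb/models"
)

func main() {
	// A line-protocol message, exactly what the plugin expects in each Kafka message body.
	msg := []byte("cpu_load_short,host=server01 value=23422.0 1422568543702900257")

	// models.ParsePoints is the same call the kafka_consumer plugin applies to msg.Value.
	points, err := models.ParsePoints(msg)
	if err != nil {
		log.Fatalf("not valid line protocol: %v", err)
	}
	for _, p := range points {
		fmt.Println(p.Name(), p.Tags(), p.Fields(), p.Time())
	}
}
```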
## Testing
|
||||
|
||||
Running integration tests requires running Zookeeper & Kafka. The following
|
||||
commands assume you're on OS X & using [boot2docker](http://boot2docker.io/) or docker-machine through [Docker Toolbox](https://www.docker.com/docker-toolbox).
|
||||
|
||||
To start Kafka & Zookeeper:
|
||||
|
||||
```
|
||||
docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip || docker-machine ip <your_machine_name>` --env ADVERTISED_PORT=9092 spotify/kafka
|
||||
```
|
||||
|
||||
To run tests:
|
||||
|
||||
```
|
||||
go test
|
||||
```
|
||||
166
plugins/inputs/kafka_consumer/kafka_consumer.go
Normal file
@@ -0,0 +1,166 @@
|
||||
package kafka_consumer
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/wvanbergen/kafka/consumergroup"
|
||||
)
|
||||
|
||||
type Kafka struct {
|
||||
ConsumerGroup string
|
||||
Topics []string
|
||||
ZookeeperPeers []string
|
||||
Consumer *consumergroup.ConsumerGroup
|
||||
PointBuffer int
|
||||
Offset string
|
||||
|
||||
sync.Mutex
|
||||
|
||||
// channel for all incoming kafka messages
|
||||
in <-chan *sarama.ConsumerMessage
|
||||
// channel for all kafka consumer errors
|
||||
errs <-chan *sarama.ConsumerError
|
||||
// channel for all incoming parsed kafka points
|
||||
pointChan chan models.Point
|
||||
done chan struct{}
|
||||
|
||||
// doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
|
||||
// this is mostly for test purposes, but there may be a use-case for it later.
|
||||
doNotCommitMsgs bool
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
# an array of Zookeeper connection strings
|
||||
zookeeper_peers = ["localhost:2181"]
|
||||
# the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
# Maximum number of points to buffer between collection intervals
|
||||
point_buffer = 100000
|
||||
# Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
`
|
||||
|
||||
func (k *Kafka) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (k *Kafka) Description() string {
|
||||
return "Read line-protocol metrics from Kafka topic(s)"
|
||||
}
|
||||
|
||||
func (k *Kafka) Start() error {
|
||||
k.Lock()
|
||||
defer k.Unlock()
|
||||
var consumerErr error
|
||||
|
||||
config := consumergroup.NewConfig()
|
||||
switch strings.ToLower(k.Offset) {
|
||||
case "oldest", "":
|
||||
config.Offsets.Initial = sarama.OffsetOldest
|
||||
case "newest":
|
||||
config.Offsets.Initial = sarama.OffsetNewest
|
||||
default:
|
||||
log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
|
||||
k.Offset)
|
||||
config.Offsets.Initial = sarama.OffsetOldest
|
||||
}
|
||||
|
||||
if k.Consumer == nil || k.Consumer.Closed() {
|
||||
k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
|
||||
k.ConsumerGroup,
|
||||
k.Topics,
|
||||
k.ZookeeperPeers,
|
||||
config,
|
||||
)
|
||||
if consumerErr != nil {
|
||||
return consumerErr
|
||||
}
|
||||
|
||||
// Setup message and error channels
|
||||
k.in = k.Consumer.Messages()
|
||||
k.errs = k.Consumer.Errors()
|
||||
}
|
||||
|
||||
k.done = make(chan struct{})
|
||||
if k.PointBuffer == 0 {
|
||||
k.PointBuffer = 100000
|
||||
}
|
||||
k.pointChan = make(chan models.Point, k.PointBuffer)
|
||||
|
||||
// Start the kafka message reader
|
||||
go k.parser()
|
||||
log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n",
|
||||
k.ZookeeperPeers, k.Topics)
|
||||
return nil
|
||||
}
|
||||
|
||||
// parser() reads all incoming messages from the consumer, and parses them into
|
||||
// influxdb metric points.
|
||||
func (k *Kafka) parser() {
|
||||
for {
|
||||
select {
|
||||
case <-k.done:
|
||||
return
|
||||
case err := <-k.errs:
|
||||
log.Printf("Kafka Consumer Error: %s\n", err.Error())
|
||||
case msg := <-k.in:
|
||||
points, err := models.ParsePoints(msg.Value)
|
||||
if err != nil {
|
||||
log.Printf("Could not parse kafka message: %s, error: %s",
|
||||
string(msg.Value), err.Error())
|
||||
}
|
||||
|
||||
for _, point := range points {
|
||||
select {
|
||||
case k.pointChan <- point:
|
||||
continue
|
||||
default:
|
||||
log.Printf("Kafka Consumer buffer is full, dropping a point." +
|
||||
" You may want to increase the point_buffer setting")
|
||||
}
|
||||
}
|
||||
|
||||
if !k.doNotCommitMsgs {
|
||||
// TODO(cam) this locking can be removed if this PR gets merged:
|
||||
// https://github.com/wvanbergen/kafka/pull/84
|
||||
k.Lock()
|
||||
k.Consumer.CommitUpto(msg)
|
||||
k.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kafka) Stop() {
|
||||
k.Lock()
|
||||
defer k.Unlock()
|
||||
close(k.done)
|
||||
if err := k.Consumer.Close(); err != nil {
|
||||
log.Printf("Error closing kafka consumer: %s\n", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kafka) Gather(acc inputs.Accumulator) error {
|
||||
k.Lock()
|
||||
defer k.Unlock()
|
||||
npoints := len(k.pointChan)
|
||||
for i := 0; i < npoints; i++ {
|
||||
point := <-k.pointChan
|
||||
acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("kafka_consumer", func() inputs.Input {
|
||||
return &Kafka{}
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
package kafka_consumer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestReadsMetricsFromKafka(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
|
||||
zkPeers := []string{testutil.GetLocalHost() + ":2181"}
|
||||
testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())
|
||||
|
||||
// Send a Kafka message to the kafka host
|
||||
msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
|
||||
producer, err := sarama.NewSyncProducer(brokerPeers, nil)
|
||||
require.NoError(t, err)
|
||||
_, _, err = producer.SendMessage(
|
||||
&sarama.ProducerMessage{
|
||||
Topic: testTopic,
|
||||
Value: sarama.StringEncoder(msg),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
defer producer.Close()
|
||||
|
||||
// Start the Kafka Consumer
|
||||
k := &Kafka{
|
||||
ConsumerGroup: "telegraf_test_consumers",
|
||||
Topics: []string{testTopic},
|
||||
ZookeeperPeers: zkPeers,
|
||||
PointBuffer: 100000,
|
||||
Offset: "oldest",
|
||||
}
|
||||
if err := k.Start(); err != nil {
|
||||
t.Fatal(err.Error())
|
||||
} else {
|
||||
defer k.Stop()
|
||||
}
|
||||
|
||||
waitForPoint(k, t)
|
||||
|
||||
// Verify that we can now gather the sent message
|
||||
var acc testutil.Accumulator
|
||||
// Sanity check
|
||||
assert.Equal(t, 0, len(acc.Points), "There should not be any points")
|
||||
|
||||
// Gather points
|
||||
err = k.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
if len(acc.Points) == 1 {
|
||||
point := acc.Points[0]
|
||||
assert.Equal(t, "cpu_load_short", point.Measurement)
|
||||
assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
|
||||
assert.Equal(t, map[string]string{
|
||||
"host": "server01",
|
||||
"direction": "in",
|
||||
"region": "us-west",
|
||||
}, point.Tags)
|
||||
assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
|
||||
} else {
|
||||
t.Errorf("No points found in accumulator, expected 1")
|
||||
}
|
||||
}
|
||||
|
||||
// Waits for the metric that was sent to the kafka broker to arrive at the kafka
|
||||
// consumer
|
||||
func waitForPoint(k *Kafka, t *testing.T) {
|
||||
// Give the kafka container up to 5 seconds to get the point to the consumer
|
||||
ticker := time.NewTicker(5 * time.Millisecond)
|
||||
counter := 0
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
counter++
|
||||
if counter > 1000 {
|
||||
t.Fatal("Waited for 5s, point never arrived to consumer")
|
||||
} else if len(k.pointChan) == 1 {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
99
plugins/inputs/kafka_consumer/kafka_consumer_test.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package kafka_consumer
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"
|
||||
invalidMsg = "cpu_load_short,host=server01 1422568543702900257"
|
||||
pointBuffer = 5
|
||||
)
|
||||
|
||||
func NewTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
|
||||
in := make(chan *sarama.ConsumerMessage, pointBuffer)
|
||||
k := Kafka{
|
||||
ConsumerGroup: "test",
|
||||
Topics: []string{"telegraf"},
|
||||
ZookeeperPeers: []string{"localhost:2181"},
|
||||
PointBuffer: pointBuffer,
|
||||
Offset: "oldest",
|
||||
in: in,
|
||||
doNotCommitMsgs: true,
|
||||
errs: make(chan *sarama.ConsumerError, pointBuffer),
|
||||
done: make(chan struct{}),
|
||||
pointChan: make(chan models.Point, pointBuffer),
|
||||
}
|
||||
return &k, in
|
||||
}
|
||||
|
||||
// Test that the parser parses kafka messages into points
|
||||
func TestRunParser(t *testing.T) {
|
||||
k, in := NewTestKafka()
|
||||
defer close(k.done)
|
||||
|
||||
go k.parser()
|
||||
in <- saramaMsg(testMsg)
|
||||
time.Sleep(time.Millisecond)
|
||||
|
||||
assert.Equal(t, len(k.pointChan), 1)
|
||||
}
|
||||
|
||||
// Test that the parser ignores invalid messages
|
||||
func TestRunParserInvalidMsg(t *testing.T) {
|
||||
k, in := NewTestKafka()
|
||||
defer close(k.done)
|
||||
|
||||
go k.parser()
|
||||
in <- saramaMsg(invalidMsg)
|
||||
time.Sleep(time.Millisecond)
|
||||
|
||||
assert.Equal(t, len(k.pointChan), 0)
|
||||
}
|
||||
|
||||
// Test that points are dropped when we hit the buffer limit
|
||||
func TestRunParserRespectsBuffer(t *testing.T) {
|
||||
k, in := NewTestKafka()
|
||||
defer close(k.done)
|
||||
|
||||
go k.parser()
|
||||
for i := 0; i < pointBuffer+1; i++ {
|
||||
in <- saramaMsg(testMsg)
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
|
||||
assert.Equal(t, len(k.pointChan), 5)
|
||||
}
|
||||
|
||||
// Test that the parser parses kafka messages into points
|
||||
func TestRunParserAndGather(t *testing.T) {
|
||||
k, in := NewTestKafka()
|
||||
defer close(k.done)
|
||||
|
||||
go k.parser()
|
||||
in <- saramaMsg(testMsg)
|
||||
time.Sleep(time.Millisecond)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
k.Gather(&acc)
|
||||
|
||||
assert.Equal(t, len(acc.Points), 1)
|
||||
acc.AssertContainsFields(t, "cpu_load_short",
|
||||
map[string]interface{}{"value": float64(23422)})
|
||||
}
|
||||
|
||||
func saramaMsg(val string) *sarama.ConsumerMessage {
|
||||
return &sarama.ConsumerMessage{
|
||||
Key: nil,
|
||||
Value: []byte(val),
|
||||
Offset: 0,
|
||||
Partition: 0,
|
||||
}
|
||||
}
|
||||
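For readers not familiar with InfluxDB line protocol, the testMsg constant these unit tests feed through the parser breaks down as follows. This is an annotation only; the string is copied verbatim from the test file above, and the constant name is illustrative.

// Annotated copy of testMsg from kafka_consumer_test.go (InfluxDB line protocol):
//
//   measurement: cpu_load_short
//   tags:        host=server01
//   fields:      value=23422.0 (float)
//   timestamp:   1422568543702900257 (nanoseconds since the epoch)
//
// TestRunParserAndGather asserts that the parser turns this into exactly one
// point carrying the float field value=23422.0.
const annotatedTestMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"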
231  plugins/inputs/leofs/leofs.go  Normal file
@@ -0,0 +1,231 @@
|
||||
package leofs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const oid = ".1.3.6.1.4.1.35450"
|
||||
|
||||
// For Manager Master
|
||||
const defaultEndpoint = "127.0.0.1:4020"
|
||||
|
||||
type ServerType int
|
||||
|
||||
const (
|
||||
ServerTypeManagerMaster ServerType = iota
|
||||
ServerTypeManagerSlave
|
||||
ServerTypeStorage
|
||||
ServerTypeGateway
|
||||
)
|
||||
|
||||
type LeoFS struct {
|
||||
Servers []string
|
||||
}
|
||||
|
||||
var KeyMapping = map[ServerType][]string{
|
||||
ServerTypeManagerMaster: {
|
||||
"num_of_processes",
|
||||
"total_memory_usage",
|
||||
"system_memory_usage",
|
||||
"processes_memory_usage",
|
||||
"ets_memory_usage",
|
||||
"num_of_processes_5min",
|
||||
"total_memory_usage_5min",
|
||||
"system_memory_usage_5min",
|
||||
"processes_memory_usage_5min",
|
||||
"ets_memory_usage_5min",
|
||||
"used_allocated_memory",
|
||||
"allocated_memory",
|
||||
"used_allocated_memory_5min",
|
||||
"allocated_memory_5min",
|
||||
},
|
||||
ServerTypeManagerSlave: {
|
||||
"num_of_processes",
|
||||
"total_memory_usage",
|
||||
"system_memory_usage",
|
||||
"processes_memory_usage",
|
||||
"ets_memory_usage",
|
||||
"num_of_processes_5min",
|
||||
"total_memory_usage_5min",
|
||||
"system_memory_usage_5min",
|
||||
"processes_memory_usage_5min",
|
||||
"ets_memory_usage_5min",
|
||||
"used_allocated_memory",
|
||||
"allocated_memory",
|
||||
"used_allocated_memory_5min",
|
||||
"allocated_memory_5min",
|
||||
},
|
||||
ServerTypeStorage: {
|
||||
"num_of_processes",
|
||||
"total_memory_usage",
|
||||
"system_memory_usage",
|
||||
"processes_memory_usage",
|
||||
"ets_memory_usage",
|
||||
"num_of_processes_5min",
|
||||
"total_memory_usage_5min",
|
||||
"system_memory_usage_5min",
|
||||
"processes_memory_usage_5min",
|
||||
"ets_memory_usage_5min",
|
||||
"num_of_writes",
|
||||
"num_of_reads",
|
||||
"num_of_deletes",
|
||||
"num_of_writes_5min",
|
||||
"num_of_reads_5min",
|
||||
"num_of_deletes_5min",
|
||||
"num_of_active_objects",
|
||||
"total_objects",
|
||||
"total_size_of_active_objects",
|
||||
"total_size",
|
||||
"num_of_replication_messages",
|
||||
"num_of_sync-vnode_messages",
|
||||
"num_of_rebalance_messages",
|
||||
"used_allocated_memory",
|
||||
"allocated_memory",
|
||||
"used_allocated_memory_5min",
|
||||
"allocated_memory_5min",
|
||||
},
|
||||
ServerTypeGateway: {
|
||||
"num_of_processes",
|
||||
"total_memory_usage",
|
||||
"system_memory_usage",
|
||||
"processes_memory_usage",
|
||||
"ets_memory_usage",
|
||||
"num_of_processes_5min",
|
||||
"total_memory_usage_5min",
|
||||
"system_memory_usage_5min",
|
||||
"processes_memory_usage_5min",
|
||||
"ets_memory_usage_5min",
|
||||
"num_of_writes",
|
||||
"num_of_reads",
|
||||
"num_of_deletes",
|
||||
"num_of_writes_5min",
|
||||
"num_of_reads_5min",
|
||||
"num_of_deletes_5min",
|
||||
"count_of_cache-hit",
|
||||
"count_of_cache-miss",
|
||||
"total_of_files",
|
||||
"total_cached_size",
|
||||
"used_allocated_memory",
|
||||
"allocated_memory",
|
||||
"used_allocated_memory_5min",
|
||||
"allocated_memory_5min",
|
||||
},
|
||||
}
|
||||
|
||||
var serverTypeMapping = map[string]ServerType{
|
||||
"4020": ServerTypeManagerMaster,
|
||||
"4021": ServerTypeManagerSlave,
|
||||
"4010": ServerTypeStorage,
|
||||
"4011": ServerTypeStorage,
|
||||
"4012": ServerTypeStorage,
|
||||
"4013": ServerTypeStorage,
|
||||
"4000": ServerTypeGateway,
|
||||
"4001": ServerTypeGateway,
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# An array of URIs to gather stats about LeoFS.
|
||||
# Specify an IP or hostname with port, e.g. 127.0.0.1:4020
|
||||
#
|
||||
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
|
||||
servers = ["127.0.0.1:4021"]
|
||||
`
|
||||
|
||||
func (l *LeoFS) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (l *LeoFS) Description() string {
|
||||
return "Read metrics from a LeoFS Server via SNMP"
|
||||
}
|
||||
|
||||
func (l *LeoFS) Gather(acc inputs.Accumulator) error {
|
||||
if len(l.Servers) == 0 {
|
||||
l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc)
|
||||
return nil
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
var outerr error
|
||||
for _, endpoint := range l.Servers {
|
||||
_, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to parse the address:%s, err:%s", endpoint, err)
|
||||
}
|
||||
port, err := retrieveTokenAfterColon(endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
st, ok := serverTypeMapping[port]
|
||||
if !ok {
|
||||
st = ServerTypeStorage
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(endpoint string, st ServerType) {
|
||||
defer wg.Done()
|
||||
outerr = l.gatherServer(endpoint, st, acc)
|
||||
}(endpoint, st)
|
||||
}
|
||||
wg.Wait()
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc inputs.Accumulator) error {
|
||||
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cmd.Start()
|
||||
defer cmd.Wait()
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
if !scanner.Scan() {
|
||||
return fmt.Errorf("Unable to retrieve the node name")
|
||||
}
|
||||
nodeName, err := retrieveTokenAfterColon(scanner.Text())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nodeNameTrimmed := strings.Trim(nodeName, "\"")
|
||||
tags := map[string]string{
|
||||
"node": nodeNameTrimmed,
|
||||
}
|
||||
i := 0
|
||||
|
||||
fields := make(map[string]interface{})
|
||||
for scanner.Scan() {
|
||||
key := KeyMapping[serverType][i]
|
||||
val, err := retrieveTokenAfterColon(scanner.Text())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fVal, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err)
|
||||
}
|
||||
fields[key] = fVal
|
||||
i++
|
||||
}
|
||||
acc.AddFields("leofs", fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func retrieveTokenAfterColon(line string) (string, error) {
|
||||
tokens := strings.Split(line, ":")
|
||||
if len(tokens) != 2 {
|
||||
return "", fmt.Errorf("':' not found in the line:%s", line)
|
||||
}
|
||||
return strings.TrimSpace(tokens[1]), nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("leofs", func() inputs.Input {
|
||||
return &LeoFS{}
|
||||
})
|
||||
}
|
||||
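retrieveTokenAfterColon does the heavy lifting for both the node name and every metric value, so a small unit test alongside leofs_test.go may be worth having. A minimal sketch follows; the test name is hypothetical and the input lines are copied from the fake snmpwalk output defined in the test file below.

package leofs

import "testing"

// Sketch of a table-driven test for the colon-splitting helper; inputs mirror
// the fake snmpwalk output used by the existing tests.
func TestRetrieveTokenAfterColon(t *testing.T) {
    cases := []struct{ in, want string }{
        {`iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186`, "186"},
        {`iso.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"`, `"manager_888@127.0.0.1"`},
    }
    for _, c := range cases {
        got, err := retrieveTokenAfterColon(c.in)
        if err != nil {
            t.Fatalf("unexpected error for %q: %v", c.in, err)
        }
        if got != c.want {
            t.Errorf("got %q, want %q", got, c.want)
        }
    }
}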
173  plugins/inputs/leofs/leofs_test.go  Normal file
@@ -0,0 +1,173 @@
|
||||
package leofs
|
||||
|
||||
import (
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var fakeSNMP4Manager = `
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
|
||||
const output = ` + "`" + `iso.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"
|
||||
iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186
|
||||
iso.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519
|
||||
iso.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525
|
||||
iso.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068
|
||||
iso.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968
|
||||
iso.3.6.1.4.1.35450.15.7.0 = Gauge32: 186
|
||||
iso.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006
|
||||
iso.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867
|
||||
iso.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995
|
||||
iso.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634
|
||||
iso.3.6.1.4.1.35450.15.12.0 = Gauge32: 60
|
||||
iso.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904
|
||||
iso.3.6.1.4.1.35450.15.14.0 = Gauge32: 60
|
||||
iso.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" +
|
||||
`
|
||||
func main() {
|
||||
fmt.Println(output)
|
||||
}
|
||||
`
|
||||
|
||||
var fakeSNMP4Storage = `
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
|
||||
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
|
||||
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
|
||||
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
|
||||
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
|
||||
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
|
||||
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
|
||||
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
|
||||
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
|
||||
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
|
||||
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
|
||||
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
|
||||
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
|
||||
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
|
||||
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
|
||||
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
|
||||
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
|
||||
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
|
||||
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
|
||||
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
|
||||
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
|
||||
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
|
||||
iso.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
|
||||
iso.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
|
||||
iso.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
|
||||
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
|
||||
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
|
||||
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
|
||||
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
|
||||
`
|
||||
func main() {
|
||||
fmt.Println(output)
|
||||
}
|
||||
`
|
||||
|
||||
var fakeSNMP4Gateway = `
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
|
||||
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1"
|
||||
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 465
|
||||
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335
|
||||
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415
|
||||
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011
|
||||
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855
|
||||
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 465
|
||||
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426
|
||||
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358
|
||||
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002
|
||||
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125
|
||||
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
|
||||
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 827
|
||||
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
|
||||
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
|
||||
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650
|
||||
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
|
||||
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256
|
||||
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158
|
||||
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 34
|
||||
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 1
|
||||
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 53
|
||||
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240
|
||||
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 53
|
||||
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" +
|
||||
`
|
||||
func main() {
|
||||
fmt.Println(output)
|
||||
}
|
||||
`
|
||||
|
||||
func makeFakeSNMPSrc(code string) string {
|
||||
path := os.TempDir() + "/test.go"
|
||||
err := ioutil.WriteFile(path, []byte(code), 0600)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func buildFakeSNMPCmd(src string) {
|
||||
err := exec.Command("go", "build", "-o", "snmpwalk", src).Run()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testMain(t *testing.T, code string, endpoint string, serverType ServerType) {
|
||||
// Build the fake snmpwalk for test
|
||||
src := makeFakeSNMPSrc(code)
|
||||
defer os.Remove(src)
|
||||
buildFakeSNMPCmd(src)
|
||||
defer os.Remove("./snmpwalk")
|
||||
envPathOrigin := os.Getenv("PATH")
|
||||
// Refer to the fake snmpwalk
|
||||
os.Setenv("PATH", ".")
|
||||
defer os.Setenv("PATH", envPathOrigin)
|
||||
|
||||
l := &LeoFS{
|
||||
Servers: []string{endpoint},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
acc.SetDebug(true)
|
||||
|
||||
err := l.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
floatMetrics := KeyMapping[serverType]
|
||||
|
||||
for _, metric := range floatMetrics {
|
||||
assert.True(t, acc.HasFloatField("leofs", metric), metric)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeoFSManagerMasterMetrics(t *testing.T) {
|
||||
testMain(t, fakeSNMP4Manager, "localhost:4020", ServerTypeManagerMaster)
|
||||
}
|
||||
|
||||
func TestLeoFSManagerSlaveMetrics(t *testing.T) {
|
||||
testMain(t, fakeSNMP4Manager, "localhost:4021", ServerTypeManagerSlave)
|
||||
}
|
||||
|
||||
func TestLeoFSStorageMetrics(t *testing.T) {
|
||||
testMain(t, fakeSNMP4Storage, "localhost:4010", ServerTypeStorage)
|
||||
}
|
||||
|
||||
func TestLeoFSGatewayMetrics(t *testing.T) {
|
||||
testMain(t, fakeSNMP4Gateway, "localhost:4000", ServerTypeGateway)
|
||||
}
|
||||
250  plugins/inputs/lustre2/lustre2.go  Normal file
@@ -0,0 +1,250 @@
|
||||
/*
|
||||
Lustre 2.x telegraf plugin
|
||||
|
||||
Lustre (http://lustre.org/) is an open-source, parallel file system
|
||||
for HPC environments. It stores statistics about its activity in
|
||||
/proc
|
||||
|
||||
*/
|
||||
package lustre2
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
// Lustre proc files can change between versions, so we want to future-proof
|
||||
// by letting people choose what to look at.
|
||||
type Lustre2 struct {
|
||||
Ost_procfiles []string
|
||||
Mds_procfiles []string
|
||||
|
||||
// allFields maps an OST name to the metric fields associated with that OST
|
||||
allFields map[string]map[string]interface{}
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# An array of /proc globs to search for Lustre stats
|
||||
# If not specified, the default will work on Lustre 2.5.x
|
||||
#
|
||||
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
|
||||
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
|
||||
`
|
||||
|
||||
/* The wanted fields would be a []string if not for the
|
||||
lines that start with read_bytes/write_bytes and contain
|
||||
both the byte count and the function call count
|
||||
*/
|
||||
type mapping struct {
|
||||
inProc string // What to look for at the start of a line in /proc/fs/lustre/*
|
||||
field uint32 // which field to extract from that line
|
||||
reportAs string // What field name to report this value as
|
||||
tag string // Additional tag to add for this metric
|
||||
}
|
||||
|
||||
var wanted_ost_fields = []*mapping{
|
||||
{
|
||||
inProc: "write_bytes",
|
||||
field: 6,
|
||||
reportAs: "write_bytes",
|
||||
},
|
||||
{ // line starts with 'write_bytes', but value write_calls is in second column
|
||||
inProc: "write_bytes",
|
||||
field: 1,
|
||||
reportAs: "write_calls",
|
||||
},
|
||||
{
|
||||
inProc: "read_bytes",
|
||||
field: 6,
|
||||
reportAs: "read_bytes",
|
||||
},
|
||||
{ // line starts with 'read_bytes', but value read_calls is in second column
|
||||
inProc: "read_bytes",
|
||||
field: 1,
|
||||
reportAs: "read_calls",
|
||||
},
|
||||
{
|
||||
inProc: "cache_hit",
|
||||
},
|
||||
{
|
||||
inProc: "cache_miss",
|
||||
},
|
||||
{
|
||||
inProc: "cache_access",
|
||||
},
|
||||
}
|
||||
|
||||
var wanted_mds_fields = []*mapping{
|
||||
{
|
||||
inProc: "open",
|
||||
},
|
||||
{
|
||||
inProc: "close",
|
||||
},
|
||||
{
|
||||
inProc: "mknod",
|
||||
},
|
||||
{
|
||||
inProc: "link",
|
||||
},
|
||||
{
|
||||
inProc: "unlink",
|
||||
},
|
||||
{
|
||||
inProc: "mkdir",
|
||||
},
|
||||
{
|
||||
inProc: "rmdir",
|
||||
},
|
||||
{
|
||||
inProc: "rename",
|
||||
},
|
||||
{
|
||||
inProc: "getattr",
|
||||
},
|
||||
{
|
||||
inProc: "setattr",
|
||||
},
|
||||
{
|
||||
inProc: "getxattr",
|
||||
},
|
||||
{
|
||||
inProc: "setxattr",
|
||||
},
|
||||
{
|
||||
inProc: "statfs",
|
||||
},
|
||||
{
|
||||
inProc: "sync",
|
||||
},
|
||||
{
|
||||
inProc: "samedir_rename",
|
||||
},
|
||||
{
|
||||
inProc: "crossdir_rename",
|
||||
},
|
||||
}
|
||||
|
||||
func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc inputs.Accumulator) error {
|
||||
files, err := filepath.Glob(fileglob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
/* Turn /proc/fs/lustre/obdfilter/<ost_name>/stats and similar
|
||||
* into just the object store target name
|
||||
* Assumption: the target name is always second to last,
|
||||
* which is true in Lustre 2.1->2.5
|
||||
*/
|
||||
path := strings.Split(file, "/")
|
||||
name := path[len(path)-2]
|
||||
var fields map[string]interface{}
|
||||
fields, ok := l.allFields[name]
|
||||
if !ok {
|
||||
fields = make(map[string]interface{})
|
||||
l.allFields[name] = fields
|
||||
}
|
||||
|
||||
lines, err := internal.ReadLines(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, line := range lines {
|
||||
parts := strings.Fields(line)
|
||||
for _, wanted := range wanted_fields {
|
||||
var data uint64
|
||||
if parts[0] == wanted.inProc {
|
||||
wanted_field := wanted.field
|
||||
// if not set, assume field[1]. Shouldn't be field[0], as
|
||||
// that's a string
|
||||
if wanted_field == 0 {
|
||||
wanted_field = 1
|
||||
}
|
||||
data, err = strconv.ParseUint((parts[wanted_field]), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
report_name := wanted.inProc
|
||||
if wanted.reportAs != "" {
|
||||
report_name = wanted.reportAs
|
||||
}
|
||||
fields[report_name] = data
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SampleConfig returns sample configuration message
|
||||
func (l *Lustre2) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Description returns description of Lustre2 plugin
|
||||
func (l *Lustre2) Description() string {
|
||||
return "Read metrics from local Lustre service on OST, MDS"
|
||||
}
|
||||
|
||||
// Gather reads stats from all lustre targets
|
||||
func (l *Lustre2) Gather(acc inputs.Accumulator) error {
|
||||
l.allFields = make(map[string]map[string]interface{})
|
||||
|
||||
if len(l.Ost_procfiles) == 0 {
|
||||
// read/write bytes are in obdfilter/<ost_name>/stats
|
||||
err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats",
|
||||
wanted_ost_fields, acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// cache counters are in osd-ldiskfs/<ost_name>/stats
|
||||
err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats",
|
||||
wanted_ost_fields, acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(l.Mds_procfiles) == 0 {
|
||||
// Metadata server stats
|
||||
err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats",
|
||||
wanted_mds_fields, acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, procfile := range l.Ost_procfiles {
|
||||
err := l.GetLustreProcStats(procfile, wanted_ost_fields, acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, procfile := range l.Mds_procfiles {
|
||||
err := l.GetLustreProcStats(procfile, wanted_mds_fields, acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for name, fields := range l.allFields {
|
||||
tags := map[string]string{
|
||||
"name": name,
|
||||
}
|
||||
acc.AddFields("lustre2", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("lustre2", func() inputs.Input {
|
||||
return &Lustre2{}
|
||||
})
|
||||
}
|
||||
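The numeric field indices in wanted_ost_fields refer to whitespace-separated columns of each stats line: field 6 carries the running byte totals and field 1 the call counts. A standalone sketch of that indexing, using the read_bytes line from the obdfilter fixture in lustre2_test.go below (the program itself is illustrative, not part of the change):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Sample line copied from obdfilterProcContents in lustre2_test.go.
    line := "read_bytes 203238095 samples [bytes] 4096 1048576 78026117632000"
    parts := strings.Fields(line)
    fmt.Println(parts[0]) // "read_bytes"     -> matched against mapping.inProc
    fmt.Println(parts[1]) // "203238095"      -> field 1, reported as read_calls
    fmt.Println(parts[6]) // "78026117632000" -> field 6, reported as read_bytes
}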
130  plugins/inputs/lustre2/lustre2_test.go  Normal file
@@ -0,0 +1,130 @@
|
||||
package lustre2
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Set config file variables to point to fake directory structure instead of /proc?
|
||||
|
||||
const obdfilterProcContents = `snapshot_time 1438693064.430544 secs.usecs
|
||||
read_bytes 203238095 samples [bytes] 4096 1048576 78026117632000
|
||||
write_bytes 71893382 samples [bytes] 1 1048576 15201500833981
|
||||
get_info 1182008495 samples [reqs]
|
||||
set_info_async 2 samples [reqs]
|
||||
connect 1117 samples [reqs]
|
||||
reconnect 1160 samples [reqs]
|
||||
disconnect 1084 samples [reqs]
|
||||
statfs 3575885 samples [reqs]
|
||||
create 698 samples [reqs]
|
||||
destroy 3190060 samples [reqs]
|
||||
setattr 605647 samples [reqs]
|
||||
punch 805187 samples [reqs]
|
||||
sync 6608753 samples [reqs]
|
||||
preprw 275131477 samples [reqs]
|
||||
commitrw 275131477 samples [reqs]
|
||||
quotactl 229231 samples [reqs]
|
||||
ping 78020757 samples [reqs]
|
||||
`
|
||||
|
||||
const osdldiskfsProcContents = `snapshot_time 1438693135.640551 secs.usecs
|
||||
get_page 275132812 samples [usec] 0 3147 1320420955 22041662259
|
||||
cache_access 19047063027 samples [pages] 1 1 19047063027
|
||||
cache_hit 7393729777 samples [pages] 1 1 7393729777
|
||||
cache_miss 11653333250 samples [pages] 1 1 11653333250
|
||||
`
|
||||
|
||||
const mdtProcContents = `snapshot_time 1438693238.20113 secs.usecs
|
||||
open 1024577037 samples [reqs]
|
||||
close 873243496 samples [reqs]
|
||||
mknod 349042 samples [reqs]
|
||||
link 445 samples [reqs]
|
||||
unlink 3549417 samples [reqs]
|
||||
mkdir 705499 samples [reqs]
|
||||
rmdir 227434 samples [reqs]
|
||||
rename 629196 samples [reqs]
|
||||
getattr 1503663097 samples [reqs]
|
||||
setattr 1898364 samples [reqs]
|
||||
getxattr 6145349681 samples [reqs]
|
||||
setxattr 83969 samples [reqs]
|
||||
statfs 2916320 samples [reqs]
|
||||
sync 434081 samples [reqs]
|
||||
samedir_rename 259625 samples [reqs]
|
||||
crossdir_rename 369571 samples [reqs]
|
||||
`
|
||||
|
||||
func TestLustre2GeneratesMetrics(t *testing.T) {
|
||||
|
||||
tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
|
||||
ost_name := "OST0001"
|
||||
|
||||
mdtdir := tempdir + "/mdt/"
|
||||
err := os.MkdirAll(mdtdir+"/"+ost_name, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
osddir := tempdir + "/osd-ldiskfs/"
|
||||
err = os.MkdirAll(osddir+"/"+ost_name, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
obddir := tempdir + "/obdfilter/"
|
||||
err = os.MkdirAll(obddir+"/"+ost_name, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/md_stats", []byte(mdtProcContents), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(osddir+"/"+ost_name+"/stats", []byte(osdldiskfsProcContents), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(obddir+"/"+ost_name+"/stats", []byte(obdfilterProcContents), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
m := &Lustre2{
|
||||
Ost_procfiles: []string{obddir + "/*/stats", osddir + "/*/stats"},
|
||||
Mds_procfiles: []string{mdtdir + "/*/md_stats"},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err = m.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
"name": ost_name,
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"cache_access": uint64(19047063027),
|
||||
"cache_hit": uint64(7393729777),
|
||||
"cache_miss": uint64(11653333250),
|
||||
"close": uint64(873243496),
|
||||
"crossdir_rename": uint64(369571),
|
||||
"getattr": uint64(1503663097),
|
||||
"getxattr": uint64(6145349681),
|
||||
"link": uint64(445),
|
||||
"mkdir": uint64(705499),
|
||||
"mknod": uint64(349042),
|
||||
"open": uint64(1024577037),
|
||||
"read_bytes": uint64(78026117632000),
|
||||
"read_calls": uint64(203238095),
|
||||
"rename": uint64(629196),
|
||||
"rmdir": uint64(227434),
|
||||
"samedir_rename": uint64(259625),
|
||||
"setattr": uint64(1898364),
|
||||
"setxattr": uint64(83969),
|
||||
"statfs": uint64(2916320),
|
||||
"sync": uint64(434081),
|
||||
"unlink": uint64(3549417),
|
||||
"write_bytes": uint64(15201500833981),
|
||||
"write_calls": uint64(71893382),
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "lustre2", fields, tags)
|
||||
|
||||
err = os.RemoveAll(os.TempDir() + "/telegraf")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
234  plugins/inputs/mailchimp/chimp_api.go  Normal file
@@ -0,0 +1,234 @@
|
||||
package mailchimp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
reports_endpoint string = "/3.0/reports"
|
||||
reports_endpoint_campaign string = "/3.0/reports/%s"
|
||||
)
|
||||
|
||||
var mailchimp_datacenter = regexp.MustCompile("[a-z]+[0-9]+$")
|
||||
|
||||
type ChimpAPI struct {
|
||||
Transport http.RoundTripper
|
||||
Debug bool
|
||||
|
||||
sync.Mutex
|
||||
|
||||
url *url.URL
|
||||
}
|
||||
|
||||
type ReportsParams struct {
|
||||
Count string
|
||||
Offset string
|
||||
SinceSendTime string
|
||||
BeforeSendTime string
|
||||
}
|
||||
|
||||
func (p *ReportsParams) String() string {
|
||||
v := url.Values{}
|
||||
if p.Count != "" {
|
||||
v.Set("count", p.Count)
|
||||
}
|
||||
if p.Offset != "" {
|
||||
v.Set("offset", p.Offset)
|
||||
}
|
||||
if p.BeforeSendTime != "" {
|
||||
v.Set("before_send_time", p.BeforeSendTime)
|
||||
}
|
||||
if p.SinceSendTime != "" {
|
||||
v.Set("since_send_time", p.SinceSendTime)
|
||||
}
|
||||
return v.Encode()
|
||||
}
|
||||
|
||||
func NewChimpAPI(apiKey string) *ChimpAPI {
|
||||
u := &url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimp_datacenter.FindString(apiKey))
|
||||
u.User = url.UserPassword("", apiKey)
|
||||
return &ChimpAPI{url: u}
|
||||
}
|
||||
|
||||
type APIError struct {
|
||||
Status int `json:"status"`
|
||||
Type string `json:"type"`
|
||||
Title string `json:"title"`
|
||||
Detail string `json:"detail"`
|
||||
Instance string `json:"instance"`
|
||||
}
|
||||
|
||||
func (e APIError) Error() string {
|
||||
return fmt.Sprintf("ERROR %v: %v. See %v", e.Status, e.Title, e.Type)
|
||||
}
|
||||
|
||||
func chimpErrorCheck(body []byte) error {
|
||||
var e APIError
|
||||
json.Unmarshal(body, &e)
|
||||
if e.Title != "" || e.Status != 0 {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.url.Path = reports_endpoint
|
||||
|
||||
var response ReportsResponse
|
||||
rawjson, err := runChimp(a, params)
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(rawjson, &response)
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (a *ChimpAPI) GetReport(campaignID string) (Report, error) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.url.Path = fmt.Sprintf(reports_endpoint_campaign, campaignID)
|
||||
|
||||
var response Report
|
||||
rawjson, err := runChimp(a, ReportsParams{})
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(rawjson, &response)
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
|
||||
client := &http.Client{Transport: api.Transport}
|
||||
|
||||
var b bytes.Buffer
|
||||
req, err := http.NewRequest("GET", api.url.String(), &b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.URL.RawQuery = params.String()
|
||||
req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
|
||||
if api.Debug {
|
||||
log.Printf("Request URL: %s", req.URL.String())
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if api.Debug {
|
||||
log.Printf("Response Body:%s", string(body))
|
||||
}
|
||||
|
||||
if err = chimpErrorCheck(body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
|
||||
type ReportsResponse struct {
|
||||
Reports []Report `json:"reports"`
|
||||
TotalItems int `json:"total_items"`
|
||||
}
|
||||
|
||||
type Report struct {
|
||||
ID string `json:"id"`
|
||||
CampaignTitle string `json:"campaign_title"`
|
||||
Type string `json:"type"`
|
||||
EmailsSent int `json:"emails_sent"`
|
||||
AbuseReports int `json:"abuse_reports"`
|
||||
Unsubscribed int `json:"unsubscribed"`
|
||||
SendTime string `json:"send_time"`
|
||||
|
||||
TimeSeries []TimeSerie
|
||||
Bounces Bounces `json:"bounces"`
|
||||
Forwards Forwards `json:"forwards"`
|
||||
Opens Opens `json:"opens"`
|
||||
Clicks Clicks `json:"clicks"`
|
||||
FacebookLikes FacebookLikes `json:"facebook_likes"`
|
||||
IndustryStats IndustryStats `json:"industry_stats"`
|
||||
ListStats ListStats `json:"list_stats"`
|
||||
}
|
||||
|
||||
type Bounces struct {
|
||||
HardBounces int `json:"hard_bounces"`
|
||||
SoftBounces int `json:"soft_bounces"`
|
||||
SyntaxErrors int `json:"syntax_errors"`
|
||||
}
|
||||
|
||||
type Forwards struct {
|
||||
ForwardsCount int `json:"forwards_count"`
|
||||
ForwardsOpens int `json:"forwards_opens"`
|
||||
}
|
||||
|
||||
type Opens struct {
|
||||
OpensTotal int `json:"opens_total"`
|
||||
UniqueOpens int `json:"unique_opens"`
|
||||
OpenRate float64 `json:"open_rate"`
|
||||
LastOpen string `json:"last_open"`
|
||||
}
|
||||
|
||||
type Clicks struct {
|
||||
ClicksTotal int `json:"clicks_total"`
|
||||
UniqueClicks int `json:"unique_clicks"`
|
||||
UniqueSubscriberClicks int `json:"unique_subscriber_clicks"`
|
||||
ClickRate float64 `json:"click_rate"`
|
||||
LastClick string `json:"last_click"`
|
||||
}
|
||||
|
||||
type FacebookLikes struct {
|
||||
RecipientLikes int `json:"recipient_likes"`
|
||||
UniqueLikes int `json:"unique_likes"`
|
||||
FacebookLikes int `json:"facebook_likes"`
|
||||
}
|
||||
|
||||
type IndustryStats struct {
|
||||
Type string `json:"type"`
|
||||
OpenRate float64 `json:"open_rate"`
|
||||
ClickRate float64 `json:"click_rate"`
|
||||
BounceRate float64 `json:"bounce_rate"`
|
||||
UnopenRate float64 `json:"unopen_rate"`
|
||||
UnsubRate float64 `json:"unsub_rate"`
|
||||
AbuseRate float64 `json:"abuse_rate"`
|
||||
}
|
||||
|
||||
type ListStats struct {
|
||||
SubRate float64 `json:"sub_rate"`
|
||||
UnsubRate float64 `json:"unsub_rate"`
|
||||
OpenRate float64 `json:"open_rate"`
|
||||
ClickRate float64 `json:"click_rate"`
|
||||
}
|
||||
|
||||
type TimeSerie struct {
|
||||
TimeStamp string `json:"timestamp"`
|
||||
EmailsSent int `json:"emails_sent"`
|
||||
UniqueOpens int `json:"unique_opens"`
|
||||
RecipientsClick int `json:"recipients_click"`
|
||||
}
|
||||
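For reference, the exported surface of this client can be exercised on its own. A minimal sketch from within the mailchimp package follows; the function name is hypothetical and the API key is a made-up placeholder whose "-us1" suffix is what the mailchimp_datacenter regexp extracts to build the us1.api.mailchimp.com host.

package mailchimp

import "log"

// Illustrative, not part of the change: fetches recent campaign reports.
func exampleGetReports() {
    api := NewChimpAPI("00000000000000000000000000000000-us1") // placeholder key
    api.Debug = true

    reports, err := api.GetReports(ReportsParams{
        Count:         "10",
        SinceSendTime: "2015-09-01T00:00:00+00:00",
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range reports.Reports {
        log.Printf("%s: %d emails sent", r.CampaignTitle, r.EmailsSent)
    }
}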
116  plugins/inputs/mailchimp/mailchimp.go  Normal file
@@ -0,0 +1,116 @@
|
||||
package mailchimp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type MailChimp struct {
|
||||
api *ChimpAPI
|
||||
|
||||
ApiKey string
|
||||
DaysOld int
|
||||
CampaignId string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# MailChimp API key
|
||||
# get from https://admin.mailchimp.com/account/api/
|
||||
api_key = "" # required
|
||||
# Reports for campaigns sent more than days_old ago will not be collected.
|
||||
# 0 means collect all.
|
||||
days_old = 0
|
||||
# Campaign ID to get; if empty, all campaigns are collected. This option overrides days_old
|
||||
# campaign_id = ""
|
||||
`
|
||||
|
||||
func (m *MailChimp) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (m *MailChimp) Description() string {
|
||||
return "Gathers metrics from the /3.0/reports MailChimp API"
|
||||
}
|
||||
|
||||
func (m *MailChimp) Gather(acc inputs.Accumulator) error {
|
||||
if m.api == nil {
|
||||
m.api = NewChimpAPI(m.ApiKey)
|
||||
}
|
||||
m.api.Debug = false
|
||||
|
||||
if m.CampaignId == "" {
|
||||
since := ""
|
||||
if m.DaysOld > 0 {
|
||||
now := time.Now()
|
||||
d, _ := time.ParseDuration(fmt.Sprintf("%dh", 24*m.DaysOld))
|
||||
since = now.Add(-d).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
reports, err := m.api.GetReports(ReportsParams{
|
||||
SinceSendTime: since,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
for _, report := range reports.Reports {
|
||||
gatherReport(acc, report, now)
|
||||
}
|
||||
} else {
|
||||
report, err := m.api.GetReport(m.CampaignId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
now := time.Now()
|
||||
gatherReport(acc, report, now)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func gatherReport(acc inputs.Accumulator, report Report, now time.Time) {
|
||||
tags := make(map[string]string)
|
||||
tags["id"] = report.ID
|
||||
tags["campaign_title"] = report.CampaignTitle
|
||||
fields := map[string]interface{}{
|
||||
"emails_sent": report.EmailsSent,
|
||||
"abuse_reports": report.AbuseReports,
|
||||
"unsubscribed": report.Unsubscribed,
|
||||
"hard_bounces": report.Bounces.HardBounces,
|
||||
"soft_bounces": report.Bounces.SoftBounces,
|
||||
"syntax_errors": report.Bounces.SyntaxErrors,
|
||||
"forwards_count": report.Forwards.ForwardsCount,
|
||||
"forwards_opens": report.Forwards.ForwardsOpens,
|
||||
"opens_total": report.Opens.OpensTotal,
|
||||
"unique_opens": report.Opens.UniqueOpens,
|
||||
"open_rate": report.Opens.OpenRate,
|
||||
"clicks_total": report.Clicks.ClicksTotal,
|
||||
"unique_clicks": report.Clicks.UniqueClicks,
|
||||
"unique_subscriber_clicks": report.Clicks.UniqueSubscriberClicks,
|
||||
"click_rate": report.Clicks.ClickRate,
|
||||
"facebook_recipient_likes": report.FacebookLikes.RecipientLikes,
|
||||
"facebook_unique_likes": report.FacebookLikes.UniqueLikes,
|
||||
"facebook_likes": report.FacebookLikes.FacebookLikes,
|
||||
"industry_type": report.IndustryStats.Type,
|
||||
"industry_open_rate": report.IndustryStats.OpenRate,
|
||||
"industry_click_rate": report.IndustryStats.ClickRate,
|
||||
"industry_bounce_rate": report.IndustryStats.BounceRate,
|
||||
"industry_unopen_rate": report.IndustryStats.UnopenRate,
|
||||
"industry_unsub_rate": report.IndustryStats.UnsubRate,
|
||||
"industry_abuse_rate": report.IndustryStats.AbuseRate,
|
||||
"list_stats_sub_rate": report.ListStats.SubRate,
|
||||
"list_stats_unsub_rate": report.ListStats.UnsubRate,
|
||||
"list_stats_open_rate": report.ListStats.OpenRate,
|
||||
"list_stats_click_rate": report.ListStats.ClickRate,
|
||||
}
|
||||
acc.AddFields("mailchimp", fields, tags, now)
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("mailchimp", func() inputs.Input {
|
||||
return &MailChimp{}
|
||||
})
|
||||
}
|
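The days_old handling in Gather above boils down to one time-arithmetic step: subtract days_old * 24h from now and format the result as RFC3339 for the since_send_time parameter. A standalone sketch of the same conversion (the value 7 is illustrative):

package main

import (
    "fmt"
    "time"
)

func main() {
    // Same conversion Gather performs when days_old > 0 and no campaign_id is set:
    // only campaigns sent within the last daysOld days are collected.
    daysOld := 7 // illustrative value
    d, _ := time.ParseDuration(fmt.Sprintf("%dh", 24*daysOld))
    since := time.Now().Add(-d).Format(time.RFC3339)
    fmt.Println("since_send_time =", since)
}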
||||
774
plugins/inputs/mailchimp/mailchimp_test.go
Normal file
774
plugins/inputs/mailchimp/mailchimp_test.go
Normal file
@@ -0,0 +1,774 @@
|
||||
package mailchimp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMailChimpGatherReports(t *testing.T) {
|
||||
ts := httptest.NewServer(
|
||||
http.HandlerFunc(
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, sampleReports)
|
||||
},
|
||||
))
|
||||
defer ts.Close()
|
||||
|
||||
u, err := url.ParseRequestURI(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
api := &ChimpAPI{
|
||||
url: u,
|
||||
Debug: true,
|
||||
}
|
||||
m := MailChimp{
|
||||
api: api,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err = m.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := make(map[string]string)
|
||||
tags["id"] = "42694e9e57"
|
||||
tags["campaign_title"] = "Freddie's Jokes Vol. 1"
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"emails_sent": int(200),
|
||||
"abuse_reports": int(0),
|
||||
"unsubscribed": int(2),
|
||||
"hard_bounces": int(0),
|
||||
"soft_bounces": int(2),
|
||||
"syntax_errors": int(0),
|
||||
"forwards_count": int(0),
|
||||
"forwards_opens": int(0),
|
||||
"opens_total": int(186),
|
||||
"unique_opens": int(100),
|
||||
"clicks_total": int(42),
|
||||
"unique_clicks": int(400),
|
||||
"unique_subscriber_clicks": int(42),
|
||||
"facebook_recipient_likes": int(5),
|
||||
"facebook_unique_likes": int(8),
|
||||
"facebook_likes": int(42),
|
||||
"open_rate": float64(42),
|
||||
"click_rate": float64(42),
|
||||
"industry_open_rate": float64(0.17076777144396),
|
||||
"industry_click_rate": float64(0.027431311866951),
|
||||
"industry_bounce_rate": float64(0.0063767751251474),
|
||||
"industry_unopen_rate": float64(0.82285545343089),
|
||||
"industry_unsub_rate": float64(0.001436957032815),
|
||||
"industry_abuse_rate": float64(0.00021111996110887),
|
||||
"list_stats_sub_rate": float64(10),
|
||||
"list_stats_unsub_rate": float64(20),
|
||||
"list_stats_open_rate": float64(42),
|
||||
"list_stats_click_rate": float64(42),
|
||||
"industry_type": "Social Networks and Online Communities",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags)
|
||||
}
|
||||
|
||||
func TestMailChimpGatherReport(t *testing.T) {
|
||||
ts := httptest.NewServer(
|
||||
http.HandlerFunc(
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, sampleReport)
|
||||
},
|
||||
))
|
||||
defer ts.Close()
|
||||
|
||||
u, err := url.ParseRequestURI(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
api := &ChimpAPI{
|
||||
url: u,
|
||||
Debug: true,
|
||||
}
|
||||
m := MailChimp{
|
||||
api: api,
|
||||
CampaignId: "test",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err = m.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := make(map[string]string)
|
||||
tags["id"] = "42694e9e57"
|
||||
tags["campaign_title"] = "Freddie's Jokes Vol. 1"
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"emails_sent": int(200),
|
||||
"abuse_reports": int(0),
|
||||
"unsubscribed": int(2),
|
||||
"hard_bounces": int(0),
|
||||
"soft_bounces": int(2),
|
||||
"syntax_errors": int(0),
|
||||
"forwards_count": int(0),
|
||||
"forwards_opens": int(0),
|
||||
"opens_total": int(186),
|
||||
"unique_opens": int(100),
|
||||
"clicks_total": int(42),
|
||||
"unique_clicks": int(400),
|
||||
"unique_subscriber_clicks": int(42),
|
||||
"facebook_recipient_likes": int(5),
|
||||
"facebook_unique_likes": int(8),
|
||||
"facebook_likes": int(42),
|
||||
"open_rate": float64(42),
|
||||
"click_rate": float64(42),
|
||||
"industry_open_rate": float64(0.17076777144396),
|
||||
"industry_click_rate": float64(0.027431311866951),
|
||||
"industry_bounce_rate": float64(0.0063767751251474),
|
||||
"industry_unopen_rate": float64(0.82285545343089),
|
||||
"industry_unsub_rate": float64(0.001436957032815),
|
||||
"industry_abuse_rate": float64(0.00021111996110887),
|
||||
"list_stats_sub_rate": float64(10),
|
||||
"list_stats_unsub_rate": float64(20),
|
||||
"list_stats_open_rate": float64(42),
|
||||
"list_stats_click_rate": float64(42),
|
||||
"industry_type": "Social Networks and Online Communities",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags)
|
||||
|
||||
}
|
||||
|
||||
func TestMailChimpGatherError(t *testing.T) {
|
||||
ts := httptest.NewServer(
|
||||
http.HandlerFunc(
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, sampleError)
|
||||
},
|
||||
))
|
||||
defer ts.Close()
|
||||
|
||||
u, err := url.ParseRequestURI(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
api := &ChimpAPI{
|
||||
url: u,
|
||||
Debug: true,
|
||||
}
|
||||
m := MailChimp{
|
||||
api: api,
|
||||
CampaignId: "test",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err = m.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
var sampleReports = `
|
||||
{
|
||||
"reports": [
|
||||
{
|
||||
"id": "42694e9e57",
|
||||
"campaign_title": "Freddie's Jokes Vol. 1",
|
||||
"type": "regular",
|
||||
"emails_sent": 200,
|
||||
"abuse_reports": 0,
|
||||
"unsubscribed": 2,
|
||||
"send_time": "2015-09-15T19:05:51+00:00",
|
||||
"bounces": {
|
||||
"hard_bounces": 0,
|
||||
"soft_bounces": 2,
|
||||
"syntax_errors": 0
|
||||
},
|
||||
"forwards": {
|
||||
"forwards_count": 0,
|
||||
"forwards_opens": 0
|
||||
},
|
||||
"opens": {
|
||||
"opens_total": 186,
|
||||
"unique_opens": 100,
|
||||
"open_rate": 42,
|
||||
"last_open": "2015-09-15T19:15:47+00:00"
|
||||
},
|
||||
"clicks": {
|
||||
"clicks_total": 42,
|
||||
"unique_clicks": 400,
|
||||
"unique_subscriber_clicks": 42,
|
||||
"click_rate": 42,
|
||||
"last_click": "2015-09-15T19:15:47+00:00"
|
||||
},
|
||||
"facebook_likes": {
|
||||
"recipient_likes": 5,
|
||||
"unique_likes": 8,
|
||||
"facebook_likes": 42
|
||||
},
|
||||
"industry_stats": {
|
||||
"type": "Social Networks and Online Communities",
|
||||
"open_rate": 0.17076777144396,
|
||||
"click_rate": 0.027431311866951,
|
||||
"bounce_rate": 0.0063767751251474,
|
||||
"unopen_rate": 0.82285545343089,
|
||||
"unsub_rate": 0.001436957032815,
|
||||
"abuse_rate": 0.00021111996110887
|
||||
},
|
||||
"list_stats": {
|
||||
"sub_rate": 10,
|
||||
"unsub_rate": 20,
|
||||
"open_rate": 42,
|
||||
"click_rate": 42
|
||||
},
|
||||
"timeseries": [
|
||||
{
|
||||
"timestamp": "2015-09-15T19:00:00+00:00",
|
||||
"emails_sent": 198,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-15T20:00:00+00:00",
|
||||
"emails_sent": 2,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-15T21:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-15T22:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-15T23:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T00:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T01:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T02:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T03:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T04:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T05:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T06:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T07:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T08:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T09:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T10:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T11:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T12:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T13:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T14:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T15:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T16:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T17:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-16T18:00:00+00:00",
|
||||
"emails_sent": 0,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
}
|
||||
],
|
||||
"share_report": {
|
||||
"share_url": "http://usX.vip-reports.net/reports/summary?u=xxxx&id=xxxx",
|
||||
"share_password": "freddielikesjokes"
|
||||
},
|
||||
"delivery_status": {
|
||||
"enabled": false
|
||||
},
|
||||
"_links": [
|
||||
{
|
||||
"rel": "parent",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Collection.json",
|
||||
"schema": "https://api.mailchimp.com/schema/3.0/CollectionLinks/Reports.json"
|
||||
},
|
||||
{
|
||||
"rel": "self",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Instance.json"
|
||||
},
|
||||
{
|
||||
"rel": "campaign",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/campaigns/42694e9e57",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Campaigns/Instance.json"
|
||||
},
|
||||
{
|
||||
"rel": "sub-reports",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sub-reports",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Sub/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "abuse-reports",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/abuse-reports",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Abuse/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "advice",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/advice",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Advice/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "click-details",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/click-details",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/ClickDetails/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "domain-performance",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/domain-performance",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/DomainPerformance/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "eepurl",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/eepurl",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Eepurl/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "email-activity",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/email-activity",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/EmailActivity/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "locations",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/locations",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Locations/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "sent-to",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sent-to",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/SentTo/Collection.json"
|
||||
},
|
||||
{
|
||||
"rel": "unsubscribed",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/unsubscribed",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Unsubs/Collection.json"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"_links": [
|
||||
{
|
||||
"rel": "parent",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Root.json"
|
||||
},
|
||||
{
|
||||
"rel": "self",
|
||||
"href": "https://usX.api.mailchimp.com/3.0/reports",
|
||||
"method": "GET",
|
||||
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Collection.json",
|
||||
"schema": "https://api.mailchimp.com/schema/3.0/CollectionLinks/Reports.json"
|
||||
}
|
||||
],
|
||||
"total_items": 1
|
||||
}
|
||||
`
|
||||
|
||||
var sampleReport = `
|
||||
{
|
||||
"id": "42694e9e57",
|
||||
"campaign_title": "Freddie's Jokes Vol. 1",
|
||||
"type": "regular",
|
||||
"emails_sent": 200,
|
||||
"abuse_reports": 0,
|
||||
"unsubscribed": 2,
|
||||
"send_time": "2015-09-15T19:05:51+00:00",
|
||||
"bounces": {
|
||||
"hard_bounces": 0,
|
||||
"soft_bounces": 2,
|
||||
"syntax_errors": 0
|
||||
},
|
||||
"forwards": {
|
||||
"forwards_count": 0,
|
||||
"forwards_opens": 0
|
||||
},
|
||||
"opens": {
|
||||
"opens_total": 186,
|
||||
"unique_opens": 100,
|
||||
"open_rate": 42,
|
||||
"last_open": "2015-09-15T19:15:47+00:00"
|
||||
},
|
||||
"clicks": {
|
||||
"clicks_total": 42,
|
||||
"unique_clicks": 400,
|
||||
"unique_subscriber_clicks": 42,
|
||||
"click_rate": 42,
|
||||
"last_click": "2015-09-15T19:15:47+00:00"
|
||||
},
|
||||
"facebook_likes": {
|
||||
"recipient_likes": 5,
|
||||
"unique_likes": 8,
|
||||
"facebook_likes": 42
|
||||
},
|
||||
"industry_stats": {
|
||||
"type": "Social Networks and Online Communities",
|
||||
"open_rate": 0.17076777144396,
|
||||
"click_rate": 0.027431311866951,
|
||||
"bounce_rate": 0.0063767751251474,
|
||||
"unopen_rate": 0.82285545343089,
|
||||
"unsub_rate": 0.001436957032815,
|
||||
"abuse_rate": 0.00021111996110887
|
||||
},
|
||||
"list_stats": {
|
||||
"sub_rate": 10,
|
||||
"unsub_rate": 20,
|
||||
"open_rate": 42,
|
||||
"click_rate": 42
|
||||
},
|
||||
"timeseries": [
|
||||
{
|
||||
"timestamp": "2015-09-15T19:00:00+00:00",
|
||||
"emails_sent": 198,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
{
|
||||
"timestamp": "2015-09-15T20:00:00+00:00",
|
||||
"emails_sent": 2,
|
||||
"unique_opens": 0,
|
||||
"recipients_clicks": 0
|
||||
},
|
||||
      { "timestamp": "2015-09-15T21:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-15T22:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-15T23:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T00:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T01:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T02:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T03:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T04:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T05:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T06:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T07:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T08:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T09:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T10:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T11:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T12:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T13:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T14:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T15:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T16:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T17:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 },
      { "timestamp": "2015-09-16T18:00:00+00:00", "emails_sent": 0, "unique_opens": 0, "recipients_clicks": 0 }
    ],
    "share_report": {
      "share_url": "http://usX.vip-reports.net/reports/summary?u=xxxx&id=xxxx",
      "share_password": "freddielikesjokes"
    },
    "delivery_status": {
      "enabled": false
    },
    "_links": [
      {
        "rel": "parent",
        "href": "https://usX.api.mailchimp.com/3.0/reports",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Collection.json",
        "schema": "https://api.mailchimp.com/schema/3.0/CollectionLinks/Reports.json"
      },
      {
        "rel": "self",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Instance.json"
      },
      {
        "rel": "campaign",
        "href": "https://usX.api.mailchimp.com/3.0/campaigns/42694e9e57",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Campaigns/Instance.json"
      },
      {
        "rel": "sub-reports",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sub-reports",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Sub/Collection.json"
      },
      {
        "rel": "abuse-reports",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/abuse-reports",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Abuse/Collection.json"
      },
      {
        "rel": "advice",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/advice",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Advice/Collection.json"
      },
      {
        "rel": "click-details",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/click-details",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/ClickDetails/Collection.json"
      },
      {
        "rel": "domain-performance",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/domain-performance",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/DomainPerformance/Collection.json"
      },
      {
        "rel": "eepurl",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/eepurl",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Eepurl/Collection.json"
      },
      {
        "rel": "email-activity",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/email-activity",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/EmailActivity/Collection.json"
      },
      {
        "rel": "locations",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/locations",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Locations/Collection.json"
      },
      {
        "rel": "sent-to",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sent-to",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/SentTo/Collection.json"
      },
      {
        "rel": "unsubscribed",
        "href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/unsubscribed",
        "method": "GET",
        "targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Unsubs/Collection.json"
      }
    ]
}
`

var sampleError = `
{
  "type": "http://developer.mailchimp.com/documentation/mailchimp/guides/error-glossary/",
  "title": "API Key Invalid",
  "status": 401,
  "detail": "Your API key may be invalid, or you've attempted to access the wrong datacenter.",
  "instance": ""
}
`
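The two raw-string fixtures above (a full campaign report and an error payload) are the kind of canned responses a test can serve from a local stub server instead of calling the live API. A minimal sketch, assuming net/http/httptest and a hypothetical helper name; none of this is part of the diff:

package mailchimp_test // hypothetical package; the plugin's real test harness is not shown in this excerpt

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// newFixtureServer returns a stub server that always answers with the given
// JSON body, so code under test can be pointed at ts.URL instead of MailChimp.
func newFixtureServer(body string) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, body)
	}))
}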
184 plugins/inputs/memcached/memcached.go Normal file
@@ -0,0 +1,184 @@
package memcached

import (
	"bufio"
	"bytes"
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/influxdb/telegraf/plugins/inputs"
)

// Memcached is a memcached plugin
type Memcached struct {
	Servers     []string
	UnixSockets []string
}

var sampleConfig = `
	# An array of addresses to gather stats about. Specify an ip or hostname
	# with optional port. ie localhost, 10.0.0.1:11211, etc.
	#
	# If no servers are specified, then localhost is used as the host.
	servers = ["localhost:11211"]
	# unix_sockets = ["/var/run/memcached.sock"]
`

var defaultTimeout = 5 * time.Second

// The list of metrics that should be sent
var sendMetrics = []string{
	"get_hits",
	"get_misses",
	"evictions",
	"limit_maxbytes",
	"bytes",
	"uptime",
	"curr_items",
	"total_items",
	"curr_connections",
	"total_connections",
	"connection_structures",
	"cmd_get",
	"cmd_set",
	"delete_hits",
	"delete_misses",
	"incr_hits",
	"incr_misses",
	"decr_hits",
	"decr_misses",
	"cas_hits",
	"cas_misses",
	"evictions",
	"bytes_read",
	"bytes_written",
	"threads",
	"conn_yields",
}

// SampleConfig returns sample configuration message
func (m *Memcached) SampleConfig() string {
	return sampleConfig
}

// Description returns description of Memcached plugin
func (m *Memcached) Description() string {
	return "Read metrics from one or many memcached servers"
}

// Gather reads stats from all configured servers and accumulates stats
func (m *Memcached) Gather(acc inputs.Accumulator) error {
	if len(m.Servers) == 0 && len(m.UnixSockets) == 0 {
		return m.gatherServer(":11211", false, acc)
	}

	for _, serverAddress := range m.Servers {
		if err := m.gatherServer(serverAddress, false, acc); err != nil {
			return err
		}
	}

	for _, unixAddress := range m.UnixSockets {
		if err := m.gatherServer(unixAddress, true, acc); err != nil {
			return err
		}
	}

	return nil
}

func (m *Memcached) gatherServer(
	address string,
	unix bool,
	acc inputs.Accumulator,
) error {
	var conn net.Conn
	if unix {
		// Assign to the outer conn; a ':=' here would shadow it and leave the
		// outer conn nil when the deadline is set below.
		var err error
		conn, err = net.DialTimeout("unix", address, defaultTimeout)
		if err != nil {
			return err
		}
		defer conn.Close()
	} else {
		_, _, err := net.SplitHostPort(address)
		if err != nil {
			address = address + ":11211"
		}

		conn, err = net.DialTimeout("tcp", address, defaultTimeout)
		if err != nil {
			return err
		}
		defer conn.Close()
	}

	// Extend connection
	conn.SetDeadline(time.Now().Add(defaultTimeout))

	// Read and write buffer
	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))

	// Send command
	if _, err := fmt.Fprint(rw, "stats\r\n"); err != nil {
		return err
	}
	if err := rw.Flush(); err != nil {
		return err
	}

	values, err := parseResponse(rw.Reader)
	if err != nil {
		return err
	}

	// Add server address as a tag
	tags := map[string]string{"server": address}

	// Process values
	fields := make(map[string]interface{})
	for _, key := range sendMetrics {
		if value, ok := values[key]; ok {
			// Mostly it is the number
			if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse == nil {
				fields[key] = iValue
			} else {
				fields[key] = value
			}
		}
	}
	acc.AddFields("memcached", fields, tags)
	return nil
}

func parseResponse(r *bufio.Reader) (map[string]string, error) {
	values := make(map[string]string)

	for {
		// Read line
		line, _, errRead := r.ReadLine()
		if errRead != nil {
			return values, errRead
		}
		// Done
		if bytes.Equal(line, []byte("END")) {
			break
		}
		// Read values
		s := bytes.SplitN(line, []byte(" "), 3)
		if len(s) != 3 || !bytes.Equal(s[0], []byte("STAT")) {
			return values, fmt.Errorf("unexpected line in stats response: %q", line)
		}

		// Save values
		values[string(s[1])] = string(s[2])
	}
	return values, nil
}

func init() {
	inputs.Add("memcached", func() inputs.Input {
		return &Memcached{}
	})
}
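One detail worth calling out in gatherServer: the unix branch assigns to the outer conn. A tiny self-contained sketch (illustrative only, not part of the diff) of the shadowing pitfall that a ':=' in that branch would reintroduce:

package main

import "fmt"

func get() (int, error) { return 42, nil }

func main() {
	var v int
	if true {
		v, err := get()     // ':=' declares a new v scoped to this block
		fmt.Println(v, err) // 42 <nil>
	}
	fmt.Println(v) // 0 — the outer v was never assigned
}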
160 plugins/inputs/memcached/memcached_test.go Normal file
@@ -0,0 +1,160 @@
package memcached

import (
	"bufio"
	"strings"
	"testing"

	"github.com/influxdb/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestMemcachedGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	m := &Memcached{
		Servers: []string{testutil.GetLocalHost()},
	}

	var acc testutil.Accumulator

	err := m.Gather(&acc)
	require.NoError(t, err)

	intMetrics := []string{"get_hits", "get_misses", "evictions",
		"limit_maxbytes", "bytes", "uptime", "curr_items", "total_items",
		"curr_connections", "total_connections", "connection_structures", "cmd_get",
		"cmd_set", "delete_hits", "delete_misses", "incr_hits", "incr_misses",
		"decr_hits", "decr_misses", "cas_hits", "cas_misses", "evictions",
		"bytes_read", "bytes_written", "threads", "conn_yields"}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntField("memcached", metric), metric)
	}
}

func TestMemcachedParseMetrics(t *testing.T) {
	r := bufio.NewReader(strings.NewReader(memcachedStats))
	values, err := parseResponse(r)
	require.NoError(t, err, "Error parsing memcached response")

	tests := []struct {
		key   string
		value string
	}{
		{"pid", "23235"},
		{"uptime", "194"},
		{"time", "1449174679"},
		{"version", "1.4.14 (Ubuntu)"},
		{"libevent", "2.0.21-stable"},
		{"pointer_size", "64"},
		{"rusage_user", "0.000000"},
		{"rusage_system", "0.007566"},
		{"curr_connections", "5"},
		{"total_connections", "6"},
		{"connection_structures", "6"},
		{"reserved_fds", "20"},
		{"cmd_get", "0"},
		{"cmd_set", "0"},
		{"cmd_flush", "0"},
		{"cmd_touch", "0"},
		{"get_hits", "0"},
		{"get_misses", "0"},
		{"delete_misses", "0"},
		{"delete_hits", "0"},
		{"incr_misses", "0"},
		{"incr_hits", "0"},
		{"decr_misses", "0"},
		{"decr_hits", "0"},
		{"cas_misses", "0"},
		{"cas_hits", "0"},
		{"cas_badval", "0"},
		{"touch_hits", "0"},
		{"touch_misses", "0"},
		{"auth_cmds", "0"},
		{"auth_errors", "0"},
		{"bytes_read", "7"},
		{"bytes_written", "0"},
		{"limit_maxbytes", "67108864"},
		{"accepting_conns", "1"},
		{"listen_disabled_num", "0"},
		{"threads", "4"},
		{"conn_yields", "0"},
		{"hash_power_level", "16"},
		{"hash_bytes", "524288"},
		{"hash_is_expanding", "0"},
		{"expired_unfetched", "0"},
		{"evicted_unfetched", "0"},
		{"bytes", "0"},
		{"curr_items", "0"},
		{"total_items", "0"},
		{"evictions", "0"},
		{"reclaimed", "0"},
	}

	for _, test := range tests {
		value, ok := values[test.key]
		if !ok {
			t.Errorf("Did not find key for metric %s in values", test.key)
			continue
		}
		if value != test.value {
			t.Errorf("Metric: %s, Expected: %s, actual: %s",
				test.key, test.value, value)
		}
	}
}

var memcachedStats = `STAT pid 23235
STAT uptime 194
STAT time 1449174679
STAT version 1.4.14 (Ubuntu)
STAT libevent 2.0.21-stable
STAT pointer_size 64
STAT rusage_user 0.000000
STAT rusage_system 0.007566
STAT curr_connections 5
STAT total_connections 6
STAT connection_structures 6
STAT reserved_fds 20
STAT cmd_get 0
STAT cmd_set 0
STAT cmd_flush 0
STAT cmd_touch 0
STAT get_hits 0
STAT get_misses 0
STAT delete_misses 0
STAT delete_hits 0
STAT incr_misses 0
STAT incr_hits 0
STAT decr_misses 0
STAT decr_hits 0
STAT cas_misses 0
STAT cas_hits 0
STAT cas_badval 0
STAT touch_hits 0
STAT touch_misses 0
STAT auth_cmds 0
STAT auth_errors 0
STAT bytes_read 7
STAT bytes_written 0
STAT limit_maxbytes 67108864
STAT accepting_conns 1
STAT listen_disabled_num 0
STAT threads 4
STAT conn_yields 0
STAT hash_power_level 16
STAT hash_bytes 524288
STAT hash_is_expanding 0
STAT expired_unfetched 0
STAT evicted_unfetched 0
STAT bytes 0
STAT curr_items 0
STAT total_items 0
STAT evictions 0
STAT reclaimed 0
END
`
@@ -1,4 +1,4 @@
-package plugins
+package inputs
 
 import "github.com/stretchr/testify/mock"
 
146 plugins/inputs/mongodb/mongodb.go Normal file
@@ -0,0 +1,146 @@
package mongodb

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net"
	"net/url"
	"sync"
	"time"

	"github.com/influxdb/telegraf/plugins/inputs"
	"gopkg.in/mgo.v2"
)

type MongoDB struct {
	Servers []string
	Ssl     Ssl
	mongos  map[string]*Server
}

type Ssl struct {
	Enabled bool
	CaCerts []string `toml:"cacerts"`
}

var sampleConfig = `
	# An array of URIs to gather stats about. Specify an ip or hostname
	# with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
	# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
	#
	# If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
	servers = ["127.0.0.1:27017"]
`

func (m *MongoDB) SampleConfig() string {
	return sampleConfig
}

func (*MongoDB) Description() string {
	return "Read metrics from one or many MongoDB servers"
}

var localhost = &url.URL{Host: "127.0.0.1:27017"}

// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (m *MongoDB) Gather(acc inputs.Accumulator) error {
	if len(m.Servers) == 0 {
		m.gatherServer(m.getMongoServer(localhost), acc)
		return nil
	}

	var wg sync.WaitGroup

	var outerr error

	for _, serv := range m.Servers {
		u, err := url.Parse(serv)
		if err != nil {
			return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
		} else if u.Scheme == "" {
			u.Scheme = "mongodb"
			// fallback to simple string based address (i.e. "10.0.0.1:10000")
			u.Host = serv
			if u.Path == u.Host {
				u.Path = ""
			}
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			outerr = m.gatherServer(m.getMongoServer(u), acc)
		}()
	}

	wg.Wait()

	return outerr
}

func (m *MongoDB) getMongoServer(url *url.URL) *Server {
	if _, ok := m.mongos[url.Host]; !ok {
		m.mongos[url.Host] = &Server{
			Url: url,
		}
	}
	return m.mongos[url.Host]
}

func (m *MongoDB) gatherServer(server *Server, acc inputs.Accumulator) error {
	if server.Session == nil {
		var dialAddrs []string
		if server.Url.User != nil {
			dialAddrs = []string{server.Url.String()}
		} else {
			dialAddrs = []string{server.Url.Host}
		}
		dialInfo, err := mgo.ParseURL(dialAddrs[0])
		if err != nil {
			return fmt.Errorf("Unable to parse URL (%s), %s\n",
				dialAddrs[0], err.Error())
		}
		dialInfo.Direct = true
		dialInfo.Timeout = time.Duration(10) * time.Second

		if m.Ssl.Enabled {
			tlsConfig := &tls.Config{}
			if len(m.Ssl.CaCerts) > 0 {
				roots := x509.NewCertPool()
				for _, caCert := range m.Ssl.CaCerts {
					ok := roots.AppendCertsFromPEM([]byte(caCert))
					if !ok {
						return fmt.Errorf("failed to parse root certificate")
					}
				}
				tlsConfig.RootCAs = roots
			} else {
				tlsConfig.InsecureSkipVerify = true
			}
			dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
				conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
				if err != nil {
					fmt.Printf("error in Dial, %s\n", err.Error())
				}
				return conn, err
			}
		}

		sess, err := mgo.DialWithInfo(dialInfo)
		if err != nil {
			fmt.Printf("error dialing over ssl, %s\n", err.Error())
			return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error())
		}
		server.Session = sess
	}
	return server.gatherData(acc)
}

func init() {
	inputs.Add("mongodb", func() inputs.Input {
		return &MongoDB{
			mongos: make(map[string]*Server),
		}
	})
}
108 plugins/inputs/mongodb/mongodb_data.go Normal file
@@ -0,0 +1,108 @@
package mongodb

import (
	"fmt"
	"reflect"
	"strconv"

	"github.com/influxdb/telegraf/plugins/inputs"
)

type MongodbData struct {
	StatLine *StatLine
	Fields   map[string]interface{}
	Tags     map[string]string
}

func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
	if statLine.NodeType != "" && statLine.NodeType != "UNK" {
		tags["state"] = statLine.NodeType
	}
	return &MongodbData{
		StatLine: statLine,
		Tags:     tags,
		Fields:   make(map[string]interface{}),
	}
}

var DefaultStats = map[string]string{
	"inserts_per_sec":    "Insert",
	"queries_per_sec":    "Query",
	"updates_per_sec":    "Update",
	"deletes_per_sec":    "Delete",
	"getmores_per_sec":   "GetMore",
	"commands_per_sec":   "Command",
	"flushes_per_sec":    "Flushes",
	"vsize_megabytes":    "Virtual",
	"resident_megabytes": "Resident",
	"queued_reads":       "QueuedReaders",
	"queued_writes":      "QueuedWriters",
	"active_reads":       "ActiveReaders",
	"active_writes":      "ActiveWriters",
	"net_in_bytes":       "NetIn",
	"net_out_bytes":      "NetOut",
	"open_connections":   "NumConnections",
}

var DefaultReplStats = map[string]string{
	"repl_inserts_per_sec":  "InsertR",
	"repl_queries_per_sec":  "QueryR",
	"repl_updates_per_sec":  "UpdateR",
	"repl_deletes_per_sec":  "DeleteR",
	"repl_getmores_per_sec": "GetMoreR",
	"repl_commands_per_sec": "CommandR",
	"member_status":         "NodeType",
}

var MmapStats = map[string]string{
	"mapped_megabytes":     "Mapped",
	"non-mapped_megabytes": "NonMapped",
	"page_faults_per_sec":  "Faults",
}

var WiredTigerStats = map[string]string{
	"percent_cache_dirty": "CacheDirtyPercent",
	"percent_cache_used":  "CacheUsedPercent",
}

func (d *MongodbData) AddDefaultStats() {
	statLine := reflect.ValueOf(d.StatLine).Elem()
	d.addStat(statLine, DefaultStats)
	if d.StatLine.NodeType != "" {
		d.addStat(statLine, DefaultReplStats)
	}
	if d.StatLine.StorageEngine == "mmapv1" {
		d.addStat(statLine, MmapStats)
	} else if d.StatLine.StorageEngine == "wiredTiger" {
		for key, value := range WiredTigerStats {
			val := statLine.FieldByName(value).Interface()
			percentVal := fmt.Sprintf("%.1f", val.(float64)*100)
			floatVal, _ := strconv.ParseFloat(percentVal, 64)
			d.add(key, floatVal)
		}
	}
}

func (d *MongodbData) addStat(
	statLine reflect.Value,
	stats map[string]string,
) {
	for key, value := range stats {
		val := statLine.FieldByName(value).Interface()
		d.add(key, val)
	}
}

func (d *MongodbData) add(key string, val interface{}) {
	d.Fields[key] = val
}

func (d *MongodbData) flush(acc inputs.Accumulator) {
	acc.AddFields(
		"mongodb",
		d.Fields,
		d.Tags,
		d.StatLine.Time,
	)
	d.Fields = make(map[string]interface{})
}
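A quick illustration of the rounding step in AddDefaultStats above: the WiredTiger cache ratios are scaled to percentages and trimmed to one decimal place by formatting and re-parsing. A minimal, self-contained sketch of that trick (illustrative only, not part of the diff):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	ratio := 0.07342 // e.g. the raw cache-dirty ratio reported by the server
	// Same approach as AddDefaultStats: format to one decimal, then parse back.
	s := fmt.Sprintf("%.1f", ratio*100) // "7.3"
	v, _ := strconv.ParseFloat(s, 64)   // 7.3
	fmt.Println(v)
}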
133 plugins/inputs/mongodb/mongodb_data_test.go Normal file
@@ -0,0 +1,133 @@
package mongodb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var tags = make(map[string]string)
|
||||
|
||||
func TestAddNonReplStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
StorageEngine: "",
|
||||
Time: time.Now(),
|
||||
Insert: 0,
|
||||
Query: 0,
|
||||
Update: 0,
|
||||
Delete: 0,
|
||||
GetMore: 0,
|
||||
Command: 0,
|
||||
Flushes: 0,
|
||||
Virtual: 0,
|
||||
Resident: 0,
|
||||
QueuedReaders: 0,
|
||||
QueuedWriters: 0,
|
||||
ActiveReaders: 0,
|
||||
ActiveWriters: 0,
|
||||
NetIn: 0,
|
||||
NetOut: 0,
|
||||
NumConnections: 0,
|
||||
},
|
||||
tags,
|
||||
)
|
||||
var acc testutil.Accumulator
|
||||
|
||||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key := range DefaultStats {
|
||||
assert.True(t, acc.HasIntField("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddReplStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
StorageEngine: "mmapv1",
|
||||
Mapped: 0,
|
||||
NonMapped: 0,
|
||||
Faults: 0,
|
||||
},
|
||||
tags,
|
||||
)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key := range MmapStats {
|
||||
assert.True(t, acc.HasIntField("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddWiredTigerStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
StorageEngine: "wiredTiger",
|
||||
CacheDirtyPercent: 0,
|
||||
CacheUsedPercent: 0,
|
||||
},
|
||||
tags,
|
||||
)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key := range WiredTigerStats {
|
||||
assert.True(t, acc.HasFloatField("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateTag(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
StorageEngine: "",
|
||||
Time: time.Now(),
|
||||
Insert: 0,
|
||||
Query: 0,
|
||||
NodeType: "PRI",
|
||||
},
|
||||
tags,
|
||||
)
|
||||
|
||||
stateTags := make(map[string]string)
|
||||
stateTags["state"] = "PRI"
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
fields := map[string]interface{}{
|
||||
"active_reads": int64(0),
|
||||
"active_writes": int64(0),
|
||||
"commands_per_sec": int64(0),
|
||||
"deletes_per_sec": int64(0),
|
||||
"flushes_per_sec": int64(0),
|
||||
"getmores_per_sec": int64(0),
|
||||
"inserts_per_sec": int64(0),
|
||||
"member_status": "PRI",
|
||||
"net_in_bytes": int64(0),
|
||||
"net_out_bytes": int64(0),
|
||||
"open_connections": int64(0),
|
||||
"queries_per_sec": int64(0),
|
||||
"queued_reads": int64(0),
|
||||
"queued_writes": int64(0),
|
||||
"repl_commands_per_sec": int64(0),
|
||||
"repl_deletes_per_sec": int64(0),
|
||||
"repl_getmores_per_sec": int64(0),
|
||||
"repl_inserts_per_sec": int64(0),
|
||||
"repl_queries_per_sec": int64(0),
|
||||
"repl_updates_per_sec": int64(0),
|
||||
"resident_megabytes": int64(0),
|
||||
"updates_per_sec": int64(0),
|
||||
"vsize_megabytes": int64(0),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
|
||||
}
|
||||
51 plugins/inputs/mongodb/mongodb_server.go Normal file
@@ -0,0 +1,51 @@
package mongodb

import (
	"net/url"
	"time"

	"github.com/influxdb/telegraf/plugins/inputs"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

type Server struct {
	Url        *url.URL
	Session    *mgo.Session
	lastResult *ServerStatus
}

func (s *Server) getDefaultTags() map[string]string {
	tags := make(map[string]string)
	tags["hostname"] = s.Url.Host
	return tags
}

func (s *Server) gatherData(acc inputs.Accumulator) error {
	s.Session.SetMode(mgo.Eventual, true)
	s.Session.SetSocketTimeout(0)
	result := &ServerStatus{}
	err := s.Session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 0}}, result)
	if err != nil {
		return err
	}
	defer func() {
		s.lastResult = result
	}()

	result.SampleTime = time.Now()
	if s.lastResult != nil && result != nil {
		duration := result.SampleTime.Sub(s.lastResult.SampleTime)
		durationInSeconds := int64(duration.Seconds())
		if durationInSeconds == 0 {
			durationInSeconds = 1
		}
		data := NewMongodbData(
			NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds),
			s.getDefaultTags(),
		)
		data.AddDefaultStats()
		data.flush(acc)
	}
	return nil
}
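gatherData only emits metrics once two samples exist: counters are turned into per-second rates by diffing consecutive serverStatus snapshots over the elapsed seconds (the diff helper defined later in mongostat.go). A minimal sketch of that rate computation (illustrative only, not part of the diff):

package main

import "fmt"

// rate mirrors the diff helper used by NewStatLine: per-second delta between
// two counter samples, falling back to the new value if the counter reset.
func rate(newVal, oldVal, sampleSecs int64) int64 {
	d := newVal - oldVal
	if d < 0 {
		d = newVal
	}
	return d / sampleSecs
}

func main() {
	// e.g. opcounters.insert was 1000 ten seconds ago and is 1600 now.
	fmt.Println(rate(1600, 1000, 10)) // 60 inserts per second
}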
43 plugins/inputs/mongodb/mongodb_server_test.go Normal file
@@ -0,0 +1,43 @@
// +build integration

package mongodb

import (
	"testing"
	"time"

	"github.com/influxdb/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetDefaultTags(t *testing.T) {
	var tagTests = []struct {
		in  string
		out string
	}{
		{"hostname", server.Url.Host},
	}
	defaultTags := server.getDefaultTags()
	for _, tt := range tagTests {
		if defaultTags[tt.in] != tt.out {
			t.Errorf("expected %q, got %q", tt.out, defaultTags[tt.in])
		}
	}
}

func TestAddDefaultStats(t *testing.T) {
	var acc testutil.Accumulator

	err := server.gatherData(&acc)
	require.NoError(t, err)

	time.Sleep(time.Duration(1) * time.Second)
	// need to call this twice so it can perform the diff
	err = server.gatherData(&acc)
	require.NoError(t, err)

	for key := range DefaultStats {
		assert.True(t, acc.HasIntValue(key))
	}
}
71 plugins/inputs/mongodb/mongodb_test.go Normal file
@@ -0,0 +1,71 @@
// +build integration

package mongodb

import (
	"log"
	"math/rand"
	"net/url"
	"os"
	"testing"
	"time"

	"gopkg.in/mgo.v2"
)

var connect_url string
var server *Server

func init() {
	connect_url = os.Getenv("MONGODB_URL")
	if connect_url == "" {
		connect_url = "127.0.0.1:27017"
		server = &Server{Url: &url.URL{Host: connect_url}}
	} else {
		full_url, err := url.Parse(connect_url)
		if err != nil {
			log.Fatalf("Unable to parse URL (%s), %s\n", full_url, err.Error())
		}
		server = &Server{Url: full_url}
	}
}

func testSetup(m *testing.M) {
	var err error
	var dialAddrs []string
	if server.Url.User != nil {
		dialAddrs = []string{server.Url.String()}
	} else {
		dialAddrs = []string{server.Url.Host}
	}
	dialInfo, err := mgo.ParseURL(dialAddrs[0])
	if err != nil {
		log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error())
	}
	dialInfo.Direct = true
	dialInfo.Timeout = time.Duration(10) * time.Second
	sess, err := mgo.DialWithInfo(dialInfo)
	if err != nil {
		log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error())
	}
	// Use the session from DialWithInfo directly; re-dialing the host here
	// would discard the configured dialInfo.
	server.Session = sess
}

func testTeardown(m *testing.M) {
	server.Session.Close()
}

func TestMain(m *testing.M) {
	// seed randomness for use with tests
	rand.Seed(time.Now().UTC().UnixNano())

	testSetup(m)
	res := m.Run()
	testTeardown(m)

	os.Exit(res)
}
591 plugins/inputs/mongodb/mongostat.go Normal file
@@ -0,0 +1,591 @@
/***
|
||||
The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go
|
||||
and contains modifications so that no other dependency from that project is needed. Other modifications included
|
||||
removing unnecessary code specific to formatting the output and determining the current state of the database. It
|
||||
is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
***/
|
||||
|
||||
package mongodb
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
MongosProcess = "mongos"
|
||||
)
|
||||
|
||||
// Flags to determine cases when to activate/deactivate columns for output.
|
||||
const (
|
||||
Always = 1 << iota // always activate the column
|
||||
Discover // only active when mongostat is in discover mode
|
||||
Repl // only active if one of the nodes being monitored is in a replset
|
||||
Locks // only active if node is capable of calculating lock info
|
||||
AllOnly // only active if mongostat was run with --all option
|
||||
MMAPOnly // only active if node has mmap-specific fields
|
||||
WTOnly // only active if node has wiredtiger-specific fields
|
||||
)
|
||||
|
||||
type ServerStatus struct {
|
||||
SampleTime time.Time `bson:""`
|
||||
Host string `bson:"host"`
|
||||
Version string `bson:"version"`
|
||||
Process string `bson:"process"`
|
||||
Pid int64 `bson:"pid"`
|
||||
Uptime int64 `bson:"uptime"`
|
||||
UptimeMillis int64 `bson:"uptimeMillis"`
|
||||
UptimeEstimate int64 `bson:"uptimeEstimate"`
|
||||
LocalTime time.Time `bson:"localTime"`
|
||||
Asserts map[string]int64 `bson:"asserts"`
|
||||
BackgroundFlushing *FlushStats `bson:"backgroundFlushing"`
|
||||
ExtraInfo *ExtraInfo `bson:"extra_info"`
|
||||
Connections *ConnectionStats `bson:"connections"`
|
||||
Dur *DurStats `bson:"dur"`
|
||||
GlobalLock *GlobalLockStats `bson:"globalLock"`
|
||||
Locks map[string]LockStats `bson:"locks,omitempty"`
|
||||
Network *NetworkStats `bson:"network"`
|
||||
Opcounters *OpcountStats `bson:"opcounters"`
|
||||
OpcountersRepl *OpcountStats `bson:"opcountersRepl"`
|
||||
RecordStats *DBRecordStats `bson:"recordStats"`
|
||||
Mem *MemStats `bson:"mem"`
|
||||
Repl *ReplStatus `bson:"repl"`
|
||||
ShardCursorType map[string]interface{} `bson:"shardCursorType"`
|
||||
StorageEngine map[string]string `bson:"storageEngine"`
|
||||
WiredTiger *WiredTiger `bson:"wiredTiger"`
|
||||
}
|
||||
|
||||
// WiredTiger stores information related to the WiredTiger storage engine.
|
||||
type WiredTiger struct {
|
||||
Transaction TransactionStats `bson:"transaction"`
|
||||
Concurrent ConcurrentTransactions `bson:"concurrentTransactions"`
|
||||
Cache CacheStats `bson:"cache"`
|
||||
}
|
||||
|
||||
type ConcurrentTransactions struct {
|
||||
Write ConcurrentTransStats `bson:"write"`
|
||||
Read ConcurrentTransStats `bson:"read"`
|
||||
}
|
||||
|
||||
type ConcurrentTransStats struct {
|
||||
Out int64 `bson:"out"`
|
||||
}
|
||||
|
||||
// CacheStats stores cache statistics for WiredTiger.
|
||||
type CacheStats struct {
|
||||
TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"`
|
||||
CurrentCachedBytes int64 `bson:"bytes currently in the cache"`
|
||||
MaxBytesConfigured int64 `bson:"maximum bytes configured"`
|
||||
}
|
||||
|
||||
// TransactionStats stores transaction checkpoints in WiredTiger.
|
||||
type TransactionStats struct {
|
||||
TransCheckpoints int64 `bson:"transaction checkpoints"`
|
||||
}
|
||||
|
||||
// ReplStatus stores data related to replica sets.
|
||||
type ReplStatus struct {
|
||||
SetName interface{} `bson:"setName"`
|
||||
IsMaster interface{} `bson:"ismaster"`
|
||||
Secondary interface{} `bson:"secondary"`
|
||||
IsReplicaSet interface{} `bson:"isreplicaset"`
|
||||
ArbiterOnly interface{} `bson:"arbiterOnly"`
|
||||
Hosts []string `bson:"hosts"`
|
||||
Passives []string `bson:"passives"`
|
||||
Me string `bson:"me"`
|
||||
}
|
||||
|
||||
// DBRecordStats stores data related to memory operations across databases.
|
||||
type DBRecordStats struct {
|
||||
AccessesNotInMemory int64 `bson:"accessesNotInMemory"`
|
||||
PageFaultExceptionsThrown int64 `bson:"pageFaultExceptionsThrown"`
|
||||
DBRecordAccesses map[string]RecordAccesses `bson:",inline"`
|
||||
}
|
||||
|
||||
// RecordAccesses stores data related to memory operations scoped to a database.
|
||||
type RecordAccesses struct {
|
||||
AccessesNotInMemory int64 `bson:"accessesNotInMemory"`
|
||||
PageFaultExceptionsThrown int64 `bson:"pageFaultExceptionsThrown"`
|
||||
}
|
||||
|
||||
// MemStats stores data related to memory statistics.
|
||||
type MemStats struct {
|
||||
Bits int64 `bson:"bits"`
|
||||
Resident int64 `bson:"resident"`
|
||||
Virtual int64 `bson:"virtual"`
|
||||
Supported interface{} `bson:"supported"`
|
||||
Mapped int64 `bson:"mapped"`
|
||||
MappedWithJournal int64 `bson:"mappedWithJournal"`
|
||||
}
|
||||
|
||||
// FlushStats stores information about memory flushes.
|
||||
type FlushStats struct {
|
||||
Flushes int64 `bson:"flushes"`
|
||||
TotalMs int64 `bson:"total_ms"`
|
||||
AverageMs float64 `bson:"average_ms"`
|
||||
LastMs int64 `bson:"last_ms"`
|
||||
LastFinished time.Time `bson:"last_finished"`
|
||||
}
|
||||
|
||||
// ConnectionStats stores information related to incoming database connections.
|
||||
type ConnectionStats struct {
|
||||
Current int64 `bson:"current"`
|
||||
Available int64 `bson:"available"`
|
||||
TotalCreated int64 `bson:"totalCreated"`
|
||||
}
|
||||
|
||||
// DurTiming stores information related to journaling.
|
||||
type DurTiming struct {
|
||||
Dt int64 `bson:"dt"`
|
||||
PrepLogBuffer int64 `bson:"prepLogBuffer"`
|
||||
WriteToJournal int64 `bson:"writeToJournal"`
|
||||
WriteToDataFiles int64 `bson:"writeToDataFiles"`
|
||||
RemapPrivateView int64 `bson:"remapPrivateView"`
|
||||
}
|
||||
|
||||
// DurStats stores information related to journaling statistics.
|
||||
type DurStats struct {
|
||||
Commits int64 `bson:"commits"`
|
||||
JournaledMB int64 `bson:"journaledMB"`
|
||||
WriteToDataFilesMB int64 `bson:"writeToDataFilesMB"`
|
||||
Compression int64 `bson:"compression"`
|
||||
CommitsInWriteLock int64 `bson:"commitsInWriteLock"`
|
||||
EarlyCommits int64 `bson:"earlyCommits"`
|
||||
TimeMs DurTiming
|
||||
}
|
||||
|
||||
// QueueStats stores the number of queued read/write operations.
|
||||
type QueueStats struct {
|
||||
Total int64 `bson:"total"`
|
||||
Readers int64 `bson:"readers"`
|
||||
Writers int64 `bson:"writers"`
|
||||
}
|
||||
|
||||
// ClientStats stores the number of active read/write operations.
|
||||
type ClientStats struct {
|
||||
Total int64 `bson:"total"`
|
||||
Readers int64 `bson:"readers"`
|
||||
Writers int64 `bson:"writers"`
|
||||
}
|
||||
|
||||
// GlobalLockStats stores information related to locks in the MMAP storage engine.
|
||||
type GlobalLockStats struct {
|
||||
TotalTime int64 `bson:"totalTime"`
|
||||
LockTime int64 `bson:"lockTime"`
|
||||
CurrentQueue *QueueStats `bson:"currentQueue"`
|
||||
ActiveClients *ClientStats `bson:"activeClients"`
|
||||
}
|
||||
|
||||
// NetworkStats stores information related to network traffic.
|
||||
type NetworkStats struct {
|
||||
BytesIn int64 `bson:"bytesIn"`
|
||||
BytesOut int64 `bson:"bytesOut"`
|
||||
NumRequests int64 `bson:"numRequests"`
|
||||
}
|
||||
|
||||
// OpcountStats stores information related to commands and basic CRUD operations.
|
||||
type OpcountStats struct {
|
||||
Insert int64 `bson:"insert"`
|
||||
Query int64 `bson:"query"`
|
||||
Update int64 `bson:"update"`
|
||||
Delete int64 `bson:"delete"`
|
||||
GetMore int64 `bson:"getmore"`
|
||||
Command int64 `bson:"command"`
|
||||
}
|
||||
|
||||
// ReadWriteLockTimes stores time spent holding read/write locks.
|
||||
type ReadWriteLockTimes struct {
|
||||
Read int64 `bson:"R"`
|
||||
Write int64 `bson:"W"`
|
||||
ReadLower int64 `bson:"r"`
|
||||
WriteLower int64 `bson:"w"`
|
||||
}
|
||||
|
||||
// LockStats stores information related to time spent acquiring/holding locks
|
||||
// for a given database.
|
||||
type LockStats struct {
|
||||
TimeLockedMicros ReadWriteLockTimes `bson:"timeLockedMicros"`
|
||||
TimeAcquiringMicros ReadWriteLockTimes `bson:"timeAcquiringMicros"`
|
||||
|
||||
// AcquireCount and AcquireWaitCount are new fields of the lock stats only populated on 3.0 or newer.
|
||||
// Typed as a pointer so that if it is nil, mongostat can assume the field is not populated
|
||||
// with real namespace data.
|
||||
AcquireCount *ReadWriteLockTimes `bson:"acquireCount,omitempty"`
|
||||
AcquireWaitCount *ReadWriteLockTimes `bson:"acquireWaitCount,omitempty"`
|
||||
}
|
||||
|
||||
// ExtraInfo stores additional platform specific information.
|
||||
type ExtraInfo struct {
|
||||
PageFaults *int64 `bson:"page_faults"`
|
||||
}
|
||||
|
||||
// StatHeader describes a single column for mongostat's terminal output,
|
||||
// its formatting, and in which modes it should be displayed.
|
||||
type StatHeader struct {
|
||||
// The text to appear in the column's header cell
|
||||
HeaderText string
|
||||
|
||||
// Bitmask containing flags to determine if this header is active or not
|
||||
ActivateFlags int
|
||||
}
|
||||
|
||||
// StatHeaders are the complete set of data metrics supported by mongostat.
|
||||
var StatHeaders = []StatHeader{
|
||||
{"", Always}, // placeholder for hostname column (blank header text)
|
||||
{"insert", Always},
|
||||
{"query", Always},
|
||||
{"update", Always},
|
||||
{"delete", Always},
|
||||
{"getmore", Always},
|
||||
{"command", Always},
|
||||
{"% dirty", WTOnly},
|
||||
{"% used", WTOnly},
|
||||
{"flushes", Always},
|
||||
{"mapped", MMAPOnly},
|
||||
{"vsize", Always},
|
||||
{"res", Always},
|
||||
{"non-mapped", MMAPOnly | AllOnly},
|
||||
{"faults", MMAPOnly},
|
||||
{"lr|lw %", MMAPOnly | AllOnly},
|
||||
{"lrt|lwt", MMAPOnly | AllOnly},
|
||||
{" locked db", Locks},
|
||||
{"qr|qw", Always},
|
||||
{"ar|aw", Always},
|
||||
{"netIn", Always},
|
||||
{"netOut", Always},
|
||||
{"conn", Always},
|
||||
{"set", Repl},
|
||||
{"repl", Repl},
|
||||
{"time", Always},
|
||||
}
|
||||
|
||||
// NamespacedLocks stores information on the LockStatus of namespaces.
|
||||
type NamespacedLocks map[string]LockStatus
|
||||
|
||||
// LockUsage stores information related to a namespace's lock usage.
|
||||
type LockUsage struct {
|
||||
Namespace string
|
||||
Reads int64
|
||||
Writes int64
|
||||
}
|
||||
|
||||
type lockUsages []LockUsage
|
||||
|
||||
func percentageInt64(value, outOf int64) float64 {
|
||||
if value == 0 || outOf == 0 {
|
||||
return 0
|
||||
}
|
||||
return 100 * (float64(value) / float64(outOf))
|
||||
}
|
||||
|
||||
func averageInt64(value, outOf int64) int64 {
|
||||
if value == 0 || outOf == 0 {
|
||||
return 0
|
||||
}
|
||||
return value / outOf
|
||||
}
|
||||
|
||||
func (slice lockUsages) Len() int {
|
||||
return len(slice)
|
||||
}
|
||||
|
||||
func (slice lockUsages) Less(i, j int) bool {
|
||||
return slice[i].Reads+slice[i].Writes < slice[j].Reads+slice[j].Writes
|
||||
}
|
||||
|
||||
func (slice lockUsages) Swap(i, j int) {
|
||||
slice[i], slice[j] = slice[j], slice[i]
|
||||
}
|
||||
|
||||
// CollectionLockStatus stores a collection's lock statistics.
|
||||
type CollectionLockStatus struct {
|
||||
ReadAcquireWaitsPercentage float64
|
||||
WriteAcquireWaitsPercentage float64
|
||||
ReadAcquireTimeMicros int64
|
||||
WriteAcquireTimeMicros int64
|
||||
}
|
||||
|
||||
// LockStatus stores a database's lock statistics.
|
||||
type LockStatus struct {
|
||||
DBName string
|
||||
Percentage float64
|
||||
Global bool
|
||||
}
|
||||
|
||||
// StatLine is a wrapper for all metrics reported by mongostat for monitored hosts.
|
||||
type StatLine struct {
|
||||
Key string
|
||||
// What storage engine is being used for the node with this stat line
|
||||
StorageEngine string
|
||||
|
||||
Error error
|
||||
IsMongos bool
|
||||
Host string
|
||||
|
||||
// The time at which this StatLine was generated.
|
||||
Time time.Time
|
||||
|
||||
// The last time at which this StatLine was printed to output.
|
||||
LastPrinted time.Time
|
||||
|
||||
// Opcounter fields
|
||||
Insert, Query, Update, Delete, GetMore, Command int64
|
||||
|
||||
// Collection locks (3.0 mmap only)
|
||||
CollectionLocks *CollectionLockStatus
|
||||
|
||||
// Cache utilization (wiredtiger only)
|
||||
CacheDirtyPercent float64
|
||||
CacheUsedPercent float64
|
||||
|
||||
// Replicated Opcounter fields
|
||||
InsertR, QueryR, UpdateR, DeleteR, GetMoreR, CommandR int64
|
||||
Flushes int64
|
||||
Mapped, Virtual, Resident, NonMapped int64
|
||||
Faults int64
|
||||
HighestLocked *LockStatus
|
||||
QueuedReaders, QueuedWriters int64
|
||||
ActiveReaders, ActiveWriters int64
|
||||
NetIn, NetOut int64
|
||||
NumConnections int64
|
||||
ReplSetName string
|
||||
NodeType string
|
||||
}
|
||||
|
||||
func parseLocks(stat ServerStatus) map[string]LockUsage {
|
||||
returnVal := map[string]LockUsage{}
|
||||
for namespace, lockInfo := range stat.Locks {
|
||||
returnVal[namespace] = LockUsage{
|
||||
namespace,
|
||||
lockInfo.TimeLockedMicros.Read + lockInfo.TimeLockedMicros.ReadLower,
|
||||
lockInfo.TimeLockedMicros.Write + lockInfo.TimeLockedMicros.WriteLower,
|
||||
}
|
||||
}
|
||||
return returnVal
|
||||
}
|
||||
|
||||
func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage {
|
||||
lockUsages := lockUsages(make([]LockUsage, 0, len(curLocks)))
|
||||
for namespace, curUsage := range curLocks {
|
||||
prevUsage, hasKey := prevLocks[namespace]
|
||||
if !hasKey {
|
||||
// This namespace didn't appear in the previous batch of lock info,
|
||||
// so we can't compute a diff for it - skip it.
|
||||
continue
|
||||
}
|
||||
// Calculate diff of lock usage for this namespace and add to the list
|
||||
lockUsages = append(lockUsages,
|
||||
LockUsage{
|
||||
namespace,
|
||||
curUsage.Reads - prevUsage.Reads,
|
||||
curUsage.Writes - prevUsage.Writes,
|
||||
})
|
||||
}
|
||||
// Sort the array in order of least to most locked
|
||||
sort.Sort(lockUsages)
|
||||
return lockUsages
|
||||
}
|
||||
|
||||
func diff(newVal, oldVal, sampleTime int64) int64 {
|
||||
d := newVal - oldVal
|
||||
if d < 0 {
|
||||
d = newVal
|
||||
}
|
||||
return d / sampleTime
|
||||
}
|
||||
|
||||
// NewStatLine constructs a StatLine object from two ServerStatus objects.
|
||||
func NewStatLine(oldStat, newStat ServerStatus, key string, all bool, sampleSecs int64) *StatLine {
|
||||
returnVal := &StatLine{
|
||||
Key: key,
|
||||
Host: newStat.Host,
|
||||
Mapped: -1,
|
||||
Virtual: -1,
|
||||
Resident: -1,
|
||||
NonMapped: -1,
|
||||
Faults: -1,
|
||||
}
|
||||
|
||||
// set the storage engine appropriately
|
||||
if newStat.StorageEngine != nil && newStat.StorageEngine["name"] != "" {
|
||||
returnVal.StorageEngine = newStat.StorageEngine["name"]
|
||||
} else {
|
||||
returnVal.StorageEngine = "mmapv1"
|
||||
}
|
||||
|
||||
if newStat.Opcounters != nil && oldStat.Opcounters != nil {
|
||||
returnVal.Insert = diff(newStat.Opcounters.Insert, oldStat.Opcounters.Insert, sampleSecs)
|
||||
returnVal.Query = diff(newStat.Opcounters.Query, oldStat.Opcounters.Query, sampleSecs)
|
||||
returnVal.Update = diff(newStat.Opcounters.Update, oldStat.Opcounters.Update, sampleSecs)
|
||||
returnVal.Delete = diff(newStat.Opcounters.Delete, oldStat.Opcounters.Delete, sampleSecs)
|
||||
returnVal.GetMore = diff(newStat.Opcounters.GetMore, oldStat.Opcounters.GetMore, sampleSecs)
|
||||
returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
|
||||
}
|
||||
|
||||
if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil {
|
||||
returnVal.InsertR = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs)
|
||||
returnVal.QueryR = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs)
|
||||
returnVal.UpdateR = diff(newStat.OpcountersRepl.Update, oldStat.OpcountersRepl.Update, sampleSecs)
|
||||
returnVal.DeleteR = diff(newStat.OpcountersRepl.Delete, oldStat.OpcountersRepl.Delete, sampleSecs)
|
||||
returnVal.GetMoreR = diff(newStat.OpcountersRepl.GetMore, oldStat.OpcountersRepl.GetMore, sampleSecs)
|
||||
returnVal.CommandR = diff(newStat.OpcountersRepl.Command, oldStat.OpcountersRepl.Command, sampleSecs)
|
||||
}
|
||||
|
||||
returnVal.CacheDirtyPercent = -1
|
||||
returnVal.CacheUsedPercent = -1
|
||||
if newStat.WiredTiger != nil && oldStat.WiredTiger != nil {
|
||||
returnVal.Flushes = newStat.WiredTiger.Transaction.TransCheckpoints - oldStat.WiredTiger.Transaction.TransCheckpoints
|
||||
returnVal.CacheDirtyPercent = float64(newStat.WiredTiger.Cache.TrackedDirtyBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured)
|
||||
returnVal.CacheUsedPercent = float64(newStat.WiredTiger.Cache.CurrentCachedBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured)
|
||||
} else if newStat.BackgroundFlushing != nil && oldStat.BackgroundFlushing != nil {
|
||||
returnVal.Flushes = newStat.BackgroundFlushing.Flushes - oldStat.BackgroundFlushing.Flushes
|
||||
}
|
||||
|
||||
returnVal.Time = newStat.SampleTime
|
||||
returnVal.IsMongos =
|
||||
(newStat.ShardCursorType != nil || strings.HasPrefix(newStat.Process, MongosProcess))
|
||||
|
||||
// BEGIN code modification
|
||||
if oldStat.Mem.Supported.(bool) {
|
||||
// END code modification
|
||||
if !returnVal.IsMongos {
|
||||
returnVal.Mapped = newStat.Mem.Mapped
|
||||
}
|
||||
returnVal.Virtual = newStat.Mem.Virtual
|
||||
returnVal.Resident = newStat.Mem.Resident
|
||||
|
||||
if !returnVal.IsMongos && all {
|
||||
returnVal.NonMapped = newStat.Mem.Virtual - newStat.Mem.Mapped
|
||||
}
|
||||
}
|
||||
|
||||
if newStat.Repl != nil {
|
||||
setName, isReplSet := newStat.Repl.SetName.(string)
|
||||
if isReplSet {
|
||||
returnVal.ReplSetName = setName
|
||||
}
|
||||
// BEGIN code modification
|
||||
if newStat.Repl.IsMaster.(bool) {
|
||||
returnVal.NodeType = "PRI"
|
||||
} else if newStat.Repl.Secondary.(bool) {
|
||||
returnVal.NodeType = "SEC"
|
||||
} else {
|
||||
returnVal.NodeType = "UNK"
|
||||
}
|
||||
// END code modification
|
||||
} else if returnVal.IsMongos {
|
||||
returnVal.NodeType = "RTR"
|
||||
}
|
||||
|
||||
if oldStat.ExtraInfo != nil && newStat.ExtraInfo != nil &&
|
||||
oldStat.ExtraInfo.PageFaults != nil && newStat.ExtraInfo.PageFaults != nil {
|
||||
returnVal.Faults = diff(*(newStat.ExtraInfo.PageFaults), *(oldStat.ExtraInfo.PageFaults), sampleSecs)
|
||||
}
|
||||
if !returnVal.IsMongos && oldStat.Locks != nil {
|
||||
globalCheck, hasGlobal := oldStat.Locks["Global"]
|
||||
if hasGlobal && globalCheck.AcquireCount != nil {
|
||||
// This appears to be a 3.0+ server so the data in these fields do *not* refer to
|
||||
// actual namespaces and thus we can't compute lock %.
|
||||
returnVal.HighestLocked = nil
|
||||
|
||||
// Check if it's a 3.0+ MMAP server so we can still compute collection locks
|
||||
collectionCheck, hasCollection := oldStat.Locks["Collection"]
|
||||
if hasCollection && collectionCheck.AcquireWaitCount != nil {
|
||||
readWaitCountDiff := newStat.Locks["Collection"].AcquireWaitCount.Read - oldStat.Locks["Collection"].AcquireWaitCount.Read
|
||||
readTotalCountDiff := newStat.Locks["Collection"].AcquireCount.Read - oldStat.Locks["Collection"].AcquireCount.Read
|
||||
writeWaitCountDiff := newStat.Locks["Collection"].AcquireWaitCount.Write - oldStat.Locks["Collection"].AcquireWaitCount.Write
|
||||
writeTotalCountDiff := newStat.Locks["Collection"].AcquireCount.Write - oldStat.Locks["Collection"].AcquireCount.Write
|
||||
readAcquireTimeDiff := newStat.Locks["Collection"].TimeAcquiringMicros.Read - oldStat.Locks["Collection"].TimeAcquiringMicros.Read
|
||||
writeAcquireTimeDiff := newStat.Locks["Collection"].TimeAcquiringMicros.Write - oldStat.Locks["Collection"].TimeAcquiringMicros.Write
|
||||
returnVal.CollectionLocks = &CollectionLockStatus{
|
||||
ReadAcquireWaitsPercentage: percentageInt64(readWaitCountDiff, readTotalCountDiff),
|
||||
WriteAcquireWaitsPercentage: percentageInt64(writeWaitCountDiff, writeTotalCountDiff),
|
||||
ReadAcquireTimeMicros: averageInt64(readAcquireTimeDiff, readWaitCountDiff),
|
||||
WriteAcquireTimeMicros: averageInt64(writeAcquireTimeDiff, writeWaitCountDiff),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
prevLocks := parseLocks(oldStat)
|
||||
curLocks := parseLocks(newStat)
|
||||
lockdiffs := computeLockDiffs(prevLocks, curLocks)
|
||||
if len(lockdiffs) == 0 {
|
||||
if newStat.GlobalLock != nil {
|
||||
returnVal.HighestLocked = &LockStatus{
|
||||
DBName: "",
|
||||
Percentage: percentageInt64(newStat.GlobalLock.LockTime, newStat.GlobalLock.TotalTime),
|
||||
Global: true,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Get the entry with the highest lock
|
||||
highestLocked := lockdiffs[len(lockdiffs)-1]
|
||||
|
||||
var timeDiffMillis int64
|
||||
timeDiffMillis = newStat.UptimeMillis - oldStat.UptimeMillis
|
||||
|
||||
lockToReport := highestLocked.Writes
|
||||
|
||||
// if the highest locked namespace is not '.'
|
||||
if highestLocked.Namespace != "." {
|
||||
for _, namespaceLockInfo := range lockdiffs {
|
||||
if namespaceLockInfo.Namespace == "." {
|
||||
lockToReport += namespaceLockInfo.Writes
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// lock data is in microseconds and uptime is in milliseconds - so
|
||||
// divide by 1000 so that the units match
|
||||
lockToReport /= 1000
|
||||
|
||||
returnVal.HighestLocked = &LockStatus{
|
||||
DBName: highestLocked.Namespace,
|
||||
Percentage: percentageInt64(lockToReport, timeDiffMillis),
|
||||
Global: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
returnVal.HighestLocked = nil
|
||||
}
|
||||
|
||||
if newStat.GlobalLock != nil {
|
||||
hasWT := (newStat.WiredTiger != nil && oldStat.WiredTiger != nil)
|
||||
//If we have wiredtiger stats, use those instead
|
||||
if newStat.GlobalLock.CurrentQueue != nil {
|
||||
if hasWT {
|
||||
returnVal.QueuedReaders = newStat.GlobalLock.CurrentQueue.Readers + newStat.GlobalLock.ActiveClients.Readers - newStat.WiredTiger.Concurrent.Read.Out
|
||||
returnVal.QueuedWriters = newStat.GlobalLock.CurrentQueue.Writers + newStat.GlobalLock.ActiveClients.Writers - newStat.WiredTiger.Concurrent.Write.Out
|
||||
if returnVal.QueuedReaders < 0 {
|
||||
returnVal.QueuedReaders = 0
|
||||
}
|
||||
if returnVal.QueuedWriters < 0 {
|
||||
returnVal.QueuedWriters = 0
|
||||
}
|
||||
} else {
|
||||
returnVal.QueuedReaders = newStat.GlobalLock.CurrentQueue.Readers
|
||||
returnVal.QueuedWriters = newStat.GlobalLock.CurrentQueue.Writers
|
||||
}
|
||||
}
|
||||
|
||||
if hasWT {
|
||||
returnVal.ActiveReaders = newStat.WiredTiger.Concurrent.Read.Out
|
||||
returnVal.ActiveWriters = newStat.WiredTiger.Concurrent.Write.Out
|
||||
} else if newStat.GlobalLock.ActiveClients != nil {
|
||||
returnVal.ActiveReaders = newStat.GlobalLock.ActiveClients.Readers
|
||||
returnVal.ActiveWriters = newStat.GlobalLock.ActiveClients.Writers
|
||||
}
|
||||
}
|
||||
|
||||
if oldStat.Network != nil && newStat.Network != nil {
|
||||
returnVal.NetIn = diff(newStat.Network.BytesIn, oldStat.Network.BytesIn, sampleSecs)
|
||||
returnVal.NetOut = diff(newStat.Network.BytesOut, oldStat.Network.BytesOut, sampleSecs)
|
||||
}
|
||||
|
||||
if newStat.Connections != nil {
|
||||
returnVal.NumConnections = newStat.Connections.Current
|
||||
}
|
||||
|
||||
return returnVal
|
||||
}
|
||||
213 plugins/inputs/mysql/mysql.go Normal file
@@ -0,0 +1,213 @@
package mysql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
"github.com/influxdb/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Mysql struct {
|
||||
Servers []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# specify servers via a url matching:
|
||||
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
# see https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
||||
# e.g.
|
||||
# root:passwd@tcp(127.0.0.1:3306)/?tls=false
|
||||
# root@tcp(127.0.0.1:3306)/?tls=false
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["tcp(127.0.0.1:3306)/"]
|
||||
`
|
||||
|
||||
func (m *Mysql) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (m *Mysql) Description() string {
|
||||
return "Read metrics from one or many mysql servers"
|
||||
}
|
||||
|
||||
var localhost = ""
|
||||
|
||||
func (m *Mysql) Gather(acc inputs.Accumulator) error {
|
||||
if len(m.Servers) == 0 {
|
||||
// if we can't get stats in this case, thats fine, don't report
|
||||
// an error.
|
||||
m.gatherServer(localhost, acc)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, serv := range m.Servers {
|
||||
err := m.gatherServer(serv, acc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type mapping struct {
|
||||
onServer string
|
||||
inExport string
|
||||
}
|
||||
|
||||
var mappings = []*mapping{
|
||||
{
|
||||
onServer: "Aborted_",
|
||||
inExport: "aborted_",
|
||||
},
|
||||
{
|
||||
onServer: "Bytes_",
|
||||
inExport: "bytes_",
|
||||
},
|
||||
{
|
||||
onServer: "Com_",
|
||||
inExport: "commands_",
|
||||
},
|
||||
{
|
||||
onServer: "Created_",
|
||||
inExport: "created_",
|
||||
},
|
||||
{
|
||||
onServer: "Handler_",
|
||||
inExport: "handler_",
|
||||
},
|
||||
{
|
||||
onServer: "Innodb_",
|
||||
inExport: "innodb_",
|
||||
},
|
||||
{
|
||||
onServer: "Key_",
|
||||
inExport: "key_",
|
||||
},
|
||||
{
|
||||
onServer: "Open_",
|
||||
inExport: "open_",
|
||||
},
|
||||
{
|
||||
onServer: "Opened_",
|
||||
inExport: "opened_",
|
||||
},
|
||||
{
|
||||
onServer: "Qcache_",
|
||||
inExport: "qcache_",
|
||||
},
|
||||
{
|
||||
onServer: "Table_",
|
||||
inExport: "table_",
|
||||
},
|
||||
{
|
||||
onServer: "Tokudb_",
|
||||
inExport: "tokudb_",
|
||||
},
|
||||
{
|
||||
onServer: "Threads_",
|
||||
inExport: "threads_",
|
||||
},
|
||||
}
|
||||
|
||||
func (m *Mysql) gatherServer(serv string, acc inputs.Accumulator) error {
|
||||
// If user forgot the '/', add it
|
||||
if strings.HasSuffix(serv, ")") {
|
||||
serv = serv + "/"
|
||||
} else if serv == "localhost" {
|
||||
serv = ""
|
||||
}
|
||||
|
||||
db, err := sql.Open("mysql", serv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer db.Close()
|
||||
|
||||
rows, err := db.Query(`SHOW /*!50002 GLOBAL */ STATUS`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var servtag string
|
||||
servtag, err = parseDSN(serv)
|
||||
if err != nil {
|
||||
servtag = "localhost"
|
||||
}
|
||||
tags := map[string]string{"server": servtag}
|
||||
fields := make(map[string]interface{})
|
||||
for rows.Next() {
|
||||
var name string
|
||||
var val interface{}
|
||||
|
||||
err = rows.Scan(&name, &val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var found bool
|
||||
|
||||
for _, mapped := range mappings {
|
||||
if strings.HasPrefix(name, mapped.onServer) {
|
||||
i, _ := strconv.Atoi(string(val.([]byte)))
|
||||
fields[mapped.inExport+name[len(mapped.onServer):]] = i
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
continue
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "Queries":
|
||||
i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields["queries"] = i
|
||||
case "Slow_queries":
|
||||
i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields["slow_queries"] = i
|
||||
}
|
||||
}
|
||||
acc.AddFields("mysql", fields, tags)
|
||||
|
||||
conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
|
||||
|
||||
for conn_rows.Next() {
|
||||
var user string
|
||||
var connections int64
|
||||
|
||||
err = conn_rows.Scan(&user, &connections)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tags := map[string]string{"server": servtag, "user": user}
|
||||
fields := make(map[string]interface{})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fields["connections"] = connections
|
||||
acc.AddFields("mysql_users", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("mysql", func() inputs.Input {
|
||||
return &Mysql{}
|
||||
})
|
||||
}
|
||||
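As a minimal sketch (not part of the plugin; values are invented), this is how the prefix mappings above rewrite raw `SHOW GLOBAL STATUS` names into exported field names, e.g. `Com_select` becomes `commands_select`:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the prefix rewrite in gatherServer; status values are made up.
	mappings := map[string]string{"Com_": "commands_", "Innodb_": "innodb_"}
	raw := map[string]string{"Com_select": "42", "Innodb_rows_read": "1337"}

	for name, val := range raw {
		for onServer, inExport := range mappings {
			if strings.HasPrefix(name, onServer) {
				fmt.Printf("%s%s = %s\n", inExport, name[len(onServer):], val)
			}
		}
	}
	// Prints (order may vary): commands_select = 42, innodb_rows_read = 1337
}
```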
86  plugins/inputs/mysql/mysql_test.go  Normal file
@@ -0,0 +1,86 @@
package mysql

import (
    "fmt"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestMysqlDefaultsToLocal(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    m := &Mysql{
        Servers: []string{fmt.Sprintf("root@tcp(%s:3306)/", testutil.GetLocalHost())},
    }

    var acc testutil.Accumulator

    err := m.Gather(&acc)
    require.NoError(t, err)

    assert.True(t, acc.HasMeasurement("mysql"))
}

func TestMysqlParseDSN(t *testing.T) {
    tests := []struct {
        input  string
        output string
    }{
        {
            "",
            "127.0.0.1:3306",
        },
        {
            "localhost",
            "127.0.0.1:3306",
        },
        {
            "127.0.0.1",
            "127.0.0.1:3306",
        },
        {
            "tcp(192.168.1.1:3306)/",
            "192.168.1.1:3306",
        },
        {
            "tcp(localhost)/",
            "localhost",
        },
        {
            "root:passwd@tcp(192.168.1.1:3306)/?tls=false",
            "192.168.1.1:3306",
        },
        {
            "root@tcp(127.0.0.1:3306)/?tls=false",
            "127.0.0.1:3306",
        },
        {
            "root:passwd@tcp(localhost:3036)/dbname?allowOldPasswords=1",
            "localhost:3036",
        },
        {
            "root:foo@bar@tcp(192.1.1.1:3306)/?tls=false",
            "192.1.1.1:3306",
        },
        {
            "root:f00@b4r@tcp(192.1.1.1:3306)/?tls=false",
            "192.1.1.1:3306",
        },
        {
            "root:fl!p11@tcp(192.1.1.1:3306)/?tls=false",
            "192.1.1.1:3306",
        },
    }

    for _, test := range tests {
        output, _ := parseDSN(test.input)
        if output != test.output {
            t.Errorf("Expected %s, got %s\n", test.output, output)
        }
    }
}
85  plugins/inputs/mysql/parse_dsn.go  Normal file
@@ -0,0 +1,85 @@
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

import (
    "errors"
    "strings"
)

// parseDSN parses the DSN string to a config
func parseDSN(dsn string) (string, error) {
    //var user, passwd string
    var addr, net string

    // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
    // Find the last '/' (since the password or the net addr might contain a '/')
    for i := len(dsn) - 1; i >= 0; i-- {
        if dsn[i] == '/' {
            var j, k int

            // left part is empty if i <= 0
            if i > 0 {
                // [username[:password]@][protocol[(address)]]
                // Find the last '@' in dsn[:i]
                for j = i; j >= 0; j-- {
                    if dsn[j] == '@' {
                        // username[:password]
                        // Find the first ':' in dsn[:j]
                        for k = 0; k < j; k++ {
                            if dsn[k] == ':' {
                                //passwd = dsn[k+1 : j]
                                break
                            }
                        }
                        //user = dsn[:k]

                        break
                    }
                }

                // [protocol[(address)]]
                // Find the first '(' in dsn[j+1:i]
                for k = j + 1; k < i; k++ {
                    if dsn[k] == '(' {
                        // dsn[i-1] must be == ')' if an address is specified
                        if dsn[i-1] != ')' {
                            if strings.ContainsRune(dsn[k+1:i], ')') {
                                return "", errors.New("Invalid DSN unescaped")
                            }
                            return "", errors.New("Invalid DSN Addr")
                        }
                        addr = dsn[k+1 : i-1]
                        break
                    }
                }
                net = dsn[j+1 : k]
            }

            break
        }
    }

    // Set default network if empty
    if net == "" {
        net = "tcp"
    }

    // Set default address if empty
    if addr == "" {
        switch net {
        case "tcp":
            addr = "127.0.0.1:3306"
        case "unix":
            addr = "/tmp/mysql.sock"
        default:
            return "", errors.New("Default addr for network '" + net + "' unknown")
        }
    }

    return addr, nil
}
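For illustration only, a sketch of how the plugin uses `parseDSN` above: the address part of the DSN (and nothing else) becomes the `server` tag. The DSN below is taken from the test table; everything else is an assumption about usage, not plugin code.

```go
package mysql

import "fmt"

// Example_serverTag sketches how gatherServer derives the server tag from a DSN.
func Example_serverTag() {
	servtag, err := parseDSN("root:passwd@tcp(192.168.1.1:3306)/?tls=false")
	if err != nil {
		servtag = "localhost" // same fallback gatherServer uses
	}
	fmt.Println(servtag)
	// Output: 192.168.1.1:3306
}
```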
165  plugins/inputs/nginx/nginx.go  Normal file
@@ -0,0 +1,165 @@
package nginx

import (
    "bufio"
    "fmt"
    "net"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/influxdb/telegraf/plugins/inputs"
)

type Nginx struct {
    Urls []string
}

var sampleConfig = `
  # An array of Nginx stub_status URI to gather stats.
  urls = ["http://localhost/status"]
`

func (n *Nginx) SampleConfig() string {
    return sampleConfig
}

func (n *Nginx) Description() string {
    return "Read Nginx's basic status information (ngx_http_stub_status_module)"
}

func (n *Nginx) Gather(acc inputs.Accumulator) error {
    var wg sync.WaitGroup
    var outerr error

    for _, u := range n.Urls {
        addr, err := url.Parse(u)
        if err != nil {
            return fmt.Errorf("Unable to parse address '%s': %s", u, err)
        }

        wg.Add(1)
        go func(addr *url.URL) {
            defer wg.Done()
            outerr = n.gatherUrl(addr, acc)
        }(addr)
    }

    wg.Wait()

    return outerr
}

var tr = &http.Transport{
    ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{Transport: tr}

func (n *Nginx) gatherUrl(addr *url.URL, acc inputs.Accumulator) error {
    resp, err := client.Get(addr.String())
    if err != nil {
        return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
    }
    r := bufio.NewReader(resp.Body)

    // Active connections
    _, err = r.ReadString(':')
    if err != nil {
        return err
    }
    line, err := r.ReadString('\n')
    if err != nil {
        return err
    }
    active, err := strconv.ParseUint(strings.TrimSpace(line), 10, 64)
    if err != nil {
        return err
    }

    // Server accepts handled requests
    _, err = r.ReadString('\n')
    if err != nil {
        return err
    }
    line, err = r.ReadString('\n')
    if err != nil {
        return err
    }
    data := strings.SplitN(strings.TrimSpace(line), " ", 3)
    accepts, err := strconv.ParseUint(data[0], 10, 64)
    if err != nil {
        return err
    }
    handled, err := strconv.ParseUint(data[1], 10, 64)
    if err != nil {
        return err
    }
    requests, err := strconv.ParseUint(data[2], 10, 64)
    if err != nil {
        return err
    }

    // Reading/Writing/Waiting
    line, err = r.ReadString('\n')
    if err != nil {
        return err
    }
    data = strings.SplitN(strings.TrimSpace(line), " ", 6)
    reading, err := strconv.ParseUint(data[1], 10, 64)
    if err != nil {
        return err
    }
    writing, err := strconv.ParseUint(data[3], 10, 64)
    if err != nil {
        return err
    }
    waiting, err := strconv.ParseUint(data[5], 10, 64)
    if err != nil {
        return err
    }

    tags := getTags(addr)
    fields := map[string]interface{}{
        "active":   active,
        "accepts":  accepts,
        "handled":  handled,
        "requests": requests,
        "reading":  reading,
        "writing":  writing,
        "waiting":  waiting,
    }
    acc.AddFields("nginx", fields, tags)

    return nil
}

// Get tag(s) for the nginx plugin
func getTags(addr *url.URL) map[string]string {
    h := addr.Host
    host, port, err := net.SplitHostPort(h)
    if err != nil {
        host = addr.Host
        if addr.Scheme == "http" {
            port = "80"
        } else if addr.Scheme == "https" {
            port = "443"
        } else {
            port = ""
        }
    }
    return map[string]string{"server": host, "port": port}
}

func init() {
    inputs.Add("nginx", func() inputs.Input {
        return &Nginx{}
    })
}
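A standalone sketch (not plugin code) of the positional parse used by `gatherUrl` above: after splitting the last stub_status line on single spaces, fields 1, 3 and 5 carry the Reading/Writing/Waiting counts; the line below is an invented example.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up stub_status third line; the plugin reads it the same way.
	line := "Reading: 4 Writing: 135 Waiting: 446"
	data := strings.SplitN(strings.TrimSpace(line), " ", 6)
	fmt.Println(data[1], data[3], data[5]) // 4 135 446
}
```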
85  plugins/inputs/nginx/nginx_test.go  Normal file
@@ -0,0 +1,85 @@
package nginx

import (
    "fmt"
    "net"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const sampleResponse = `
Active connections: 585
server accepts handled requests
 85340 85340 35085
Reading: 4 Writing: 135 Waiting: 446
`

// Verify that nginx tags are properly parsed based on the server
func TestNginxTags(t *testing.T) {
    urls := []string{"http://localhost/endpoint", "http://localhost:80/endpoint"}
    var addr *url.URL
    for _, url1 := range urls {
        addr, _ = url.Parse(url1)
        tagMap := getTags(addr)
        assert.Contains(t, tagMap["server"], "localhost")
    }
}

func TestNginxGeneratesMetrics(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        var rsp string

        if r.URL.Path == "/stub_status" {
            rsp = sampleResponse
        } else {
            panic("Cannot handle request")
        }

        fmt.Fprintln(w, rsp)
    }))
    defer ts.Close()

    n := &Nginx{
        Urls: []string{fmt.Sprintf("%s/stub_status", ts.URL)},
    }

    var acc testutil.Accumulator

    err := n.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "active":   uint64(585),
        "accepts":  uint64(85340),
        "handled":  uint64(85340),
        "requests": uint64(35085),
        "reading":  uint64(4),
        "writing":  uint64(135),
        "waiting":  uint64(446),
    }
    addr, err := url.Parse(ts.URL)
    if err != nil {
        panic(err)
    }

    host, port, err := net.SplitHostPort(addr.Host)
    if err != nil {
        host = addr.Host
        if addr.Scheme == "http" {
            port = "80"
        } else if addr.Scheme == "https" {
            port = "443"
        } else {
            port = ""
        }
    }

    tags := map[string]string{"server": host, "port": port}
    acc.AssertContainsTaggedFields(t, "nginx", fields, tags)
}
85  plugins/inputs/phpfpm/README.md  Normal file
@@ -0,0 +1,85 @@
# Telegraf plugin: phpfpm

Get phpfpm stats using either the HTTP status page or the fpm socket.

# Measurements

Meta:

- tags: `url=<ip> pool=poolname`

Measurement names:

- accepted_conn
- listen_queue
- max_listen_queue
- listen_queue_len
- idle_processes
- active_processes
- total_processes
- max_active_processes
- max_children_reached
- slow_requests

# Example output

Using this configuration:

```
[phpfpm]
  # An array of addresses to gather stats about. Specify an ip or hostname
  # with optional port and path, e.g. localhost, 10.10.3.33/server-status, etc.
  #
  # We can configure in three modes:
  #   - unixsocket: the string is the path to the fpm socket, like
  #       /var/run/php5-fpm.sock
  #   - http: the URL has to start with http:// or https://
  #   - fcgi: the URL has to start with fcgi:// or cgi://, and the socket port must be present
  #
  # If no servers are specified, then default to 127.0.0.1/server-status
  urls = ["http://localhost/status", "10.0.0.12:/var/run/php5-fpm-www2.sock", "fcgi://10.0.0.12:9000/status"]
```

When run with:

```
./telegraf -config telegraf.conf -input-filter phpfpm -test
```

It produces:

```
* Plugin: phpfpm, Collection 1
> [url="10.0.0.12" pool="www"] phpfpm_idle_processes value=1
> [url="10.0.0.12" pool="www"] phpfpm_total_processes value=2
> [url="10.0.0.12" pool="www"] phpfpm_max_children_reached value=0
> [url="10.0.0.12" pool="www"] phpfpm_max_listen_queue value=0
> [url="10.0.0.12" pool="www"] phpfpm_listen_queue value=0
> [url="10.0.0.12" pool="www"] phpfpm_listen_queue_len value=0
> [url="10.0.0.12" pool="www"] phpfpm_active_processes value=1
> [url="10.0.0.12" pool="www"] phpfpm_max_active_processes value=2
> [url="10.0.0.12" pool="www"] phpfpm_slow_requests value=0
> [url="10.0.0.12" pool="www"] phpfpm_accepted_conn value=305

> [url="localhost" pool="www2"] phpfpm_max_children_reached value=0
> [url="localhost" pool="www2"] phpfpm_slow_requests value=0
> [url="localhost" pool="www2"] phpfpm_max_listen_queue value=0
> [url="localhost" pool="www2"] phpfpm_active_processes value=1
> [url="localhost" pool="www2"] phpfpm_listen_queue_len value=0
> [url="localhost" pool="www2"] phpfpm_idle_processes value=1
> [url="localhost" pool="www2"] phpfpm_total_processes value=2
> [url="localhost" pool="www2"] phpfpm_max_active_processes value=2
> [url="localhost" pool="www2"] phpfpm_accepted_conn value=306
> [url="localhost" pool="www2"] phpfpm_listen_queue value=0

> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_children_reached value=0
> [url="10.0.0.12:9000" pool="www3"] phpfpm_slow_requests value=1
> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_listen_queue value=0
> [url="10.0.0.12:9000" pool="www3"] phpfpm_active_processes value=1
> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue_len value=0
> [url="10.0.0.12:9000" pool="www3"] phpfpm_idle_processes value=2
> [url="10.0.0.12:9000" pool="www3"] phpfpm_total_processes value=2
> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_active_processes value=2
> [url="10.0.0.12:9000" pool="www3"] phpfpm_accepted_conn value=307
> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue value=0
```
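A minimal sketch of how the three address modes described in the README can be told apart by their prefix, mirroring the checks in phpfpm.go below (the addresses are only examples):

```go
package main

import (
	"fmt"
	"strings"
)

// mode classifies a phpfpm address the same way gatherServer does.
func mode(addr string) string {
	switch {
	case strings.HasPrefix(addr, "http://"), strings.HasPrefix(addr, "https://"):
		return "http"
	case strings.HasPrefix(addr, "fcgi://"), strings.HasPrefix(addr, "cgi://"):
		return "fcgi"
	default:
		return "unixsocket"
	}
}

func main() {
	for _, a := range []string{
		"http://localhost/status",
		"fcgi://10.0.0.12:9000/status",
		"10.0.0.12:/var/run/php5-fpm-www2.sock",
	} {
		fmt.Println(a, "->", mode(a))
	}
}
```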
215  plugins/inputs/phpfpm/phpfpm.go  Normal file
@@ -0,0 +1,215 @@
package phpfpm

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "sync"

    "github.com/influxdb/telegraf/plugins/inputs"
)

const (
    PF_POOL                 = "pool"
    PF_PROCESS_MANAGER      = "process manager"
    PF_ACCEPTED_CONN        = "accepted conn"
    PF_LISTEN_QUEUE         = "listen queue"
    PF_MAX_LISTEN_QUEUE     = "max listen queue"
    PF_LISTEN_QUEUE_LEN     = "listen queue len"
    PF_IDLE_PROCESSES       = "idle processes"
    PF_ACTIVE_PROCESSES     = "active processes"
    PF_TOTAL_PROCESSES      = "total processes"
    PF_MAX_ACTIVE_PROCESSES = "max active processes"
    PF_MAX_CHILDREN_REACHED = "max children reached"
    PF_SLOW_REQUESTS        = "slow requests"
)

type metric map[string]int64
type poolStat map[string]metric

type phpfpm struct {
    Urls []string

    client *http.Client
}

var sampleConfig = `
  # An array of addresses to gather stats about. Specify an ip or hostname
  # with optional port and path.
  #
  # Plugin can be configured in three modes (the modes can be mixed):
  #   - http: the URL must start with http:// or https://, ex:
  #       "http://localhost/status"
  #       "http://192.168.130.1/status?full"
  #   - unixsocket: path to fpm socket, ex:
  #       "/var/run/php5-fpm.sock"
  #       "192.168.10.10:/var/run/php5-fpm-www2.sock"
  #   - fcgi: the URL must start with fcgi:// or cgi://, and the port must be present, ex:
  #       "fcgi://10.0.0.12:9000/status"
  #       "cgi://10.0.10.12:9001/status"
  #
  # If no servers are specified, then default to 127.0.0.1/server-status
  urls = ["http://localhost/status"]
`

func (r *phpfpm) SampleConfig() string {
    return sampleConfig
}

func (r *phpfpm) Description() string {
    return "Read metrics of phpfpm, via HTTP status page or socket"
}

// Reads stats from all configured servers and accumulates them.
// Returns one of the errors encountered while gathering stats (if any).
func (g *phpfpm) Gather(acc inputs.Accumulator) error {
    if len(g.Urls) == 0 {
        return g.gatherServer("http://127.0.0.1/status", acc)
    }

    var wg sync.WaitGroup

    var outerr error

    for _, serv := range g.Urls {
        wg.Add(1)
        go func(serv string) {
            defer wg.Done()
            outerr = g.gatherServer(serv, acc)
        }(serv)
    }

    wg.Wait()

    return outerr
}

// Request status page to get stat raw data
func (g *phpfpm) gatherServer(addr string, acc inputs.Accumulator) error {
    if g.client == nil {

        client := &http.Client{}
        g.client = client
    }

    if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
        u, err := url.Parse(addr)
        if err != nil {
            return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
        }

        req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme,
            u.Host, u.Path), nil)
        res, err := g.client.Do(req)
        if err != nil {
            return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v",
                addr, err)
        }

        if res.StatusCode != 200 {
            return fmt.Errorf("Unable to get valid stat result from '%s': %v",
                addr, err)
        }

        importMetric(res.Body, acc, u.Host)
    } else {
        var (
            fcgi     *FCGIClient
            fcgiAddr string
        )
        if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
            u, err := url.Parse(addr)
            if err != nil {
                return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
            }
            socketAddr := strings.Split(u.Host, ":")
            fcgiIp := socketAddr[0]
            fcgiPort, _ := strconv.Atoi(socketAddr[1])
            fcgiAddr = u.Host
            fcgi, _ = NewClient(fcgiIp, fcgiPort)
        } else {
            socketAddr := strings.Split(addr, ":")
            fcgiAddr = socketAddr[0]
            fcgi, _ = NewClient("unix", socketAddr[1])
        }
        resOut, resErr, err := fcgi.Request(map[string]string{
            "SCRIPT_NAME":     "/status",
            "SCRIPT_FILENAME": "status",
            "REQUEST_METHOD":  "GET",
        }, "")

        if len(resErr) == 0 && err == nil {
            importMetric(bytes.NewReader(resOut), acc, fcgiAddr)
        }

    }

    return nil
}

// Import HTTP stat data into Telegraf system
func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, error) {
    stats := make(poolStat)
    var currentPool string

    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        statLine := scanner.Text()
        keyvalue := strings.Split(statLine, ":")

        if len(keyvalue) < 2 {
            continue
        }
        fieldName := strings.Trim(keyvalue[0], " ")
        // We start to gather data for a new pool here
        if fieldName == PF_POOL {
            currentPool = strings.Trim(keyvalue[1], " ")
            stats[currentPool] = make(metric)
            continue
        }

        // Start to parse metric for current pool
        switch fieldName {
        case PF_ACCEPTED_CONN,
            PF_LISTEN_QUEUE,
            PF_MAX_LISTEN_QUEUE,
            PF_LISTEN_QUEUE_LEN,
            PF_IDLE_PROCESSES,
            PF_ACTIVE_PROCESSES,
            PF_TOTAL_PROCESSES,
            PF_MAX_ACTIVE_PROCESSES,
            PF_MAX_CHILDREN_REACHED,
            PF_SLOW_REQUESTS:
            fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64)
            if err == nil {
                stats[currentPool][fieldName] = fieldValue
            }
        }
    }

    // Finally, we push the pool metric
    for pool := range stats {
        tags := map[string]string{
            "url":  host,
            "pool": pool,
        }
        fields := make(map[string]interface{})
        for k, v := range stats[pool] {
            fields[strings.Replace(k, " ", "_", -1)] = v
        }
        acc.AddFields("phpfpm", fields, tags)
    }

    return stats, nil
}

func init() {
    inputs.Add("phpfpm", func() inputs.Input {
        return &phpfpm{}
    })
}
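A hedged sketch of feeding `importMetric` above a raw status payload: in the plugin it is called with the HTTP response body or the FastCGI stdout, while here the pool name and numbers are invented and the accumulator is the test helper from this repository.

```go
package phpfpm

import (
	"strings"

	"github.com/influxdb/telegraf/testutil"
)

// exampleImportMetric is illustrative only and not part of the plugin.
func exampleImportMetric() {
	payload := "pool: www\naccepted conn: 12\nslow requests: 1\n"
	var acc testutil.Accumulator
	// Each pool in the payload is tagged with url="10.0.0.12" and pool="www".
	importMetric(strings.NewReader(payload), &acc, "10.0.0.12")
}
```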
321  plugins/inputs/phpfpm/phpfpm_fcgi.go  Normal file
@@ -0,0 +1,321 @@
package phpfpm

// FastCGI client to request via socket

// Copyright 2012 Junqing Tan <ivan@mysqlab.net> and The Go Authors
// Use of this source code is governed by a BSD-style license.
// Part of the source code is from the Go fcgi package.

// Fix bug: Can't receive more than 1 record until FCGI_END_REQUEST 2012-09-15
// By: wofeiwo

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "errors"
    "io"
    "net"
    "strconv"
    "sync"
)

const FCGI_LISTENSOCK_FILENO uint8 = 0
const FCGI_HEADER_LEN uint8 = 8
const VERSION_1 uint8 = 1
const FCGI_NULL_REQUEST_ID uint8 = 0
const FCGI_KEEP_CONN uint8 = 1

const (
    FCGI_BEGIN_REQUEST uint8 = iota + 1
    FCGI_ABORT_REQUEST
    FCGI_END_REQUEST
    FCGI_PARAMS
    FCGI_STDIN
    FCGI_STDOUT
    FCGI_STDERR
    FCGI_DATA
    FCGI_GET_VALUES
    FCGI_GET_VALUES_RESULT
    FCGI_UNKNOWN_TYPE
    FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
)

const (
    FCGI_RESPONDER uint8 = iota + 1
    FCGI_AUTHORIZER
    FCGI_FILTER
)

const (
    FCGI_REQUEST_COMPLETE uint8 = iota
    FCGI_CANT_MPX_CONN
    FCGI_OVERLOADED
    FCGI_UNKNOWN_ROLE
)

const (
    FCGI_MAX_CONNS  string = "MAX_CONNS"
    FCGI_MAX_REQS   string = "MAX_REQS"
    FCGI_MPXS_CONNS string = "MPXS_CONNS"
)

const (
    maxWrite = 6553500 // maximum record body
    maxPad   = 255
)

type header struct {
    Version       uint8
    Type          uint8
    Id            uint16
    ContentLength uint16
    PaddingLength uint8
    Reserved      uint8
}

// for padding so we don't have to allocate all the time
// not synchronized because we don't care what the contents are
var pad [maxPad]byte

func (h *header) init(recType uint8, reqId uint16, contentLength int) {
    h.Version = 1
    h.Type = recType
    h.Id = reqId
    h.ContentLength = uint16(contentLength)
    h.PaddingLength = uint8(-contentLength & 7)
}

type record struct {
    h   header
    buf [maxWrite + maxPad]byte
}

func (rec *record) read(r io.Reader) (err error) {
    if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
        return err
    }
    if rec.h.Version != 1 {
        return errors.New("fcgi: invalid header version")
    }
    n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
    if _, err = io.ReadFull(r, rec.buf[:n]); err != nil {
        return err
    }
    return nil
}

func (r *record) content() []byte {
    return r.buf[:r.h.ContentLength]
}

type FCGIClient struct {
    mutex     sync.Mutex
    rwc       io.ReadWriteCloser
    h         header
    buf       bytes.Buffer
    keepAlive bool
}

func NewClient(h string, args ...interface{}) (fcgi *FCGIClient, err error) {
    var conn net.Conn
    if len(args) != 1 {
        err = errors.New("fcgi: not enough params")
        return
    }
    switch args[0].(type) {
    case int:
        addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
        conn, err = net.Dial("tcp", addr)
    case string:
        laddr := net.UnixAddr{Name: args[0].(string), Net: h}
        conn, err = net.DialUnix(h, nil, &laddr)
    default:
        err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
    }
    fcgi = &FCGIClient{
        rwc:       conn,
        keepAlive: false,
    }
    return
}

func (client *FCGIClient) writeRecord(recType uint8, reqId uint16, content []byte) (err error) {
    client.mutex.Lock()
    defer client.mutex.Unlock()
    client.buf.Reset()
    client.h.init(recType, reqId, len(content))
    if err := binary.Write(&client.buf, binary.BigEndian, client.h); err != nil {
        return err
    }
    if _, err := client.buf.Write(content); err != nil {
        return err
    }
    if _, err := client.buf.Write(pad[:client.h.PaddingLength]); err != nil {
        return err
    }
    _, err = client.rwc.Write(client.buf.Bytes())
    return err
}

func (client *FCGIClient) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
    b := [8]byte{byte(role >> 8), byte(role), flags}
    return client.writeRecord(FCGI_BEGIN_REQUEST, reqId, b[:])
}

func (client *FCGIClient) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
    b := make([]byte, 8)
    binary.BigEndian.PutUint32(b, uint32(appStatus))
    b[4] = protocolStatus
    return client.writeRecord(FCGI_END_REQUEST, reqId, b)
}

func (client *FCGIClient) writePairs(recType uint8, reqId uint16, pairs map[string]string) error {
    w := newWriter(client, recType, reqId)
    b := make([]byte, 8)
    for k, v := range pairs {
        n := encodeSize(b, uint32(len(k)))
        n += encodeSize(b[n:], uint32(len(v)))
        if _, err := w.Write(b[:n]); err != nil {
            return err
        }
        if _, err := w.WriteString(k); err != nil {
            return err
        }
        if _, err := w.WriteString(v); err != nil {
            return err
        }
    }
    w.Close()
    return nil
}

func readSize(s []byte) (uint32, int) {
    if len(s) == 0 {
        return 0, 0
    }
    size, n := uint32(s[0]), 1
    if size&(1<<7) != 0 {
        if len(s) < 4 {
            return 0, 0
        }
        n = 4
        size = binary.BigEndian.Uint32(s)
        size &^= 1 << 31
    }
    return size, n
}

func readString(s []byte, size uint32) string {
    if size > uint32(len(s)) {
        return ""
    }
    return string(s[:size])
}

func encodeSize(b []byte, size uint32) int {
    if size > 127 {
        size |= 1 << 31
        binary.BigEndian.PutUint32(b, size)
        return 4
    }
    b[0] = byte(size)
    return 1
}

// bufWriter encapsulates bufio.Writer but also closes the underlying stream when
// Closed.
type bufWriter struct {
    closer io.Closer
    *bufio.Writer
}

func (w *bufWriter) Close() error {
    if err := w.Writer.Flush(); err != nil {
        w.closer.Close()
        return err
    }
    return w.closer.Close()
}

func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter {
    s := &streamWriter{c: c, recType: recType, reqId: reqId}
    w := bufio.NewWriterSize(s, maxWrite)
    return &bufWriter{s, w}
}

// streamWriter abstracts out the separation of a stream into discrete records.
// It only writes maxWrite bytes at a time.
type streamWriter struct {
    c       *FCGIClient
    recType uint8
    reqId   uint16
}

func (w *streamWriter) Write(p []byte) (int, error) {
    nn := 0
    for len(p) > 0 {
        n := len(p)
        if n > maxWrite {
            n = maxWrite
        }
        if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil {
            return nn, err
        }
        nn += n
        p = p[n:]
    }
    return nn, nil
}

func (w *streamWriter) Close() error {
    // send empty record to close the stream
    return w.c.writeRecord(w.recType, w.reqId, nil)
}

func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout []byte, reterr []byte, err error) {

    var reqId uint16 = 1
    defer client.rwc.Close()

    err = client.writeBeginRequest(reqId, uint16(FCGI_RESPONDER), 0)
    if err != nil {
        return
    }
    err = client.writePairs(FCGI_PARAMS, reqId, env)
    if err != nil {
        return
    }
    if len(reqStr) > 0 {
        err = client.writeRecord(FCGI_STDIN, reqId, []byte(reqStr))
        if err != nil {
            return
        }
    }

    rec := &record{}
    var err1 error

    // receive until EOF or FCGI_END_REQUEST
    for {
        err1 = rec.read(client.rwc)
        if err1 != nil {
            if err1 != io.EOF {
                err = err1
            }
            break
        }
        switch {
        case rec.h.Type == FCGI_STDOUT:
            retout = append(retout, rec.content()...)
        case rec.h.Type == FCGI_STDERR:
            reterr = append(reterr, rec.content()...)
        case rec.h.Type == FCGI_END_REQUEST:
            fallthrough
        default:
            break
        }
    }

    return
}
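A usage sketch for the FastCGI client above, mirroring the call phpfpm.go makes. The host, port, and status path are assumptions (a php-fpm pool listening on 127.0.0.1:9000 with `pm.status_path = /status`), not values from the plugin.

```go
package phpfpm

import "fmt"

// exampleFcgiStatus is illustrative only: query a pool's status page over FastCGI.
func exampleFcgiStatus() error {
	client, err := NewClient("127.0.0.1", 9000) // assumed pool address
	if err != nil {
		return err
	}
	out, stderr, err := client.Request(map[string]string{
		"SCRIPT_NAME":     "/status",
		"SCRIPT_FILENAME": "status",
		"REQUEST_METHOD":  "GET",
	}, "")
	if err != nil || len(stderr) > 0 {
		return fmt.Errorf("fcgi status request failed: %v", err)
	}
	fmt.Printf("%s\n", out) // raw status payload, ready for importMetric
	return nil
}
```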
79  plugins/inputs/phpfpm/phpfpm_test.go  Normal file
@@ -0,0 +1,79 @@
package phpfpm

import (
    "fmt"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "net/http"
    "net/http/httptest"
)

func TestPhpFpmGeneratesMetrics(t *testing.T) {
    // We create a fake server to return test data
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, outputSample)
    }))
    defer ts.Close()

    // Now we test against the above server
    r := &phpfpm{
        Urls: []string{ts.URL},
    }

    var acc testutil.Accumulator

    err := r.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "url":  ts.Listener.Addr().String(),
        "pool": "www",
    }

    fields := map[string]interface{}{
        "accepted_conn":        int64(3),
        "listen_queue":         int64(1),
        "max_listen_queue":     int64(0),
        "listen_queue_len":     int64(0),
        "idle_processes":       int64(1),
        "active_processes":     int64(1),
        "total_processes":      int64(2),
        "max_active_processes": int64(1),
        "max_children_reached": int64(2),
        "slow_requests":        int64(1),
    }

    acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
}

// When not passing server config, we default to localhost
// We just want to make sure we did request stats from localhost
func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
    r := &phpfpm{}

    var acc testutil.Accumulator

    err := r.Gather(&acc)
    require.Error(t, err)
    assert.Contains(t, err.Error(), "127.0.0.1/status")
}

const outputSample = `
pool: www
process manager: dynamic
start time: 11/Oct/2015:23:38:51 +0000
start since: 1991
accepted conn: 3
listen queue: 1
max listen queue: 0
listen queue len: 0
idle processes: 1
active processes: 1
total processes: 2
max active processes: 1
max children reached: 2
slow requests: 1
`
180  plugins/inputs/ping/ping.go  Normal file
@@ -0,0 +1,180 @@
package ping

import (
    "errors"
    "os/exec"
    "strconv"
    "strings"
    "sync"

    "github.com/influxdb/telegraf/plugins/inputs"
)

// HostPinger is a function that runs the "ping" function using a list of
// passed arguments. This can be easily switched with a mocked ping function
// for unit test purposes (see ping_test.go)
type HostPinger func(args ...string) (string, error)

type Ping struct {
    // Interval at which to ping (ping -i <INTERVAL>)
    PingInterval float64 `toml:"ping_interval"`

    // Number of pings to send (ping -c <COUNT>)
    Count int

    // Ping timeout, in seconds. 0 means no timeout (ping -t <TIMEOUT>)
    Timeout float64

    // Interface to send ping from (ping -I <INTERFACE>)
    Interface string

    // URLs to ping
    Urls []string

    // host ping function
    pingHost HostPinger
}

func (_ *Ping) Description() string {
    return "Ping given url(s) and return statistics"
}

var sampleConfig = `
  # urls to ping
  urls = ["www.google.com"] # required
  # number of pings to send (ping -c <COUNT>)
  count = 1 # required
  # interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
  ping_interval = 0.0
  # ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
  timeout = 0.0
  # interface to send ping from (ping -I <INTERFACE>)
  interface = ""
`

func (_ *Ping) SampleConfig() string {
    return sampleConfig
}

func (p *Ping) Gather(acc inputs.Accumulator) error {

    var wg sync.WaitGroup
    errorChannel := make(chan error, len(p.Urls)*2)

    // Spin off a go routine for each url to ping
    for _, url := range p.Urls {
        wg.Add(1)
        go func(url string, acc inputs.Accumulator) {
            defer wg.Done()
            args := p.args(url)
            out, err := p.pingHost(args...)
            if err != nil {
                // Combine go err + stderr output
                errorChannel <- errors.New(
                    strings.TrimSpace(out) + ", " + err.Error())
            }
            tags := map[string]string{"url": url}
            trans, rec, avg, err := processPingOutput(out)
            if err != nil {
                // fatal error
                errorChannel <- err
                return
            }
            // Calculate packet loss percentage
            loss := float64(trans-rec) / float64(trans) * 100.0
            fields := map[string]interface{}{
                "packets_transmitted": trans,
                "packets_received":    rec,
                "percent_packet_loss": loss,
                "average_response_ms": avg,
            }
            acc.AddFields("ping", fields, tags)
        }(url, acc)
    }

    wg.Wait()
    close(errorChannel)

    // Get all errors and return them as one giant error
    errorStrings := []string{}
    for err := range errorChannel {
        errorStrings = append(errorStrings, err.Error())
    }

    if len(errorStrings) == 0 {
        return nil
    }
    return errors.New(strings.Join(errorStrings, "\n"))
}

func hostPinger(args ...string) (string, error) {
    c := exec.Command("ping", args...)
    out, err := c.CombinedOutput()
    return string(out), err
}

// args returns the arguments for the 'ping' executable
func (p *Ping) args(url string) []string {
    // Build the ping command args based on toml config
    args := []string{"-c", strconv.Itoa(p.Count)}
    if p.PingInterval > 0 {
        args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', 1, 64))
    }
    if p.Timeout > 0 {
        args = append(args, "-t", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
    }
    if p.Interface != "" {
        args = append(args, "-I", p.Interface)
    }
    args = append(args, url)
    return args
}

// processPingOutput takes in a string output from the ping command, like:
//
//     PING www.google.com (173.194.115.84): 56 data bytes
//     64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms
//     64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms
//
//     --- www.google.com ping statistics ---
//     2 packets transmitted, 2 packets received, 0.0% packet loss
//     round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms
//
// It returns (<transmitted packets>, <received packets>, <average response>)
func processPingOutput(out string) (int, int, float64, error) {
    var trans, recv int
    var avg float64
    // Set this error to nil if we find a 'transmitted' line
    err := errors.New("Fatal error processing ping output")
    lines := strings.Split(out, "\n")
    for _, line := range lines {
        if strings.Contains(line, "transmitted") &&
            strings.Contains(line, "received") {
            err = nil
            stats := strings.Split(line, ", ")
            // Transmitted packets
            trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0])
            if err != nil {
                return trans, recv, avg, err
            }
            // Received packets
            recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0])
            if err != nil {
                return trans, recv, avg, err
            }
        } else if strings.Contains(line, "min/avg/max") {
            stats := strings.Split(line, " = ")[1]
            avg, err = strconv.ParseFloat(strings.Split(stats, "/")[1], 64)
            if err != nil {
                return trans, recv, avg, err
            }
        }
    }
    return trans, recv, avg, err
}

func init() {
    inputs.Add("ping", func() inputs.Input {
        return &Ping{pingHost: hostPinger}
    })
}
222  plugins/inputs/ping/ping_test.go  Normal file
@@ -0,0 +1,222 @@
package ping

import (
    "errors"
    "reflect"
    "sort"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
)

// BSD/Darwin ping output
var bsdPingOutput = `
PING www.google.com (216.58.217.36): 56 data bytes
64 bytes from 216.58.217.36: icmp_seq=0 ttl=55 time=15.087 ms
64 bytes from 216.58.217.36: icmp_seq=1 ttl=55 time=21.564 ms
64 bytes from 216.58.217.36: icmp_seq=2 ttl=55 time=27.263 ms
64 bytes from 216.58.217.36: icmp_seq=3 ttl=55 time=18.828 ms
64 bytes from 216.58.217.36: icmp_seq=4 ttl=55 time=18.378 ms

--- www.google.com ping statistics ---
5 packets transmitted, 5 packets received, 0.0% packet loss
round-trip min/avg/max/stddev = 15.087/20.224/27.263/4.076 ms
`

// Linux ping output
var linuxPingOutput = `
PING www.google.com (216.58.218.164) 56(84) bytes of data.
64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms
64 bytes from host.net (216.58.218.164): icmp_seq=2 ttl=63 time=42.3 ms
64 bytes from host.net (216.58.218.164): icmp_seq=3 ttl=63 time=45.1 ms
64 bytes from host.net (216.58.218.164): icmp_seq=4 ttl=63 time=43.5 ms
64 bytes from host.net (216.58.218.164): icmp_seq=5 ttl=63 time=51.8 ms

--- www.google.com ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 4010ms
rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms
`

// Fatal ping output (invalid argument)
var fatalPingOutput = `
ping: -i interval too short: Operation not permitted
`

// Test that ping command output is processed properly
func TestProcessPingOutput(t *testing.T) {
    trans, rec, avg, err := processPingOutput(bsdPingOutput)
    assert.NoError(t, err)
    assert.Equal(t, 5, trans, "5 packets were transmitted")
    assert.Equal(t, 5, rec, "5 packets were received")
    assert.InDelta(t, 20.224, avg, 0.001)

    trans, rec, avg, err = processPingOutput(linuxPingOutput)
    assert.NoError(t, err)
    assert.Equal(t, 5, trans, "5 packets were transmitted")
    assert.Equal(t, 5, rec, "5 packets were received")
    assert.InDelta(t, 43.628, avg, 0.001)
}

// Test that processPingOutput returns an error when 'ping' fails to run, such
// as when an invalid argument is provided
func TestErrorProcessPingOutput(t *testing.T) {
    _, _, _, err := processPingOutput(fatalPingOutput)
    assert.Error(t, err, "Error was expected from processPingOutput")
}

// Test that arg lists are created correctly
func TestArgs(t *testing.T) {
    p := Ping{
        Count: 2,
    }

    // Actual and Expected arg lists must be sorted for reflect.DeepEqual

    actual := p.args("www.google.com")
    expected := []string{"-c", "2", "www.google.com"}
    sort.Strings(actual)
    sort.Strings(expected)
    assert.True(t, reflect.DeepEqual(expected, actual),
        "Expected: %s Actual: %s", expected, actual)

    p.Interface = "eth0"
    actual = p.args("www.google.com")
    expected = []string{"-c", "2", "-I", "eth0", "www.google.com"}
    sort.Strings(actual)
    sort.Strings(expected)
    assert.True(t, reflect.DeepEqual(expected, actual),
        "Expected: %s Actual: %s", expected, actual)

    p.Timeout = 12.0
    actual = p.args("www.google.com")
    expected = []string{"-c", "2", "-I", "eth0", "-t", "12.0", "www.google.com"}
    sort.Strings(actual)
    sort.Strings(expected)
    assert.True(t, reflect.DeepEqual(expected, actual),
        "Expected: %s Actual: %s", expected, actual)

    p.PingInterval = 1.2
    actual = p.args("www.google.com")
    expected = []string{"-c", "2", "-I", "eth0", "-t", "12.0", "-i", "1.2",
        "www.google.com"}
    sort.Strings(actual)
    sort.Strings(expected)
    assert.True(t, reflect.DeepEqual(expected, actual),
        "Expected: %s Actual: %s", expected, actual)
}

func mockHostPinger(args ...string) (string, error) {
    return linuxPingOutput, nil
}

// Test that Gather function works on a normal ping
func TestPingGather(t *testing.T) {
    var acc testutil.Accumulator
    p := Ping{
        Urls:     []string{"www.google.com", "www.reddit.com"},
        pingHost: mockHostPinger,
    }

    p.Gather(&acc)
    tags := map[string]string{"url": "www.google.com"}
    fields := map[string]interface{}{
        "packets_transmitted": 5,
        "packets_received":    5,
        "percent_packet_loss": 0.0,
        "average_response_ms": 43.628,
    }
    acc.AssertContainsTaggedFields(t, "ping", fields, tags)

    tags = map[string]string{"url": "www.reddit.com"}
    acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}

var lossyPingOutput = `
PING www.google.com (216.58.218.164) 56(84) bytes of data.
64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms
64 bytes from host.net (216.58.218.164): icmp_seq=3 ttl=63 time=45.1 ms
64 bytes from host.net (216.58.218.164): icmp_seq=5 ttl=63 time=51.8 ms

--- www.google.com ping statistics ---
5 packets transmitted, 3 received, 40% packet loss, time 4010ms
rtt min/avg/max/mdev = 35.225/44.033/51.806/5.325 ms
`

func mockLossyHostPinger(args ...string) (string, error) {
    return lossyPingOutput, nil
}

// Test that Gather works on a ping with lossy packets
func TestLossyPingGather(t *testing.T) {
    var acc testutil.Accumulator
    p := Ping{
        Urls:     []string{"www.google.com"},
        pingHost: mockLossyHostPinger,
    }

    p.Gather(&acc)
    tags := map[string]string{"url": "www.google.com"}
    fields := map[string]interface{}{
        "packets_transmitted": 5,
        "packets_received":    3,
        "percent_packet_loss": 40.0,
        "average_response_ms": 44.033,
    }
    acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}

var errorPingOutput = `
PING www.amazon.com (176.32.98.166): 56 data bytes
Request timeout for icmp_seq 0

--- www.amazon.com ping statistics ---
2 packets transmitted, 0 packets received, 100.0% packet loss
`

func mockErrorHostPinger(args ...string) (string, error) {
    return errorPingOutput, errors.New("No packets received")
}

// Test that Gather works on a ping with no transmitted packets, even though the
// command returns an error
func TestBadPingGather(t *testing.T) {
    var acc testutil.Accumulator
    p := Ping{
        Urls:     []string{"www.amazon.com"},
        pingHost: mockErrorHostPinger,
    }

    p.Gather(&acc)
    tags := map[string]string{"url": "www.amazon.com"}
    fields := map[string]interface{}{
        "packets_transmitted": 2,
        "packets_received":    0,
        "percent_packet_loss": 100.0,
        "average_response_ms": 0.0,
    }
    acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}

func mockFatalHostPinger(args ...string) (string, error) {
    return fatalPingOutput, errors.New("So very bad")
}

// Test that a fatal ping command does not gather any statistics.
func TestFatalPingGather(t *testing.T) {
    var acc testutil.Accumulator
    p := Ping{
        Urls:     []string{"www.amazon.com"},
        pingHost: mockFatalHostPinger,
    }

    p.Gather(&acc)
    assert.False(t, acc.HasMeasurement("packets_transmitted"),
        "Fatal ping should not have packet measurements")
    assert.False(t, acc.HasMeasurement("packets_received"),
        "Fatal ping should not have packet measurements")
    assert.False(t, acc.HasMeasurement("percent_packet_loss"),
        "Fatal ping should not have packet measurements")
    assert.False(t, acc.HasMeasurement("average_response_ms"),
        "Fatal ping should not have packet measurements")
}
30  plugins/inputs/postgresql/README.md  Normal file
@@ -0,0 +1,30 @@
# PostgreSQL plugin

This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built-in _pg_stat_database_ view. The metrics recorded depend on your version of postgres. See table:
```
pg version      9.2+   9.1   8.3-9.0   8.1-8.2   7.4-8.0(unsupported)
---             ---    ---   -------   -------   -------
datid*            x     x       x         x
datname*          x     x       x         x
numbackends       x     x       x         x         x
xact_commit       x     x       x         x         x
xact_rollback     x     x       x         x         x
blks_read         x     x       x         x         x
blks_hit          x     x       x         x         x
tup_returned      x     x       x
tup_fetched       x     x       x
tup_inserted      x     x       x
tup_updated       x     x       x
tup_deleted       x     x       x
conflicts         x     x
temp_files        x
temp_bytes        x
deadlocks         x
blk_read_time     x
blk_write_time    x
stats_reset*      x     x
```

_* value ignored and therefore not recorded._

More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW)
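For reference, a minimal sketch of querying the same built-in view the plugin reads; the connection string is a placeholder and only a few of the columns from the table above are selected here.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Placeholder DSN; the plugin builds this from its `address` setting.
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query(`SELECT datname, numbackends, xact_commit FROM pg_stat_database`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		var backends, commits int64
		if err := rows.Scan(&name, &backends, &commits); err != nil {
			log.Fatal(err)
		}
		fmt.Println(name, backends, commits)
	}
}
```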
151  plugins/inputs/postgresql/postgresql.go  Normal file
@@ -0,0 +1,151 @@
package postgresql

import (
    "bytes"
    "database/sql"
    "fmt"
    "strings"

    "github.com/influxdb/telegraf/plugins/inputs"

    _ "github.com/lib/pq"
)

type Postgresql struct {
    Address        string
    Databases      []string
    OrderedColumns []string
}

var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}

var sampleConfig = `
  # specify address via a url matching:
  #   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
  # or a simple string:
  #   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  #
  # All connection parameters are optional.
  #
  # Without the dbname parameter, the driver will default to a database
  # with the same name as the user. This dbname is just for instantiating a
  # connection with the server and doesn't restrict the databases we are trying
  # to grab metrics for.
  #
  address = "host=localhost user=postgres sslmode=disable"

  # A list of databases to pull metrics about. If not specified, metrics for all
  # databases are gathered.
  # databases = ["app_production", "testing"]
`

func (p *Postgresql) SampleConfig() string {
    return sampleConfig
}

func (p *Postgresql) Description() string {
    return "Read metrics from one or many postgresql servers"
}

func (p *Postgresql) IgnoredColumns() map[string]bool {
    return ignoredColumns
}

var localhost = "host=localhost sslmode=disable"

func (p *Postgresql) Gather(acc inputs.Accumulator) error {
    var query string

    if p.Address == "" || p.Address == "localhost" {
        p.Address = localhost
    }

    db, err := sql.Open("postgres", p.Address)
    if err != nil {
        return err
    }

    defer db.Close()

    if len(p.Databases) == 0 {
        query = `SELECT * FROM pg_stat_database`
    } else {
        query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`,
            strings.Join(p.Databases, "','"))
    }

    rows, err := db.Query(query)
    if err != nil {
        return err
    }

    defer rows.Close()

    // grab the column information from the result
    p.OrderedColumns, err = rows.Columns()
    if err != nil {
        return err
    }

    for rows.Next() {
        err = p.accRow(rows, acc)
        if err != nil {
            return err
        }
    }

    return rows.Err()
}

type scanner interface {
    Scan(dest ...interface{}) error
}

func (p *Postgresql) accRow(row scanner, acc inputs.Accumulator) error {
    var columnVars []interface{}
    var dbname bytes.Buffer

    // this is where we'll store the column name with its *interface{}
    columnMap := make(map[string]*interface{})

    for _, column := range p.OrderedColumns {
        columnMap[column] = new(interface{})
    }

    // populate the array of interface{} with the pointers in the right order
    for i := 0; i < len(columnMap); i++ {
        columnVars = append(columnVars, columnMap[p.OrderedColumns[i]])
    }

    // deconstruct array of variables and send to Scan
    err := row.Scan(columnVars...)

    if err != nil {
        return err
    }

    // extract the database name from the column map
    dbnameChars := (*columnMap["datname"]).([]uint8)
    for i := 0; i < len(dbnameChars); i++ {
        dbname.WriteString(string(dbnameChars[i]))
    }

    tags := map[string]string{"server": p.Address, "db": dbname.String()}

    fields := make(map[string]interface{})
    for col, val := range columnMap {
        _, ignore := ignoredColumns[col]
        if !ignore {
            fields[col] = *val
        }
    }
    acc.AddFields("postgresql", fields, tags)

    return nil
}

func init() {
    inputs.Add("postgresql", func() inputs.Input {
        return &Postgresql{}
    })
}
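The accRow approach above (one `*interface{}` per column, keyed by column name) generalizes to any query whose columns are only known at run time. A standalone sketch of the same pattern, assuming `rows` came from `db.Query`; it is not part of the plugin.

```go
package postgresql

import "database/sql"

// scanDynamic scans a row whose columns are only known at run time,
// using the same *interface{}-per-column pattern as accRow above.
func scanDynamic(rows *sql.Rows) (map[string]interface{}, error) {
	cols, err := rows.Columns()
	if err != nil {
		return nil, err
	}
	vals := make([]interface{}, len(cols))
	for i := range vals {
		vals[i] = new(interface{}) // one *interface{} per column
	}
	if err := rows.Scan(vals...); err != nil {
		return nil, err
	}
	out := make(map[string]interface{}, len(cols))
	for i, c := range cols {
		out[c] = *(vals[i].(*interface{}))
	}
	return out, nil
}
```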
146  plugins/inputs/postgresql/postgresql_test.go  Normal file
@@ -0,0 +1,146 @@
package postgresql

import (
    "fmt"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestPostgresqlGeneratesMetrics(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &Postgresql{
        Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
            testutil.GetLocalHost()),
        Databases: []string{"postgres"},
    }

    var acc testutil.Accumulator

    err := p.Gather(&acc)
    require.NoError(t, err)

    availableColumns := make(map[string]bool)
    for _, col := range p.OrderedColumns {
        availableColumns[col] = true
    }

    intMetrics := []string{
        "xact_commit",
        "xact_rollback",
        "blks_read",
        "blks_hit",
        "tup_returned",
        "tup_fetched",
        "tup_inserted",
        "tup_updated",
        "tup_deleted",
        "conflicts",
        "temp_files",
        "temp_bytes",
        "deadlocks",
        "numbackends",
    }

    floatMetrics := []string{
        "blk_read_time",
        "blk_write_time",
    }

    metricsCounted := 0

    for _, metric := range intMetrics {
        _, ok := availableColumns[metric]
        if ok {
            assert.True(t, acc.HasIntField("postgresql", metric))
            metricsCounted++
        }
    }

    for _, metric := range floatMetrics {
        _, ok := availableColumns[metric]
        if ok {
            assert.True(t, acc.HasFloatField("postgresql", metric))
            metricsCounted++
        }
    }

    assert.True(t, metricsCounted > 0)
    assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted)
}

func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &Postgresql{
        Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
            testutil.GetLocalHost()),
        Databases: []string{"postgres"},
    }

    var acc testutil.Accumulator

    err := p.Gather(&acc)
    require.NoError(t, err)

    point, ok := acc.Get("postgresql")
    require.True(t, ok)

    assert.Equal(t, "postgres", point.Tags["db"])
}

func TestPostgresqlDefaultsToAllDatabases(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &Postgresql{
        Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
            testutil.GetLocalHost()),
    }

    var acc testutil.Accumulator

    err := p.Gather(&acc)
    require.NoError(t, err)

    var found bool

    for _, pnt := range acc.Points {
        if pnt.Measurement == "postgresql" {
            if pnt.Tags["db"] == "postgres" {
                found = true
                break
            }
        }
    }

    assert.True(t, found)
}

func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &Postgresql{
        Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
            testutil.GetLocalHost()),
    }

    var acc testutil.Accumulator

    err := p.Gather(&acc)
    require.NoError(t, err)

    for col := range p.IgnoredColumns() {
        assert.False(t, acc.HasMeasurement(col))
    }
}
72 plugins/inputs/procstat/README.md Normal file
@@ -0,0 +1,72 @@
# Telegraf plugin: procstat

#### Description

The procstat plugin can be used to monitor the system resource usage of
individual processes using their /proc data.

The plugin tags each process with its PID and its process name.

Processes can be specified either by pid file or by executable name. When an
executable name is provided, the plugin uses `pgrep` to obtain the pid. The
procstat plugin transmits IO, memory, CPU, and file descriptor related
measurements for every process specified. A prefix can be set to isolate
the measurements of an individual process.

Example:

```
[procstat]

  [[procstat.specifications]]
    exe = "influxd"
    prefix = "influxd"

  [[procstat.specifications]]
    pid_file = "/var/run/lxc/dnsmasq.pid"
```

The above configuration would result in output like:

```
[...]
> [name="dnsmasq" pid="44979"] procstat_cpu_time_user value=0.14
> [name="dnsmasq" pid="44979"] procstat_cpu_time_system value=0.07
[...]
> [name="influxd" pid="34337"] procstat_influxd_cpu_time_user value=25.43
> [name="influxd" pid="34337"] procstat_influxd_cpu_time_system value=21.82
```

# Measurements
Note: the prefix can be set by the user, per process.

File descriptor related measurement names:
- procstat_[prefix_]num_fds value=4

Context switch related measurement names:
- procstat_[prefix_]voluntary_context_switches value=250
- procstat_[prefix_]involuntary_context_switches value=0

I/O related measurement names:
- procstat_[prefix_]read_count value=396
- procstat_[prefix_]write_count value=1
- procstat_[prefix_]read_bytes value=1019904
- procstat_[prefix_]write_bytes value=1

CPU related measurement names:
- procstat_[prefix_]cpu_time_user value=0
- procstat_[prefix_]cpu_time_system value=0.01
- procstat_[prefix_]cpu_time_idle value=0
- procstat_[prefix_]cpu_time_nice value=0
- procstat_[prefix_]cpu_time_iowait value=0
- procstat_[prefix_]cpu_time_irq value=0
- procstat_[prefix_]cpu_time_soft_irq value=0
- procstat_[prefix_]cpu_time_soft_steal value=0
- procstat_[prefix_]cpu_time_soft_stolen value=0
- procstat_[prefix_]cpu_time_soft_guest value=0
- procstat_[prefix_]cpu_time_soft_guest_nice value=0

Memory related measurement names:
- procstat_[prefix_]memory_rss value=1777664
- procstat_[prefix_]memory_vms value=24227840
- procstat_[prefix_]memory_swap value=282624
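The `[prefix_]` notation above means the optional per-process prefix is spliced into the field name; the measurement itself is always `procstat`. A tiny sketch of that naming rule (illustrative only, mirroring the plugin's behaviour):

```go
package main

import "fmt"

// fieldName mirrors the "[prefix_]" convention above: an empty prefix
// yields the bare metric name, otherwise "prefix_metric".
func fieldName(prefix, metric string) string {
	if prefix == "" {
		return metric
	}
	return prefix + "_" + metric
}

func main() {
	fmt.Println(fieldName("", "cpu_time_user"))        // cpu_time_user
	fmt.Println(fieldName("influxd", "cpu_time_user")) // influxd_cpu_time_user
}
```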
167 plugins/inputs/procstat/procstat.go Normal file
@@ -0,0 +1,167 @@
package procstat

import (
	"fmt"
	"io/ioutil"
	"log"
	"os/exec"
	"strconv"
	"strings"

	"github.com/shirou/gopsutil/process"

	"github.com/influxdb/telegraf/plugins/inputs"
)

type Procstat struct {
	PidFile string `toml:"pid_file"`
	Exe     string
	Pattern string
	Prefix  string
}

func NewProcstat() *Procstat {
	return &Procstat{}
}

var sampleConfig = `
  # Must specify one of: pid_file, exe, or pattern
  # PID file to monitor process
  pid_file = "/var/run/nginx.pid"
  # executable name (ie, pgrep <exe>)
  # exe = "nginx"
  # pattern as argument for pgrep (ie, pgrep -f <pattern>)
  # pattern = "nginx"

  # Field name prefix
  prefix = ""
`

func (_ *Procstat) SampleConfig() string {
	return sampleConfig
}

func (_ *Procstat) Description() string {
	return "Monitor process cpu and memory usage"
}

func (p *Procstat) Gather(acc inputs.Accumulator) error {
	procs, err := p.createProcesses()
	if err != nil {
		log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
			p.Exe, p.PidFile, p.Pattern, err.Error())
	} else {
		for _, proc := range procs {
			p := NewSpecProcessor(p.Prefix, acc, proc)
			p.pushMetrics()
		}
	}

	return nil
}

func (p *Procstat) createProcesses() ([]*process.Process, error) {
	var out []*process.Process
	var errstring string
	var outerr error

	pids, err := p.getAllPids()
	if err != nil {
		errstring += err.Error() + " "
	}

	for _, pid := range pids {
		p, err := process.NewProcess(int32(pid))
		if err == nil {
			out = append(out, p)
		} else {
			errstring += err.Error() + " "
		}
	}

	if errstring != "" {
		outerr = fmt.Errorf("%s", errstring)
	}

	return out, outerr
}

func (p *Procstat) getAllPids() ([]int32, error) {
	var pids []int32
	var err error

	if p.PidFile != "" {
		pids, err = pidsFromFile(p.PidFile)
	} else if p.Exe != "" {
		pids, err = pidsFromExe(p.Exe)
	} else if p.Pattern != "" {
		pids, err = pidsFromPattern(p.Pattern)
	} else {
		err = fmt.Errorf("Either exe, pid_file or pattern has to be specified")
	}

	return pids, err
}

func pidsFromFile(file string) ([]int32, error) {
	var out []int32
	var outerr error
	pidString, err := ioutil.ReadFile(file)
	if err != nil {
		outerr = fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", file, err)
	} else {
		pid, err := strconv.Atoi(strings.TrimSpace(string(pidString)))
		if err != nil {
			outerr = err
		} else {
			out = append(out, int32(pid))
		}
	}
	return out, outerr
}

func pidsFromExe(exe string) ([]int32, error) {
	var out []int32
	var outerr error
	pgrep, err := exec.Command("pgrep", exe).Output()
	if err != nil {
		return out, fmt.Errorf("Failed to execute pgrep. Error: '%s'", err)
	}
	pids := strings.Fields(string(pgrep))
	for _, pid := range pids {
		ipid, err := strconv.Atoi(pid)
		if err == nil {
			out = append(out, int32(ipid))
		} else {
			outerr = err
		}
	}
	return out, outerr
}

func pidsFromPattern(pattern string) ([]int32, error) {
	var out []int32
	var outerr error
	pgrep, err := exec.Command("pgrep", "-f", pattern).Output()
	if err != nil {
		return out, fmt.Errorf("Failed to execute pgrep. Error: '%s'", err)
	}
	pids := strings.Fields(string(pgrep))
	for _, pid := range pids {
		ipid, err := strconv.Atoi(pid)
		if err == nil {
			out = append(out, int32(ipid))
		} else {
			outerr = err
		}
	}
	return out, outerr
}

func init() {
	inputs.Add("procstat", func() inputs.Input {
		return NewProcstat()
	})
}
30 plugins/inputs/procstat/procstat_test.go Normal file
@@ -0,0 +1,30 @@
package procstat

import (
	"io/ioutil"
	"os"
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/influxdb/telegraf/testutil"
)

func TestGather(t *testing.T) {
	var acc testutil.Accumulator
	pid := os.Getpid()
	file, err := ioutil.TempFile(os.TempDir(), "telegraf")
	require.NoError(t, err)
	file.Write([]byte(strconv.Itoa(pid)))
	file.Close()
	defer os.Remove(file.Name())
	p := Procstat{
		PidFile: file.Name(),
		Prefix:  "foo",
	}
	require.NoError(t, p.Gather(&acc))
	assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user"))
	assert.True(t, acc.HasUIntField("procstat", "foo_memory_vms"))
}
133 plugins/inputs/procstat/spec_processor.go Normal file
@@ -0,0 +1,133 @@
package procstat

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/process"

	"github.com/influxdb/telegraf/plugins/inputs"
)

type SpecProcessor struct {
	Prefix string
	tags   map[string]string
	fields map[string]interface{}
	acc    inputs.Accumulator
	proc   *process.Process
}

func (p *SpecProcessor) add(metric string, value interface{}) {
	var mname string
	if p.Prefix == "" {
		mname = metric
	} else {
		mname = p.Prefix + "_" + metric
	}
	p.fields[mname] = value
}

func (p *SpecProcessor) flush() {
	p.acc.AddFields("procstat", p.fields, p.tags)
	p.fields = make(map[string]interface{})
}

func NewSpecProcessor(
	prefix string,
	acc inputs.Accumulator,
	p *process.Process,
) *SpecProcessor {
	tags := make(map[string]string)
	tags["pid"] = fmt.Sprintf("%v", p.Pid)
	if name, err := p.Name(); err == nil {
		tags["name"] = name
	}
	return &SpecProcessor{
		Prefix: prefix,
		tags:   tags,
		fields: make(map[string]interface{}),
		acc:    acc,
		proc:   p,
	}
}

func (p *SpecProcessor) pushMetrics() {
	if err := p.pushFDStats(); err != nil {
		log.Printf("procstat, fd stats not available: %s", err.Error())
	}
	if err := p.pushCtxStats(); err != nil {
		log.Printf("procstat, ctx stats not available: %s", err.Error())
	}
	if err := p.pushIOStats(); err != nil {
		log.Printf("procstat, io stats not available: %s", err.Error())
	}
	if err := p.pushCPUStats(); err != nil {
		log.Printf("procstat, cpu stats not available: %s", err.Error())
	}
	if err := p.pushMemoryStats(); err != nil {
		log.Printf("procstat, mem stats not available: %s", err.Error())
	}
	p.flush()
}

func (p *SpecProcessor) pushFDStats() error {
	fds, err := p.proc.NumFDs()
	if err != nil {
		return fmt.Errorf("NumFD error: %s", err)
	}
	p.add("num_fds", fds)
	return nil
}

func (p *SpecProcessor) pushCtxStats() error {
	ctx, err := p.proc.NumCtxSwitches()
	if err != nil {
		return fmt.Errorf("ContextSwitch error: %s", err)
	}
	p.add("voluntary_context_switches", ctx.Voluntary)
	p.add("involuntary_context_switches", ctx.Involuntary)
	return nil
}

func (p *SpecProcessor) pushIOStats() error {
	io, err := p.proc.IOCounters()
	if err != nil {
		return fmt.Errorf("IOCounters error: %s", err)
	}
	p.add("read_count", io.ReadCount)
	p.add("write_count", io.WriteCount)
	p.add("read_bytes", io.ReadBytes)
	// report actual bytes written, not the write call count
	p.add("write_bytes", io.WriteBytes)
	return nil
}

func (p *SpecProcessor) pushCPUStats() error {
	cpu_time, err := p.proc.CPUTimes()
	if err != nil {
		return err
	}
	p.add("cpu_time_user", cpu_time.User)
	p.add("cpu_time_system", cpu_time.System)
	p.add("cpu_time_idle", cpu_time.Idle)
	p.add("cpu_time_nice", cpu_time.Nice)
	p.add("cpu_time_iowait", cpu_time.Iowait)
	p.add("cpu_time_irq", cpu_time.Irq)
	p.add("cpu_time_soft_irq", cpu_time.Softirq)
	p.add("cpu_time_soft_steal", cpu_time.Steal)
	p.add("cpu_time_soft_stolen", cpu_time.Stolen)
	p.add("cpu_time_soft_guest", cpu_time.Guest)
	p.add("cpu_time_soft_guest_nice", cpu_time.GuestNice)

	return nil
}

func (p *SpecProcessor) pushMemoryStats() error {
	mem, err := p.proc.MemoryInfo()
	if err != nil {
		return err
	}
	p.add("memory_rss", mem.RSS)
	p.add("memory_vms", mem.VMS)
	p.add("memory_swap", mem.Swap)
	return nil
}
103 plugins/inputs/prometheus/prometheus.go Normal file
@@ -0,0 +1,103 @@
package prometheus

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"sync"

	"github.com/influxdb/telegraf/plugins/inputs"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

type Prometheus struct {
	Urls []string
}

var sampleConfig = `
  # An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]
`

func (r *Prometheus) SampleConfig() string {
	return sampleConfig
}

func (r *Prometheus) Description() string {
	return "Read metrics from one or many prometheus clients"
}

var ErrProtocolError = errors.New("prometheus protocol error")

// Gather reads stats from all configured servers and accumulates them.
// It returns one of the errors encountered while gathering stats (if any).
func (g *Prometheus) Gather(acc inputs.Accumulator) error {
	var wg sync.WaitGroup

	// mu guards outerr: the per-URL goroutines may report errors concurrently.
	var mu sync.Mutex
	var outerr error

	for _, serv := range g.Urls {
		wg.Add(1)
		go func(serv string) {
			defer wg.Done()
			err := g.gatherURL(serv, acc)
			mu.Lock()
			if err != nil {
				outerr = err
			}
			mu.Unlock()
		}(serv)
	}

	wg.Wait()

	return outerr
}

func (g *Prometheus) gatherURL(url string, acc inputs.Accumulator) error {
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", url, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
	}
	format := expfmt.ResponseFormat(resp.Header)

	decoder := expfmt.NewDecoder(resp.Body, format)

	options := &expfmt.DecodeOptions{
		Timestamp: model.Now(),
	}
	sampleDecoder := &expfmt.SampleDecoder{
		Dec:  decoder,
		Opts: options,
	}

	for {
		var samples model.Vector
		err := sampleDecoder.Decode(&samples)
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error processing samples for %s: %s",
				url, err)
		}
		for _, sample := range samples {
			tags := make(map[string]string)
			for key, value := range sample.Metric {
				if key == model.MetricNameLabel {
					continue
				}
				tags[string(key)] = string(value)
			}
			acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]),
				float64(sample.Value), tags)
		}
	}

	return nil
}

func init() {
	inputs.Add("prometheus", func() inputs.Input {
		return &Prometheus{}
	})
}
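For a feel of what the decode loop in `gatherURL` receives, here is a minimal sketch that feeds a fixed exposition-format payload into the same `expfmt` decoder instead of an HTTP response. The payload and the use of `expfmt.FmtText` are assumptions for illustration; a real scrape negotiates the format from the response headers, as the plugin does.

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// A hypothetical one-metric payload in the plain-text exposition format.
	payload := "# TYPE go_goroutines gauge\ngo_goroutines{job=\"telegraf\"} 42\n"

	dec := &expfmt.SampleDecoder{
		// FmtText is the plain-text exposition format.
		Dec:  expfmt.NewDecoder(strings.NewReader(payload), expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	for {
		var samples model.Vector
		err := dec.Decode(&samples)
		if err == io.EOF {
			break
		} else if err != nil {
			fmt.Println("decode error:", err)
			return
		}
		for _, s := range samples {
			// s.Metric carries the name plus labels; the plugin turns the
			// labels into tags and s.Value into a float field.
			fmt.Printf("%v = %v\n", s.Metric, s.Value)
		}
	}
}
```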
Some files were not shown because too many files have changed in this diff.