Compare commits
804 Commits
828231193f c3ae75730b 95bad9e55b e812a2efc6 411853fc74 b7d29ca0e9 947e1909ff 31a4f03031
81f95e7a29 2aa2c796e5 a658e6c509 5f6766f6e1 7279018cfe 4b08d127e0 fd1feff7b4 37bc9cf795
b762546fa7 bf5f2659a1 d2787e8ef5 a9f03a72f5 7fc57812a7 8a982ca68f 200237a515 0ae1e0611c
1392e73125 a90afd95c6 9866146545 8df325a68c 48ae105a11 4e808c5c20 eb96443a34 e36c354ff5
f09c08d1f3 0e8122a2fc 6723ea5fe6 e8bf968c78 9c8f24601f 4957717df5 21fac3ebec ecbc634221
90cec20d1d bcbf82f8e8 3a45d8851d 4a83c8c518 bc13d32d53 e6fc32bdf0 a970b9c62c 17b307a7bc
393f5044bb c630212dde f39db08c6d b4f9bc8745 5f06bd2566 8a4ab3654d e2f9617228 e097ae9632
07684fb030 17fa6f9b17 8e3fbaa9dd dede3e70ad 7558081873 6e241611be fc9f921b62 12db3b9120
b58926dd26 91143dda1a efb64a049f 4f6087a99d 6b0e863556 11bc82379c a093ec1eaa d71a42cd1b
d518d7d806 1d1afe6481 5a3f2e61f3 504f4e69db 9f6666beb3 af6e7b9531 6fd7361364 e5c7a71d8e
db7a4b24b6 332f678afb 04a2b36a52 f862c6585d 5c32521a07 9db30250c3 2b0cd2037b 536dbfb724
b77398c4d3 fbf5bee051 81004c808f 196509cc53 94ce67cc67 33ed528afe 2435e47926 ff67a4b96c
f816b952cf b905bc1b5d 0ecbf9e349 1c7715780e 5d3850c44e e84b356a12 b349800f7a 47de43abf3
7a9fef80f5 dc28875437 a6ed4d4c3a fe6162b2a1 34182d9c9f 16081b2d1a e43cfc2fce 137272afea
2150510bd4 fc59757a1a 0cfa0d419a 522658bd07 b1a97e35b9 c66363cba5 61269c3500 393d129982
80d4864844 f729fa990d 662db7a944 c849b58de9 097b1e09db babd37bf35 91f48e7ad5 a12bd878e0
a4e8f24b16 a65447d22e b00ad65b08 a84ce5d5cb 8ca4a50c18 03b2984ac2 9540a6532f cace663bbf
acfdd15aa9 78f544c0aa 2175a72fcc b03c1d9691 fead80844e ef885eda62 64a71263a1 974221f0cf
bccef2856d 80df3f7634 e96f7a9b12 2bbb6aa6f2 1ff721ad84 3e3b094270 1f7a8fceef b702a9758b
3b607aa8ae 4a4a6892f9 56b627dfe2 5c87b92976 dbcc312b0e 2d842fefb8 d63e3c8cc4 187a894fe9
ca55c4a55d d627bdbbdb 4f06f6b3d8 7f0fe78615 5913f7cb36 8dc42ad9f2 886bdd2ef2 5a86a2ff26
817d696628 4ab0344ebf 7b05170145 b48ad4b737 9feb639bbd ce5054c850 c7834209d2 78ced6bc30
ca8e512e5b 573628dbdd e477620dc5 32268fb25b 80391bfe1f e19845c202 52134555d6 e7e39df6a0
055ef168ae 2778b7be30 953db51b2c c043461f6c ddc07f9ef8 2cf1db0837 17e6496830 1d10eda84e
9ea3dbeee8 100501ba72 f12368698b 6b25a73629 90c7475c68 6648c101dd 8d3285522c b613405f42
e999298078 0f0ab953f6 aaddbd153e 9b2e2cc41f bc22309459 b6f81b538a c3aa43a6bd b2ea39077e
811567a2f4 ca8fb440cc ac58a6bb3c 9757d39240 5a9e7d77b8 e963b7f01b e7899d4dc5 301c79e57c
67c288abda 8dd2a8527a 2fe427b3b3 6b1cc67664 1271f9d71a 49ea4e9f39 50ef3282b6 b63dedb74d
5628049440 54c9ba7639 b18d375d6c 6dbbe65897 03d8abccdd 0f6d317a8e 792682590c 2d3da343b3
094eda22c0 4886109d9c 2dc47285bd 6e33a6d62f a8f9eb23cc 41a5ee6571 7d8de4b8e1 cc2b53abf4
32aa1cc814 38d877165a 5c5984bfe1 30cdc31a27 602a36e241 b863ee1d65 ca49babf3a cf37b5cdcf
969f388ef2 0589a1d0a5 4e019a176d a0e23d30fe e931706249 2457d95262 e9d33726a9 2462e04bf2
7fac74919c b022b5567d dbf6380e4b a0e42f8a61 94e673fe85 7600757f16 4ce8dd5f9a 26315bfbea
a282fb8524 dee98612e2 69e4e862a3 c0e895c3a7 fec9760f72 1989a5855d abcd19493e e457b7a8df
3853d0d065 53e31cf1b5 c99c22534b 8e22526756 7b6713b094 b0ef506a88 22c293de62 d3bb1e7010
49988b15a3 f0357b7a12 9d3ad6309e b55e9e78e3 4bc6fdb09e 2b43b385de 13865f9e04 497353e586
2d86dfba8b 30dbfd9af8 c991b579d2 841729c0f9 412f5b5acb 0b3958d3cd e68f251df7 986735234b
4363eebc1b 1be6ea5696 8acda0da8f ee240a5599 29ea433763 0462af164e 1c24665b29 0af0fa7c2e
191608041f 42d9d5d237 d54b169d67 82166a36d0 cbf5a55c7d 5f14ad9fa1 0be69b8a44 375710488d
03d02fa67a b58cd78c79 dabb6f5466 281a4d5500 1c2965703d 5dc4cce157 8c7edeb53b 1d9745ee98
2d6c8767f7 b4a6d9c647 6afe9ceef1 704d9ad76c 300d9adbd0 207c5498e7 d5e7439343 21add2c799
4651ab88ad 53f40063b3 97d92bba67 bfdd665435 821d3fafa6 7c9b312cee 69ab8a645c 7b550c11cb
bb4f18ca88 6efe91ea9c 5f0a63f554 d14e7536ab c873937356 e1c3800cd9 c046232425 2d4864e126
048448aa93 755b2ec953 f62c493c77 a6365a6086 f7e057ec55 30cc00d11b d641c42029 9c2ca805da
b0484d8a0c 5ddd61d2e2 50ea7f4a9d b18134a4e3 7825df4771 d6951dacdc e603825e37 e3448153e1
25848c545a 3098564896 4b6f9b93dd 2beef21231 cb3c54a1ae d50a1e83ac 1f10639222 af0979cce5
5b43901bd8 d7efb7a71d 4d242836ee 06cb5a041e ea2521bf27 4cd1f7a104 137843b2f6 008ed17a79
75e6cb9064 ad88a9421a 346deb30a3 8c3d7cd145 821b30eb92 a362352587 94f952787f 3ff184c061
80368e3936 2c448e22e1 1aabd38eb2 675457873a 8173338f8a c4841843a9 f08a27be5d a4b36d12dd
c842724b61 fb5f40319e 52b9fc837c 6f991ec78a 7921d87a45 9f7a758bf9 0aff7a0bc1 c4cfdb8a25
342cfc4087 bd1282eddf 892abec025 e809c4e445 9ff536d94d 4f27315720 958ef2f872 069764f05e
eeeab5192b a7dfbce3d3 ed2d1d9bb7 0fb2d2ffae 3af65e7abb 984b6cb0fb ca504a19ec c2797c85d1
d5add07c0b 0ebf1c1ad7 42d7fc5e16 6828fc48e1 98d91b1c89 9bbdb2d562 a8334c3261 9144f9630b
3e4a19539a 5fe7e6e40e 58f2ba1247 5f3a91bffd 6351aa5167 9966099d1a 1ef5599361 c78b6cdb4e
d736c7235a 475252d873 e103923430 cb59517ceb 1248934f3e 204ebf6bf6 52d5b19219 8e92d3a4a0
c44ecf54a5 c6699c36d3 d6ceae7005 4dcb82bf08 4f5d5926d9 3c5c3b98df 56aee1ceee f176c28a56
2e68bd1412 35eb65460d ab54064689 debf7bf149 1dbe3b8231 b065573e23 e94e50181c 69dfe63809
f32916a5bd be7ca56872 33cacc71b8 c292e3931a a87d6f0545 3a01b6d5b7 39df2635bd 08ecfb8a67
a59bf7246a 281296cd3f 61d190b1ae dc89f029ad 7557056a31 20c45a150c 46bf0ef271 a7b632eb5e
90a98c76a0 12357ee8c5 bb254fc2b9 aeadc2c43a ed492fe950 775daba8f5 677dd7ad53 85dee02a3b
afdebbc3a2 5deb22a539 36b9e2e077 5348937c3d 72fcacbbc7 4c28f15b35 095ef04c04 7d49979658
7a36695a21 5865587bd0 219bf93566 8371546a66 0b9b7bddd7 4c8449f4bc 36d7b5c9ab f2b0ea6722
46f4be88a6 6381efa7ce 85ee66efb9 40dccf5b29 c114849a31 4e9798d0e6 a30b1a394f 91460436cf
3f807a9432 cbe32c7482 5d3c582ecf 3ed006d216 3e1026286b b59266249d 015261a524 024e1088eb
08f4b1ae8a 1390c22004 8742ead585 59a297abe6 18636ea628 cf5980ace2 a7b0861436 89f2c0b0a4
ee4f4d7800 4de75ce621 1c4043ab39 44c945b9f5 c7719ac365 b9c24189e4 411d8d7439 671b40df2a
249a860c6f 0367a39e1f 1a7340bb02 ce7d852d22 01b01c5969 c159460b2c 07728d7425 d3a25e4dc1
1751c35f69 93f5b8cc4a 5b1e59a48c 7b27cad1ba 1b083d63ab 23f2b47531 194288c00e f9c8ed0dc3
88def9b71b f818f44693 8a395fdb4a c0588926b8 f1b7ecb2a2 4bcf157d88 2f7da03cce f1c995dcb8
9aec58c6b8 46aaaa9b70 46543d6323 a585119a67 8cc72368ca 92e57ee06c c737a19d9f 708a97d773
b95a90dbd6 a2d1ee08d4 7e64dc380f 046cb6a564 644ce9edab 059b601b13 d59999f510 c5d31e7527
c121e38da6 b16bc3d2e3 c732abbda2 61d681a7c8 7828bc09cf 36d330fea0 4d46589d39 93f57edd3a
8ec8ae0587 ce94e636bb 21c7378b61 75a9845d20 d638f6e411 81d0a64d46 f76739cb1b 7f992fd321
e428d11add 0ed5b75a14 b1b4adec74 1934cc2e62 ae8cf8c35e f5878eafb9 27fe4f7062 7ad8b26297
1a383b7d90 445946792e de82c7d5ac 07f0d561dc be379f3dac 1bf904fe60 c6faf005cb b534b58542
920711533e 194110433e 7926396d2a 797522e8ca 64b5d1a269 d00d3802c9 46fff13341 be3374a3ef
264ac0b017 17033b3c6c c4ea122d66 90185dc6b3 377b030d88 437bd87d7c 73a7916ce3 f947fa86e3
7219efbdb7 b7435b9cd1 207ab5a0d1 70aa0ef85d dfbe231a51 cce35da366 1a612bcae9 d5b9e003fe
e19c474a92 8274798499 9f68a32934 5c688daff1 741fb1181f fd1f05c8e0 708cbf937f 32213cad01
9320a6e115 0f16c0f4cf 30464396d9 64066c4ea8 7e97787d9d 40f2dd8c6c 4dd364e1c3 03f2a35b31
73bd98df57 bcf1fc658d 863cbe512d d871e9aee7 70ef61ac6d a4a140bfad d2d91e713a d9bb1ceaec
5fe8903fd2 8509101b83 357849c348 0f1b4e06f5 8e041420cd 9211d22b2b f5246eb167 e436b2d720
51f4e9c0d3 8c3371c4ac 6ff0fc6d83 9347a70425 91957f0848 62105bb353 e03f684508 2f41ae24f8
4ad551be9a bd640ae2c5 2cfc882c62 21ece2d76d d055d7f496 b1cfb1afe4 2f215356d6 e07c79259b
59085f072a 474d6db42f a95710ed0c 51d7724255 276e7629bd 69606a45e0 7f65ffcb15 4f5f6761f3
f543dbb42f 5917a42997 fbe1664214 d09bb13cb6 31c323c097 8f09aadfdf 20b4e8c779 402a0108ae
9de4a8efcf 077fa2e6b9 2ae9316f48 9b5a90e3b9 483942dc41 2ddda6457f 681e695170 a043664dc4
e940f99646 22073042a9 2634cc408a 36446bcbc2 b371ec5cf6 18f4afb388 77dcbe95c0 061b749041
5b0c3951f6 f2394b5a8d fe7b884cc9 5c1b635229 63410491b7 26e0a4bbde c356e56522 fd26bbbd0b
7aa55371b5 ba06533c3e d66d66e74b d6b5f3efe6 b5a431624b 8e7284de5a b2d38cd31c a15fed35b7
eee6b0059c 530b4f3bee bac1c223de 57f7582b4d 5afe819ebd 59568f5311 822706367b f8e9fafda3
c2bb9db012 e4e7d7fbfc 035e4cf90a 18fff4a3f5 675b6dc305 4071c78b2b 2cb32a683e b6dc9c004b
4ea0c707c1 2fbcb5c6d8 a4d60d9750 d3925890b1 8c6c144f28 db8c24cc7b ecbbb8426f 0752879fc8
3f2a04b25b aa15e7916e 7b09623fa8 2f45b8b7f5 5ffa2a30be b102ae141a 845abcdd77 805db7ca50
bd3d0c330f 0060df9877 cd66e203bd 240f99478a 41534c73f0 6139a69fa8 3cca312e61 7e312797ec
fe44fa648a 3249030257 8f98c20c51 1c76d5d096 35f1e28809 20999979de c6706a86f1 b4b1866286
28eb9b4c29 0a9accccc1 c3d220175f 095c90ad22 a77bfecb02 72027b5b3c e5503c56ad ee7b225272
03d37725a9 29d1cbb673 e81278b800 e5482a5725 8464be691e ed9937bbd8 b2a4d4a018 74aaf4f75b
2945f9daa9 3b496ab3d8 e1f30aeff9 a92e73231d 8d91115623 9af8d6912a fe43fb47e1 ca3a80fbe1
f0747e76da 7416d6ea71 ea7cbc781e 3568fb9f93 43b7ce4f6d baa38d6266 1677960caa 0fab573c98
04a8e5b888 6284e2011c a97c93abe4 664816383a

.gitattributes (vendored, new file, 4 additions)

@@ -0,0 +1,4 @@
CHANGELOG.md merge=union
README.md merge=union
plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union

.github/ISSUE_TEMPLATE.md (vendored, new file, 44 additions)

@@ -0,0 +1,44 @@
## Directions

GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).

Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
Erase the other section and everything on and above this line.

*Please note, the quickest way to fix a bug is to open a Pull Request.*

## Bug report

### Relevant telegraf.conf:

### System info:

[Include Telegraf version, operating system name, and other relevant details]

### Steps to reproduce:

1. ...
2. ...

### Expected behavior:

### Actual behavior:

### Additional info:

[Include gist of relevant config, logs, etc.]

## Feature Request

Opening a feature request kicks off a discussion.

### Proposal:

### Current behavior:

### Desired behavior:

### Use case: [Why is this important (helps with prioritizing requests)]

.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 5 additions)

@@ -0,0 +1,5 @@
### Required for all PRs:

- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
- [ ] README.md updated (if adding a new plugin)

.gitignore (vendored, 1 addition)

@@ -1,3 +1,4 @@
build
tivan
.vagrant
/telegraf

CHANGELOG.md (568 changes)

@@ -1,10 +1,576 @@
## v0.10.5 [unreleased]
## v1.2 [unreleased]

### Release Notes

- The StatsD plugin will now default all "delete_" config options to "true". This
will change the default behavior for users who were not specifying these parameters
in their config file (see the sketch after these notes).

- The StatsD plugin will also no longer save its state on a service reload.
Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
The reason for this is that saving the state in a global variable is not
thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
and this creates issues if users want to define multiple instances
of the statsd plugin. Saving state on reload may be considered in the future,
but this would need to be implemented at a higher level and applied to all
plugins, not just statsd.

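For users who relied on the old defaults, the affected options can be pinned explicitly. A minimal sketch, assuming the standard statsd input option names (`delete_gauges`, `delete_counters`, `delete_sets`, `delete_timings`):

```toml
[[inputs.statsd]]
  service_address = ":8125"
  ## These now default to true; set them to false explicitly to keep
  ## the previous behavior of retaining values between intervals.
  delete_gauges   = false
  delete_counters = false
  delete_sets     = false
  delete_timings  = false
```
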
### Features

- [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages
- [#1564](https://github.com/influxdata/telegraf/issues/1564): Use RFC3339 timestamps in log output.
- [#1997](https://github.com/influxdata/telegraf/issues/1997): Non-default HTTP timeouts for RabbitMQ plugin.
- [#2074](https://github.com/influxdata/telegraf/pull/2074): "discard" output plugin added, primarily for testing purposes.
- [#1965](https://github.com/influxdata/telegraf/pull/1965): The JSON parser can now parse an array of objects using the same configuration.
- [#1807](https://github.com/influxdata/telegraf/pull/1807): Option to use device name rather than path for reporting disk stats.
- [#1348](https://github.com/influxdata/telegraf/issues/1348): Telegraf "internal" plugin for collecting stats on itself.
- [#2127](https://github.com/influxdata/telegraf/pull/2127): Update Go version to 1.7.4.
- [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function.
- [#2026](https://github.com/influxdata/telegraf/pull/2065): elasticsearch "shield" (basic auth) support doc.
- [#1885](https://github.com/influxdata/telegraf/pull/1885): Fix over-querying of cloudwatch metrics
- [#1913](https://github.com/influxdata/telegraf/pull/1913): OpenTSDB basic auth support.
- [#1908](https://github.com/influxdata/telegraf/pull/1908): RabbitMQ Connection metrics.
- [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric.
- [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets.
- [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior.
- [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output.
- [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data.
- [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field.
- [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin.
- [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin.
- [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf.
- [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
- [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support.
- [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins.
- [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
- [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin
- [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter

### Bugfixes

- [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input.
- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
- [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
- [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag.
- [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters.
- [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s).
- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field.
- [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature.
- [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data.
- [#1917](https://github.com/influxdata/telegraf/pull/1917): fix puppetagent inputs plugin to support string for config variable.
- [#1987](https://github.com/influxdata/telegraf/issues/1987): fix docker input plugin tags when registry has port.
- [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe.
- [#1449](https://github.com/influxdata/telegraf/issues/1449): MongoDB plugin always shows 0 replication lag.
- [#1825](https://github.com/influxdata/telegraf/issues/1825): Consul plugin: add check_id as a tag in metrics to avoid overwrites.
- [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
- [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin.
- [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix.
- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages.
- [#2299](https://github.com/influxdata/telegraf/issues/2299): opentsdb: add tcp:// prefix if no scheme provided.
- [#2297](https://github.com/influxdata/telegraf/issues/2297): influx parser: parse line-protocol without newlines.

## v1.1.2 [2016-12-12]

### Bugfixes

- [#2007](https://github.com/influxdata/telegraf/issues/2007): Make snmptranslate not required when using numeric OID.
- [#2104](https://github.com/influxdata/telegraf/issues/2104): Add a global snmp translation cache.

## v1.1.1 [2016-11-14]

### Bugfixes

- [#2023](https://github.com/influxdata/telegraf/issues/2023): Fix issue parsing toml durations with single quotes.

## v1.1.0 [2016-11-07]

### Release Notes

- Telegraf now supports two new types of plugins: processors & aggregators.

- On systemd Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
On most systems, the logs will be directed to the systemd journal and can be
accessed by `journalctl -u telegraf.service`. Consult the systemd journal
documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf#L70)
available in 1.1, which will allow users to easily configure telegraf to
continue sending logs to /var/log/telegraf/telegraf.log.

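A minimal sketch of that option (path illustrative):

```toml
[agent]
  ## When set, Telegraf writes its own logs here instead of to
  ## stderr (and hence the journal on systemd systems).
  logfile = "/var/log/telegraf/telegraf.log"
```
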
### Features

- [#1726](https://github.com/influxdata/telegraf/issues/1726): Processor & Aggregator plugin support.
- [#1861](https://github.com/influxdata/telegraf/pull/1861): adding the tags in the graylog output plugin
- [#1732](https://github.com/influxdata/telegraf/pull/1732): Telegraf systemd service, log to journal.
- [#1782](https://github.com/influxdata/telegraf/pull/1782): Allow numeric and non-string values for tag_keys.
- [#1694](https://github.com/influxdata/telegraf/pull/1694): Adding Gauge and Counter metric types.
- [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carriage returns from exec plugin output on Windows
- [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout.
- [#1607](https://github.com/influxdata/telegraf/pull/1607): Massage metric names in Instrumental output plugin
- [#1572](https://github.com/influxdata/telegraf/pull/1572): mesos improvements.
- [#1513](https://github.com/influxdata/telegraf/issues/1513): Add Ceph Cluster Performance Statistics
- [#1650](https://github.com/influxdata/telegraf/issues/1650): Ability to configure response_timeout in httpjson input.
- [#1685](https://github.com/influxdata/telegraf/issues/1685): Add additional redis metrics.
- [#1539](https://github.com/influxdata/telegraf/pull/1539): Added capability to send metrics through Http API for OpenTSDB.
- [#1471](https://github.com/influxdata/telegraf/pull/1471): iptables input plugin.
- [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin.
- [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname for each docker measurements.
- [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin.
- [#1407](https://github.com/influxdata/telegraf/pull/1407) & [#1915](https://github.com/influxdata/telegraf/pull/1915): HTTP service listener input plugin.
- [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql
- [#1791](https://github.com/influxdata/telegraf/pull/1791): Add Docker container state metrics to Docker input plugin output
- [#1755](https://github.com/influxdata/telegraf/issues/1755): Add support to SNMP for IP & MAC address conversion.
- [#1729](https://github.com/influxdata/telegraf/issues/1729): Add support to SNMP for OID index suffixes.
- [#1813](https://github.com/influxdata/telegraf/pull/1813): Change default arguments for SNMP plugin.
- [#1686](https://github.com/influxdata/telegraf/pull/1686): Mesos input plugin: very high-cardinality mesos-task metrics removed.
- [#1838](https://github.com/influxdata/telegraf/pull/1838): Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
- [#1700](https://github.com/influxdata/telegraf/pull/1700): HAProxy plugin socket glob matching.
- [#1847](https://github.com/influxdata/telegraf/pull/1847): Add Kubernetes plugin for retrieving pod metrics.

### Bugfixes

- [#1955](https://github.com/influxdata/telegraf/issues/1955): Fix NATS plug-ins reconnection logic.
- [#1936](https://github.com/influxdata/telegraf/issues/1936): Set required default values in udp_listener & tcp_listener.
- [#1926](https://github.com/influxdata/telegraf/issues/1926): Fix toml unmarshal panic in Duration objects.
- [#1746](https://github.com/influxdata/telegraf/issues/1746): Fix handling of non-string values for JSON keys listed in tag_keys.
- [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2.
- [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing
- [#1716](https://github.com/influxdata/telegraf/issues/1716): Sensors plugin strconv.ParseFloat: parsing "": invalid syntax
- [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic
- [#1764](https://github.com/influxdata/telegraf/issues/1764): Fix kafka consumer panic when nil error is returned down errs channel.
- [#1768](https://github.com/influxdata/telegraf/pull/1768): Speed up statsd parsing.
- [#1751](https://github.com/influxdata/telegraf/issues/1751): Fix powerdns integer parse error handling.
- [#1752](https://github.com/influxdata/telegraf/issues/1752): Fix varnish plugin defaults not being used.
- [#1517](https://github.com/influxdata/telegraf/issues/1517): Fix windows glob paths.
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows.
- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated.
- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with huge payloads.
- [#1833](https://github.com/influxdata/telegraf/issues/1833): Fix translating SNMP fields not in MIB.
- [#1835](https://github.com/influxdata/telegraf/issues/1835): Fix SNMP emitting empty fields.
- [#1854](https://github.com/influxdata/telegraf/pull/1853): SQL Server waitstats truncation bug.
- [#1810](https://github.com/influxdata/telegraf/issues/1810): Fix logparser common log format: numbers in ident.
- [#1793](https://github.com/influxdata/telegraf/pull/1793): Fix JSON Serialization in OpenTSDB output.
- [#1731](https://github.com/influxdata/telegraf/issues/1731): Fix Graphite template ordering, use most specific.
- [#1836](https://github.com/influxdata/telegraf/pull/1836): Fix snmp table field initialization for non-automatic table.
- [#1724](https://github.com/influxdata/telegraf/issues/1724): cgroups path being parsed as metric.
- [#1886](https://github.com/influxdata/telegraf/issues/1886): Fix phpfpm fcgi client panic when URL does not exist.
- [#1344](https://github.com/influxdata/telegraf/issues/1344): Fix config file parse error logging.
- [#1771](https://github.com/influxdata/telegraf/issues/1771): Delete nil fields in the metric maker.
- [#870](https://github.com/influxdata/telegraf/issues/870): Fix MySQL special characters in DSN parsing.
- [#1742](https://github.com/influxdata/telegraf/issues/1742): Ping input odd timeout behavior.
- [#1950](https://github.com/influxdata/telegraf/pull/1950): Switch to github.com/kballard/go-shellquote.

## v1.0.1 [2016-09-26]

### Bugfixes

- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes.
- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
- [#1773](https://github.com/influxdata/telegraf/issues/1773): Add configurable timeout to influxdb input plugin.
- [#1785](https://github.com/influxdata/telegraf/pull/1785): Fix statsd no default value panic.

## v1.0 [2016-09-08]

### Release Notes

**Breaking Change** The SNMP plugin is being deprecated in its current form.
There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
which fixes many of the issues and confusions
of its predecessor. For users wanting to continue to use the deprecated SNMP
plugin, you will need to change your config file from `[[inputs.snmp]]` to
`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
backwards-compatible.

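The rename itself is a one-line config change; a minimal sketch:

```toml
## Deprecated form:
# [[inputs.snmp]]

## Same options, same behavior, under the legacy name:
[[inputs.snmp_legacy]]
```
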
**Breaking Change**: Aerospike main server node measurements have been renamed
aerospike_node. Aerospike namespace measurements have been renamed to
aerospike_namespace. They will also now be tagged with the node_name
that they correspond to. This has been done to differentiate measurements
that pertain to node vs. namespace statistics.

**Breaking Change**: users of github_webhooks must change to the new
`[[inputs.webhooks]]` plugin.

This means that the default github_webhooks config:

```
# A Github Webhook Event collector
[[inputs.github_webhooks]]
  ## Address and port to host Webhook listener on
  service_address = ":1618"
```

should now look like:

```
# A Webhooks Event collector
[[inputs.webhooks]]
  ## Address and port to host Webhook listener on
  service_address = ":1618"

  [inputs.webhooks.github]
    path = "/"
```

- Telegraf now supports being installed as an official windows service,
which can be installed via
`> C:\Program Files\Telegraf\telegraf.exe --service install`

- `flush_jitter` behavior has been changed. The random jitter will now be
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter`.

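A sketch of the related agent settings (values illustrative):

```toml
[agent]
  flush_interval = "10s"
  ## Each flush is delayed by a fresh random amount up to flush_jitter,
  ## re-evaluated at every flush rather than once at startup.
  flush_jitter = "5s"
  collection_jitter = "0s"
```
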
### Features

- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
- [#1503](https://github.com/influxdata/telegraf/pull/1503): Add tls support for certs to RabbitMQ input plugin
- [#1289](https://github.com/influxdata/telegraf/pull/1289): webhooks input plugin. Thanks @francois2metz and @cduez!
- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar webhook plugin.
- [#1408](https://github.com/influxdata/telegraf/pull/1408): mandrill webhook plugin.
- [#1402](https://github.com/influxdata/telegraf/pull/1402): docker-machine/boot2docker no longer required for unit tests.
- [#1350](https://github.com/influxdata/telegraf/pull/1350): cgroup input plugin.
- [#1369](https://github.com/influxdata/telegraf/pull/1369): Add input plugin for consuming metrics from NSQD.
- [#1369](https://github.com/influxdata/telegraf/pull/1480): add ability to read redis from a socket.
- [#1387](https://github.com/influxdata/telegraf/pull/1387): **Breaking Change** - Redis `role` tag renamed to `replication_role` to avoid global_tags override
- [#1437](https://github.com/influxdata/telegraf/pull/1437): Fetching Galera status metrics in MySQL
- [#1500](https://github.com/influxdata/telegraf/pull/1500): Aerospike plugin refactored to use official client lib.
- [#1434](https://github.com/influxdata/telegraf/pull/1434): Add measurement name arg to logparser plugin.
- [#1479](https://github.com/influxdata/telegraf/pull/1479): logparser: change resp_code from a field to a tag.
- [#1411](https://github.com/influxdata/telegraf/pull/1411): Implement support for fetching hddtemp data
- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren!
- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified
- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified
- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
- [#1543](https://github.com/influxdata/telegraf/pull/1543): Official Windows service.
- [#1414](https://github.com/influxdata/telegraf/pull/1414): Forking sensors command to remove C package dependency.
- [#1389](https://github.com/influxdata/telegraf/pull/1389): Add a new SNMP plugin.

### Bugfixes

- [#1619](https://github.com/influxdata/telegraf/issues/1619): Fix `make windows` build target
- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures.
- [#1477](https://github.com/influxdata/telegraf/issues/1477): nstat: fix inaccurate config panic.
- [#1481](https://github.com/influxdata/telegraf/issues/1481): jolokia: fix handling multiple multi-dimensional attributes.
- [#1430](https://github.com/influxdata/telegraf/issues/1430): Fix prometheus character sanitizing. Sanitize more win_perf_counters characters.
- [#1534](https://github.com/influxdata/telegraf/pull/1534): Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does).
- [#1379](https://github.com/influxdata/telegraf/issues/1379): Fix covering Amazon Linux for post remove flow.
- [#1584](https://github.com/influxdata/telegraf/issues/1584): procstat missing fields: read/write bytes & count
- [#1472](https://github.com/influxdata/telegraf/pull/1472): diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality.
- [#1426](https://github.com/influxdata/telegraf/pull/1426): nil metrics panic fix.
- [#1384](https://github.com/influxdata/telegraf/pull/1384): Fix datarace in apache input plugin.
- [#1399](https://github.com/influxdata/telegraf/issues/1399): Add `read_repairs` statistics to riak plugin.
- [#1405](https://github.com/influxdata/telegraf/issues/1405): Fix memory/connection leak in prometheus input plugin.
- [#1378](https://github.com/influxdata/telegraf/issues/1378): Trim BOM from config file for Windows support.
- [#1339](https://github.com/influxdata/telegraf/issues/1339): Prometheus client output panic on service reload.
- [#1461](https://github.com/influxdata/telegraf/pull/1461): Prometheus parser, protobuf format header fix.
- [#1334](https://github.com/influxdata/telegraf/issues/1334): Prometheus output, metric refresh and caching fixes.
- [#1432](https://github.com/influxdata/telegraf/issues/1432): Panic fix for multiple graphite outputs under very high load.
- [#1412](https://github.com/influxdata/telegraf/pull/1412): Instrumental output has better reconnect behavior
- [#1460](https://github.com/influxdata/telegraf/issues/1460): Remove PID from procstat plugin to fix cardinality issues.
- [#1427](https://github.com/influxdata/telegraf/issues/1427): Cassandra input: version 2.x "column family" fix.
- [#1463](https://github.com/influxdata/telegraf/issues/1463): Shared WaitGroup in Exec plugin
- [#1436](https://github.com/influxdata/telegraf/issues/1436): logparser: honor modifiers in "pattern" config.
- [#1418](https://github.com/influxdata/telegraf/issues/1418): logparser: error and exit on file permissions/missing errors.
- [#1499](https://github.com/influxdata/telegraf/pull/1499): Make the user able to specify full path for HAproxy stats
- [#1521](https://github.com/influxdata/telegraf/pull/1521): Fix Redis url, an extra "tcp://" was added.
- [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.
- [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection.
- [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string.
- [#1335](https://github.com/influxdata/telegraf/issues/1335): Fix overall ping timeout to be calculated based on per-ping timeout.
- [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
- [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
- [#1396](https://github.com/influxdata/telegraf/pull/1396): Prometheus input plugin now supports x509 certs authentication
- [#1252](https://github.com/influxdata/telegraf/pull/1252) & [#1279](https://github.com/influxdata/telegraf/pull/1279): Fix systemd service. Thanks @zbindenren & @PierreF!
- [#1221](https://github.com/influxdata/telegraf/pull/1221): Fix influxdb n_shards counter.
- [#1258](https://github.com/influxdata/telegraf/pull/1258): Fix potential kernel plugin integer parse error.
- [#1268](https://github.com/influxdata/telegraf/pull/1268): Fix potential influxdb input type assertion panic.
- [#1283](https://github.com/influxdata/telegraf/pull/1283): Still send processes metrics if a process exited during metric collection.
- [#1297](https://github.com/influxdata/telegraf/issues/1297): disk plugin panic when usage grab fails.
- [#1316](https://github.com/influxdata/telegraf/pull/1316): Removed leaked "database" tag on redis metrics. Thanks @PierreF!
- [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory.
- [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.
- [#1586](https://github.com/influxdata/telegraf/pull/1586): Remove IF NOT EXISTS from influxdb output database creation.
- [#1600](https://github.com/influxdata/telegraf/issues/1600): Fix quoting with text values in postgresql_extensible plugin.
- [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic.
- [#1634](https://github.com/influxdata/telegraf/issues/1634): Fix ntpq panic when field is missing.
- [#1637](https://github.com/influxdata/telegraf/issues/1637): Sanitize graphite output field names.
- [#1695](https://github.com/influxdata/telegraf/pull/1695): Fix MySQL plugin not sending 0 value fields.

## v0.13.1 [2016-05-24]

### Release Notes

- net_response and http_response plugins timeouts will now accept duration
strings, e.g., "2s" or "500ms" (see the sketch below).
- Input plugin Gathers will no longer be logged by default, but a Gather for
_each_ plugin will be logged in Debug mode.
- Debug mode will no longer print every point added to the accumulator. This
functionality can be duplicated using the `file` output plugin and printing
to "stdout".

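A minimal sketch of a duration-string timeout, assuming the net_response input's `protocol`, `address`, and `timeout` options:

```toml
[[inputs.net_response]]
  protocol = "tcp"
  address = "localhost:80"
  ## Now accepts duration strings such as "2s" or "500ms".
  timeout = "2s"
```
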
### Features

- [#1173](https://github.com/influxdata/telegraf/pull/1173): varnish input plugin. Thanks @sfox-xmatters!
- [#1138](https://github.com/influxdata/telegraf/pull/1138): nstat input plugin. Thanks @Maksadbek!
- [#1139](https://github.com/influxdata/telegraf/pull/1139): instrumental output plugin. Thanks @jasonroelofs!
- [#1172](https://github.com/influxdata/telegraf/pull/1172): Ceph storage stats. Thanks @robinpercy!
- [#1233](https://github.com/influxdata/telegraf/pull/1233): Updated golint gopsutil dependency.
- [#1238](https://github.com/influxdata/telegraf/pull/1238): chrony input plugin. Thanks @zbindenren!
- [#479](https://github.com/influxdata/telegraf/issues/479): per-plugin execution time added to debug output.
- [#1249](https://github.com/influxdata/telegraf/issues/1249): influxdb output: added write_consistency argument.

### Bugfixes

- [#1195](https://github.com/influxdata/telegraf/pull/1195): Docker panic on timeout. Thanks @zstyblik!
- [#1211](https://github.com/influxdata/telegraf/pull/1211): mongodb input. Fix possible panic. Thanks @kols!
- [#1215](https://github.com/influxdata/telegraf/pull/1215): Fix for possible gopsutil-dependent plugin hangs.
- [#1228](https://github.com/influxdata/telegraf/pull/1228): Fix service plugin host tag overwrite.
- [#1198](https://github.com/influxdata/telegraf/pull/1198): http_response: override request Host header properly
- [#1230](https://github.com/influxdata/telegraf/issues/1230): Fix Telegraf process hangup due to a single plugin hanging.
- [#1214](https://github.com/influxdata/telegraf/issues/1214): Use TCP timeout argument in net_response plugin.
- [#1243](https://github.com/influxdata/telegraf/pull/1243): Logfile not created on systemd.

## v0.13 [2016-05-11]

### Release Notes

- **Breaking change** in jolokia plugin. See
https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md
for updated configuration. The plugin will now support proxy mode and will make
POST requests.

- New [agent] configuration option: `metric_batch_size`. This option tells
telegraf the maximum batch size to allow to accumulate before sending a flush
to the configured outputs. `metric_buffer_limit` now refers to the absolute
maximum number of metrics that will accumulate before metrics are dropped
(see the first sketch after these notes).

- There is no longer an option to `flush_buffer_when_full`; this is now the
default and only behavior of telegraf.

- **Breaking Change**: docker plugin tags. The cont_id tag no longer exists, it
will now be a field, and be called container_id. Additionally, cont_image and
cont_name are being renamed to container_image and container_name.

- **Breaking Change**: docker plugin measurements. The `docker_cpu`, `docker_mem`,
`docker_blkio` and `docker_net` measurements are being renamed to
`docker_container_cpu`, `docker_container_mem`, `docker_container_blkio` and
`docker_container_net`. Why? Because these metrics are
specifically tracking per-container stats. The problem with per-container stats,
in some use-cases, is that if containers are short-lived AND names are not
kept consistent, then the series cardinality will balloon very quickly.
So adding "container" to each metric will:
(1) make it more clear that these metrics are per-container, and
(2) allow users to easily drop per-container metrics if cardinality is an
issue (`namedrop = ["docker_container_*"]`)

- `tagexclude` and `taginclude` are now available, which can be used to remove
tags from measurements on inputs and outputs. See
[the configuration doc](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md)
for more details (see the second sketch after these notes).

- **Measurement filtering:** All measurement filters now match based on glob
only. Previously there was an undocumented behavior where filters would match
based on _prefix_ in addition to globs. This means that a filter like
`fielddrop = ["time_"]` will need to be changed to `fielddrop = ["time_*"]`

- **datadog**: measurement and field names will no longer have `_` replaced by `.`

- The following plugins have changed their tags to _not_ overwrite the host tag:
  - cassandra: `host -> cassandra_host`
  - disque: `host -> disque_host`
  - rethinkdb: `host -> rethinkdb_host`

- **Breaking Change**: The `win_perf_counters` input has been changed to
sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as
spaces with underscores. This is needed because Graphite doesn't like slashes
and spaces, and was failing to accept metrics that had them.
The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.

- **Breaking Change**: snmp plugin. The `host` tag of the snmp plugin has been
changed to the `snmp_host` tag.

- The `disk` input plugin can now be configured with the `HOST_MOUNT_PREFIX` environment variable.
This value is prepended to any mountpaths discovered before retrieving stats.
It is not included on the report path. This is necessary for reporting host disk stats when running from within a container.

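A sketch of the two buffering options described above (values illustrative):

```toml
[agent]
  ## Maximum number of metrics to send to outputs in a single write.
  metric_batch_size = 1000
  ## Absolute maximum number of metrics to accumulate before dropping.
  metric_buffer_limit = 10000
```

And a sketch of `tagexclude` combined with the now glob-only field filtering (the plugin and tag choices here are hypothetical):

```toml
[[inputs.cpu]]
  ## Strip a tag from this input's measurements.
  tagexclude = ["host"]
  ## Prefix matching is gone; the glob must be explicit.
  fielddrop = ["time_*"]
```
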
### Features

- [#1031](https://github.com/influxdata/telegraf/pull/1031): Jolokia plugin proxy mode. Thanks @saiello!
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin
- [#1056](https://github.com/influxdata/telegraf/pull/1056): Don't allow inputs to overwrite host tags.
- [#1035](https://github.com/influxdata/telegraf/issues/1035): Add `user`, `exe`, `pidfile` tags to procstat plugin.
- [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin.
- [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat.
- [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin
- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman!
- [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers.
- [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements.
- [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja!
- [#1122](https://github.com/influxdata/telegraf/pull/1122): Support setting config path through env variable and default paths.
- [#1128](https://github.com/influxdata/telegraf/pull/1128): MongoDB jumbo chunks metric for MongoDB input plugin
- [#1146](https://github.com/influxdata/telegraf/pull/1146): HAProxy socket support. Thanks weshmashian!

### Bugfixes

- [#1050](https://github.com/influxdata/telegraf/issues/1050): jolokia plugin - do not overwrite host tag. Thanks @saiello!
- [#921](https://github.com/influxdata/telegraf/pull/921): mqtt_consumer stops gathering metrics. Thanks @chaton78!
- [#1013](https://github.com/influxdata/telegraf/pull/1013): Close dead riemann output connections. Thanks @echupriyanov!
- [#1012](https://github.com/influxdata/telegraf/pull/1012): Set default tags in test accumulator.
- [#1024](https://github.com/influxdata/telegraf/issues/1024): Don't replace `.` with `_` in datadog output.
- [#1058](https://github.com/influxdata/telegraf/issues/1058): Fix possible leaky TCP connections in influxdb output.
- [#1044](https://github.com/influxdata/telegraf/pull/1044): Fix SNMP OID possible collisions. Thanks @relip
- [#1022](https://github.com/influxdata/telegraf/issues/1022): Don't error deb/rpm install on systemd errors.
- [#1078](https://github.com/influxdata/telegraf/issues/1078): Use default AWS credential chain.
- [#1070](https://github.com/influxdata/telegraf/issues/1070): SQL Server input. Fix datatype conversion.
- [#1089](https://github.com/influxdata/telegraf/issues/1089): Fix leaky TCP connections in phpfpm plugin.
- [#914](https://github.com/influxdata/telegraf/issues/914): Telegraf can drop metrics on full buffers.
- [#1098](https://github.com/influxdata/telegraf/issues/1098): Sanitize invalid OpenTSDB characters.
- [#1110](https://github.com/influxdata/telegraf/pull/1110): Sanitize * to - in graphite serializer. Thanks @goodeggs!
- [#1118](https://github.com/influxdata/telegraf/pull/1118): Sanitize Counter names for `win_perf_counters` input.
- [#1125](https://github.com/influxdata/telegraf/pull/1125): Wrap all exec command runners with a timeout, so hung os processes don't halt Telegraf.
- [#1113](https://github.com/influxdata/telegraf/pull/1113): Set MaxRetry and RequiredAcks defaults in Kafka output.
- [#1090](https://github.com/influxdata/telegraf/issues/1090): [agent] and [global_tags] config sometimes not getting applied.
- [#1133](https://github.com/influxdata/telegraf/issues/1133): Use a timeout for docker list & stat cmds.
- [#1052](https://github.com/influxdata/telegraf/issues/1052): Docker panic fix when decode fails.
- [#1136](https://github.com/influxdata/telegraf/pull/1136): "DELAYED" Inserts were deprecated in MySQL 5.6.6. Thanks @PierreF

## v0.12.1 [2016-04-14]

### Release Notes

- Breaking change in the dovecot input plugin. See the Features section below.
- Graphite output templates are now supported. See
  https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
- Possible breaking change for the librato and graphite outputs. Telegraf will
  no longer insert field names when the field is simply named `value`. This is
  because the `value` field is redundant in the graphite/librato context.

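For illustration of this change (hypothetical host and metric names; the exact segment ordering depends on your output template), a metric whose only field is named `value` used to serialize with a trailing `.value` segment and now omits it:

```
host01.cpu_idle.value 98.3 1460000000   # before
host01.cpu_idle 98.3 1460000000         # after
```
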
### Features

- [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra!
- [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs.
- [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener.
- [#992](https://github.com/influxdata/telegraf/pull/992): Refactor allocations in TCP/UDP listeners.
- [#935](https://github.com/influxdata/telegraf/pull/935): AWS Cloudwatch input plugin. Thanks @joshhardy & @ljosa!
- [#943](https://github.com/influxdata/telegraf/pull/943): http_response input plugin. Thanks @Lswith!
- [#939](https://github.com/influxdata/telegraf/pull/939): sysstat input plugin. Thanks @zbindenren!
- [#998](https://github.com/influxdata/telegraf/pull/998): **breaking change** enabled global, user and ip queries in dovecot plugin. Thanks @mikif70!
- [#1001](https://github.com/influxdata/telegraf/pull/1001): Graphite serializer templates.
- [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin.

### Bugfixes

- [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when the command name contains spaces.
- [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow `:` in password. Thanks @awaw!
- [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj!
- [#645](https://github.com/influxdata/telegraf/issues/645): docker plugin i/o error on closed pipe. Thanks @tripledes!

## v0.12.0 [2016-04-05]

### Features

- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file.
- [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented).
- [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension.
- [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini!
- [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert!
- [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo!
- [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey!
- [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server plugin issues.
- [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type.
- [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama!
- [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa!
- [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty!
- [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert!
- [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent.
- [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" option, exposing a separator config.
- [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug!
- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere!

### Bugfixes

- [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided.
- [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write.
- [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes; fixes special characters in the database name.
- [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data; fixes permissions issue.
- [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key.
- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert!
- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
- [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout.
- [#959](https://github.com/influxdata/telegraf/pull/959): Reduce mongodb & prometheus collection timeouts. Thanks @PierreF!

## v0.11.1 [2016-03-17]

### Release Notes

- Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859)

### Features

- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref!
- [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou!

### Bugfixes

- [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix
- [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic

## v0.11.0 [2016-03-15]

### Release Notes

### Features

- [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies.
- [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugins. Thanks @PierreF!
- [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin. Thanks @whatyouhide!
- [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration.
- [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert!
- [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert!
- [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug!
- [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener.
- [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions.
- [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert!
- [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998!
- [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert!
- [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin.
- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.).
- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics.

### Bugfixes

- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":".
- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty!
- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert!
- [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78!
- [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi!
- [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert!
- [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package.
- [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory.
- [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable.
- [#828](https://github.com/influxdata/telegraf/issues/828): Fix net_response plugin overwriting host tag.
- [#821](https://github.com/influxdata/telegraf/issues/821): Remove postgres password from server tag. Thanks @menardorama!

## v0.10.4.1

### Release Notes

- A bug in the build script broke the deb and rpm packages.

### Bugfixes

- [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken
- [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken

## v0.10.4 [2016-02-24]

CONTRIBUTING.md

@@ -2,7 +2,7 @@

1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go`
1. If your plugin requires a new Go package,
   [add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin; if it's an input plugin, it should be structured

@@ -11,11 +11,13 @@ Output plugins READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.

## GoDoc

Public interfaces for inputs, outputs, metrics, and the accumulator can be found
on the GoDoc
Public interfaces for inputs, outputs, processors, aggregators, metrics,
and the accumulator can be found on the GoDoc

[](https://godoc.org/github.com/influxdata/telegraf)

@@ -30,7 +32,7 @@ Assuming you can already build the project, run these in the telegraf directory:

1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`
1. `GOOS=linux gdm save`

## Input Plugins

@@ -44,7 +46,7 @@ and submit new inputs.

### Input Plugin Guidelines

* A plugin must conform to the `telegraf.Input` interface.
* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
  See below for a quick example.
* Input Plugins must be added to the

@@ -80,11 +82,11 @@ func (s *Simple) SampleConfig() string {
	return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc inputs.Accumulator) error {
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	if s.Ok {
		acc.Add("state", "pretty good", nil)
		acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
	} else {
		acc.Add("state", "not great", nil)
		acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
	}

	return nil

@@ -95,6 +97,13 @@ func init() {
}
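
Putting the fragments above together, a complete minimal input plugin under the updated API would look roughly like the following. This is a sketch for orientation, not code from the repository; the `simple` package and its single `ok` option are taken from the fragment above:

```go
package simple

// simple.go -- a self-contained sketch of a minimal input plugin.

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
	return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc telegraf.Accumulator) error {
	state := "not great"
	if s.Ok {
		state = "pretty good"
	}
	// AddFields replaces the old single-value Add; the field map may hold
	// several fields per measurement.
	acc.AddFields("state", map[string]interface{}{"value": state}, nil)
	return nil
}

func init() {
	// Register the plugin so it can be enabled from the config file.
	inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```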

## Adding Typed Metrics

In addition to the `AddFields` function, the accumulator also supports
`AddGauge` and `AddCounter` functions. These functions are for adding _typed_
metrics. Metric types are ignored by the InfluxDB output, but can be used
by other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
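
As a brief illustration (the measurement and field names here are invented), a gather function might emit a gauge next to a counter so that type-aware outputs can expose them correctly:

```go
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	// A gauge: a point-in-time reading that can go up or down.
	acc.AddGauge("queue", map[string]interface{}{"depth": 42}, nil)
	// A counter: a cumulative total that only ever increases.
	acc.AddCounter("queue", map[string]interface{}{"enqueued_total": 10137}, nil)
	return nil
}
```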
## Input Plugins Accepting Arbitrary Data Formats

Some input plugins (such as

@@ -114,7 +123,7 @@ creating the `Parser` object.

You should also add the following to your SampleConfig() return:

```toml
## Data format to consume. This can be "json", "influx" or "graphite"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
```
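
On the Go side, a plugin that accepts arbitrary data formats stores whatever parser the config layer hands it. The sketch below assumes the `SetParser`/`parsers.Parser` convention this guide describes; the `Exec` struct and the sample payload are invented for the example:

```go
import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/parsers"
)

type Exec struct {
	parser parsers.Parser
}

// SetParser is called by the config layer with a parser built from the
// plugin's `data_format` option.
func (e *Exec) SetParser(parser parsers.Parser) {
	e.parser = parser
}

func (e *Exec) Gather(acc telegraf.Accumulator) error {
	out := []byte(`example value=42`) // stand-in for real command output
	metrics, err := e.parser.Parse(out)
	if err != nil {
		return err
	}
	// Hand every parsed metric to the accumulator.
	for _, m := range metrics {
		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
	return nil
}
```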

@@ -168,7 +177,7 @@ similar constructs.

### Output Plugin Guidelines

* An output must conform to the `outputs.Output` interface.
* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
  See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the

@@ -212,8 +221,8 @@ func (s *Simple) Close() error {
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
	for _, pt := range points {
		// write `pt` to the output sink here
	for _, metric := range metrics {
		// write `metric` to the output sink here
	}
	return nil
}
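
Analogous to the input example, a complete minimal output under the `telegraf.Output` interface might look like the following sketch (the `simpleoutput` name and no-op behavior are invented for illustration):

```go
package simpleoutput

// simpleoutput.go -- a self-contained sketch of a minimal output plugin.

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs"
)

type Simple struct{}

func (s *Simple) Description() string { return "a demo output" }

func (s *Simple) SampleConfig() string { return "" }

// Connect opens the connection to the sink; Close tears it down.
func (s *Simple) Connect() error { return nil }

func (s *Simple) Close() error { return nil }

func (s *Simple) Write(metrics []telegraf.Metric) error {
	for _, metric := range metrics {
		// write `metric` to the output sink here
		_ = metric
	}
	return nil
}

func init() {
	outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```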

@@ -244,7 +253,7 @@ instantiating and creating the `Serializer` object.

You should also add the following to your SampleConfig() return:

```toml
## Data format to output. This can be "influx" or "graphite"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
```

@@ -266,6 +275,186 @@ and `Stop()` methods.

* Same as the `Output` guidelines, except that they must conform to the
  `output.ServiceOutput` interface.

## Processor Plugins

This section is for developers who want to create a new processor plugin.

### Processor Plugin Guidelines

* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface.
* Processors should call `processors.Add` in their `init` function to register themselves.
  See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
  `github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
  processor can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this processor does.

### Processor Example

```go
package printer

// printer.go

import (
	"fmt"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/processors"
)

type Printer struct {
}

var sampleConfig = `
`

func (p *Printer) SampleConfig() string {
	return sampleConfig
}

func (p *Printer) Description() string {
	return "Print all metrics that pass through this filter."
}

func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
	for _, metric := range in {
		fmt.Println(metric.String())
	}
	return in
}

func init() {
	processors.Add("printer", func() telegraf.Processor {
		return &Printer{}
	})
}
```

## Aggregator Plugins

This section is for developers who want to create a new aggregator plugin.

### Aggregator Plugin Guidelines

* An aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface.
* Aggregators should call `aggregators.Add` in their `init` function to register themselves.
  See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
  `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
  aggregator can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
  through it. This should be done using the builtin `HashID()` function of each
  metric.
* When the `Reset()` function is called, all caches should be cleared.

### Aggregator Example

```go
package min

// min.go

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"
)

type Min struct {
	// caches for metric fields, names, and tags
	fieldCache map[uint64]map[string]float64
	nameCache  map[uint64]string
	tagCache   map[uint64]map[string]string
}

func NewMin() telegraf.Aggregator {
	m := &Min{}
	m.Reset()
	return m
}

var sampleConfig = `
  ## period is the flush & clear interval of the aggregator.
  period = "30s"
  ## If true drop_original will drop the original metrics and
  ## only send aggregates.
  drop_original = false
`

func (m *Min) SampleConfig() string {
	return sampleConfig
}

func (m *Min) Description() string {
	return "Keep the aggregate min of each metric passing through."
}

func (m *Min) Add(in telegraf.Metric) {
	id := in.HashID()
	if _, ok := m.nameCache[id]; !ok {
		// hit an uncached metric, create caches for first time:
		m.nameCache[id] = in.Name()
		m.tagCache[id] = in.Tags()
		m.fieldCache[id] = make(map[string]float64)
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				m.fieldCache[id][k] = fv
			}
		}
	} else {
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				if _, ok := m.fieldCache[id][k]; !ok {
					// hit an uncached field of a cached metric
					m.fieldCache[id][k] = fv
					continue
				}
				if fv < m.fieldCache[id][k] {
					// set new minimum
					m.fieldCache[id][k] = fv
				}
			}
		}
	}
}

func (m *Min) Push(acc telegraf.Accumulator) {
	for id := range m.nameCache {
		fields := map[string]interface{}{}
		for k, v := range m.fieldCache[id] {
			fields[k+"_min"] = v
		}
		acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
	}
}

func (m *Min) Reset() {
	m.fieldCache = make(map[uint64]map[string]float64)
	m.nameCache = make(map[uint64]string)
	m.tagCache = make(map[uint64]map[string]string)
}

func convert(in interface{}) (float64, bool) {
	switch v := in.(type) {
	case float64:
		return v, true
	case int64:
		return float64(v), true
	default:
		return 0, false
	}
}

func init() {
	aggregators.Add("min", func() telegraf.Aggregator {
		return NewMin()
	})
}
```
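
Conceptually, the agent drives an aggregator through an Add/Push/Reset cycle on each `period`. The loop below is illustrative only and uses stand-ins for the agent's real plumbing; it is not the actual scheduler code:

```go
// Illustrative only: how the agent exercises the Aggregator interface.
// `metrics` and `acc` stand in for the agent's internal channel and accumulator.
func runAggregator(agg telegraf.Aggregator, metrics <-chan telegraf.Metric, acc telegraf.Accumulator) {
	ticker := time.NewTicker(30 * time.Second) // matches `period` in the sample config
	defer ticker.Stop()
	for {
		select {
		case m := <-metrics:
			agg.Add(m) // cache the metric, keyed internally by m.HashID()
		case <-ticker.C:
			agg.Push(acc) // emit aggregates for the window...
			agg.Reset()   // ...then clear all caches
		}
	}
}
```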

## Unit Tests

### Execute short tests

@@ -290,10 +479,6 @@ To execute Telegraf tests follow these simple steps:

instructions
- execute `make test`

**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.

### Unit test troubleshooting

Try cleaning up your test environment by executing `make docker-kill` and

Godeps

@@ -1,53 +1,65 @@

git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

@@ -1,56 +1,11 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8

Makefile

@@ -1,5 +1,6 @@
UNAME := $(shell sh -c 'uname')
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else

@@ -14,21 +15,18 @@ windows: prepare-windows build-windows

# Only run the build (no dependency grabbing)
build:
	go install -ldflags "-X main.Version=$(VERSION)" ./...
	go install -ldflags \
		"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...

build-windows:
	go build -o telegraf.exe -ldflags \
		"-X main.Version=$(VERSION)" \
	GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
		"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
		./cmd/telegraf/telegraf.go

build-for-docker:
	CGO_ENABLED=0 GOOS=linux go -o telegraf -ldflags \
		"-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

# Build with race detector
dev: prepare
	go build -race -ldflags "-X main.Version=$(VERSION)" ./...
	CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
		"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
		./cmd/telegraf/telegraf.go
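
The `-X` flags above inject build metadata into package-level string variables at link time. A minimal sketch of the receiving side (the variable names mirror the `main.version`, `main.commit`, and `main.branch` flags in the Makefile; the print statement is illustrative):

```go
package main

import "fmt"

// Populated at link time via:
//   go build -ldflags "-X main.version=... -X main.commit=... -X main.branch=..."
var (
	version string
	commit  string
	branch  string
)

func main() {
	fmt.Printf("Telegraf %s (git: %s %s)\n", version, branch, commit)
}
```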

# run package script
package:

@@ -42,54 +40,44 @@ prepare:

# Use the windows godeps file to prepare dependencies
prepare-windows:
	go get github.com/sparrc/gdm
	gdm restore
	gdm restore -f Godeps_windows

# Run all docker containers necessary for unit tests
docker-run:
ifeq ($(UNAME), Darwin)
	docker run --name kafka \
		-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
endif
ifeq ($(UNAME), Linux)
	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
	docker run --name kafka \
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
endif
	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
	docker run --name memcached -p "11211:11211" -d memcached
	docker run --name postgres -p "5432:5432" -d postgres
	docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
	docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
	docker run --name redis -p "6379:6379" -d redis
	docker run --name aerospike -p "3000:3000" -d aerospike
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
	docker run --name nats -p "4222:4222" -d nats

# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
	docker run --name kafka \
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
	docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
	docker run --name aerospike -p "3000:3000" -d aerospike
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
	docker run --name nats -p "4222:4222" -d nats

# Kill all docker containers, ignore errors
docker-kill:
	-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
	-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats

# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run

README.md

@@ -1,15 +1,23 @@
# Telegraf [](https://circleci.com/gh/influxdata/telegraf)
# Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)

Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
Telegraf is an agent written in Go for collecting, processing, aggregating,
and writing metrics.

Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).

New input and output plugins are designed to be easy to contribute,
Telegraf is plugin-driven and has the concept of 4 distinct plugins:

1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
4. [Output Plugins](#output-plugins) write metrics to various destinations

For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).

New plugins are designed to be easy to contribute,
we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing

@@ -17,26 +25,15 @@ new plugins.

## Installation:

NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions
of telegraf, both in the database layout and the configuration file. 0.2.x
will continue to be supported, see below for download links.

For more details on the differences between Telegraf 0.2.x and 0.10.x, see
the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/).

### Linux deb and rpm Packages:

Latest:
* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.x86_64.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.x86_64.rpm

Latest (arm):
* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_arm.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.arm.rpm

0.2.x:
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.armhf.rpm

##### Package Instructions:

@@ -50,34 +47,21 @@ controlled via `systemctl [action] telegraf`

### yum/apt Repositories:

There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/v0.9/introduction/installation/#installation)
for instructions, replacing the `influxdb` package name with `telegraf`.
[here](https://docs.influxdata.com/influxdb/latest/introduction/installation/#installation)
for instructions on setting up the repo. Once it is configured, you will be able
to use this repo to install & update telegraf.

### Linux tarballs:

Latest:
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_arm.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_armhf.tar.gz

0.2.x:
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
### FreeBSD tarball:

##### tarball Instructions:

To install the full directory structure with config file, run:

```
sudo tar -C / -zxvf ./telegraf-0.10.4-1_linux_amd64.tar.gz
```

To extract only the binary, run:

```
tar -zxvf telegraf-0.10.4-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_freebsd_amd64.tar.gz

### Ansible Role:

@@ -93,8 +77,7 @@ brew install telegraf

### Windows Binaries (EXPERIMENTAL)

Latest:
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_amd64.zip
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_i386.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_windows_amd64.zip

### From Source:

@@ -110,103 +93,114 @@ if you don't have it already. You also must build with golang version 1.5+.

## How to use it:

```console
$ telegraf -help
Telegraf, The plugin-driven server agent for collecting and reporting metrics.
See usage with:

Usage:

  telegraf <flags>

The flags are:

  -config <file>     configuration file to load
  -test              gather metrics once, print them to stdout, and exit
  -sample-config     print out full sample configuration to stdout
  -config-directory  directory containing additional *.conf files
  -input-filter      filter the input plugins to enable, separator is :
  -output-filter     filter the output plugins to enable, separator is :
  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug             print metrics as they're generated to stdout
  -quiet             run in quiet mode
  -version           print the version to stdout

Examples:

  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb

  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test

  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```

```
telegraf --help
```

### Generate a telegraf config file:

```
telegraf config > telegraf.conf
```

### Generate config with only cpu input & influxdb output plugins defined

```
telegraf --input-filter cpu --output-filter influxdb config
```

### Run a single telegraf collection, outputting metrics to stdout

```
telegraf --config telegraf.conf -test
```

### Run telegraf with all plugins defined in config file

```
telegraf --config telegraf.conf
```

### Run telegraf, enabling the cpu & memory input, and influxdb output plugins

```
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```

## Configuration

See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
configuration options.

## Supported Input Plugins
## Input Plugins

Telegraf currently has support for collecting metrics from many sources. For
more information on each, please look at the directory of the same name in
`plugins/inputs`.

Currently implemented sources:

* aerospike
* apache
* bcache
* couchdb
* disque
* dns query time
* docker
* dovecot
* elasticsearch
* exec (generic executable plugin, support JSON, influx and graphite)
* haproxy
* httpjson (generic JSON-emitting http service plugin)
* influxdb
* jolokia
* leofs
* lustre2
* mailchimp
* memcached
* mesos
* mongodb
* mysql
* net_response
* nginx
* nsq
* phpfpm
* phusion passenger
* ping
* postgresql
* powerdns
* procstat
* prometheus
* puppetagent
* rabbitmq
* raindrops
* redis
* rethinkdb
* riak
* sensors (only available if built from source)
* snmp
* sql server (microsoft)
* twemproxy
* zfs
* zookeeper
* win_perf_counters (windows performance counters)
* system
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [aerospike](./plugins/inputs/aerospike)
* [apache](./plugins/inputs/apache)
* [bcache](./plugins/inputs/bcache)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [chrony](./plugins/inputs/chrony)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [disque](./plugins/inputs/disque)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [filestat](./plugins/inputs/filestat)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postgresql](./plugins/inputs/postgresql)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus)
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [sensors](./plugins/inputs/sensors)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [twemproxy](./plugins/inputs/twemproxy)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
    * cpu
    * mem
    * net

@@ -214,34 +208,57 @@ Currently implemented sources:

    * disk
    * diskio
    * swap
    * processes
    * kernel (/proc/stat)
    * kernel (/proc/vmstat)

Telegraf can also collect metrics via the following service plugins:

* statsd
* mqtt_consumer
* kafka_consumer
* nats_consumer
* github_webhooks
* [http_listener](./plugins/inputs/http_listener)
* [kafka_consumer](./plugins/inputs/kafka_consumer)
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
* [nats_consumer](./plugins/inputs/nats_consumer)
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [logparser](./plugins/inputs/logparser)
* [statsd](./plugins/inputs/statsd)
* [tail](./plugins/inputs/tail)
* [tcp_listener](./plugins/inputs/tcp_listener)
* [udp_listener](./plugins/inputs/udp_listener)
* [webhooks](./plugins/inputs/webhooks)
    * [filestack](./plugins/inputs/webhooks/filestack)
    * [github](./plugins/inputs/webhooks/github)
    * [mandrill](./plugins/inputs/webhooks/mandrill)
    * [rollbar](./plugins/inputs/webhooks/rollbar)

We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
## Processor Plugins

## Supported Output Plugins
* [printer](./plugins/processors/printer)

* influxdb
* amon
* amqp
* aws kinesis
* aws cloudwatch
* datadog
* graphite
* kafka
* librato
* mqtt
* nsq
* opentsdb
* prometheus
* riemann
## Aggregator Plugins

* [minmax](./plugins/aggregators/minmax)

## Output Plugins

* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)
* [mqtt](./plugins/outputs/mqtt)
* [nats](./plugins/outputs/nats)
* [nsq](./plugins/outputs/nsq)
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)

## Contributing

@@ -2,20 +2,33 @@ package telegraf

import "time"

// Accumulator is an interface for "accumulating" metrics from plugin(s).
// The metrics are sent down a channel shared between all plugins.
type Accumulator interface {
	// AddFields adds a metric to the accumulator with the given measurement
	// name, fields, and tags (and timestamp). If a timestamp is not provided,
	// then the accumulator sets it to "now".
	// Create a point with a value, decorating it with tags
	// NOTE: tags is expected to be owned by the caller, don't mutate
	// it after passing to Add.
	Add(measurement string,
		value interface{},
		tags map[string]string,
		t ...time.Time)

	AddFields(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)

	Debug() bool
	SetDebug(enabled bool)
	// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
	AddGauge(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)

	// AddCounter is the same as AddFields, but will add the metric as a "Counter" type
	AddCounter(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)

	SetPrecision(precision, interval time.Duration)

	AddError(err error)
}
|
||||
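For orientation, here is a minimal sketch of how an input plugin's `Gather` method might drive this interface. The plugin type `Example`, the measurement name, and all field/tag values are hypothetical:

```go
package example

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

type Example struct{}

// Gather is a hypothetical input plugin's collection function, showing
// typical Accumulator usage.
func (e *Example) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "server01"}

	// The timestamp argument is optional; the accumulator fills in "now".
	acc.AddFields("example", map[string]interface{}{"queue_depth": 42}, tags)

	// The Gauge/Counter variants record the same data but mark the value
	// type for outputs that distinguish metric kinds.
	acc.AddGauge("example", map[string]interface{}{"load": 0.75}, tags)
	acc.AddCounter("example", map[string]interface{}{"requests": int64(1017)}, tags)

	// Runtime errors are reported through the accumulator so one failed
	// sub-collection does not abort the whole gather cycle.
	acc.AddError(fmt.Errorf("hypothetical partial failure"))
	return nil
}
```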
@@ -1,54 +1,46 @@
package agent

import (
	"fmt"
	"log"
	"math"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/models"
	"github.com/influxdata/telegraf/selfstat"
)

var (
	NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
)

type MetricMaker interface {
	Name() string
	MakeMetric(
		measurement string,
		fields map[string]interface{},
		tags map[string]string,
		mType telegraf.ValueType,
		t time.Time,
	) telegraf.Metric
}

func NewAccumulator(
	inputConfig *internal_models.InputConfig,
	maker MetricMaker,
	metrics chan telegraf.Metric,
) *accumulator {
	acc := accumulator{}
	acc.metrics = metrics
	acc.inputConfig = inputConfig
	acc := accumulator{
		maker:     maker,
		metrics:   metrics,
		precision: time.Nanosecond,
	}
	return &acc
}

type accumulator struct {
	sync.Mutex

	metrics chan telegraf.Metric

	defaultTags map[string]string
	maker       MetricMaker

	debug bool

	inputConfig *internal_models.InputConfig

	prefix string
}

func (ac *accumulator) Add(
	measurement string,
	value interface{},
	tags map[string]string,
	t ...time.Time,
) {
	fields := make(map[string]interface{})
	fields["value"] = value

	if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
		return
	}

	ac.AddFields(measurement, fields, tags, t...)
	precision time.Duration
}

func (ac *accumulator) AddFields(
@@ -57,116 +49,71 @@ func (ac *accumulator) AddFields(
	tags map[string]string,
	t ...time.Time,
) {
	if len(fields) == 0 || len(measurement) == 0 {
	if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
		ac.metrics <- m
	}
}

func (ac *accumulator) AddGauge(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
		ac.metrics <- m
	}
}

func (ac *accumulator) AddCounter(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
		ac.metrics <- m
	}
}

// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {
	if err == nil {
		return
	}
	NErrors.Incr(1)
	//TODO suppress/throttle consecutive duplicate errors?
	log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}

	if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
		return
	}

	if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
		return
	}

	// Override measurement name if set
	if len(ac.inputConfig.NameOverride) != 0 {
		measurement = ac.inputConfig.NameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(ac.inputConfig.MeasurementPrefix) != 0 {
		measurement = ac.inputConfig.MeasurementPrefix + measurement
	}
	if len(ac.inputConfig.MeasurementSuffix) != 0 {
		measurement = measurement + ac.inputConfig.MeasurementSuffix
	}

	if tags == nil {
		tags = make(map[string]string)
	}
	// Apply plugin-wide tags if set
	for k, v := range ac.inputConfig.Tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set
	for k, v := range ac.defaultTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}

	result := make(map[string]interface{})
	for k, v := range fields {
		// Filter out any filtered fields
		if ac.inputConfig != nil {
			if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
				continue
			}
		}
		result[k] = v

		// Validate uint64 and float64 fields
		switch val := v.(type) {
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				result[k] = int64(val)
			} else {
				result[k] = int64(9223372036854775807)
			}
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				if ac.debug {
					log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
						"field, skipping",
						measurement, k)
				}
				continue
			}
		}
	}
	fields = nil
	if len(result) == 0 {
// SetPrecision takes two time.Duration objects. If the first is non-zero,
// it sets that as the precision. Otherwise, it takes the second argument
// as the order of time that the metrics should be rounded to, with the
// maximum being 1s.
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
	if precision > 0 {
		ac.precision = precision
		return
	}
	switch {
	case interval >= time.Second:
		ac.precision = time.Second
	case interval >= time.Millisecond:
		ac.precision = time.Millisecond
	case interval >= time.Microsecond:
		ac.precision = time.Microsecond
	default:
		ac.precision = time.Nanosecond
	}
}

func (ac accumulator) getTime(t []time.Time) time.Time {
	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}

	if ac.prefix != "" {
		measurement = ac.prefix + measurement
	}

	m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
		return
	}
	if ac.debug {
		fmt.Println("> " + m.String())
	}
	ac.metrics <- m
}

func (ac *accumulator) Debug() bool {
	return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
	ac.debug = debug
}

func (ac *accumulator) setDefaultTags(tags map[string]string) {
	ac.defaultTags = tags
}

func (ac *accumulator) addDefaultTag(key, value string) {
	ac.defaultTags[key] = value
	return timestamp.Round(ac.precision)
}

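The rounding rule above is easiest to see with concrete numbers. This standalone sketch applies `time.Round` the same way the accumulator's `getTime` does, reproducing the timestamps asserted in the tests that follow:

```go
package main

import (
	"fmt"
	"time"
)

// Demonstrates the precision rounding that SetPrecision/getTime perform:
// with precision unset, the collection interval picks the rounding unit.
func main() {
	ts := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)

	fmt.Println(ts.Round(time.Second).UnixNano())      // 1139572800000000000
	fmt.Println(ts.Round(time.Millisecond).UnixNano()) // 1139572800083000000
	fmt.Println(ts.Round(time.Microsecond).UnixNano()) // 1139572800082913000
	fmt.Println(ts.Round(time.Nanosecond).UnixNano())  // 1139572800082912748
}
```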
agent/accumulator_test.go (new file, 340 lines)
@@ -0,0 +1,340 @@
package agent

import (
	"bytes"
	"fmt"
	"log"
	"os"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAdd(t *testing.T) {
	now := time.Now()
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
		actual)
}

func TestAddFields(t *testing.T) {
	now := time.Now()
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	fields := map[string]interface{}{
		"usage": float64(99),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
	a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=99")

	testm = <-metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=99")

	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
		actual)
}

func TestAccAddError(t *testing.T) {
	errBuf := bytes.NewBuffer(nil)
	log.SetOutput(errBuf)
	defer log.SetOutput(os.Stderr)

	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	a.AddError(fmt.Errorf("foo"))
	a.AddError(fmt.Errorf("bar"))
	a.AddError(fmt.Errorf("baz"))

	errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
	assert.EqualValues(t, int64(3), NErrors.Get())
	require.Len(t, errs, 4) // 4 because of trailing newline
	assert.Contains(t, string(errs[0]), "TestPlugin")
	assert.Contains(t, string(errs[0]), "foo")
	assert.Contains(t, string(errs[1]), "TestPlugin")
	assert.Contains(t, string(errs[1]), "bar")
	assert.Contains(t, string(errs[2]), "TestPlugin")
	assert.Contains(t, string(errs[2]), "baz")
}

func TestAddNoIntervalWithPrecision(t *testing.T) {
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)
	a.SetPrecision(0, time.Second)

	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
		actual)
}

func TestAddDisablePrecision(t *testing.T) {
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	a.SetPrecision(time.Nanosecond, 0)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
		actual)
}

func TestAddNoPrecisionWithInterval(t *testing.T) {
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	a.SetPrecision(0, time.Second)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
		actual)
}

func TestDifferentPrecisions(t *testing.T) {
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	a.SetPrecision(0, time.Second)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm := <-a.metrics
	actual := testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
		actual)

	a.SetPrecision(0, time.Millisecond)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
		actual)

	a.SetPrecision(0, time.Microsecond)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
		actual)

	a.SetPrecision(0, time.Nanosecond)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
		actual)
}

func TestAddGauge(t *testing.T) {
	now := time.Now()
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	a.AddGauge("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddGauge("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddGauge("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")
	assert.Equal(t, testm.Type(), telegraf.Gauge)

	testm = <-metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")
	assert.Equal(t, testm.Type(), telegraf.Gauge)

	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
		actual)
	assert.Equal(t, testm.Type(), telegraf.Gauge)
}

func TestAddCounter(t *testing.T) {
	now := time.Now()
	metrics := make(chan telegraf.Metric, 10)
	defer close(metrics)
	a := NewAccumulator(&TestMetricMaker{}, metrics)

	a.AddCounter("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddCounter("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddCounter("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")
	assert.Equal(t, testm.Type(), telegraf.Counter)

	testm = <-metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")
	assert.Equal(t, testm.Type(), telegraf.Counter)

	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
		actual)
	assert.Equal(t, testm.Type(), telegraf.Counter)
}

type TestMetricMaker struct {
}

func (tm *TestMetricMaker) Name() string {
	return "TestPlugin"
}
func (tm *TestMetricMaker) MakeMetric(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	mType telegraf.ValueType,
	t time.Time,
) telegraf.Metric {
	switch mType {
	case telegraf.Untyped:
		if m, err := metric.New(measurement, tags, fields, t); err == nil {
			return m
		}
	case telegraf.Counter:
		if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil {
			return m
		}
	case telegraf.Gauge:
		if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil {
			return m
		}
	}
	return nil
}
agent/agent.go (346 lines)
@@ -1,19 +1,18 @@
package agent

import (
	cryptorand "crypto/rand"
	"fmt"
	"log"
	"math/big"
	"math/rand"
	"os"
	"runtime"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/internal/models"
	"github.com/influxdata/telegraf/selfstat"
)

// Agent runs telegraf and collects data based on the given config
@@ -27,40 +26,38 @@ func NewAgent(config *config.Config) (*Agent, error) {
		Config: config,
	}

	if a.Config.Agent.Hostname == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return nil, err
	if !a.Config.Agent.OmitHostname {
		if a.Config.Agent.Hostname == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return nil, err
			}

			a.Config.Agent.Hostname = hostname
		}

		a.Config.Agent.Hostname = hostname
		config.Tags["host"] = a.Config.Agent.Hostname
	}

	config.Tags["host"] = a.Config.Agent.Hostname

	return a, nil
}

// Connect connects to all configured outputs
func (a *Agent) Connect() error {
	for _, o := range a.Config.Outputs {
		o.Quiet = a.Config.Agent.Quiet

		switch ot := o.Output.(type) {
		case telegraf.ServiceOutput:
			if err := ot.Start(); err != nil {
				log.Printf("Service for output %s failed to start, exiting\n%s\n",
				log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
					o.Name, err.Error())
				return err
			}
		}

		if a.Config.Agent.Debug {
			log.Printf("Attempting connection to output: %s\n", o.Name)
		}
		log.Printf("D! Attempting connection to output: %s\n", o.Name)
		err := o.Output.Connect()
		if err != nil {
			log.Printf("Failed to connect to output %s, retrying in 15s, "+
			log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
				"error was '%s' \n", o.Name, err)
			time.Sleep(15 * time.Second)
			err = o.Output.Connect()
@@ -68,9 +65,7 @@ func (a *Agent) Connect() error {
			return err
		}
	}
		if a.Config.Agent.Debug {
			log.Printf("Successfully connected to output: %s\n", o.Name)
		}
		log.Printf("D! Successfully connected to output: %s\n", o.Name)
	}
	return nil
}
@@ -88,115 +83,94 @@ func (a *Agent) Close() error {
	return err
}

func panicRecover(input *internal_models.RunningInput) {
func panicRecover(input *models.RunningInput) {
	if err := recover(); err != nil {
		trace := make([]byte, 2048)
		runtime.Stack(trace, true)
		log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
			input.Name, err, trace)
		log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
		log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
			input.Name(), err, trace)
		log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
			"stack trace, configuration, and OS information: " +
			"https://github.com/influxdata/telegraf/issues/new")
	}
}

// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
	var wg sync.WaitGroup

	start := time.Now()
	counter := 0
	jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
	for _, input := range a.Config.Inputs {
		if input.Config.Interval != 0 {
			continue
		}

		wg.Add(1)
		counter++
		go func(input *internal_models.RunningInput) {
			defer panicRecover(input)
			defer wg.Done()

			acc := NewAccumulator(input.Config, metricC)
			acc.SetDebug(a.Config.Agent.Debug)
			acc.setDefaultTags(a.Config.Tags)

			if jitter != 0 {
				nanoSleep := rand.Int63n(jitter)
				d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
				if err != nil {
					log.Printf("Jittering collection interval failed for plugin %s",
						input.Name)
				} else {
					time.Sleep(d)
				}
			}

			if err := input.Input.Gather(acc); err != nil {
				log.Printf("Error in input [%s]: %s", input.Name, err)
			}

		}(input)
	}

	if counter == 0 {
		return nil
	}

	wg.Wait()

	elapsed := time.Since(start)
	if !a.Config.Agent.Quiet {
		log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
			a.Config.Agent.Interval.Duration, counter, elapsed)
	}
	return nil
}

// gatherSeparate runs the inputs that have been configured with their own
// gatherer runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
func (a *Agent) gatherer(
	shutdown chan struct{},
	input *internal_models.RunningInput,
	input *models.RunningInput,
	interval time.Duration,
	metricC chan telegraf.Metric,
) error {
) {
	defer panicRecover(input)

	ticker := time.NewTicker(input.Config.Interval)
	GatherTime := selfstat.RegisterTiming("gather",
		"gather_time_ns",
		map[string]string{"input": input.Config.Name},
	)

	acc := NewAccumulator(input, metricC)
	acc.SetPrecision(a.Config.Agent.Precision.Duration,
		a.Config.Agent.Interval.Duration)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		var outerr error
		internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)

		start := time.Now()

		acc := NewAccumulator(input.Config, metricC)
		acc.SetDebug(a.Config.Agent.Debug)
		acc.setDefaultTags(a.Config.Tags)

		if err := input.Input.Gather(acc); err != nil {
			log.Printf("Error in input [%s]: %s", input.Name, err)
		}

		gatherWithTimeout(shutdown, input, acc, interval)
		elapsed := time.Since(start)
		if !a.Config.Agent.Quiet {
			log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
				input.Config.Interval, input.Name, elapsed)
		}

		if outerr != nil {
			return outerr
		}
		GatherTime.Incr(elapsed.Nanoseconds())

		select {
		case <-shutdown:
			return nil
			return
		case <-ticker.C:
			continue
		}
	}
}

// gatherWithTimeout gathers from the given input, with the given timeout.
// when the given timeout is reached, gatherWithTimeout logs an error message
// but continues waiting for it to return. This is to avoid leaving behind
// hung processes, and to prevent re-calling the same hung process over and
// over.
func gatherWithTimeout(
	shutdown chan struct{},
	input *models.RunningInput,
	acc *accumulator,
	timeout time.Duration,
) {
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	done := make(chan error)
	go func() {
		done <- input.Input.Gather(acc)
	}()

	for {
		select {
		case err := <-done:
			if err != nil {
				log.Printf("E! ERROR in input [%s]: %s", input.Name(), err)
			}
			return
		case <-ticker.C:
			log.Printf("E! ERROR: input [%s] took longer to collect than "+
				"collection interval (%s)",
				input.Name(), timeout)
			continue
		case <-shutdown:
			return
		}
	}
}

// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
@@ -217,10 +191,13 @@ func (a *Agent) Test() error {
	}()

	for _, input := range a.Config.Inputs {
		acc := NewAccumulator(input.Config, metricC)
		acc.SetDebug(true)
		acc := NewAccumulator(input, metricC)
		acc.SetPrecision(a.Config.Agent.Precision.Duration,
			a.Config.Agent.Interval.Duration)
		input.SetTrace(true)
		input.SetDefaultTags(a.Config.Tags)

		fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
		fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
		if input.Config.Interval != 0 {
			fmt.Printf("* Internal: %s\n", input.Config.Interval)
		}
@@ -231,10 +208,10 @@ func (a *Agent) Test() error {

		// Special instructions for some inputs. cpu, for example, needs to be
		// run twice in order to return cpu usage percentages.
		switch input.Name {
		switch input.Name() {
		case "cpu", "mongodb", "procstat":
			time.Sleep(500 * time.Millisecond)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
			if err := input.Input.Gather(acc); err != nil {
				return err
			}
@@ -250,11 +227,11 @@ func (a *Agent) flush() {

	wg.Add(len(a.Config.Outputs))
	for _, o := range a.Config.Outputs {
		go func(output *internal_models.RunningOutput) {
		go func(output *models.RunningOutput) {
			defer wg.Done()
			err := output.Write()
			if err != nil {
				log.Printf("Error writing to output [%s]: %s\n",
				log.Printf("E! Error writing to output [%s]: %s\n",
					output.Name, err.Error())
			}
		}(o)
@@ -267,73 +244,97 @@ func (a *Agent) flush() {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
	// Inelegant, but this sleep is to allow the Gather threads to run, so that
	// the flusher will flush after metrics are collected.
	time.Sleep(time.Millisecond * 200)
	time.Sleep(time.Millisecond * 300)

	// create an output metric channel and a goroutine that continuously passes
	// each metric onto the output plugins & aggregators.
	outMetricC := make(chan telegraf.Metric, 100)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-shutdown:
				if len(outMetricC) > 0 {
					// keep going until outMetricC is flushed
					continue
				}
				return
			case m := <-outMetricC:
				// if dropOriginal is set to true, then we will only send this
				// metric to the aggregators, not the outputs.
				var dropOriginal bool
				if !m.IsAggregate() {
					for _, agg := range a.Config.Aggregators {
						if ok := agg.Add(m.Copy()); ok {
							dropOriginal = true
						}
					}
				}
				if !dropOriginal {
					for i, o := range a.Config.Outputs {
						if i == len(a.Config.Outputs)-1 {
							o.AddMetric(m)
						} else {
							o.AddMetric(m.Copy())
						}
					}
				}
			}
		}
	}()

	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)

	for {
		select {
		case <-shutdown:
			log.Println("Hang on, flushing any cached metrics before shutdown")
			log.Println("I! Hang on, flushing any cached metrics before shutdown")
			// wait for outMetricC to get flushed before flushing outputs
			wg.Wait()
			a.flush()
			return nil
		case <-ticker.C:
			internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
			a.flush()
		case m := <-metricC:
			for _, o := range a.Config.Outputs {
				o.AddMetric(m)
		case metric := <-metricC:
			// NOTE potential bottleneck here as we put each metric through the
			// processors serially.
			mS := []telegraf.Metric{metric}
			for _, processor := range a.Config.Processors {
				mS = processor.Apply(mS...)
			}
			for _, m := range mS {
				outMetricC <- m
			}
		}
	}
}

// jitterInterval applies the interval jitter to the flush interval using
// the crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
	var jitter int64
	outinterval := ininterval
	if injitter.Nanoseconds() != 0 {
		maxjitter := big.NewInt(injitter.Nanoseconds())
		if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
			jitter = j.Int64()
		}
		outinterval = time.Duration(jitter + ininterval.Nanoseconds())
	}

	if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
		log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
		outinterval = time.Duration(500 * time.Millisecond)
	}

	return outinterval
}

// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
	var wg sync.WaitGroup

	a.Config.Agent.FlushInterval.Duration = jitterInterval(
		a.Config.Agent.FlushInterval.Duration,
		a.Config.Agent.FlushJitter.Duration)

	log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
	log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
		"Flush Interval:%s \n",
		a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
		a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

	// channel shared between all input threads for accumulating metrics
	metricC := make(chan telegraf.Metric, 10000)
	metricC := make(chan telegraf.Metric, 100)

	// Start all ServicePlugins
	for _, input := range a.Config.Inputs {
		// Start service of any ServicePlugins
		input.SetDefaultTags(a.Config.Tags)
		switch p := input.Input.(type) {
		case telegraf.ServiceInput:
			acc := NewAccumulator(input.Config, metricC)
			acc.SetDebug(a.Config.Agent.Debug)
			acc.setDefaultTags(a.Config.Tags)
			acc := NewAccumulator(input, metricC)
			// Service input plugins should set their own precision of their
			// metrics.
			acc.SetPrecision(time.Nanosecond, 0)
			if err := p.Start(acc); err != nil {
				log.Printf("Service for input %s failed to start, exiting\n%s\n",
					input.Name, err.Error())
				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
					input.Name(), err.Error())
				return err
			}
			defer p.Stop()
@@ -345,43 +346,40 @@ func (a *Agent) Run(shutdown chan struct{}) error {
		i := int64(a.Config.Agent.Interval.Duration)
		time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
	}
	ticker := time.NewTicker(a.Config.Agent.Interval.Duration)

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := a.flusher(shutdown, metricC); err != nil {
			log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
			log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
			close(shutdown)
		}
	}()

	wg.Add(len(a.Config.Aggregators))
	for _, aggregator := range a.Config.Aggregators {
		go func(agg *models.RunningAggregator) {
			defer wg.Done()
			acc := NewAccumulator(agg, metricC)
			acc.SetPrecision(a.Config.Agent.Precision.Duration,
				a.Config.Agent.Interval.Duration)
			agg.Run(acc, shutdown)
		}(aggregator)
	}

	wg.Add(len(a.Config.Inputs))
	for _, input := range a.Config.Inputs {
		// Special handling for inputs that have their own collection interval
		// configured. Default intervals are handled below with gatherParallel
		interval := a.Config.Agent.Interval.Duration
		// overwrite global interval if this plugin has its own.
		if input.Config.Interval != 0 {
			wg.Add(1)
			go func(input *internal_models.RunningInput) {
				defer wg.Done()
				if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
					log.Printf(err.Error())
				}
			}(input)
			interval = input.Config.Interval
		}
		go func(in *models.RunningInput, interv time.Duration) {
			defer wg.Done()
			a.gatherer(shutdown, in, interv, metricC)
		}(input, interval)
	}

	defer wg.Wait()

	for {
		if err := a.gatherParallel(metricC); err != nil {
			log.Printf(err.Error())
		}

		select {
		case <-shutdown:
			return nil
		case <-ticker.C:
			continue
		}
	}
	wg.Wait()
	return nil
}

@@ -1,9 +1,7 @@
package agent

import (
	"github.com/stretchr/testify/assert"
	"testing"
	"time"

	"github.com/influxdata/telegraf/internal/config"

@@ -11,8 +9,18 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/all"
	// needing to load the outputs
	_ "github.com/influxdata/telegraf/plugins/outputs/all"

	"github.com/stretchr/testify/assert"
)

func TestAgent_OmitHostname(t *testing.T) {
	c := config.NewConfig()
	c.Agent.OmitHostname = true
	_, err := NewAgent(c)
	assert.NoError(t, err)
	assert.NotContains(t, c.Tags, "host")
}

func TestAgent_LoadPlugin(t *testing.T) {
	c := config.NewConfig()
	c.InputFilters = []string{"mysql"}
@@ -101,75 +109,3 @@ func TestAgent_LoadOutput(t *testing.T) {
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))
}

func TestAgent_ZeroJitter(t *testing.T) {
	flushinterval := jitterInterval(time.Duration(10*time.Second),
		time.Duration(0*time.Second))

	actual := flushinterval.Nanoseconds()
	exp := time.Duration(10 * time.Second).Nanoseconds()

	if actual != exp {
		t.Errorf("Actual %v, expected %v", actual, exp)
	}
}

func TestAgent_ZeroInterval(t *testing.T) {
	min := time.Duration(500 * time.Millisecond).Nanoseconds()
	max := time.Duration(5 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(0*time.Second),
			time.Duration(5*time.Second))
		actual := flushinterval.Nanoseconds()

		if actual > max {
			t.Errorf("Didn't expect interval %d to be > %d", actual, max)
			break
		}
		if actual < min {
			t.Errorf("Didn't expect interval %d to be < %d", actual, min)
			break
		}
	}
}

func TestAgent_ZeroBoth(t *testing.T) {
	flushinterval := jitterInterval(time.Duration(0*time.Second),
		time.Duration(0*time.Second))

	actual := flushinterval
	exp := time.Duration(500 * time.Millisecond)

	if actual != exp {
		t.Errorf("Actual %v, expected %v", actual, exp)
	}
}

func TestAgent_JitterMax(t *testing.T) {
	max := time.Duration(32 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(30*time.Second),
			time.Duration(2*time.Second))
		actual := flushinterval.Nanoseconds()
		if actual > max {
			t.Errorf("Didn't expect interval %d to be > %d", actual, max)
			break
		}
	}
}

func TestAgent_JitterMin(t *testing.T) {
	min := time.Duration(30 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(30*time.Second),
			time.Duration(2*time.Second))
		actual := flushinterval.Nanoseconds()
		if actual < min {
			t.Errorf("Didn't expect interval %d to be < %d", actual, min)
			break
		}
	}
}
aggregator.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package telegraf

// Aggregator is an interface for implementing an Aggregator plugin.
// the RunningAggregator wraps this interface and guarantees that
// Add, Push, and Reset can not be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
	// SampleConfig returns the default configuration of the Input.
	SampleConfig() string

	// Description returns a one-sentence description on the Input.
	Description() string

	// Add the metric to the aggregator.
	Add(in Metric)

	// Push pushes the current aggregates to the accumulator.
	Push(acc Accumulator)

	// Reset resets the aggregators caches and aggregates.
	Reset()
}
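To make the contract concrete, here is a minimal count-style aggregator sketch against this interface. The plugin name is hypothetical, it assumes `telegraf.Metric` exposes a `Name()` accessor, and a real plugin would also register itself so it can be configured:

```go
package count

import "github.com/influxdata/telegraf"

// Count is a hypothetical aggregator that counts metrics per measurement
// within each period, then emits the totals on Push.
type Count struct {
	counts map[string]int64
}

func NewCount() *Count {
	return &Count{counts: make(map[string]int64)}
}

func (c *Count) SampleConfig() string {
	return `
  ## General Aggregator Arguments:
  period = "30s"
  drop_original = false
`
}

func (c *Count) Description() string {
	return "Count the number of metrics seen for each measurement per period."
}

// Add is called for every metric in the window; no locking is needed
// because the RunningAggregator serializes Add/Push/Reset.
func (c *Count) Add(in telegraf.Metric) {
	c.counts[in.Name()]++
}

// Push emits one aggregate metric per measurement seen this period.
func (c *Count) Push(acc telegraf.Accumulator) {
	for name, n := range c.counts {
		acc.AddFields(name+"_count", map[string]interface{}{"count": n}, map[string]string{})
	}
}

// Reset clears state at the start of the next period.
func (c *Count) Reset() {
	c.counts = make(map[string]int64)
}
```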
@@ -4,17 +4,14 @@ machine:
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.5.3 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.5.3.linux-amd64.tar.gz
    - go version | grep 1.7.4 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.7.4.linux-amd64.tar.gz
    - go version

dependencies:
  override:
    - docker info
  post:
    - gem install fpm
    - sudo apt-get install -y rpm python-boto

test:
  override:

@@ -6,18 +6,24 @@ import (
	"log"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"

	"github.com/influxdata/telegraf/agent"
	"github.com/influxdata/telegraf/internal/config"

	"github.com/influxdata/telegraf/logger"
	_ "github.com/influxdata/telegraf/plugins/aggregators/all"
	"github.com/influxdata/telegraf/plugins/inputs"
	_ "github.com/influxdata/telegraf/plugins/inputs/all"
	"github.com/influxdata/telegraf/plugins/outputs"
	_ "github.com/influxdata/telegraf/plugins/outputs/all"
	_ "github.com/influxdata/telegraf/plugins/processors/all"
	"github.com/kardianos/service"
)

var fDebug = flag.Bool("debug", false,
	"show metrics as they're generated to stdout")
	"turn on debug logging")
var fQuiet = flag.Bool("quiet", false,
	"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -30,200 +36,244 @@ var fSampleConfig = flag.Bool("sample-config", false,
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fInputFilters = flag.String("input-filter", "",
	"filter the inputs to enable, separator is :")
var fInputList = flag.Bool("input-list", false,
	"print available input plugins.")
var fOutputFilters = flag.String("output-filter", "",
	"filter the outputs to enable, separator is :")
var fOutputList = flag.Bool("output-list", false,
	"print available output plugins.")
var fAggregatorFilters = flag.String("aggregator-filter", "",
	"filter the aggregators to enable, separator is :")
var fProcessorFilters = flag.String("processor-filter", "",
	"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
	"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
	"operate on the service")

var fInputFiltersLegacy = flag.String("filter", "",
	"filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
	"filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
	"directory containing additional *.conf files")

// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
	version string
	commit  string
	branch  string
)

// Telegraf version
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string

func init() {
	// If commit or branch are not set, make that clear.
	if commit == "" {
		commit = "unknown"
	}
	if branch == "" {
		branch = "unknown"
	}
}

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf <flags>
  telegraf [commands|flags]

The flags are:
The commands & flags are:

  -config <file>     configuration file to load
  -test              gather metrics once, print them to stdout, and exit
  -sample-config     print out full sample configuration to stdout
  -config-directory  directory containing additional *.conf files
  -input-filter      filter the input plugins to enable, separator is :
  -output-filter     filter the output plugins to enable, separator is :
  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug             print metrics as they're generated to stdout
  -quiet             run in quiet mode
  -version           print the version to stdout

  config              print out full sample configuration to stdout
  version             print the version to stdout

  --config <file>     configuration file to load
  --test              gather metrics once, print them to stdout, and exit
  --config-directory  directory containing additional *.conf files
  --input-filter      filter the input plugins to enable, separator is :
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --quiet             run in quiet mode

Examples:

  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test
  telegraf --config telegraf.conf -test

  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
`

func main() {
var stop chan struct{}

var srvc service.Service

type program struct{}

func reloadLoop(stop chan struct{}, s service.Service) {
	defer func() {
		if service.Interactive() {
			os.Exit(0)
		}
		return
	}()
	reload := make(chan bool, 1)
	reload <- true
	for <-reload {
		reload <- false
		flag.Usage = func() { usageExit(0) }
		flag.Parse()

		if flag.NFlag() == 0 {
			usageExit(0)
		}
		args := flag.Args()

		var inputFilters []string
		if *fInputFiltersLegacy != "" {
			inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
			inputFilters = strings.Split(":"+inputFilter+":", ":")
		}
		if *fInputFilters != "" {
			inputFilter := strings.TrimSpace(*fInputFilters)
			inputFilters = strings.Split(":"+inputFilter+":", ":")
		}

		var outputFilters []string
		if *fOutputFiltersLegacy != "" {
			outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
			outputFilters = strings.Split(":"+outputFilter+":", ":")
		}
		if *fOutputFilters != "" {
			outputFilter := strings.TrimSpace(*fOutputFilters)
			outputFilters = strings.Split(":"+outputFilter+":", ":")
		}

		if *fVersion {
			v := fmt.Sprintf("Telegraf - Version %s", Version)
			fmt.Println(v)
			return
		var aggregatorFilters []string
		if *fAggregatorFilters != "" {
			aggregatorFilter := strings.TrimSpace(*fAggregatorFilters)
			aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":")
		}
		var processorFilters []string
		if *fProcessorFilters != "" {
			processorFilter := strings.TrimSpace(*fProcessorFilters)
			processorFilters = strings.Split(":"+processorFilter+":", ":")
		}

		if *fSampleConfig {
			config.PrintSampleConfig(inputFilters, outputFilters)
			return
		if len(args) > 0 {
			switch args[0] {
			case "version":
				fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
				return
			case "config":
				config.PrintSampleConfig(
					inputFilters,
					outputFilters,
					aggregatorFilters,
					processorFilters,
				)
				return
			}
		}

		if *fUsage != "" {
		// switch for flags which just do something and exit immediately
		switch {
		case *fOutputList:
			fmt.Println("Available Output Plugins:")
			for k, _ := range outputs.Outputs {
				fmt.Printf("  %s\n", k)
			}
			return
		case *fInputList:
			fmt.Println("Available Input Plugins:")
			for k, _ := range inputs.Inputs {
				fmt.Printf("  %s\n", k)
			}
			return
		case *fVersion:
			fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
			return
		case *fSampleConfig:
			config.PrintSampleConfig(
				inputFilters,
				outputFilters,
				aggregatorFilters,
				processorFilters,
			)
			return
		case *fUsage != "":
			if err := config.PrintInputConfig(*fUsage); err != nil {
				if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
					log.Fatalf("%s and %s", err, err2)
					log.Fatalf("E! %s and %s", err, err2)
				}
			}
			return
		}

		var (
			c   *config.Config
			err error
		)

		if *fConfig != "" {
			c = config.NewConfig()
			c.OutputFilters = outputFilters
			c.InputFilters = inputFilters
			err = c.LoadConfig(*fConfig)
			if err != nil {
				log.Fatal(err)
			}
		} else {
			fmt.Println("You must specify a config file. See telegraf --help")
			os.Exit(1)
		}

		if *fConfigDirectoryLegacy != "" {
			err = c.LoadDirectory(*fConfigDirectoryLegacy)
			if err != nil {
				log.Fatal(err)
			}
		// If no other options are specified, load the config file and run.
		c := config.NewConfig()
		c.OutputFilters = outputFilters
		c.InputFilters = inputFilters
		err := c.LoadConfig(*fConfig)
		if err != nil {
			log.Fatal("E! " + err.Error())
		}

		if *fConfigDirectory != "" {
			err = c.LoadDirectory(*fConfigDirectory)
			if err != nil {
				log.Fatal(err)
				log.Fatal("E! " + err.Error())
			}
		}
		if len(c.Outputs) == 0 {
			log.Fatalf("Error: no outputs found, did you provide a valid config file?")
			log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
		}
		if len(c.Inputs) == 0 {
			log.Fatalf("Error: no inputs found, did you provide a valid config file?")
			log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
		}

		ag, err := agent.NewAgent(c)
		if err != nil {
			log.Fatal(err)
			log.Fatal("E! " + err.Error())
		}

		if *fDebug {
			ag.Config.Agent.Debug = true
		}

		if *fQuiet {
			ag.Config.Agent.Quiet = true
		}
		// Setup logging
		logger.SetupLogging(
			ag.Config.Agent.Debug || *fDebug,
			ag.Config.Agent.Quiet || *fQuiet,
			ag.Config.Agent.Logfile,
		)

		if *fTest {
			err = ag.Test()
			if err != nil {
				log.Fatal(err)
				log.Fatal("E! " + err.Error())
			}
			return
		}

		err = ag.Connect()
		if err != nil {
			log.Fatal(err)
			log.Fatal("E! " + err.Error())
		}

		shutdown := make(chan struct{})
		signals := make(chan os.Signal)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
		go func() {
			sig := <-signals
			if sig == os.Interrupt {
				close(shutdown)
			}
			if sig == syscall.SIGHUP {
				log.Printf("Reloading Telegraf config\n")
				<-reload
				reload <- true
			select {
			case sig := <-signals:
				if sig == os.Interrupt {
					close(shutdown)
				}
				if sig == syscall.SIGHUP {
					log.Printf("I! Reloading Telegraf config\n")
					<-reload
					reload <- true
					close(shutdown)
				}
			case <-stop:
				close(shutdown)
			}
		}()

		log.Printf("Starting Telegraf (version %s)\n", Version)
		log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
		log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
		log.Printf("Tags enabled: %s", c.ListTags())
		log.Printf("I! Starting Telegraf (version %s)\n", version)
		log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
		log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
		log.Printf("I! Tags enabled: %s", c.ListTags())

		if *fPidfile != "" {
			f, err := os.Create(*fPidfile)
			if err != nil {
				log.Fatalf("Unable to create pidfile: %s", err)
				log.Fatalf("E! Unable to create pidfile: %s", err)
			}

			fmt.Fprintf(f, "%d\n", os.Getpid())
@@ -239,3 +289,56 @@ func usageExit(rc int) {
	fmt.Println(usage)
	os.Exit(rc)
}

func (p *program) Start(s service.Service) error {
	srvc = s
	go p.run()
	return nil
}
func (p *program) run() {
	stop = make(chan struct{})
	reloadLoop(stop, srvc)
}
func (p *program) Stop(s service.Service) error {
	close(stop)
	return nil
}

func main() {
	flag.Usage = func() { usageExit(0) }
	flag.Parse()
	if runtime.GOOS == "windows" {
		svcConfig := &service.Config{
			Name:        "telegraf",
			DisplayName: "Telegraf Data Collector Service",
			Description: "Collects data using a series of plugins and publishes it to" +
				"another series of plugins.",
			Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
		}

		prg := &program{}
		s, err := service.New(prg, svcConfig)
		if err != nil {
			log.Fatal("E! " + err.Error())
		}
		// Handle the -service flag here to prevent any issues with tooling that
		// may not have an interactive session, e.g. installing from Ansible.
		if *fService != "" {
			if *fConfig != "" {
				(*svcConfig).Arguments = []string{"-config", *fConfig}
			}
			err := service.Control(s, *fService)
			if err != nil {
				log.Fatal("E! " + err.Error())
			}
		} else {
			err = s.Run()
			if err != nil {
				log.Println("E! " + err.Error())
			}
		}
	} else {
		stop = make(chan struct{})
		reloadLoop(stop, nil)
	}
}

docs/AGGREGATORS_AND_PROCESSORS.md (new file, 59 lines)
@@ -0,0 +1,59 @@

# Telegraf Aggregator & Processor Plugins

As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugins.

These plugins sit in-between Input & Output plugins, aggregating and processing
metrics as they pass through Telegraf:

```
┌───────────┐
│           │
│    CPU    │───┐
│           │   │
└───────────┘   │
                │
┌───────────┐   │                                              ┌───────────┐
│           │   │                                              │           │
│  Memory   │───┤                                          ┌──▶│ InfluxDB  │
│           │   │                                          │   │           │
└───────────┘   │   ┌─────────────┐     ┌─────────────┐    │   └───────────┘
                │   │             │     │Aggregate    │    │
┌───────────┐   │   │Process      │     │ - mean      │    │   ┌───────────┐
│           │   │   │ - transform │     │ - quantiles │    │   │           │
│   MySQL   │───┼──▶│ - decorate  │────▶│ - min/max   │───┼──▶│   File    │
│           │   │   │ - filter    │     │ - count     │    │   │           │
└───────────┘   │   │             │     │             │    │   └───────────┘
                │   └─────────────┘     └─────────────┘    │
┌───────────┐   │                                          │   ┌───────────┐
│           │   │                                          │   │           │
│   SNMP    │───┤                                          └──▶│   Kafka   │
│           │   │                                              │           │
└───────────┘   │                                              └───────────┘
                │
┌───────────┐   │
│           │   │
│  Docker   │───┘
│           │
└───────────┘
```

Both Aggregators and Processors analyze metrics as they pass through Telegraf.

**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.
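To make the distinction concrete, a minimal processor sketch is shown below. It assumes the `Processor` interface pairs `SampleConfig`/`Description` with the variadic `Apply` method the agent's flusher calls (`processor.Apply(mS...)` in the agent code above); the package layout is illustrative:

```go
package printer

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// Printer is roughly what the printer processor referenced in the plugin
// list above boils down to: it prints every metric and passes it on.
type Printer struct{}

func (p *Printer) SampleConfig() string {
	return ""
}

func (p *Printer) Description() string {
	return "Print all metrics that pass through this filter."
}

// Apply is called with each batch of metrics in the stream; a processor
// emits its results immediately instead of windowing them like an
// aggregator does.
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
	for _, m := range in {
		fmt.Println(m.String())
	}
	return in
}
```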

**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
are typically for emitting new _aggregate_ metrics, such as a running mean,
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
plugins are configured with a `period`. The `period` is the size of the window
of metrics that each _aggregate_ represents. In other words, the emitted
_aggregate_ metric will be the aggregated value of the past `period` seconds.
Since many users will only care about their aggregates and not every single metric
gathered, there is also a `drop_original` argument, which tells Telegraf to only
emit the aggregates and not the original metrics.
|
||||
**NOTE** That since aggregators only aggregate metrics within their period, that
|
||||
historical data is not supported. In other words, if your metric timestamp is more
|
||||
than `now() - period` in the past, it will not be aggregated. If this is a feature
|
||||
that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992)
|
||||
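As a minimal sketch of the `period` and `drop_original` options together (using
the `minmax` aggregator and `file` output that appear in the configuration
examples later in this changeset):

```toml
[[aggregators.minmax]]
  ## emit one aggregate metric for each 30s window of data
  period = "30s"
  ## drop the raw metrics and keep only the aggregates
  drop_original = true

[[outputs.file]]
  files = ["stdout"]
```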
@@ -1,30 +1,49 @@
# Telegraf Configuration

You can see the latest config file with all available plugins here:
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)

## Generating a Configuration File

A default Telegraf config file can be generated using the -sample-config flag:
`telegraf -sample-config > telegraf.conf`
A default Telegraf config file can be auto-generated by telegraf:

```
telegraf config > telegraf.conf
```

To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`
--input-filter and --output-filter flags:

## `[global_tags]` Configuration
```
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
```

Global tags can be specific in the `[global_tags]` section of the config file in
key="value" format. All metrics being gathered on this host will be tagged
## Environment Variables

Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
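A short sketch of this convention (the environment variable names here are
hypothetical, not part of this changeset):

```toml
[agent]
  ## a string value: the variable must be quoted
  hostname = "$HOSTNAME_OVERRIDE"
  ## a number: the variable is left bare
  metric_batch_size = $BATCH_SIZE
  ## a boolean: also left bare
  debug = $DEBUG_MODE
```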
# Global Tags

Global tags can be specified in the `[global_tags]` section of the config file
in key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.

## `[agent]` Configuration
## Agent Configuration

Telegraf has a few options you can configure under the `agent` section of the
Telegraf has a few options you can configure under the `[agent]` section of the
config.

* **interval**: Default data collection interval for all inputs
* **round_interval**: Rounds collection interval to 'interval'
  ie, if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_batch_size**: Telegraf will send metrics to output in batch of at
  most metric_batch_size metrics.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
  for each output, and will flush this buffer on a successful write.
  This should be a multiple of metric_batch_size and should not be less
  than 2 times metric_batch_size.
* **collection_jitter**: Collection jitter is used to jitter
  the collection by a random amount.
  Each plugin will sleep for a random time within jitter before collecting.
@@ -37,26 +56,63 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
  This is primarily to avoid
  large write spikes for users running a large number of telegraf instances.
  ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**: By default, precision will be set to the same timestamp order
  as the collection interval, with the maximum being 1s. Precision will NOT
  be used for service inputs, such as logparser and statsd. Valid values are
  "ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stdout.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
* **omit_hostname**: If true, do not set the "host" tag in the telegraf agent.

## `[inputs.xxx]` Configuration
## Input Configuration

There are some configuration options that are configurable per input:
The following config parameters are available for all inputs:

* **interval**: How often to gather this metric. Normal plugins use a single
  global interval, but if one particular input should be run less or more often,
  you can configure that here.
* **name_override**: Override the base name of the measurement.
  (Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
  global interval, but if one particular input should be run less or more often,
  you can configure that here.

#### Input Filters
## Output Configuration

There are also filters that can be configured per input:
There are no generic configuration options available for all outputs.

## Aggregator Configuration

The following config parameters are available for all aggregators:

* **period**: The period on which to flush & clear each aggregator. All metrics
  that are sent with timestamps outside of this period will be ignored by the
  aggregator.
* **delay**: The delay before each aggregator is flushed. This controls how long
  aggregators wait for metrics to arrive from input plugins, in the case that
  aggregators are flushing and inputs are gathering on the same interval.
* **drop_original**: If true, the original metric will be dropped by the
  aggregator and will not get sent to the output plugins.
* **name_override**: Override the base name of the measurement.
  (Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.

## Processor Configuration

The following config parameters are available for all processors:

* **order**: This is the order in which the processor(s) get executed. If this
  is not specified then processor execution order will be random
  (a short sketch follows this list).
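A minimal sketch of the `order` option (the `printer` processor is used purely
for illustration):

```toml
[[processors.printer]]
  ## run this processor first when several processors are configured
  order = 1
```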
#### Measurement Filtering

Filters can be configured per input, output, processor, or aggregator,
see below for examples.

* **namepass**: An array of strings that is used to filter metrics generated by the
  current input. Each string in the array is tested as a glob match against
@@ -64,13 +120,25 @@ measurement names and if it matches, the field is emitted.
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
  current input. Each string in the array is tested as a glob match against field names
  and if it matches, the field is emitted.
  and if it matches, the field is emitted. fieldpass is not available for outputs.
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
  fielddrop is not available for outputs.
* **tagpass**: tag names and arrays of strings that are used to filter
  measurements by the current input. Each string in the array is tested as a glob
  match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
  emitted. This is tested on measurements that have passed the tagpass test.
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
  As opposed to tagdrop, which will drop an entire measurement based on its
  tags, tagexclude simply strips the given tag keys from the measurement. This
  can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
  as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
  the tag keys in the final measurement.

**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.

#### Input Configuration Examples

@@ -97,16 +165,20 @@ fields which begin with `time_`.
  percpu = true
  totalcpu = false
  # filter all fields beginning with 'time_'
  drop = ["time_*"]
  fielddrop = ["time_*"]
```

#### Input Config: tagpass and tagdrop

**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.

```toml
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
  fielddrop = ["cpu_time"]
  # Don't collect CPU data for cpu6 & cpu7
  [inputs.cpu.tagdrop]
    cpu = [ "cpu6", "cpu7" ]

@@ -141,12 +213,26 @@ fields which begin with `time_`.
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namedrop = ["container_"]
  namedrop = ["container_*"]

# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namepass = ["rest_client_"]
  namepass = ["rest_client_*"]
```

#### Input Config: taginclude and tagexclude

```toml
# Only include the "cpu" tag in the measurements for the cpu plugin.
[[inputs.cpu]]
  percpu = true
  totalcpu = true
  taginclude = ["cpu"]

# Exclude the "fstype" tag from the measurements for the disk plugin.
[[inputs.disk]]
  tagexclude = ["fstype"]
```

#### Input config: prefix, suffix, and override

@@ -174,6 +260,9 @@ This will emit measurements with the name `foobar`

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`

NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the
plugin definition.

```toml
[[inputs.cpu]]
  percpu = false

@@ -199,17 +288,10 @@ to avoid measurement collisions:
  percpu = true
  totalcpu = false
  name_override = "percpu_usage"
  drop = ["cpu_time*"]
  fielddrop = ["cpu_time*"]
```

## `[outputs.xxx]` Configuration

Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable options as inputs
(namepass, namedrop, tagpass, tagdrop)
#### Output Configuration Examples:

```toml
[[outputs.influxdb]]

@@ -234,3 +316,39 @@ Outputs also support the same configurable options as inputs
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
```

#### Aggregator Configuration Examples:

This will collect and emit the min/max of the system load1 metric every
30s, dropping the originals.

```toml
[[inputs.system]]
  fieldpass = ["load1"] # collects system load1 metric.

[[aggregators.minmax]]
  period = "30s"        # send & clear the aggregate every 30s.
  drop_original = true  # drop the original metrics.

[[outputs.file]]
  files = ["stdout"]
```

This will collect and emit the min/max of the swap metrics every
30s, dropping the originals. The aggregator will not be applied
to the system load metrics due to the `namepass` parameter.

```toml
[[inputs.swap]]

[[inputs.system]]
  fieldpass = ["load1"] # collects system load1 metric.

[[aggregators.minmax]]
  period = "30s"        # send & clear the aggregate every 30s.
  drop_original = true  # drop the original metrics.
  namepass = ["swap"]   # only "pass" swap metrics through the aggregator.

[[outputs.file]]
  files = ["stdout"]
```
@@ -1,5 +1,13 @@
# Telegraf Input Data Formats

Telegraf is able to parse the following input data formats into metrics:

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

@@ -31,7 +39,7 @@ example, in the exec plugin:
  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

@@ -43,7 +51,7 @@ example, in the exec plugin:
Each data_format has an additional set of configuration options available, which
I'll go over below.

## Influx:
# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are parsed directly into Telegraf metrics.

@@ -58,23 +66,28 @@ metrics are parsed directly into Telegraf metrics.
  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

## JSON:
# JSON:

The JSON data format flattens JSON into metric _fields_. For example, this JSON:
The JSON data format flattens JSON into metric _fields_.
NOTE: Only numerical values are converted to fields, and they are converted
into a float. Strings are ignored unless specified as a tag_key (see below).

So for example, this JSON:

```json
{
    "a": 5,
    "b": {
        "c": 6
    }
    },
    "ignored": "I'm a string"
}
```

@@ -103,7 +116,7 @@ For example, if you had this configuration:
  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

@@ -134,62 +147,191 @@ Your Telegraf metrics would get tagged with "my_tag_1"
exec_mycollector,my_tag_1=foo a=5,b_c=6
```

## Graphite:
If the JSON data is an array, then each element of the array is parsed with the configured settings.
Each resulting metric will be output with the same timestamp.

For example, given the following configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## List of tag names to extract from top-level of JSON server response
  tag_keys = [
    "my_tag_1",
    "my_tag_2"
  ]
```

with this JSON output from a command:

```json
[
    {
        "a": 5,
        "b": {
            "c": 6
        },
        "my_tag_1": "foo",
        "my_tag_2": "baz"
    },
    {
        "a": 7,
        "b": {
            "c": 8
        },
        "my_tag_1": "bar",
        "my_tag_2": "baz"
    }
]
```

Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"

```
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
```

# Value:

The "value" data format translates single values into Telegraf metrics. This
is done by assigning a measurement name and setting a single field ("value")
as the parsed metric.

#### Value Configuration:

You **must** tell Telegraf what type of metric to collect by using the
`data_type` configuration option. Available options are:

1. integer
2. float or long
3. string
4. boolean

**Note:** It is also recommended that you set `name_override` to a measurement
name that makes sense for your metric, otherwise it will just be set to the
name of the plugin.

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["cat /proc/sys/kernel/random/entropy_avail"]

  ## override the default metric name of "exec"
  name_override = "entropy_available"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "value"
  data_type = "integer" # required
```

# Graphite:

The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags. For
more advanced options, Telegraf supports specifying "templates" to translate
telegraf measurement names, with a single value field, and without any tags.
By default, the separator is left as ".", but this can be changed using the
"separator" argument. For more advanced options,
Telegraf supports specifying "templates" to translate
graphite buckets into Telegraf metrics.

#### Separator:

You can specify a separator to use for the parsed metrics.
By default, it will leave the metrics with a "." separator.
Setting `separator = "_"` will translate:
Templates are of the form:

```
cpu.usage.idle 99
=> cpu_usage_idle value=99
"host.mytag.mytag.measurement.measurement.field*"
```

#### Measurement/Tag Templates:
Where the following keywords exist:

1. `measurement`: specifies that this section of the graphite bucket corresponds
   to the measurement name. This can be specified multiple times.
2. `field`: specifies that this section of the graphite bucket corresponds
   to the field name. This can be specified multiple times.
3. `measurement*`: specifies that all remaining elements of the graphite bucket
   correspond to the measurement name.
4. `field*`: specifies that all remaining elements of the graphite bucket
   correspond to the field name.

Any part of the template that is not a keyword is treated as a tag key. This
can also be specified multiple times.

NOTE: `field*` cannot be used in conjunction with `measurement*`!

#### Measurement & Tag Templates:

The most basic template is to specify a single transformation to apply to all
incoming metrics. _measurement_ is a special keyword that tells Telegraf which
parts of the graphite bucket to combine into the measurement name. It can have a
trailing `*` to indicate that the remainder of the metric should be used.
Other words are considered tag keys. So the following template:
incoming metrics. So the following template:

```toml
templates = [
    "region.measurement*"
    "region.region.measurement*"
]
```

would result in the following Graphite -> Telegraf transformation.

```
us-west.cpu.load 100
=> cpu.load,region=us-west value=100
us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```

Multiple templates can also be specified, but these should be differentiated
using _filters_ (see below for more details)

```toml
templates = [
    "*.*.* region.region.measurement",        # <- all 3-part measurements will match this one.
    "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
]
```

#### Field Templates:

There is also a _field_ keyword, which can only be specified once.
The field keyword tells Telegraf to give the metric that field name.
So the following template:

```toml
separator = "_"
templates = [
    "measurement.measurement.field.region"
    "measurement.measurement.field.field.region"
]
```

would result in the following Graphite -> Telegraf transformation.

```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west idle=100
cpu.usage.idle.percent.eu-east 100
=> cpu_usage,region=eu-east idle_percent=100
```

The field key can also be derived from all remaining elements of the graphite
bucket by specifying `field*`:

```toml
separator = "_"
templates = [
    "measurement.measurement.region.field*"
]
```

which would result in the following Graphite -> Telegraf transformation.

```
cpu.usage.eu-east.idle.percentage 100
=> cpu_usage,region=eu-east idle_percentage=100
```

#### Filter Templates:
@@ -207,8 +349,8 @@ templates = [
which would result in the following transformation:

```
cpu.load.us-west 100
=> cpu_load,region=us-west value=100
cpu.load.eu-east 100
=> cpu_load,region=eu-east value=100

mem.cached.localhost 256
=> mem_cached,host=localhost value=256
@@ -230,8 +372,8 @@ templates = [
would result in the following Graphite -> Telegraf transformation.

```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west,datacenter=1a idle=100
cpu.usage.idle.eu-east 100
=> cpu_usage,region=eu-east,datacenter=1a idle=100
```

There are many more options available,

@@ -247,7 +389,7 @@ There are many more options available,
  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite" (line-protocol)
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

@@ -262,13 +404,37 @@ There are many more options available,
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support below format:
  ## 1. filter + template
  ## 2. filter + template + extra tag
  ## 2. filter + template + extra tag(s)
  ## 3. filter + template with field key
  ## 4. default template
  templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=us-west,agent=sensu",
    "stats.* .host.measurement* region=eu-east,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
  ]
```

# Nagios:

There are no additional configuration options for Nagios line-protocol. The
metrics are parsed directly into Telegraf metrics.

Note: the Nagios input data format is only supported by the `exec` input plugin.

#### Nagios Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "nagios"
```
@@ -1,5 +1,11 @@
# Telegraf Output Data Formats

Telegraf is able to serialize metrics into the following output data formats:

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

@@ -29,7 +35,7 @@ config option, for example, in the `file` output plugin:
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout"]

  ## Data format to output. This can be "influx" or "graphite"
  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md

@@ -41,34 +47,46 @@ config option, for example, in the `file` output plugin:
Each data_format has an additional set of configuration options available, which
I'll go over below.

## Influx:
# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.

#### Influx Configuration:
### Influx Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output. This can be "influx" or "graphite"
  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```

## Graphite:
# Graphite:

The Graphite data format translates Telegraf metrics into _dot_ buckets.
The format is:
The Graphite data format translates Telegraf metrics into _dot_ buckets. A
template can be specified for the output of Telegraf metrics into Graphite
buckets. The default template is:

```
[prefix].[host tag].[all tags (alphabetical)].[measurement name].[field name] value timestamp
template = "host.tags.measurement.field"
```

In the above template, we have four parts:

1. _host_ is a tag key. This can be any tag key that is in the Telegraf
   metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
   tag value will be filled in.
1. _tags_ is a special keyword that outputs all remaining tag values, separated
   by dots and in alphabetical order (by tag key). These will be filled after all
   tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.

Which means the following influx metric -> graphite conversion would happen:

```
@@ -78,20 +96,55 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

`prefix` is a configuration option when using the graphite output data format.

#### Graphite Configuration:
### Graphite Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output. This can be "influx" or "graphite"
  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
  data_format = "graphite"

  # prefix each graphite bucket
  prefix = "telegraf"
  # graphite template
  template = "host.tags.measurement.field"
```

# JSON:

The JSON data format serializes Telegraf metrics in JSON format. The format is:

```json
{
    "fields":{
        "field_1":30,
        "field_2":4,
        "field_N":59,
        "n_images":660
    },
    "name":"docker",
    "tags":{
        "host":"raynor"
    },
    "timestamp":1458229140
}
```

### JSON Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
```
@@ -12,10 +12,11 @@
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
@@ -28,6 +29,5 @@
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
docs/WINDOWS_SERVICE.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# Running Telegraf as a Windows Service

Telegraf natively supports running as a Windows Service. Outlined below are
the general steps to set it up.

1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
   location simply specify the `-config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):

   ```
   > C:\"Program Files"\Telegraf\telegraf.exe --service install
   ```

5. Edit the configuration file to meet your needs
6. To check that it works, run:

   ```
   > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
   ```

7. To start collecting data, run:

   ```
   > net start telegraf
   ```

## Other supported operations

Telegraf can manage its own service through the --service flag:

| Command                            | Effect                        |
|------------------------------------|-------------------------------|
| `telegraf.exe --service install`   | Install telegraf as a service |
| `telegraf.exe --service uninstall` | Remove the telegraf service   |
| `telegraf.exe --service start`     | Start the telegraf service    |
| `telegraf.exe --service stop`      | Stop the telegraf service     |
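Because the service-handling code shown earlier copies a `-config` flag into the
installed service's arguments, the service can also be installed against a
non-default config location in one step (the path below is illustrative):

```
> C:\"Program Files"\Telegraf\telegraf.exe --service install -config C:\telegraf\telegraf.conf
```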
etc/telegraf.conf (2014 lines): file diff suppressed because it is too large
@@ -24,7 +24,7 @@
  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
  ## flush this buffer on a successful write.
  metric_buffer_limit = 10000
  metric_buffer_limit = 1000
  ## Flush the buffer whenever full, regardless of flush_interval.
  flush_buffer_when_full = true

@@ -42,10 +42,14 @@
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Logging configuration:
  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Specify the log file name. The empty string means to log to stdout.
  logfile = "/Program Files/Telegraf/telegraf.log"

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""

@@ -85,7 +89,7 @@
# Windows Performance Counters plugin.
# These are the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources.
# which utilize more system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters

@@ -95,70 +99,105 @@
    # Processor usage, alternative to native, reports on a per core.
    ObjectName = "Processor"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
    Counters = [
      "% Idle Time",
      "% Interrupt Time",
      "% Privileged Time",
      "% User Time",
      "% Processor Time",
    ]
    Measurement = "win_cpu"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Disk times and queues
    ObjectName = "LogicalDisk"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
    Counters = [
      "% Idle Time",
      "% Disk Time","% Disk Read Time",
      "% Disk Write Time",
      "% User Time",
      "Current Disk Queue Length",
    ]
    Measurement = "win_disk"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = ["Context Switches/sec","System Calls/sec"]
    Counters = [
      "Context Switches/sec",
      "System Calls/sec",
      "Processor Queue Length",
    ]
    Instances = ["------"]
    Measurement = "win_system"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back, such as from the Memory object.
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Memory object.
    ObjectName = "Memory"
    Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
    Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
    Counters = [
      "Available Bytes",
      "Cache Faults/sec",
      "Demand Zero Faults/sec",
      "Page Faults/sec",
      "Pages/sec",
      "Transition Faults/sec",
      "Pool Nonpaged Bytes",
      "Pool Paged Bytes",
    ]
    # Use 6 x - to remove the Instance bit from the query.
    Instances = ["------"]
    Measurement = "win_mem"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false


# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

# Read metrics about cpu usage
#[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  #percpu = true
  ## Whether to report total system cpu stats or not
  #totalcpu = true
  ## Comment this line if you want the raw CPU time metrics
  #fielddrop = ["time_*"]
# # Read metrics about cpu usage
# [[inputs.cpu]]
#   ## Whether to report per-cpu stats or not
#   percpu = true
#   ## Whether to report total system cpu stats or not
#   totalcpu = true
#   ## Comment this line if you want the raw CPU time metrics
#   fielddrop = ["time_*"]

# Read metrics about disk usage by mount point
#[[inputs.disk]]
  ## By default, telegraf gather stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  ## mount_points=["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  #ignore_fs = ["tmpfs", "devtmpfs"]
# # Read metrics about disk usage by mount point
# [[inputs.disk]]
#   ## By default, telegraf gather stats for all mountpoints.
#   ## Setting mountpoints will restrict the stats to the specified mountpoints.
#   ## mount_points=["/"]
#
#   ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
#   ## present on /run, /var/run, /dev/shm or /dev).
#   # ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
#[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  ## devices = ["sda", "sdb"]
  ## Uncomment the following line if you do not need disk serial numbers.
  ## skip_serial_number = true

# Read metrics about memory usage
#[[inputs.mem]]
  # no configuration
# # Read metrics about disk IO by device
# [[inputs.diskio]]
#   ## By default, telegraf will gather stats for all devices including
#   ## disk partitions.
#   ## Setting devices will restrict the stats to the specified devices.
#   ## devices = ["sda", "sdb"]
#   ## Uncomment the following line if you do not need disk serial numbers.
#   ## skip_serial_number = true

# Read metrics about swap memory usage
#[[inputs.swap]]
  # no configuration

# # Read metrics about memory usage
# [[inputs.mem]]
#   # no configuration


# # Read metrics about swap memory usage
# [[inputs.swap]]
#   # no configuration
filter/filter.go (new file, 79 lines)
@@ -0,0 +1,79 @@
package filter

import (
    "strings"

    "github.com/gobwas/glob"
)

type Filter interface {
    Match(string) bool
}

// Compile takes a list of string filters and returns a Filter interface
// for matching a given string against the filter list. The filter list
// supports glob matching too, ie:
//
//   f, _ := Compile([]string{"cpu", "mem", "net*"})
//   f.Match("cpu")     // true
//   f.Match("network") // true
//   f.Match("memory")  // false
//
func Compile(filters []string) (Filter, error) {
    // return if there is nothing to compile
    if len(filters) == 0 {
        return nil, nil
    }

    // check if we can compile a non-glob filter
    noGlob := true
    for _, filter := range filters {
        if hasMeta(filter) {
            noGlob = false
            break
        }
    }

    switch {
    case noGlob:
        // return non-globbing filter if not needed.
        return compileFilterNoGlob(filters), nil
    case len(filters) == 1:
        return glob.Compile(filters[0])
    default:
        return glob.Compile("{" + strings.Join(filters, ",") + "}")
    }
}

// hasMeta reports whether path contains any magic glob characters.
func hasMeta(s string) bool {
    return strings.IndexAny(s, "*?[") >= 0
}

type filter struct {
    m map[string]struct{}
}

func (f *filter) Match(s string) bool {
    _, ok := f.m[s]
    return ok
}

type filtersingle struct {
    s string
}

func (f *filtersingle) Match(s string) bool {
    return f.s == s
}

func compileFilterNoGlob(filters []string) Filter {
    if len(filters) == 1 {
        return &filtersingle{s: filters[0]}
    }
    out := filter{m: make(map[string]struct{})}
    for _, filter := range filters {
        out.m[filter] = struct{}{}
    }
    return &out
}
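Note the design choice in `Compile`: literal-only filter lists skip glob
compilation entirely (a plain map or string comparison is used instead), and
multiple patterns are fused into a single `{a,b,c}` alternation so one compiled
matcher serves the whole list. A minimal usage sketch (the import path follows
this file's location in the repo; the printed results are illustrative):

```go
package main

import (
    "fmt"

    "github.com/influxdata/telegraf/filter"
)

func main() {
    // Mixed list: two literals plus one glob, so Compile falls through
    // to the glob path and builds the pattern "{cpu,mem,net*}".
    f, err := filter.Compile([]string{"cpu", "mem", "net*"})
    if err != nil {
        panic(err)
    }
    fmt.Println(f.Match("netstat")) // true, matches "net*"
    fmt.Println(f.Match("disk"))    // false
}
```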
filter/filter_test.go (new file, 96 lines)
@@ -0,0 +1,96 @@
package filter

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestCompile(t *testing.T) {
    f, err := Compile([]string{})
    assert.NoError(t, err)
    assert.Nil(t, f)

    f, err = Compile([]string{"cpu"})
    assert.NoError(t, err)
    assert.True(t, f.Match("cpu"))
    assert.False(t, f.Match("cpu0"))
    assert.False(t, f.Match("mem"))

    f, err = Compile([]string{"cpu*"})
    assert.NoError(t, err)
    assert.True(t, f.Match("cpu"))
    assert.True(t, f.Match("cpu0"))
    assert.False(t, f.Match("mem"))

    f, err = Compile([]string{"cpu", "mem"})
    assert.NoError(t, err)
    assert.True(t, f.Match("cpu"))
    assert.False(t, f.Match("cpu0"))
    assert.True(t, f.Match("mem"))

    f, err = Compile([]string{"cpu", "mem", "net*"})
    assert.NoError(t, err)
    assert.True(t, f.Match("cpu"))
    assert.False(t, f.Match("cpu0"))
    assert.True(t, f.Match("mem"))
    assert.True(t, f.Match("network"))
}

var benchbool bool

func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
    f, _ := Compile([]string{"cpu"})
    var tmp bool
    for n := 0; n < b.N; n++ {
        tmp = f.Match("network")
    }
    benchbool = tmp
}

func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
    f, _ := Compile([]string{"cpu"})
    var tmp bool
    for n := 0; n < b.N; n++ {
        tmp = f.Match("cpu")
    }
    benchbool = tmp
}

func BenchmarkFilter(b *testing.B) {
    f, _ := Compile([]string{"cpu", "mem", "net*"})
    var tmp bool
    for n := 0; n < b.N; n++ {
        tmp = f.Match("network")
    }
    benchbool = tmp
}

func BenchmarkFilterNoGlob(b *testing.B) {
    f, _ := Compile([]string{"cpu", "mem", "net"})
    var tmp bool
    for n := 0; n < b.N; n++ {
        tmp = f.Match("net")
    }
    benchbool = tmp
}

func BenchmarkFilter2(b *testing.B) {
    f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
        "aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
    var tmp bool
    for n := 0; n < b.N; n++ {
        tmp = f.Match("network")
    }
    benchbool = tmp
}

func BenchmarkFilter2NoGlob(b *testing.B) {
    f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
        "aw", "az", "axxx", "ab", "cpu", "mem", "net"})
    var tmp bool
    for n := 0; n < b.N; n++ {
        tmp = f.Match("net")
    }
    benchbool = tmp
}
internal/buffer/buffer.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package buffer

import (
    "sync"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/selfstat"
)

var (
    MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
    MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
)

// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
    buf chan telegraf.Metric

    mu sync.Mutex
}

// NewBuffer returns a Buffer
// size is the maximum number of metrics that Buffer will cache. If Add is
// called when the buffer is full, then the oldest metric(s) will be dropped.
func NewBuffer(size int) *Buffer {
    return &Buffer{
        buf: make(chan telegraf.Metric, size),
    }
}

// IsEmpty returns true if Buffer is empty.
func (b *Buffer) IsEmpty() bool {
    return len(b.buf) == 0
}

// Len returns the current length of the buffer.
func (b *Buffer) Len() int {
    return len(b.buf)
}

// Add adds metrics to the buffer.
func (b *Buffer) Add(metrics ...telegraf.Metric) {
    for i := range metrics {
        MetricsWritten.Incr(1)
        select {
        case b.buf <- metrics[i]:
        default:
            // the buffer is full: evict the oldest metric to make room
            MetricsDropped.Incr(1)
            <-b.buf
            b.buf <- metrics[i]
        }
    }
}

// Batch returns a batch of metrics of size batchSize.
// the batch will be of maximum length batchSize. It can be less than batchSize,
// if the length of Buffer is less than batchSize.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
    b.mu.Lock()
    n := min(len(b.buf), batchSize)
    out := make([]telegraf.Metric, n)
    for i := 0; i < n; i++ {
        out[i] = <-b.buf
    }
    b.mu.Unlock()
    return out
}

func min(a, b int) int {
    if b < a {
        return b
    }
    return a
}
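A minimal sketch of the drop-oldest behavior above, reusing `testutil.TestMetric`
from the adjacent test file (the import paths follow the repo layout and are an
assumption, not part of this diff):

```go
package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal/buffer"
    "github.com/influxdata/telegraf/testutil"
)

func main() {
    b := buffer.NewBuffer(2) // capacity of two metrics
    b.Add(testutil.TestMetric(1, "m1"))
    b.Add(testutil.TestMetric(2, "m2"))
    b.Add(testutil.TestMetric(3, "m3")) // buffer full: m1 is dropped

    batch := b.Batch(10)    // at most 10 metrics; only 2 are buffered
    fmt.Println(len(batch)) // 2
}
```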
internal/buffer/buffer_test.go (new file, 100 lines)
@@ -0,0 +1,100 @@
package buffer

import (
    "testing"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
)

var metricList = []telegraf.Metric{
    testutil.TestMetric(2, "mymetric1"),
    testutil.TestMetric(1, "mymetric2"),
    testutil.TestMetric(11, "mymetric3"),
    testutil.TestMetric(15, "mymetric4"),
    testutil.TestMetric(8, "mymetric5"),
}

func BenchmarkAddMetrics(b *testing.B) {
    buf := NewBuffer(10000)
    m := testutil.TestMetric(1, "mymetric")
    for n := 0; n < b.N; n++ {
        buf.Add(m)
    }
}

func TestNewBufferBasicFuncs(t *testing.T) {
    b := NewBuffer(10)
    MetricsDropped.Set(0)
    MetricsWritten.Set(0)

    assert.True(t, b.IsEmpty())
    assert.Zero(t, b.Len())
    assert.Zero(t, MetricsDropped.Get())
    assert.Zero(t, MetricsWritten.Get())

    m := testutil.TestMetric(1, "mymetric")
    b.Add(m)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 1)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(1), MetricsWritten.Get())

    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 6)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(6), MetricsWritten.Get())
}

func TestDroppingMetrics(t *testing.T) {
    b := NewBuffer(10)
    MetricsDropped.Set(0)
    MetricsWritten.Set(0)

    // Add up to the size of the buffer
    b.Add(metricList...)
    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 10)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(10), MetricsWritten.Get())

    // Add 5 more and verify they were dropped
    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 10)
    assert.Equal(t, int64(5), MetricsDropped.Get())
    assert.Equal(t, int64(15), MetricsWritten.Get())
}

func TestGettingBatches(t *testing.T) {
    b := NewBuffer(20)
    MetricsDropped.Set(0)
    MetricsWritten.Set(0)

    // Verify that the buffer returned is smaller than requested when there are
    // not as many items as requested.
    b.Add(metricList...)
    batch := b.Batch(10)
    assert.Len(t, batch, 5)

    // Verify that the buffer is now empty
    assert.True(t, b.IsEmpty())
    assert.Zero(t, b.Len())
    assert.Zero(t, MetricsDropped.Get())
    assert.Equal(t, int64(5), MetricsWritten.Get())

    // Verify that the buffer returned is not more than the size requested
    b.Add(metricList...)
    batch = b.Batch(3)
    assert.Len(t, batch, 3)

    // Verify that buffer is not empty
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 2)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(10), MetricsWritten.Get())
}
internal/config/aws/credentials.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package aws

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/session"
)

type CredentialConfig struct {
    Region    string
    AccessKey string
    SecretKey string
    RoleARN   string
    Profile   string
    Filename  string
    Token     string
}

func (c *CredentialConfig) Credentials() client.ConfigProvider {
    if c.RoleARN != "" {
        return c.assumeCredentials()
    } else {
        return c.rootCredentials()
    }
}

func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
    config := &aws.Config{
        Region: aws.String(c.Region),
    }
    if c.AccessKey != "" || c.SecretKey != "" {
        config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
    } else if c.Profile != "" || c.Filename != "" {
        config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile)
    }

    return session.New(config)
}

func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
    rootCredentials := c.rootCredentials()
    config := &aws.Config{
        Region: aws.String(c.Region),
    }
    config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
    return session.New(config)
}
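A hedged usage sketch for this helper: the CloudWatch constructor below is
illustrative, since any aws-sdk-go service client can be built from the returned
`client.ConfigProvider`, and the key values are placeholders:

```go
package main

import (
    "github.com/aws/aws-sdk-go/service/cloudwatch"

    "github.com/influxdata/telegraf/internal/config/aws"
)

func main() {
    // Static keys take precedence; if RoleARN were set, Credentials()
    // would instead wrap them via STS AssumeRole.
    cfg := &aws.CredentialConfig{
        Region:    "us-east-1",
        AccessKey: "EXAMPLE_KEY",    // hypothetical values
        SecretKey: "EXAMPLE_SECRET",
    }
    svc := cloudwatch.New(cfg.Credentials())
    _ = svc // use the client to publish or query metrics
}
```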
File diff suppressed because it is too large
@@ -1,6 +1,7 @@
package config

import (
	"os"
	"testing"
	"time"

@@ -10,9 +11,53 @@ import (
	"github.com/influxdata/telegraf/plugins/inputs/memcached"
	"github.com/influxdata/telegraf/plugins/inputs/procstat"
	"github.com/influxdata/telegraf/plugins/parsers"

	"github.com/stretchr/testify/assert"
)

func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
	c := NewConfig()
	err := os.Setenv("MY_TEST_SERVER", "192.168.1.1")
	assert.NoError(t, err)
	err = os.Setenv("TEST_INTERVAL", "10s")
	assert.NoError(t, err)
	c.LoadConfig("./testdata/single_plugin_env_vars.toml")

	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
	memcached.Servers = []string{"192.168.1.1"}

	filter := models.Filter{
		NameDrop:  []string{"metricname2"},
		NamePass:  []string{"metricname1"},
		FieldDrop: []string{"other", "stuff"},
		FieldPass: []string{"some", "strings"},
		TagDrop: []models.TagFilter{
			models.TagFilter{
				Name:   "badtag",
				Filter: []string{"othertag"},
			},
		},
		TagPass: []models.TagFilter{
			models.TagFilter{
				Name:   "goodtag",
				Filter: []string{"mytag"},
			},
		},
	}
	assert.NoError(t, filter.Compile())
	mConfig := &models.InputConfig{
		Name:     "memcached",
		Filter:   filter,
		Interval: 10 * time.Second,
	}
	mConfig.Tags = make(map[string]string)

	assert.Equal(t, memcached, c.Inputs[0].Input,
		"Testdata did not produce a correct memcached struct.")
	assert.Equal(t, mConfig, c.Inputs[0].Config,
		"Testdata did not produce correct memcached metadata.")
}

func TestConfig_LoadSingleInput(t *testing.T) {
	c := NewConfig()
	c.LoadConfig("./testdata/single_plugin.toml")
@@ -20,27 +65,28 @@ func TestConfig_LoadSingleInput(t *testing.T) {
	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
	memcached.Servers = []string{"localhost"}

	mConfig := &internal_models.InputConfig{
		Name: "memcached",
		Filter: internal_models.Filter{
			NameDrop:  []string{"metricname2"},
			NamePass:  []string{"metricname1"},
			FieldDrop: []string{"other", "stuff"},
			FieldPass: []string{"some", "strings"},
			TagDrop: []internal_models.TagFilter{
				internal_models.TagFilter{
					Name:   "badtag",
					Filter: []string{"othertag"},
				},
			},
			TagPass: []internal_models.TagFilter{
				internal_models.TagFilter{
					Name:   "goodtag",
					Filter: []string{"mytag"},
				},
			},
			IsActive: true,
		},
	filter := models.Filter{
		NameDrop:  []string{"metricname2"},
		NamePass:  []string{"metricname1"},
		FieldDrop: []string{"other", "stuff"},
		FieldPass: []string{"some", "strings"},
		TagDrop: []models.TagFilter{
			models.TagFilter{
				Name:   "badtag",
				Filter: []string{"othertag"},
			},
		},
		TagPass: []models.TagFilter{
			models.TagFilter{
				Name:   "goodtag",
				Filter: []string{"mytag"},
			},
		},
	}
	assert.NoError(t, filter.Compile())
	mConfig := &models.InputConfig{
		Name:     "memcached",
		Filter:   filter,
		Interval: 5 * time.Second,
	}
	mConfig.Tags = make(map[string]string)
@@ -65,27 +111,28 @@ func TestConfig_LoadDirectory(t *testing.T) {
	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
	memcached.Servers = []string{"localhost"}

	mConfig := &internal_models.InputConfig{
		Name: "memcached",
		Filter: internal_models.Filter{
			NameDrop:  []string{"metricname2"},
			NamePass:  []string{"metricname1"},
			FieldDrop: []string{"other", "stuff"},
			FieldPass: []string{"some", "strings"},
			TagDrop: []internal_models.TagFilter{
				internal_models.TagFilter{
					Name:   "badtag",
					Filter: []string{"othertag"},
				},
			},
			TagPass: []internal_models.TagFilter{
				internal_models.TagFilter{
					Name:   "goodtag",
					Filter: []string{"mytag"},
				},
			},
			IsActive: true,
		},
	filter := models.Filter{
		NameDrop:  []string{"metricname2"},
		NamePass:  []string{"metricname1"},
		FieldDrop: []string{"other", "stuff"},
		FieldPass: []string{"some", "strings"},
		TagDrop: []models.TagFilter{
			models.TagFilter{
				Name:   "badtag",
				Filter: []string{"othertag"},
			},
		},
		TagPass: []models.TagFilter{
			models.TagFilter{
				Name:   "goodtag",
				Filter: []string{"mytag"},
			},
		},
	}
	assert.NoError(t, filter.Compile())
	mConfig := &models.InputConfig{
		Name:     "memcached",
		Filter:   filter,
		Interval: 5 * time.Second,
	}
	mConfig.Tags = make(map[string]string)
@@ -100,7 +147,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
	assert.NoError(t, err)
	ex.SetParser(p)
	ex.Command = "/usr/bin/myothercollector --foo=bar"
	eConfig := &internal_models.InputConfig{
	eConfig := &models.InputConfig{
		Name:              "exec",
		MeasurementSuffix: "_myothercollector",
	}
@@ -119,7 +166,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
	pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
	pstat.PidFile = "/var/run/grafana-server.pid"

	pConfig := &internal_models.InputConfig{Name: "procstat"}
	pConfig := &models.InputConfig{Name: "procstat"}
	pConfig.Tags = make(map[string]string)

	assert.Equal(t, pstat, c.Inputs[3].Input,
internal/config/testdata/single_plugin_env_vars.toml (new file, 11 lines, vendored)
@@ -0,0 +1,11 @@
[[inputs.memcached]]
  servers = ["$MY_TEST_SERVER"]
  namepass = ["metricname1"]
  namedrop = ["metricname2"]
  fieldpass = ["some", "strings"]
  fielddrop = ["other", "stuff"]
  interval = "$TEST_INTERVAL"
  [inputs.memcached.tagpass]
    goodtag = ["mytag"]
  [inputs.memcached.tagdrop]
    badtag = ["othertag"]
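The `$MY_TEST_SERVER` and `$TEST_INTERVAL` references above are substituted from the environment before the TOML is parsed. A rough sketch of that substitution step, not the exact telegraf implementation, just the idea:

package main

import (
	"fmt"
	"os"
	"regexp"
)

// envVarRe is a simplified pattern for $VAR references in a config file.
var envVarRe = regexp.MustCompile(`\$\w+`)

// expandEnvVars replaces each $VAR token with the value of the
// corresponding environment variable before the TOML parser runs.
func expandEnvVars(contents []byte) []byte {
	return envVarRe.ReplaceAllFunc(contents, func(match []byte) []byte {
		return []byte(os.Getenv(string(match[1:]))) // strip the leading '$'
	})
}

func main() {
	os.Setenv("MY_TEST_SERVER", "192.168.1.1")
	fmt.Println(string(expandEnvVars([]byte(`servers = ["$MY_TEST_SERVER"]`))))
	// Output: servers = ["192.168.1.1"]
}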
internal/errchan/errchan.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package errchan

import (
	"fmt"
	"strings"
)

type ErrChan struct {
	C chan error
}

// New returns an error channel of max length 'n'.
// Errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
	return &ErrChan{
		C: make(chan error, n),
	}
}

// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
	close(e.C)

	var out string
	for err := range e.C {
		if err != nil {
			out += "[" + err.Error() + "], "
		}
	}

	if out != "" {
		return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
	}
	return nil
}
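A short usage sketch: the buffered channel collects one error per goroutine, and Error() drains everything into a single combined error. The worker count and messages are invented, and `internal/...` packages are only importable inside the telegraf module:

package main

import (
	"fmt"
	"sync"

	"github.com/influxdata/telegraf/internal/errchan"
)

func main() {
	const workers = 3
	errch := errchan.New(workers) // capacity must cover every send

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i == 1 {
				errch.C <- fmt.Errorf("worker %d failed", i) // buffered, never blocks
				return
			}
			errch.C <- nil
		}(i)
	}
	wg.Wait() // Error() closes C, so every send must land first

	if err := errch.Error(); err != nil {
		fmt.Println(err) // Errors encountered: [worker 1 failed]
	}
}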
internal/globpath/globpath.go (new file, 116 lines)
@@ -0,0 +1,116 @@
package globpath

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/gobwas/glob"
)

var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))

type GlobPath struct {
	path         string
	hasMeta      bool
	hasSuperMeta bool
	g            glob.Glob
	root         string
}

func Compile(path string) (*GlobPath, error) {
	out := GlobPath{
		hasMeta:      hasMeta(path),
		hasSuperMeta: hasSuperMeta(path),
		path:         path,
	}

	// if there are no glob meta characters in the path, don't bother compiling
	// a glob object or finding the root directory. (see short-circuit in Match)
	if !out.hasMeta || !out.hasSuperMeta {
		return &out, nil
	}

	var err error
	if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
		return nil, err
	}
	// Get the root directory for this filepath
	out.root = findRootDir(path)
	return &out, nil
}

func (g *GlobPath) Match() map[string]os.FileInfo {
	if !g.hasMeta {
		out := make(map[string]os.FileInfo)
		info, err := os.Stat(g.path)
		if !os.IsNotExist(err) {
			out[g.path] = info
		}
		return out
	}
	if !g.hasSuperMeta {
		out := make(map[string]os.FileInfo)
		files, _ := filepath.Glob(g.path)
		for _, file := range files {
			info, err := os.Stat(file)
			if !os.IsNotExist(err) {
				out[file] = info
			}
		}
		return out
	}
	return walkFilePath(g.root, g.g)
}

// walk the filepath from the given root and return a list of files that match
// the given glob.
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
	matchedFiles := make(map[string]os.FileInfo)
	walkfn := func(path string, info os.FileInfo, _ error) error {
		if g.Match(path) {
			matchedFiles[path] = info
		}
		return nil
	}
	filepath.Walk(root, walkfn)
	return matchedFiles
}

// find the root dir of the given path (could include globs).
// ie:
//   /var/log/telegraf.conf -> /var/log
//   /home/**               -> /home
//   /home/*/**             -> /home
//   /lib/share/*/*/**.txt  -> /lib/share
func findRootDir(path string) string {
	pathItems := strings.Split(path, sepStr)
	out := sepStr
	for i, item := range pathItems {
		if i == len(pathItems)-1 {
			break
		}
		if item == "" {
			continue
		}
		if hasMeta(item) {
			break
		}
		out += item + sepStr
	}
	if out != "/" {
		out = strings.TrimSuffix(out, "/")
	}
	return out
}

// hasMeta reports whether path contains any magic glob characters.
func hasMeta(path string) bool {
	return strings.IndexAny(path, "*?[") >= 0
}

// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
	return strings.Index(path, "**") >= 0
}
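For orientation, how a plugin would typically use this package; the log path is a placeholder:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/globpath"
)

func main() {
	// "**" triggers the walk-from-root branch in Match(); a plain "*"
	// would take the cheaper filepath.Glob branch instead.
	g, err := globpath.Compile("/var/log/**.log") // placeholder pattern
	if err != nil {
		panic(err)
	}
	for path, info := range g.Match() {
		fmt.Println(path, info.Size())
	}
}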
internal/globpath/globpath_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package globpath

import (
	"runtime"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCompileAndMatch(t *testing.T) {
	dir := getTestdataDir()
	// test super asterisk
	g1, err := Compile(dir + "/**")
	require.NoError(t, err)
	// test single asterisk
	g2, err := Compile(dir + "/*.log")
	require.NoError(t, err)
	// test no meta characters (file exists)
	g3, err := Compile(dir + "/log1.log")
	require.NoError(t, err)
	// test file that doesn't exist
	g4, err := Compile(dir + "/i_dont_exist.log")
	require.NoError(t, err)
	// test super asterisk that doesn't exist
	g5, err := Compile(dir + "/dir_doesnt_exist/**")
	require.NoError(t, err)

	matches := g1.Match()
	assert.Len(t, matches, 3)
	matches = g2.Match()
	assert.Len(t, matches, 2)
	matches = g3.Match()
	assert.Len(t, matches, 1)
	matches = g4.Match()
	assert.Len(t, matches, 0)
	matches = g5.Match()
	assert.Len(t, matches, 0)
}

func TestFindRootDir(t *testing.T) {
	tests := []struct {
		input  string
		output string
	}{
		{"/var/log/telegraf.conf", "/var/log"},
		{"/home/**", "/home"},
		{"/home/*/**", "/home"},
		{"/lib/share/*/*/**.txt", "/lib/share"},
	}

	for _, test := range tests {
		actual := findRootDir(test.input)
		assert.Equal(t, test.output, actual)
	}
}

func getTestdataDir() string {
	_, filename, _, _ := runtime.Caller(1)
	return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}
internal/globpath/testdata/log1.log (new empty file, vendored)
internal/globpath/testdata/log2.log (new empty file, vendored)
internal/globpath/testdata/test.conf (new file, 5 lines, vendored)
@@ -0,0 +1,5 @@
# this is a fake testing config file
# for testing the filestat plugin

option1 = "foo"
option2 = "bar"
@@ -2,19 +2,31 @@ package internal

import (
	"bufio"
	"bytes"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/big"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"
	"unicode"
)

const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

var (
	TimeoutErr = errors.New("Command timed out.")

	NotImplementedError = errors.New("not implemented yet")
)

// Duration just wraps time.Duration
type Duration struct {
	Duration time.Duration
@@ -22,18 +34,39 @@ type Duration struct {

// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
	dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
	if err != nil {
		return err
	}

	d.Duration = dur
	var err error
	b = bytes.Trim(b, `'`)

	// see if we can directly convert it
	d.Duration, err = time.ParseDuration(string(b))
	if err == nil {
		return nil
	}

	// Parse string duration, ie, "1s"
	if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
		d.Duration, err = time.ParseDuration(uq)
		if err == nil {
			return nil
		}
	}

	// First try parsing as integer seconds
	sI, err := strconv.ParseInt(string(b), 10, 64)
	if err == nil {
		d.Duration = time.Second * time.Duration(sI)
		return nil
	}
	// Second try parsing as float seconds
	sF, err := strconv.ParseFloat(string(b), 64)
	if err == nil {
		d.Duration = time.Second * time.Duration(sF)
		return nil
	}

	return nil
}

var NotImplementedError = errors.New("not implemented yet")

// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
@@ -86,15 +119,15 @@ func GetTLSConfig(
	SSLCert, SSLKey, SSLCA string,
	InsecureSkipVerify bool,
) (*tls.Config, error) {
	t := &tls.Config{}
	if SSLCert != "" && SSLKey != "" && SSLCA != "" {
		cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
		if err != nil {
			return nil, errors.New(fmt.Sprintf(
				"Could not load TLS client key/certificate: %s",
				err))
		}
	if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
		return nil, nil
	}

	t := &tls.Config{
		InsecureSkipVerify: InsecureSkipVerify,
	}

	if SSLCA != "" {
		caCert, err := ioutil.ReadFile(SSLCA)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
@@ -103,75 +136,107 @@ func GetTLSConfig(

		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)

		t = &tls.Config{
			Certificates:       []tls.Certificate{cert},
			RootCAs:            caCertPool,
			InsecureSkipVerify: InsecureSkipVerify,
		}
	} else {
		if InsecureSkipVerify {
			t.InsecureSkipVerify = true
		} else {
			return nil, nil
		}
		t.RootCAs = caCertPool
	}

	if SSLCert != "" && SSLKey != "" {
		cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
		if err != nil {
			return nil, errors.New(fmt.Sprintf(
				"Could not load TLS client key/certificate from %s:%s: %s",
				SSLKey, SSLCert, err))
		}

		t.Certificates = []tls.Certificate{cert}
		t.BuildNameToCertificate()
	}

	// will be nil by default if nothing is provided
	return t, nil
}
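A hedged usage sketch of the reworked helper (file paths are placeholders). Note the new short-circuit: it returns (nil, nil) when nothing is configured, so callers must handle a nil *tls.Config:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Placeholder paths; InsecureSkipVerify left off.
	tlsCfg, err := internal.GetTLSConfig(
		"/etc/telegraf/client.pem", // SSLCert
		"/etc/telegraf/client.key", // SSLKey
		"/etc/telegraf/ca.pem",     // SSLCA
		false,
	)
	if err != nil {
		panic(err)
	}
	if tlsCfg == nil {
		fmt.Println("TLS not configured; using plaintext")
		return
	}
	fmt.Println("TLS configured with", len(tlsCfg.Certificates), "client cert(s)")
}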

// Glob will test a string pattern, potentially containing globs, against a
// subject string. The result is a simple true/false, determining whether or
// not the glob pattern matched the subject text.
//
// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
// thanks Ryan Uber!
func Glob(pattern, measurement string) bool {
	// Empty pattern can only match empty subject
	if pattern == "" {
		return measurement == pattern
	}

	// If the pattern _is_ a glob, it matches everything
	if pattern == "*" {
		return true
	}

	parts := strings.Split(pattern, "*")

	if len(parts) == 1 {
		// No globs in pattern, so test for match
		return pattern == measurement
	}

	leadingGlob := strings.HasPrefix(pattern, "*")
	trailingGlob := strings.HasSuffix(pattern, "*")
	end := len(parts) - 1

	for i, part := range parts {
		switch i {
		case 0:
			if leadingGlob {
				continue
			}
			if !strings.HasPrefix(measurement, part) {
				return false
			}
		case end:
			if len(measurement) > 0 {
				return trailingGlob || strings.HasSuffix(measurement, part)
			}
		default:
			if !strings.Contains(measurement, part) {
				return false
			}
		}

		// Trim evaluated text from measurement as we loop over the pattern.
		idx := strings.Index(measurement, part) + len(part)
		measurement = measurement[idx:]
	}

	// All parts of the pattern matched
	return true
}

// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {
	runes := []rune(in)
	length := len(runes)

	var out []rune
	for i := 0; i < length; i++ {
		if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {
			out = append(out, '_')
		}
		out = append(out, unicode.ToLower(runes[i]))
	}

	return string(out)
}

// CombinedOutputTimeout runs the given command with the given timeout and
// returns the combined output of stdout and stderr.
// If the command times out, it attempts to kill the process.
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
	var b bytes.Buffer
	c.Stdout = &b
	c.Stderr = &b
	if err := c.Start(); err != nil {
		return nil, err
	}
	err := WaitTimeout(c, timeout)
	return b.Bytes(), err
}

// RunTimeout runs the given command with the given timeout.
// If the command times out, it attempts to kill the process.
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
	if err := c.Start(); err != nil {
		return err
	}
	return WaitTimeout(c, timeout)
}

// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	done := make(chan error)
	go func() { done <- c.Wait() }()
	select {
	case err := <-done:
		timer.Stop()
		return err
	case <-timer.C:
		if err := c.Process.Kill(); err != nil {
			log.Printf("E! FATAL error killing process: %s", err)
			return err
		}
		// wait for the command to return after killing it
		<-done
		return TimeoutErr
	}
}

// RandomSleep will sleep for a random amount of time up to max.
// If the shutdown channel is closed, it will return before it has finished
// sleeping.
func RandomSleep(max time.Duration, shutdown chan struct{}) {
	if max == 0 {
		return
	}
	maxSleep := big.NewInt(max.Nanoseconds())

	var sleepns int64
	if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
		sleepns = j.Int64()
	}

	t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
	select {
	case <-t.C:
		return
	case <-shutdown:
		t.Stop()
		return
	}
}

@@ -1,44 +1,156 @@
package internal

import "testing"

func testGlobMatch(t *testing.T, pattern, subj string) {
	if !Glob(pattern, subj) {
		t.Errorf("%s should match %s", pattern, subj)
	}
}

func testGlobNoMatch(t *testing.T, pattern, subj string) {
	if Glob(pattern, subj) {
		t.Errorf("%s should not match %s", pattern, subj)
	}
}

func TestEmptyPattern(t *testing.T) {
	testGlobMatch(t, "", "")
	testGlobNoMatch(t, "", "test")
}

func TestPatternWithoutGlobs(t *testing.T) {
	testGlobMatch(t, "test", "test")
}

func TestGlob(t *testing.T) {
	for _, pattern := range []string{
		"*test",           // Leading glob
		"this*",           // Trailing glob
		"*is*a*",          // Lots of globs
		"**test**",        // Double glob characters
		"**is**a***test*", // Varying number of globs
	} {
		testGlobMatch(t, pattern, "this_is_a_test")
	}

	for _, pattern := range []string{
		"test*", // Implicit substring match should fail
		"*is",   // Partial match should fail
		"*no*",  // Globs without a match between them should fail
	} {
		testGlobNoMatch(t, pattern, "this_is_a_test")
	}
}
package internal

import (
	"os/exec"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

type SnakeTest struct {
	input  string
	output string
}

var tests = []SnakeTest{
	{"a", "a"},
	{"snake", "snake"},
	{"A", "a"},
	{"ID", "id"},
	{"MOTD", "motd"},
	{"Snake", "snake"},
	{"SnakeTest", "snake_test"},
	{"APIResponse", "api_response"},
	{"SnakeID", "snake_id"},
	{"SnakeIDGoogle", "snake_id_google"},
	{"LinuxMOTD", "linux_motd"},
	{"OMGWTFBBQ", "omgwtfbbq"},
	{"omg_wtf_bbq", "omg_wtf_bbq"},
}

func TestSnakeCase(t *testing.T) {
	for _, test := range tests {
		if SnakeCase(test.input) != test.output {
			t.Errorf(`SnakeCase("%s"), wanted "%s", got "%s"`, test.input, test.output, SnakeCase(test.input))
		}
	}
}

var (
	sleepbin, _ = exec.LookPath("sleep")
	echobin, _  = exec.LookPath("echo")
)

func TestRunTimeout(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "10")
	start := time.Now()
	err := RunTimeout(cmd, time.Millisecond*20)
	elapsed := time.Since(start)

	assert.Equal(t, TimeoutErr, err)
	// Verify that command gets killed in 20ms, with some breathing room
	assert.True(t, elapsed < time.Millisecond*75)
}

func TestCombinedOutputTimeout(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "10")
	start := time.Now()
	_, err := CombinedOutputTimeout(cmd, time.Millisecond*20)
	elapsed := time.Since(start)

	assert.Equal(t, TimeoutErr, err)
	// Verify that command gets killed in 20ms, with some breathing room
	assert.True(t, elapsed < time.Millisecond*75)
}

func TestCombinedOutput(t *testing.T) {
	if echobin == "" {
		t.Skip("'echo' binary not available on OS, skipping.")
	}
	cmd := exec.Command(echobin, "foo")
	out, err := CombinedOutputTimeout(cmd, time.Second)

	assert.NoError(t, err)
	assert.Equal(t, "foo\n", string(out))
}

// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "foo")
	expected, err := cmd.CombinedOutput()

	cmd2 := exec.Command(sleepbin, "foo")
	actual, err := CombinedOutputTimeout(cmd2, time.Second)

	assert.Error(t, err)
	assert.Equal(t, expected, actual)
}

func TestRunError(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "foo")
	err := RunTimeout(cmd, time.Second)

	assert.Error(t, err)
}

func TestRandomSleep(t *testing.T) {
	// test that zero max returns immediately
	s := time.Now()
	RandomSleep(time.Duration(0), make(chan struct{}))
	elapsed := time.Since(s)
	assert.True(t, elapsed < time.Millisecond)

	// test that max sleep is respected
	s = time.Now()
	RandomSleep(time.Millisecond*50, make(chan struct{}))
	elapsed = time.Since(s)
	assert.True(t, elapsed < time.Millisecond*100)

	// test that shutdown is respected
	s = time.Now()
	shutdown := make(chan struct{})
	go func() {
		time.Sleep(time.Millisecond * 100)
		close(shutdown)
	}()
	RandomSleep(time.Second, shutdown)
	elapsed = time.Since(s)
	assert.True(t, elapsed < time.Millisecond*150)
}

func TestDuration(t *testing.T) {
	var d Duration

	d.UnmarshalTOML([]byte(`"1s"`))
	assert.Equal(t, time.Second, d.Duration)

	d = Duration{}
	d.UnmarshalTOML([]byte(`1s`))
	assert.Equal(t, time.Second, d.Duration)

	d = Duration{}
	d.UnmarshalTOML([]byte(`'1s'`))
	assert.Equal(t, time.Second, d.Duration)

	d = Duration{}
	d.UnmarshalTOML([]byte(`10`))
	assert.Equal(t, 10*time.Second, d.Duration)

	d = Duration{}
	d.UnmarshalTOML([]byte(`1.5`))
	assert.Equal(t, time.Second, d.Duration)
}

internal/limiter/limiter.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package limiter

import (
	"sync"
	"time"
)

// NewRateLimiter returns a rate limiter that will emit from the C
// channel at most 'n' times per 'rate' interval.
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
	r := &rateLimiter{
		C:        make(chan bool),
		rate:     rate,
		n:        n,
		shutdown: make(chan bool),
	}
	r.wg.Add(1)
	go r.limiter()
	return r
}

type rateLimiter struct {
	C    chan bool
	rate time.Duration
	n    int

	shutdown chan bool
	wg       sync.WaitGroup
}

func (r *rateLimiter) Stop() {
	close(r.shutdown)
	r.wg.Wait()
	close(r.C)
}

func (r *rateLimiter) limiter() {
	defer r.wg.Done()
	ticker := time.NewTicker(r.rate)
	defer ticker.Stop()
	counter := 0
	for {
		select {
		case <-r.shutdown:
			return
		case <-ticker.C:
			counter = 0
		default:
			if counter < r.n {
				select {
				case r.C <- true:
					counter++
				case <-r.shutdown:
					return
				}
			}
		}
	}
}
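The typical consumption pattern: receive a token from C before doing each unit of rate-limited work. The loop body is illustrative, and the `internal/...` import only resolves inside the telegraf module:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/internal/limiter"
)

func main() {
	// At most 5 operations per second.
	r := limiter.NewRateLimiter(5, time.Second)
	defer r.Stop()

	for i := 0; i < 10; i++ {
		<-r.C // blocks once the 5-per-second budget is spent
		fmt.Println("op", i, "at", time.Now().Format("15:04:05.000"))
	}
}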
internal/limiter/limiter_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package limiter

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestRateLimiter(t *testing.T) {
	r := NewRateLimiter(5, time.Second)
	ticker := time.NewTicker(time.Millisecond * 75)

	// test that we can only get 5 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-r.C:
			counter++
		case <-ticker.C:
			break outer
		}
	}

	assert.Equal(t, 5, counter)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

func TestRateLimiterMultipleIterations(t *testing.T) {
	r := NewRateLimiter(5, time.Millisecond*50)
	ticker := time.NewTicker(time.Millisecond * 250)

	// test that we can get 15 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-ticker.C:
			break outer
		case <-r.C:
			counter++
		}
	}

	assert.True(t, counter > 10)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}
@@ -1,105 +1,188 @@
package internal_models
package models

import (
	"strings"
	"fmt"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/filter"
)

// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
	Name   string
	Filter []string
	filter filter.Filter
}

// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
	NameDrop []string
	nameDrop filter.Filter
	NamePass []string
	namePass filter.Filter

	FieldDrop []string
	fieldDrop filter.Filter
	FieldPass []string
	fieldPass filter.Filter

	TagDrop []TagFilter
	TagPass []TagFilter

	IsActive bool
	TagExclude []string
	tagExclude filter.Filter
	TagInclude []string
	tagInclude filter.Filter

	isActive bool
}

func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
	if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
// Compile all Filter lists into filter.Filter objects.
func (f *Filter) Compile() error {
	if len(f.NameDrop) == 0 &&
		len(f.NamePass) == 0 &&
		len(f.FieldDrop) == 0 &&
		len(f.FieldPass) == 0 &&
		len(f.TagInclude) == 0 &&
		len(f.TagExclude) == 0 &&
		len(f.TagPass) == 0 &&
		len(f.TagDrop) == 0 {
		return nil
	}

	f.isActive = true
	var err error
	f.nameDrop, err = filter.Compile(f.NameDrop)
	if err != nil {
		return fmt.Errorf("Error compiling 'namedrop', %s", err)
	}
	f.namePass, err = filter.Compile(f.NamePass)
	if err != nil {
		return fmt.Errorf("Error compiling 'namepass', %s", err)
	}

	f.fieldDrop, err = filter.Compile(f.FieldDrop)
	if err != nil {
		return fmt.Errorf("Error compiling 'fielddrop', %s", err)
	}
	f.fieldPass, err = filter.Compile(f.FieldPass)
	if err != nil {
		return fmt.Errorf("Error compiling 'fieldpass', %s", err)
	}

	f.tagExclude, err = filter.Compile(f.TagExclude)
	if err != nil {
		return fmt.Errorf("Error compiling 'tagexclude', %s", err)
	}
	f.tagInclude, err = filter.Compile(f.TagInclude)
	if err != nil {
		return fmt.Errorf("Error compiling 'taginclude', %s", err)
	}

	for i, _ := range f.TagDrop {
		f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
		if err != nil {
			return fmt.Errorf("Error compiling 'tagdrop', %s", err)
		}
	}
	for i, _ := range f.TagPass {
		f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
		if err != nil {
			return fmt.Errorf("Error compiling 'tagpass', %s", err)
		}
	}
	return nil
}

// Apply applies the filter to the given measurement name, fields map, and
// tags map. It will return false if the metric should be "filtered out", and
// true if the metric should "pass".
// It will modify tags & fields in-place if they need to be deleted.
func (f *Filter) Apply(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
) bool {
	if !f.isActive {
		return true
	}
	return false

	// check if the measurement name should pass
	if !f.shouldNamePass(measurement) {
		return false
	}

	// check if the tags should pass
	if !f.shouldTagsPass(tags) {
		return false
	}

	// filter fields
	for fieldkey, _ := range fields {
		if !f.shouldFieldPass(fieldkey) {
			delete(fields, fieldkey)
		}
	}
	if len(fields) == 0 {
		return false
	}

	// filter tags
	f.filterTags(tags)

	return true
}

func (f *Filter) IsActive() bool {
	return f.isActive
}

// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f Filter) ShouldNamePass(key string) bool {
	if f.NamePass != nil {
		for _, pat := range f.NamePass {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return true
			}
func (f *Filter) shouldNamePass(key string) bool {
	if f.namePass != nil {
		if f.namePass.Match(key) {
			return true
		}
		return false
	}

	if f.NameDrop != nil {
		for _, pat := range f.NameDrop {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return false
			}
	if f.nameDrop != nil {
		if f.nameDrop.Match(key) {
			return false
		}
	}

	return true
}

// ShouldFieldsPass returns true if the metric should pass, false if should drop
// shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f Filter) ShouldFieldsPass(key string) bool {
	if f.FieldPass != nil {
		for _, pat := range f.FieldPass {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return true
			}
func (f *Filter) shouldFieldPass(key string) bool {
	if f.fieldPass != nil {
		if f.fieldPass.Match(key) {
			return true
		}
		return false
	}

	if f.FieldDrop != nil {
		for _, pat := range f.FieldDrop {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return false
			}
	if f.fieldDrop != nil {
		if f.fieldDrop.Match(key) {
			return false
		}
	}

	return true
}

// ShouldTagsPass returns true if the metric should pass, false if should drop
// shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
	if f.TagPass != nil {
		for _, pat := range f.TagPass {
			if pat.filter == nil {
				continue
			}
			if tagval, ok := tags[pat.Name]; ok {
				for _, filter := range pat.Filter {
					if internal.Glob(filter, tagval) {
						return true
					}
				if pat.filter.Match(tagval) {
					return true
				}
			}
		}
@@ -108,11 +191,12 @@ func (f Filter) ShouldTagsPass(tags map[string]string) bool {

	if f.TagDrop != nil {
		for _, pat := range f.TagDrop {
			if pat.filter == nil {
				continue
			}
			if tagval, ok := tags[pat.Name]; ok {
				for _, filter := range pat.Filter {
					if internal.Glob(filter, tagval) {
						return false
					}
				if pat.filter.Match(tagval) {
					return false
				}
			}
		}
@@ -121,3 +205,23 @@ func (f Filter) ShouldTagsPass(tags map[string]string) bool {

	return true
}

// Apply TagInclude and TagExclude filters.
// modifies the tags map in-place.
func (f *Filter) filterTags(tags map[string]string) {
	if f.tagInclude != nil {
		for k, _ := range tags {
			if !f.tagInclude.Match(k) {
				delete(tags, k)
			}
		}
	}

	if f.tagExclude != nil {
		for k, _ := range tags {
			if f.tagExclude.Match(k) {
				delete(tags, k)
			}
		}
	}
}

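To see the new compile-then-apply flow end to end, a small sketch; field and tag names are invented, and the import path only resolves inside the telegraf module:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/models"
)

func main() {
	f := models.Filter{
		NamePass:   []string{"cpu*"},   // keep only cpu measurements
		FieldDrop:  []string{"time_*"}, // drop per-mode time fields
		TagExclude: []string{"host"},   // strip the host tag
	}
	if err := f.Compile(); err != nil { // patterns compile once, not per metric
		panic(err)
	}

	fields := map[string]interface{}{"usage_idle": 99.5, "time_user": 1.2}
	tags := map[string]string{"host": "box1", "cpu": "cpu0"}

	if f.Apply("cpu", fields, tags) {
		fmt.Println(fields, tags) // map[usage_idle:99.5] map[cpu:cpu0]
	}
}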
@@ -1,9 +1,64 @@
package internal_models
package models

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestFilter_ApplyEmpty(t *testing.T) {
	f := Filter{}
	require.NoError(t, f.Compile())
	assert.False(t, f.IsActive())

	assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
}

func TestFilter_ApplyTagsDontPass(t *testing.T) {
	filters := []TagFilter{
		TagFilter{
			Name:   "cpu",
			Filter: []string{"cpu-*"},
		},
	}
	f := Filter{
		TagDrop: filters,
	}
	require.NoError(t, f.Compile())
	assert.True(t, f.IsActive())

	assert.False(t, f.Apply("m",
		map[string]interface{}{"value": int64(1)},
		map[string]string{"cpu": "cpu-total"}))
}

func TestFilter_ApplyDeleteFields(t *testing.T) {
	f := Filter{
		FieldDrop: []string{"value"},
	}
	require.NoError(t, f.Compile())
	assert.True(t, f.IsActive())

	fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
	assert.True(t, f.Apply("m", fields, nil))
	assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
}

func TestFilter_ApplyDeleteAllFields(t *testing.T) {
	f := Filter{
		FieldDrop: []string{"value*"},
	}
	require.NoError(t, f.Compile())
	assert.True(t, f.IsActive())

	fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
	assert.False(t, f.Apply("m", fields, nil))
}

func TestFilter_Empty(t *testing.T) {
	f := Filter{}

@@ -18,7 +73,7 @@ func TestFilter_Empty(t *testing.T) {
	}

	for _, measurement := range measurements {
		if !f.ShouldFieldsPass(measurement) {
		if !f.shouldFieldPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}
@@ -28,6 +83,7 @@ func TestFilter_NamePass(t *testing.T) {
	f := Filter{
		NamePass: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.Compile())

	passes := []string{
		"foo",
@@ -45,13 +101,13 @@ func TestFilter_NamePass(t *testing.T) {
	}

	for _, measurement := range passes {
		if !f.ShouldNamePass(measurement) {
		if !f.shouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldNamePass(measurement) {
		if f.shouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
@@ -61,6 +117,7 @@ func TestFilter_NameDrop(t *testing.T) {
	f := Filter{
		NameDrop: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.Compile())

	drops := []string{
		"foo",
@@ -78,13 +135,13 @@ func TestFilter_NameDrop(t *testing.T) {
	}

	for _, measurement := range passes {
		if !f.ShouldNamePass(measurement) {
		if !f.shouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldNamePass(measurement) {
		if f.shouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
@@ -94,6 +151,7 @@ func TestFilter_FieldPass(t *testing.T) {
	f := Filter{
		FieldPass: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.Compile())

	passes := []string{
		"foo",
@@ -111,13 +169,13 @@ func TestFilter_FieldPass(t *testing.T) {
	}

	for _, measurement := range passes {
		if !f.ShouldFieldsPass(measurement) {
		if !f.shouldFieldPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldFieldsPass(measurement) {
		if f.shouldFieldPass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
@@ -127,6 +185,7 @@ func TestFilter_FieldDrop(t *testing.T) {
	f := Filter{
		FieldDrop: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.Compile())

	drops := []string{
		"foo",
@@ -144,13 +203,13 @@ func TestFilter_FieldDrop(t *testing.T) {
	}

	for _, measurement := range passes {
		if !f.ShouldFieldsPass(measurement) {
		if !f.shouldFieldPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldFieldsPass(measurement) {
		if f.shouldFieldPass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
@@ -169,6 +228,7 @@ func TestFilter_TagPass(t *testing.T) {
	f := Filter{
		TagPass: filters,
	}
	require.NoError(t, f.Compile())

	passes := []map[string]string{
		{"cpu": "cpu-total"},
@@ -187,13 +247,13 @@ func TestFilter_TagPass(t *testing.T) {
	}

	for _, tags := range passes {
		if !f.ShouldTagsPass(tags) {
		if !f.shouldTagsPass(tags) {
			t.Errorf("Expected tags %v to pass", tags)
		}
	}

	for _, tags := range drops {
		if f.ShouldTagsPass(tags) {
		if f.shouldTagsPass(tags) {
			t.Errorf("Expected tags %v to drop", tags)
		}
	}
@@ -212,6 +272,7 @@ func TestFilter_TagDrop(t *testing.T) {
	f := Filter{
		TagDrop: filters,
	}
	require.NoError(t, f.Compile())

	drops := []map[string]string{
		{"cpu": "cpu-total"},
@@ -230,14 +291,69 @@ func TestFilter_TagDrop(t *testing.T) {
	}

	for _, tags := range passes {
		if !f.ShouldTagsPass(tags) {
		if !f.shouldTagsPass(tags) {
			t.Errorf("Expected tags %v to pass", tags)
		}
	}

	for _, tags := range drops {
		if f.ShouldTagsPass(tags) {
		if f.shouldTagsPass(tags) {
			t.Errorf("Expected tags %v to drop", tags)
		}
	}
}

func TestFilter_FilterTagsNoMatches(t *testing.T) {
	pretags := map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}
	f := Filter{
		TagExclude: []string{"nomatch"},
	}
	require.NoError(t, f.Compile())

	f.filterTags(pretags)
	assert.Equal(t, map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}, pretags)

	f = Filter{
		TagInclude: []string{"nomatch"},
	}
	require.NoError(t, f.Compile())

	f.filterTags(pretags)
	assert.Equal(t, map[string]string{}, pretags)
}

func TestFilter_FilterTagsMatches(t *testing.T) {
	pretags := map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}
	f := Filter{
		TagExclude: []string{"ho*"},
	}
	require.NoError(t, f.Compile())

	f.filterTags(pretags)
	assert.Equal(t, map[string]string{
		"mytag": "foobar",
	}, pretags)

	pretags = map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}
	f = Filter{
		TagInclude: []string{"my*"},
	}
	require.NoError(t, f.Compile())

	f.filterTags(pretags)
	assert.Equal(t, map[string]string{
		"mytag": "foobar",
	}, pretags)
}

internal/models/makemetric.go (new file, 143 lines)
@@ -0,0 +1,143 @@
package models

import (
	"log"
	"math"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

// makemetric is used by both RunningAggregator & RunningInput
// to make metrics.
//   nameOverride: override the name of the measurement being made.
//   namePrefix:   add this prefix to each measurement name.
//   nameSuffix:   add this suffix to each measurement name.
//   pluginTags:   these are tags that are specific to this plugin.
//   daemonTags:   these are daemon-wide global tags, and get applied after pluginTags.
//   filter:       this is a filter to apply to each metric being made.
//   applyFilter:  if false, the above filter is not applied to each metric.
//                 This is used by Aggregators, because aggregators use filters
//                 on incoming metrics instead of on created metrics.
// TODO refactor this to not have such a huge func signature.
func makemetric(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	nameOverride string,
	namePrefix string,
	nameSuffix string,
	pluginTags map[string]string,
	daemonTags map[string]string,
	filter Filter,
	applyFilter bool,
	mType telegraf.ValueType,
	t time.Time,
) telegraf.Metric {
	if len(fields) == 0 || len(measurement) == 0 {
		return nil
	}
	if tags == nil {
		tags = make(map[string]string)
	}

	// Override measurement name if set
	if len(nameOverride) != 0 {
		measurement = nameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(namePrefix) != 0 {
		measurement = namePrefix + measurement
	}
	if len(nameSuffix) != 0 {
		measurement = measurement + nameSuffix
	}

	// Apply plugin-wide tags if set
	for k, v := range pluginTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set
	for k, v := range daemonTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}

	// Apply the metric filter(s).
	// For aggregators, the filter does not get applied when the metric is made;
	// instead, the filter is applied to metrics incoming into the plugin,
	// ie, it gets applied in the RunningAggregator.Apply function.
	if applyFilter {
		if ok := filter.Apply(measurement, fields, tags); !ok {
			return nil
		}
	}

	for k, v := range fields {
		// Validate uint64 and float64 fields
		// convert all int & uint types to int64
		switch val := v.(type) {
		case nil:
			// delete nil fields
			delete(fields, k)
		case uint:
			fields[k] = int64(val)
			continue
		case uint8:
			fields[k] = int64(val)
			continue
		case uint16:
			fields[k] = int64(val)
			continue
		case uint32:
			fields[k] = int64(val)
			continue
		case int:
			fields[k] = int64(val)
			continue
		case int8:
			fields[k] = int64(val)
			continue
		case int16:
			fields[k] = int64(val)
			continue
		case int32:
			fields[k] = int64(val)
			continue
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				fields[k] = int64(val)
			} else {
				fields[k] = int64(9223372036854775807)
			}
			continue
		case float32:
			fields[k] = float64(val)
			continue
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
					"field, skipping",
					measurement, k)
				delete(fields, k)
				continue
			}
		default:
			fields[k] = v
		}
	}

	m, err := metric.New(measurement, tags, fields, t, mType)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
		return nil
	}

	return m
}
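The field loop above normalizes every numeric type InfluxDB can't store directly. A standalone sketch of just that conversion rule, not telegraf code, same logic (smaller int widths elided for brevity):

package main

import (
	"fmt"
	"math"
)

// normalize applies the same numeric rules as the field loop above:
// ints widen to int64, uint64 clamps at MaxInt64, NaN/Inf are rejected.
func normalize(v interface{}) (interface{}, bool) {
	switch val := v.(type) {
	case uint64:
		// InfluxDB has no uint64: clamp anything above MaxInt64.
		if val > math.MaxInt64 {
			return int64(math.MaxInt64), true
		}
		return int64(val), true
	case int:
		return int64(val), true
	case uint:
		return int64(val), true
	case float32:
		return float64(val), true
	case float64:
		// NaN and Inf are invalid in InfluxDB: drop the field.
		if math.IsNaN(val) || math.IsInf(val, 0) {
			return nil, false
		}
		return val, true
	default:
		return v, true
	}
}

func main() {
	fmt.Println(normalize(uint64(18446744073709551615))) // 9223372036854775807 true
	fmt.Println(normalize(math.NaN()))                   // <nil> false
}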
internal/models/running_aggregator.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package models

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

type RunningAggregator struct {
	a      telegraf.Aggregator
	Config *AggregatorConfig

	metrics chan telegraf.Metric

	periodStart time.Time
	periodEnd   time.Time
}

func NewRunningAggregator(
	a telegraf.Aggregator,
	conf *AggregatorConfig,
) *RunningAggregator {
	return &RunningAggregator{
		a:       a,
		Config:  conf,
		metrics: make(chan telegraf.Metric, 100),
	}
}

// AggregatorConfig containing configuration parameters for the running
// aggregator plugin.
type AggregatorConfig struct {
	Name string

	DropOriginal      bool
	NameOverride      string
	MeasurementPrefix string
	MeasurementSuffix string
	Tags              map[string]string
	Filter            Filter

	Period time.Duration
	Delay  time.Duration
}

func (r *RunningAggregator) Name() string {
	return "aggregators." + r.Config.Name
}

func (r *RunningAggregator) MakeMetric(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	mType telegraf.ValueType,
	t time.Time,
) telegraf.Metric {
	m := makemetric(
		measurement,
		fields,
		tags,
		r.Config.NameOverride,
		r.Config.MeasurementPrefix,
		r.Config.MeasurementSuffix,
		r.Config.Tags,
		nil,
		r.Config.Filter,
		false,
		mType,
		t,
	)

	if m != nil {
		m.SetAggregate(true)
	}

	return m
}

// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Add returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
	if r.Config.Filter.IsActive() {
		// check if the aggregator should apply this metric
		name := in.Name()
		fields := in.Fields()
		tags := in.Tags()
		t := in.Time()
		if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
			// aggregator should not apply this metric
			return false
		}

		in, _ = metric.New(name, tags, fields, t)
	}

	r.metrics <- in
	return r.Config.DropOriginal
}

func (r *RunningAggregator) add(in telegraf.Metric) {
	r.a.Add(in)
}

func (r *RunningAggregator) push(acc telegraf.Accumulator) {
	r.a.Push(acc)
}

func (r *RunningAggregator) reset() {
	r.a.Reset()
}

// Run runs the running aggregator, listens for incoming metrics, and waits
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
	acc telegraf.Accumulator,
	shutdown chan struct{},
) {
	// The start of the period is truncated to the nearest second.
	//
	// Every metric then gets its timestamp checked and is dropped if it
	// is not within:
	//
	//   start < t < end + truncation + delay
	//
	// So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
	//   now        = 00:00.2
	//   start      = 00:00
	//   truncation = 00:00.2
	//   end        = 00:10
	//   1st interval: 00:00 - 00:10.5
	//   2nd interval: 00:10 - 00:20.5
	//   etc.
	//
	now := time.Now()
	r.periodStart = now.Truncate(time.Second)
	truncation := now.Sub(r.periodStart)
	r.periodEnd = r.periodStart.Add(r.Config.Period)
	time.Sleep(r.Config.Delay)
	periodT := time.NewTicker(r.Config.Period)
	defer periodT.Stop()

	for {
		select {
		case <-shutdown:
			if len(r.metrics) > 0 {
				// wait until metrics are flushed before exiting
				continue
			}
			return
		case m := <-r.metrics:
			if m.Time().Before(r.periodStart) ||
				m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
				// the metric is outside the current aggregation period, so
				// skip it.
				continue
			}
			r.add(m)
		case <-periodT.C:
			r.periodStart = r.periodEnd
			r.periodEnd = r.periodStart.Add(r.Config.Period)
			r.push(acc)
			r.reset()
		}
	}
}
internal/models/running_aggregator_test.go (new file, 256 lines)
@@ -0,0 +1,256 @@
package models

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
)

func TestAdd(t *testing.T) {
	a := &TestAggregator{}
	ra := NewRunningAggregator(a, &AggregatorConfig{
		Name: "TestRunningAggregator",
		Filter: Filter{
			NamePass: []string{"*"},
		},
		Period: time.Millisecond * 500,
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	go ra.Run(&acc, make(chan struct{}))

	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now().Add(time.Millisecond*150),
	)
	assert.False(t, ra.Add(m))

	for {
		time.Sleep(time.Millisecond)
		if atomic.LoadInt64(&a.sum) > 0 {
			break
		}
	}
	assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}

func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
	a := &TestAggregator{}
	ra := NewRunningAggregator(a, &AggregatorConfig{
		Name: "TestRunningAggregator",
		Filter: Filter{
			NamePass: []string{"*"},
		},
		Period: time.Millisecond * 500,
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	go ra.Run(&acc, make(chan struct{}))

	// metric before current period
	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now().Add(-time.Hour),
	)
	assert.False(t, ra.Add(m))

	// metric after current period
	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now().Add(time.Hour),
	)
	assert.False(t, ra.Add(m))

	// "now" metric
	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now().Add(time.Millisecond*50),
	)
	assert.False(t, ra.Add(m))

	for {
		time.Sleep(time.Millisecond)
		if atomic.LoadInt64(&a.sum) > 0 {
			break
		}
	}
	assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}

func TestAddAndPushOnePeriod(t *testing.T) {
	a := &TestAggregator{}
	ra := NewRunningAggregator(a, &AggregatorConfig{
		Name: "TestRunningAggregator",
		Filter: Filter{
			NamePass: []string{"*"},
		},
		Period: time.Millisecond * 500,
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	shutdown := make(chan struct{})

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		ra.Run(&acc, shutdown)
	}()

	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now().Add(time.Millisecond*100),
	)
	assert.False(t, ra.Add(m))

	for {
		time.Sleep(time.Millisecond)
		if acc.NMetrics() > 0 {
			break
		}
	}
	acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)})

	close(shutdown)
	wg.Wait()
}

func TestAddDropOriginal(t *testing.T) {
	ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
		Name: "TestRunningAggregator",
		Filter: Filter{
			NamePass: []string{"RI*"},
		},
		DropOriginal: true,
	})
	assert.NoError(t, ra.Config.Filter.Compile())

	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now(),
	)
	assert.True(t, ra.Add(m))

	// this metric name doesn't match the filter, so Add will return false
	m2 := ra.MakeMetric(
		"foobar",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now(),
	)
	assert.False(t, ra.Add(m2))
}

// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
	now := time.Now()
	ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
		Name: "TestRunningAggregator",
	})
	assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())

	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Untyped,
	)

	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Counter,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Counter,
	)

	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Gauge,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Gauge,
	)
}

type TestAggregator struct {
	sum int64
}

func (t *TestAggregator) Description() string  { return "" }
func (t *TestAggregator) SampleConfig() string { return "" }
func (t *TestAggregator) Reset() {
	atomic.StoreInt64(&t.sum, 0)
}

func (t *TestAggregator) Push(acc telegraf.Accumulator) {
	acc.AddFields("TestMetric",
		map[string]interface{}{"sum": t.sum},
		map[string]string{},
	)
}

func (t *TestAggregator) Add(in telegraf.Metric) {
	for _, v := range in.Fields() {
		if vi, ok := v.(int64); ok {
			atomic.AddInt64(&t.sum, vi)
		}
	}
}
@@ -1,15 +1,38 @@
package internal_models
package models

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/selfstat"
)

var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})

type RunningInput struct {
	Name   string
	Input  telegraf.Input
	Config *InputConfig

	trace       bool
	defaultTags map[string]string

	MetricsGathered selfstat.Stat
}

func NewRunningInput(
	input telegraf.Input,
	config *InputConfig,
) *RunningInput {
	return &RunningInput{
		Input:  input,
		Config: config,
		MetricsGathered: selfstat.Register(
			"gather",
			"metrics_gathered",
			map[string]string{"input": config.Name},
		),
	}
}

// InputConfig containing a name, interval, and filter
@@ -22,3 +45,52 @@ type InputConfig struct {
	Filter   Filter
	Interval time.Duration
}

func (r *RunningInput) Name() string {
	return "inputs." + r.Config.Name
}

// MakeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (r *RunningInput) MakeMetric(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	mType telegraf.ValueType,
	t time.Time,
) telegraf.Metric {
	m := makemetric(
		measurement,
		fields,
		tags,
		r.Config.NameOverride,
		r.Config.MeasurementPrefix,
		r.Config.MeasurementSuffix,
		r.Config.Tags,
		r.defaultTags,
		r.Config.Filter,
		true,
		mType,
		t,
	)

	if r.trace && m != nil {
		fmt.Print("> " + m.String())
	}

	r.MetricsGathered.Incr(1)
	GlobalMetricsGathered.Incr(1)
	return m
}

func (r *RunningInput) Trace() bool {
	return r.trace
}

func (r *RunningInput) SetTrace(trace bool) {
	r.trace = trace
}

func (r *RunningInput) SetDefaultTags(tags map[string]string) {
	r.defaultTags = tags
}
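For context, a hedged usage sketch of the MakeMetric path added above (the plugin value stands for any telegraf.Input, and the config values are made up for illustration):

	ri := models.NewRunningInput(plugin, &models.InputConfig{
		Name:              "cpu",
		MeasurementPrefix: "host_",
	})
	m := ri.MakeMetric(
		"usage",
		map[string]interface{}{"value": 42},
		map[string]string{"cpu": "cpu0"},
		telegraf.Untyped,
		time.Now(),
	)
	// m is nil if the metric was filtered out; otherwise it is named
	// "host_usage" and both selfstat gather counters were incremented.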
339 internal/models/running_input_test.go Normal file
@@ -0,0 +1,339 @@
package models

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
)

func TestMakeMetricNoFields(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Nil(t, m)
}

// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{
			"value": int(101),
			"nil":   nil,
		},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())
	assert.Equal(t, "inputs.TestRunningInput", ri.Name())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Untyped,
	)

	m = ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Counter,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Counter,
	)

	m = ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Gauge,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Gauge,
	)
}

func TestMakeMetricWithPluginTags(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
		Tags: map[string]string{
			"foo": "bar",
		},
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		nil,
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

func TestMakeMetricFilteredOut(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
		Tags: map[string]string{
			"foo": "bar",
		},
		Filter: Filter{NamePass: []string{"foobar"}},
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())
	assert.NoError(t, ri.Config.Filter.Compile())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		nil,
		telegraf.Untyped,
		now,
	)
	assert.Nil(t, m)
}

func TestMakeMetricWithDaemonTags(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})
	ri.SetDefaultTags(map[string]string{
		"foo": "bar",
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

// +/- Inf fields should get dropped
func TestMakeMetricInfFields(t *testing.T) {
	inf := math.Inf(1)
	ninf := math.Inf(-1)
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{
			"value": int(101),
			"inf":   inf,
			"ninf":  ninf,
		},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

func TestMakeMetricAllFieldTypes(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	ri.SetTrace(true)
	assert.Equal(t, true, ri.Trace())

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{
			"a": int(10),
			"b": int8(10),
			"c": int16(10),
			"d": int32(10),
			"e": uint(10),
			"f": uint8(10),
			"g": uint16(10),
			"h": uint32(10),
			"i": uint64(10),
			"j": float32(10),
			"k": uint64(9223372036854775810),
			"l": "foobar",
			"m": true,
		},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Contains(t, m.String(), "a=10i")
	assert.Contains(t, m.String(), "b=10i")
	assert.Contains(t, m.String(), "c=10i")
	assert.Contains(t, m.String(), "d=10i")
	assert.Contains(t, m.String(), "e=10i")
	assert.Contains(t, m.String(), "f=10i")
	assert.Contains(t, m.String(), "g=10i")
	assert.Contains(t, m.String(), "h=10i")
	assert.Contains(t, m.String(), "i=10i")
	assert.Contains(t, m.String(), "j=10")
	assert.NotContains(t, m.String(), "j=10i")
	assert.Contains(t, m.String(), "k=9223372036854775807i")
	assert.Contains(t, m.String(), "l=\"foobar\"")
	assert.Contains(t, m.String(), "m=true")
}

func TestMakeMetricNameOverride(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name:         "TestRunningInput",
		NameOverride: "foobar",
	})

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

func TestMakeMetricNamePrefix(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name:              "TestRunningInput",
		MeasurementPrefix: "foobar_",
	})

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

func TestMakeMetricNameSuffix(t *testing.T) {
	now := time.Now()
	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name:              "TestRunningInput",
		MeasurementSuffix: "_foobar",
	})

	m := ri.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
		m.String(),
	)
}

type testInput struct{}

func (t *testInput) Description() string                   { return "" }
func (t *testInput) SampleConfig() string                  { return "" }
func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil }
@@ -1,132 +1,180 @@
package internal_models
package models

import (
	"log"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/buffer"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/selfstat"
)

const (
	// Default number of metrics kept between flushes.
	DEFAULT_METRIC_BUFFER_LIMIT = 10000
	// Default size of metrics batch size.
	DEFAULT_METRIC_BATCH_SIZE = 1000

	// Limit how many full metric buffers are kept due to failed writes.
	FULL_METRIC_BUFFERS_LIMIT = 100
	// Default number of metrics kept. It should be a multiple of batch size.
	DEFAULT_METRIC_BUFFER_LIMIT = 10000
)

// RunningOutput contains the output configuration
type RunningOutput struct {
	Name                string
	Output              telegraf.Output
	Config              *OutputConfig
	Quiet               bool
	MetricBufferLimit   int
	FlushBufferWhenFull bool
	Name              string
	Output            telegraf.Output
	Config            *OutputConfig
	MetricBufferLimit int
	MetricBatchSize   int

	metrics    []telegraf.Metric
	tmpmetrics map[int][]telegraf.Metric
	overwriteI int
	mapI       int
	MetricsFiltered selfstat.Stat
	MetricsWritten  selfstat.Stat
	BufferSize      selfstat.Stat
	BufferLimit     selfstat.Stat
	WriteTime       selfstat.Stat

	sync.Mutex
	metrics     *buffer.Buffer
	failMetrics *buffer.Buffer
}

func NewRunningOutput(
	name string,
	output telegraf.Output,
	conf *OutputConfig,
	batchSize int,
	bufferLimit int,
) *RunningOutput {
	if bufferLimit == 0 {
		bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
	}
	if batchSize == 0 {
		batchSize = DEFAULT_METRIC_BATCH_SIZE
	}
	ro := &RunningOutput{
		Name:        name,
		metrics:     make([]telegraf.Metric, 0),
		tmpmetrics:  make(map[int][]telegraf.Metric),
		metrics:     buffer.NewBuffer(batchSize),
		failMetrics: buffer.NewBuffer(bufferLimit),
		Output:      output,
		Config:      conf,
		MetricBufferLimit: DEFAULT_METRIC_BUFFER_LIMIT,
		MetricBufferLimit: bufferLimit,
		MetricBatchSize:   batchSize,
		MetricsWritten: selfstat.Register(
			"write",
			"metrics_written",
			map[string]string{"output": name},
		),
		MetricsFiltered: selfstat.Register(
			"write",
			"metrics_filtered",
			map[string]string{"output": name},
		),
		BufferSize: selfstat.Register(
			"write",
			"buffer_size",
			map[string]string{"output": name},
		),
		BufferLimit: selfstat.Register(
			"write",
			"buffer_limit",
			map[string]string{"output": name},
		),
		WriteTime: selfstat.RegisterTiming(
			"write",
			"write_time_ns",
			map[string]string{"output": name},
		),
	}
	ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
	return ro
}

// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
	if ro.Config.Filter.IsActive {
		if !ro.Config.Filter.ShouldMetricPass(metric) {
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
	// Filter any tagexclude/taginclude parameters before adding metric
	if ro.Config.Filter.IsActive() {
		// In order to filter out tags, we need to create a new metric, since
		// metrics are immutable once created.
		name := m.Name()
		tags := m.Tags()
		fields := m.Fields()
		t := m.Time()
		if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
			ro.MetricsFiltered.Incr(1)
			return
		}
		// error is not possible if creating from another metric, so ignore.
		m, _ = metric.New(name, tags, fields, t)
	}
	ro.Lock()
	defer ro.Unlock()

	if len(ro.metrics) < ro.MetricBufferLimit {
		ro.metrics = append(ro.metrics, metric)
	} else {
		if ro.FlushBufferWhenFull {
			ro.metrics = append(ro.metrics, metric)
			tmpmetrics := make([]telegraf.Metric, len(ro.metrics))
			copy(tmpmetrics, ro.metrics)
			ro.metrics = make([]telegraf.Metric, 0)
			err := ro.write(tmpmetrics)
			if err != nil {
				log.Printf("ERROR writing full metric buffer to output %s, %s",
					ro.Name, err)
				if len(ro.tmpmetrics) == FULL_METRIC_BUFFERS_LIMIT {
					ro.mapI = 0
					// overwrite one
					ro.tmpmetrics[ro.mapI] = tmpmetrics
					ro.mapI++
				} else {
					ro.tmpmetrics[ro.mapI] = tmpmetrics
					ro.mapI++
				}
			}
		} else {
			log.Printf("WARNING: overwriting cached metrics, you may want to " +
				"increase the metric_buffer_limit setting in your [agent] " +
				"config if you do not wish to overwrite metrics.\n")
			if ro.overwriteI == len(ro.metrics) {
				ro.overwriteI = 0
			}
			ro.metrics[ro.overwriteI] = metric
			ro.overwriteI++
	ro.metrics.Add(m)
	if ro.metrics.Len() == ro.MetricBatchSize {
		batch := ro.metrics.Batch(ro.MetricBatchSize)
		err := ro.write(batch)
		if err != nil {
			ro.failMetrics.Add(batch...)
		}
	}
}

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
	ro.Lock()
	defer ro.Unlock()
	err := ro.write(ro.metrics)
	if err != nil {
		return err
	} else {
		ro.metrics = make([]telegraf.Metric, 0)
		ro.overwriteI = 0
	}
	nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
	log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
		ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
	ro.BufferSize.Incr(int64(nFails + nMetrics))
	var err error
	if !ro.failMetrics.IsEmpty() {
		// how many batches of failed writes we need to write.
		nBatches := nFails/ro.MetricBatchSize + 1
		batchSize := ro.MetricBatchSize

		// Write any cached metric buffers that failed previously
		for i, tmpmetrics := range ro.tmpmetrics {
			if err := ro.write(tmpmetrics); err != nil {
				return err
			} else {
				delete(ro.tmpmetrics, i)
		for i := 0; i < nBatches; i++ {
			// If it's the last batch, only grab the metrics that have not had
			// a write attempt already (this is primarily to preserve order).
			if i == nBatches-1 {
				batchSize = nFails % ro.MetricBatchSize
			}
			batch := ro.failMetrics.Batch(batchSize)
			// If we've already failed previous writes, don't bother trying to
			// write to this output again. We are not exiting the loop just so
			// that we can rotate the metrics to preserve order.
			if err == nil {
				err = ro.write(batch)
			}
			if err != nil {
				ro.failMetrics.Add(batch...)
			}
		}
	}

	batch := ro.metrics.Batch(ro.MetricBatchSize)
	// see comment above about not trying to write to an already failed output.
	// if ro.failMetrics is empty then err will always be nil at this point.
	if err == nil {
		err = ro.write(batch)
	}

	if err != nil {
		ro.failMetrics.Add(batch...)
		return err
	}
	return nil
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
	nMetrics := len(metrics)
	if nMetrics == 0 {
		return nil
	}
	start := time.Now()
	err := ro.Output.Write(metrics)
	elapsed := time.Since(start)
	if err == nil {
		if !ro.Quiet {
			log.Printf("Wrote %d metrics to output %s in %s\n",
				len(metrics), ro.Name, elapsed)
		}
		log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
			ro.Name, nMetrics, elapsed)
		ro.MetricsWritten.Incr(int64(nMetrics))
		ro.BufferSize.Incr(-int64(nMetrics))
		ro.WriteTime.Incr(elapsed.Nanoseconds())
	}
	return err
}
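The nBatches arithmetic in Write deserves a worked example. A sketch with illustrative numbers: with MetricBatchSize = 5 and 13 previously failed metrics, nBatches = 13/5 + 1 = 3; the first two passes retry full batches of 5, and the last pass shrinks to 13 % 5 = 3, so the most recently failed metrics go last and order is preserved.

	// illustrative only: mirrors the failed-write loop in Write above
	nFails, size := 13, 5
	nBatches := nFails/size + 1 // 3 passes: 5, 5, then the remainder
	batchSize := size
	for i := 0; i < nBatches; i++ {
		if i == nBatches-1 {
			batchSize = nFails % size // 3 metrics on the final pass
		}
		_ = batchSize // a real pass pops failMetrics.Batch(batchSize) here
	}

Note that when nFails divides evenly, the final batch is empty, which is harmless: write returns immediately on a zero-length slice.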
@@ -1,8 +1,7 @@
package internal_models
package models

import (
	"fmt"
	"sort"
	"sync"
	"testing"

@@ -29,16 +28,89 @@ var next5 = []telegraf.Metric{
	testutil.TestMetric(101, "metric10"),
}

// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
		Filter: Filter{},
	}

	m := &perfOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for n := 0; n < b.N; n++ {
		ro.AddMetric(testutil.TestMetric(101, "metric1"))
		ro.Write()
	}
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &perfOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for n := 0; n < b.N; n++ {
		ro.AddMetric(testutil.TestMetric(101, "metric1"))
		if n%100 == 0 {
			ro.Write()
		}
	}
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &perfOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for n := 0; n < b.N; n++ {
		ro.AddMetric(testutil.TestMetric(101, "metric1"))
	}
}

// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			NameDrop: []string{"metric1", "metric2"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 8)
}

// Test that NameDrop filters without a match do nothing.
func TestRunningOutput_PassFilter(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			NameDrop: []string{"metric1000", "foo*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
@@ -53,50 +125,98 @@ func TestRunningOutputDefault(t *testing.T) {
	assert.Len(t, m.Metrics(), 10)
}

// Test that the first metric gets overwritten if there is a buffer overflow.
func TestRunningOutputOverwrite(t *testing.T) {
// Test that tags are properly included
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
			TagInclude: []string{"nothing*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.MetricBufferLimit = 4
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	require.Len(t, m.Metrics(), 0)
	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	require.NoError(t, err)
	require.Len(t, m.Metrics(), 4)

	var expected, actual []string
	for i, exp := range first5[1:] {
		expected = append(expected, exp.String())
		actual = append(actual, m.Metrics()[i].String())
	}

	sort.Strings(expected)
	sort.Strings(actual)

	assert.Equal(t, expected, actual)
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Empty(t, m.Metrics()[0].Tags())
}

// Test that multiple buffer overflows are handled properly.
func TestRunningOutputMultiOverwrite(t *testing.T) {
// Test that tags are properly excluded
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
			TagExclude: []string{"tag*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 0)
}

// Test that tags are properly excluded
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagExclude: []string{"nothing*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that tags are properly included
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagInclude: []string{"tag*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.MetricBufferLimit = 3
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
@@ -104,39 +224,24 @@ func TestRunningOutputMultiOverwrite(t *testing.T) {
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	require.Len(t, m.Metrics(), 0)
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	require.NoError(t, err)
	require.Len(t, m.Metrics(), 3)

	var expected, actual []string
	for i, exp := range next5[2:] {
		expected = append(expected, exp.String())
		actual = append(actual, m.Metrics()[i].String())
	}

	sort.Strings(expected)
	sort.Strings(actual)

	assert.Equal(t, expected, actual)
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 10)
}

// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
		Filter: Filter{},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.FlushBufferWhenFull = true
	ro.MetricBufferLimit = 5
	ro := NewRunningOutput("test", m, conf, 6, 10)

	// Fill buffer to limit
	// Fill buffer to 1 under limit
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
@@ -159,15 +264,11 @@ func TestRunningOutputFlushWhenFull(t *testing.T) {
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
		Filter: Filter{},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.FlushBufferWhenFull = true
	ro.MetricBufferLimit = 4
	ro := NewRunningOutput("test", m, conf, 4, 12)

	// Fill buffer past limit twice
	for _, metric := range first5 {
@@ -177,23 +278,19 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
		ro.AddMetric(metric)
	}
	// flushed twice
	assert.Len(t, m.Metrics(), 10)
	assert.Len(t, m.Metrics(), 8)
}

func TestRunningOutputWriteFail(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf)
	ro.FlushBufferWhenFull = true
	ro.MetricBufferLimit = 4
	ro := NewRunningOutput("test", m, conf, 4, 12)

	// Fill buffer past limit twice
	// Fill buffer to limit twice
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
@@ -216,6 +313,155 @@ func TestRunningOutputWriteFail(t *testing.T) {
	assert.Len(t, m.Metrics(), 10)
}

// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 100, 1000)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	// add 5 more metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 10 metrics were written
	assert.Len(t, m.Metrics(), 10)
	// Verify that they are in order
	expected := append(first5, next5...)
	assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 5, 100)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 20 metrics were written
	assert.Len(t, m.Metrics(), 20)
	// Verify that they are in order
	expected := append(first5, next5...)
	expected = append(expected, first5...)
	expected = append(expected, next5...)
	assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved when there is a remainder
// of points for the batch.
//
// ie, with a batch size of 5:
//
//   1 2 3 4 5 6 <-- order, failed points
//   6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
//   1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 5, 1000)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add and attempt to write a single metric:
	ro.AddMetric(next5[0])
	err = ro.Write()
	require.Error(t, err)

	// unset fail and write metrics
	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 6 metrics were written
	assert.Len(t, m.Metrics(), 6)
	// Verify that they are in order
	expected := append(first5, next5[0])
	assert.Equal(t, expected, m.Metrics())
}

type mockOutput struct {
	sync.Mutex

@@ -263,3 +509,31 @@ func (m *mockOutput) Metrics() []telegraf.Metric {
	defer m.Unlock()
	return m.metrics
}

type perfOutput struct {
	// if true, mock a write failure
	failWrite bool
}

func (m *perfOutput) Connect() error {
	return nil
}

func (m *perfOutput) Close() error {
	return nil
}

func (m *perfOutput) Description() string {
	return ""
}

func (m *perfOutput) SampleConfig() string {
	return ""
}

func (m *perfOutput) Write(metrics []telegraf.Metric) error {
	if m.failWrite {
		return fmt.Errorf("Failed Write!")
	}
	return nil
}
44 internal/models/running_processor.go Normal file
@@ -0,0 +1,44 @@
package models

import (
	"github.com/influxdata/telegraf"
)

type RunningProcessor struct {
	Name      string
	Processor telegraf.Processor
	Config    *ProcessorConfig
}

type RunningProcessors []*RunningProcessor

func (rp RunningProcessors) Len() int           { return len(rp) }
func (rp RunningProcessors) Swap(i, j int)      { rp[i], rp[j] = rp[j], rp[i] }
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }

// ProcessorConfig containing a name, order, and filter
type ProcessorConfig struct {
	Name   string
	Order  int64
	Filter Filter
}

func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
	ret := []telegraf.Metric{}

	for _, metric := range in {
		if rp.Config.Filter.IsActive() {
			// check if the filter matches this metric
			if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
				// the metric did not pass the filter, so skip the processor
				// and pass the metric through unchanged.
				ret = append(ret, metric)
				continue
			}
		}
		// The metric passed the filter, so call the processor's Apply
		// function and append the results to the output slice.
		ret = append(ret, rp.Processor.Apply(metric)...)
	}

	return ret
}
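Because RunningProcessors implements sort.Interface keyed on Config.Order, callers can order a processor chain with the standard library. A minimal sketch (the names and Order values are invented; assumes the sort package is imported):

	procs := RunningProcessors{
		{Name: "second", Config: &ProcessorConfig{Order: 2}},
		{Name: "first", Config: &ProcessorConfig{Order: 1}},
	}
	sort.Sort(procs) // procs[0].Name == "first": the lowest Order runs first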
117 internal/models/running_processor_test.go Normal file
@@ -0,0 +1,117 @@
package models

import (
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
)

type TestProcessor struct {
}

func (f *TestProcessor) SampleConfig() string { return "" }
func (f *TestProcessor) Description() string  { return "" }

// Apply renames:
//   "foo" to "fuz"
//   "bar" to "baz"
// And it also drops measurements named "dropme"
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
	out := make([]telegraf.Metric, 0)
	for _, m := range in {
		switch m.Name() {
		case "foo":
			out = append(out, testutil.TestMetric(1, "fuz"))
		case "bar":
			out = append(out, testutil.TestMetric(1, "baz"))
		case "dropme":
			// drop the metric!
		default:
			out = append(out, m)
		}
	}
	return out
}

func NewTestRunningProcessor() *RunningProcessor {
	out := &RunningProcessor{
		Name:      "test",
		Processor: &TestProcessor{},
		Config:    &ProcessorConfig{Filter: Filter{}},
	}
	return out
}

func TestRunningProcessor(t *testing.T) {
	inmetrics := []telegraf.Metric{
		testutil.TestMetric(1, "foo"),
		testutil.TestMetric(1, "bar"),
		testutil.TestMetric(1, "baz"),
	}

	expectedNames := []string{
		"fuz",
		"baz",
		"baz",
	}
	rfp := NewTestRunningProcessor()
	filteredMetrics := rfp.Apply(inmetrics...)

	actualNames := []string{
		filteredMetrics[0].Name(),
		filteredMetrics[1].Name(),
		filteredMetrics[2].Name(),
	}
	assert.Equal(t, expectedNames, actualNames)
}

func TestRunningProcessor_WithNameDrop(t *testing.T) {
	inmetrics := []telegraf.Metric{
		testutil.TestMetric(1, "foo"),
		testutil.TestMetric(1, "bar"),
		testutil.TestMetric(1, "baz"),
	}

	expectedNames := []string{
		"foo",
		"baz",
		"baz",
	}
	rfp := NewTestRunningProcessor()

	rfp.Config.Filter.NameDrop = []string{"foo"}
	assert.NoError(t, rfp.Config.Filter.Compile())

	filteredMetrics := rfp.Apply(inmetrics...)

	actualNames := []string{
		filteredMetrics[0].Name(),
		filteredMetrics[1].Name(),
		filteredMetrics[2].Name(),
	}
	assert.Equal(t, expectedNames, actualNames)
}

func TestRunningProcessor_DroppedMetric(t *testing.T) {
	inmetrics := []telegraf.Metric{
		testutil.TestMetric(1, "dropme"),
		testutil.TestMetric(1, "foo"),
		testutil.TestMetric(1, "bar"),
	}

	expectedNames := []string{
		"fuz",
		"baz",
	}
	rfp := NewTestRunningProcessor()
	filteredMetrics := rfp.Apply(inmetrics...)

	actualNames := []string{
		filteredMetrics[0].Name(),
		filteredMetrics[1].Name(),
	}
	assert.Equal(t, expectedNames, actualNames)
}
60 logger/logger.go Normal file
@@ -0,0 +1,60 @@
package logger

import (
	"io"
	"log"
	"os"
	"time"

	"github.com/influxdata/wlog"
)

// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
	return &telegrafLog{
		writer: wlog.NewWriter(w),
	}
}

type telegrafLog struct {
	writer io.Writer
}

func (t *telegrafLog) Write(b []byte) (n int, err error) {
	return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
}

// SetupLogging configures the logging output.
//   debug:   sets the log level to DEBUG
//   quiet:   sets the log level to ERROR
//   logfile: directs the logging output to a file. An empty string is
//            interpreted as stderr. If there is an error opening the file,
//            the logger falls back to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
	log.SetFlags(0)
	if debug {
		wlog.SetLevel(wlog.DEBUG)
	}
	if quiet {
		wlog.SetLevel(wlog.ERROR)
	}

	var oFile *os.File
	if logfile != "" {
		if _, err := os.Stat(logfile); os.IsNotExist(err) {
			if oFile, err = os.Create(logfile); err != nil {
				log.Printf("E! Unable to create %s (%s), using stderr", logfile, err)
				oFile = os.Stderr
			}
		} else {
			if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
				log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err)
				oFile = os.Stderr
			}
		}
	} else {
		oFile = os.Stderr
	}

	log.SetOutput(newTelegrafWriter(oFile))
}
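A brief usage sketch of the logger above (the file path is illustrative): wlog filters on the single-letter level prefix, so levelled lines go through the standard log package.

	logger.SetupLogging(false, false, "/tmp/telegraf.log")
	log.Printf("I! an info message")  // written; INFO is the default level
	log.Printf("D! a debug message")  // dropped unless debug is true
	log.Printf("E! an error message") // always written

Each surviving line is prefixed with an RFC3339 UTC timestamp by telegrafLog.Write, which is what the f[19:] slicing in the tests below relies on.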
62 logger/logger_test.go Normal file
@@ -0,0 +1,62 @@
package logger

import (
	"bytes"
	"io/ioutil"
	"log"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestWriteLogToFile(t *testing.T) {
	tmpfile, err := ioutil.TempFile("", "")
	assert.NoError(t, err)
	defer func() { os.Remove(tmpfile.Name()) }()

	SetupLogging(false, false, tmpfile.Name())
	log.Printf("I! TEST")
	log.Printf("D! TEST") // <- should be ignored

	f, err := ioutil.ReadFile(tmpfile.Name())
	assert.NoError(t, err)
	assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func TestDebugWriteLogToFile(t *testing.T) {
	tmpfile, err := ioutil.TempFile("", "")
	assert.NoError(t, err)
	defer func() { os.Remove(tmpfile.Name()) }()

	SetupLogging(true, false, tmpfile.Name())
	log.Printf("D! TEST")

	f, err := ioutil.ReadFile(tmpfile.Name())
	assert.NoError(t, err)
	assert.Equal(t, f[19:], []byte("Z D! TEST\n"))
}

func TestErrorWriteLogToFile(t *testing.T) {
	tmpfile, err := ioutil.TempFile("", "")
	assert.NoError(t, err)
	defer func() { os.Remove(tmpfile.Name()) }()

	SetupLogging(false, true, tmpfile.Name())
	log.Printf("E! TEST")
	log.Printf("I! TEST") // <- should be ignored

	f, err := ioutil.ReadFile(tmpfile.Name())
	assert.NoError(t, err)
	assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}

func BenchmarkTelegrafLogWrite(b *testing.B) {
	var msg = []byte("test")
	var buf bytes.Buffer
	w := newTelegrafWriter(&buf)
	for i := 0; i < b.N; i++ {
		buf.Reset()
		w.Write(msg)
	}
}
122 metric.go
@@ -3,92 +3,60 @@ package telegraf
import (
	"time"

	// TODO remove
	"github.com/influxdata/influxdb/client/v2"
)

// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int

// Possible values for the ValueType enum.
const (
	_ ValueType = iota
	Counter
	Gauge
	Untyped
)

type Metric interface {
	// Name returns the measurement name of the metric
	Serialize() []byte
	String() string // convenience function for string(Serialize())
	Copy() Metric
	// Split will attempt to return multiple metrics with the same timestamp
	// whose string representations are no longer than maxSize.
	// Metrics with a single field may exceed the requested size.
	Split(maxSize int) []Metric

	// Tag functions
	HasTag(key string) bool
	AddTag(key, value string)
	RemoveTag(key string)

	// Field functions
	HasField(key string) bool
	AddField(key string, value interface{})
	RemoveField(key string) error

	// Name functions
	SetName(name string)
	SetPrefix(prefix string)
	SetSuffix(suffix string)

	// Getting data structure functions
	Name() string

	// Name returns the tags associated with the metric
	Tags() map[string]string

	// Time return the timestamp for the metric
	Time() time.Time

	// UnixNano returns the unix nano time of the metric
	UnixNano() int64

	// Fields returns the fields for the metric
	Fields() map[string]interface{}
	Time() time.Time
	UnixNano() int64
	Type() ValueType
	Len() int // returns the length of the serialized metric, including newline
	HashID() uint64

	// String returns a line-protocol string of the metric
	String() string

	// PrecisionString returns a line-protocol string of the metric, at precision
	PrecisionString(precison string) string
	// aggregator things:
	SetAggregate(bool)
	IsAggregate() bool

	// Point returns a influxdb client.Point object
	// TODO remove this function
	Point() *client.Point
}

// metric is a wrapper of the influxdb client.Point struct
type metric struct {
	pt *client.Point
}

// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetric(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t ...time.Time,
) (Metric, error) {
	var T time.Time
	if len(t) > 0 {
		T = t[0]
	}

	pt, err := client.NewPoint(name, tags, fields, T)
	if err != nil {
		return nil, err
	}
	return &metric{
		pt: pt,
	}, nil
}

func (m *metric) Name() string {
	return m.pt.Name()
}

func (m *metric) Tags() map[string]string {
	return m.pt.Tags()
}

func (m *metric) Time() time.Time {
	return m.pt.Time()
}

func (m *metric) UnixNano() int64 {
	return m.pt.UnixNano()
}

func (m *metric) Fields() map[string]interface{} {
	return m.pt.Fields()
}

func (m *metric) String() string {
	return m.pt.String()
}

func (m *metric) PrecisionString(precison string) string {
	return m.pt.PrecisionString(precison)
}

func (m *metric) Point() *client.Point {
	return m.pt
}
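For orientation, a hedged sketch of the reworked interface in use, building on the New constructor from the metric package that appears later in this diff (imports and error handling abbreviated):

	m, err := metric.New(
		"cpu",
		map[string]string{"host": "a"},
		map[string]interface{}{"value": 42},
		time.Now(),
		telegraf.Gauge,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(m.String())                   // line protocol, e.g. cpu,host=a value=42i <ts>
	fmt.Println(m.Type() == telegraf.Gauge) // true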
49 metric/escape.go Normal file
@@ -0,0 +1,49 @@
package metric

import (
	"strings"
)

var (
	// escaper is for escaping:
	//   - tag keys
	//   - tag values
	//   - field keys
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
	unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

	// nameEscaper is for escaping measurement names only.
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	nameEscaper   = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
	nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)

	// stringFieldEscaper is for escaping string field values only.
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	stringFieldEscaper   = strings.NewReplacer(`"`, `\"`)
	stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
)

func escape(s string, t string) string {
	switch t {
	case "fieldkey", "tagkey", "tagval":
		return escaper.Replace(s)
	case "name":
		return nameEscaper.Replace(s)
	case "fieldval":
		return stringFieldEscaper.Replace(s)
	}
	return s
}

func unescape(s string, t string) string {
	switch t {
	case "fieldkey", "tagkey", "tagval":
		return unEscaper.Replace(s)
	case "name":
		return nameUnEscaper.Replace(s)
	case "fieldval":
		return stringFieldUnEscaper.Replace(s)
	}
	return s
}
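To illustrate the escaper pairs above (a sketch from inside the metric package, since the helpers are unexported): measurement names escape only commas and spaces, and the two operations round-trip.

	s := escape("cpu load,1", "name")                // `cpu\ load\,1`
	fmt.Println(unescape(s, "name") == "cpu load,1") // true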
38 metric/inline_strconv_parse.go Normal file
@@ -0,0 +1,38 @@
package metric

import (
	"reflect"
	"strconv"
	"unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
	s := unsafeBytesToString(b)
	return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
	s := unsafeBytesToString(b)
	return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
	return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
	dst := reflect.StringHeader{
		Data: src.Data,
		Len:  src.Len,
	}
	s := *(*string)(unsafe.Pointer(&dst))
	return s
}
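One caveat worth spelling out about the zero-alloc trick above: the string returned by unsafeBytesToString aliases the byte slice, so it must not be retained past any mutation of that slice. The parse wrappers are safe because strconv only reads the string for the duration of the call. A sketch of the hazard (illustrative):

	b := []byte("123")
	n, _ := parseIntBytes(b, 10, 64) // fine: b is only read during the call
	s := unsafeBytesToString(b)      // s shares b's backing array
	b[0] = '9'                       // s now silently reads "923"
	_ = n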
103 metric/inline_strconv_parse_test.go Normal file
@@ -0,0 +1,103 @@
package metric

import (
	"strconv"
	"testing"
	"testing/quick"
)

func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
	f := func(b []byte, base int, bitSize int) bool {
		exp, expErr := strconv.ParseInt(string(b), base, bitSize)
		got, gotErr := parseIntBytes(b, base, bitSize)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
	buf := []byte{}
	f := func(n int64) bool {
		buf = strconv.AppendInt(buf[:0], n, 10)

		exp, expErr := strconv.ParseInt(string(buf), 10, 64)
		got, gotErr := parseIntBytes(buf, 10, 64)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
	f := func(b []byte, bitSize int) bool {
		exp, expErr := strconv.ParseFloat(string(b), bitSize)
		got, gotErr := parseFloatBytes(b, bitSize)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
	buf := []byte{}
	f := func(n float64) bool {
		buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)

		exp, expErr := strconv.ParseFloat(string(buf), 64)
		got, gotErr := parseFloatBytes(buf, 64)

		return exp == got && checkErrs(expErr, gotErr)
	}

	cfg := &quick.Config{
		MaxCount: 10000,
	}

	if err := quick.Check(f, cfg); err != nil {
		t.Fatal(err)
	}
}

func TestParseBoolBytesEquivalence(t *testing.T) {
	var buf []byte
	for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
		buf = append(buf[:0], s...)

		exp, expErr := strconv.ParseBool(s)
		got, gotErr := parseBoolBytes(buf)

		if got != exp || !checkErrs(expErr, gotErr) {
			t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
		}
	}
}

func checkErrs(a, b error) bool {
	if (a == nil) != (b == nil) {
		return false
	}

	return a == nil || a.Error() == b.Error()
}
546 metric/metric.go Normal file
@@ -0,0 +1,546 @@
package metric

import (
	"bytes"
	"fmt"
	"hash/fnv"
	"sort"
	"strconv"
	"time"

	"github.com/influxdata/telegraf"

	// TODO remove
	"github.com/influxdata/influxdb/client/v2"
)

const MaxInt = int(^uint(0) >> 1)

func New(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t time.Time,
	mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
	if len(fields) == 0 {
		return nil, fmt.Errorf("Metric cannot be made without any fields")
	}
	if len(name) == 0 {
		return nil, fmt.Errorf("Metric cannot be made with an empty name")
	}

	var thisType telegraf.ValueType
	if len(mType) > 0 {
		thisType = mType[0]
	} else {
		thisType = telegraf.Untyped
	}

	m := &metric{
		name:  []byte(escape(name, "name")),
		t:     []byte(fmt.Sprint(t.UnixNano())),
		nsec:  t.UnixNano(),
		mType: thisType,
	}

	// pre-allocate exact size of the tags slice
	taglen := 0
	for k, v := range tags {
		// TODO check that length of tag key & value are > 0
		taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
	}
	m.tags = make([]byte, taglen)

	i := 0
	for k, v := range tags {
		m.tags[i] = ','
		i++
		i += copy(m.tags[i:], escape(k, "tagkey"))
		m.tags[i] = '='
		i++
		i += copy(m.tags[i:], escape(v, "tagval"))
	}

	// pre-allocate capacity of the fields slice
	fieldlen := 0
	for k := range fields {
		// 10 bytes is completely arbitrary, but will at least prevent some
		// amount of allocations. There's a small possibility this will create
		// slightly more allocations for a metric that has many short fields.
		fieldlen += len(k) + 10
	}
	m.fields = make([]byte, 0, fieldlen)

	i = 0
	for k, v := range fields {
		if i != 0 {
			m.fields = append(m.fields, ',')
		}
		m.fields = appendField(m.fields, k, v)
		i++
	}

	return m, nil
}

// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
func indexUnescapedByte(buf []byte, b byte) int {
	var keyi int
	for {
		i := bytes.IndexByte(buf[keyi:], b)
		if i == -1 {
			return -1
		} else if i == 0 {
			break
		}
		keyi += i
		if countBackslashes(buf, keyi-1)%2 == 0 {
			break
		} else {
			keyi++
		}
	}
	return keyi
}

// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
	var count int
	for {
		if index < 0 {
			return count
		}
		if buf[index] == '\\' {
			count++
			index--
		} else {
			break
		}
	}
	return count
}

type metric struct {
	name   []byte
	tags   []byte
	fields []byte
	t      []byte

	mType     telegraf.ValueType
	aggregate bool

	// cached values for reuse in "get" functions
	hashID uint64
	nsec   int64
}

func (m *metric) Point() *client.Point {
	c, _ := client.NewPoint(m.Name(), m.Tags(), m.Fields(), m.Time())
	return c
}

func (m *metric) String() string {
	return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
}

func (m *metric) SetAggregate(b bool) {
	m.aggregate = b
}

func (m *metric) IsAggregate() bool {
	return m.aggregate
}

func (m *metric) Type() telegraf.ValueType {
	return m.mType
}

func (m *metric) Len() int {
	// 3 is for 2 spaces surrounding the fields array + newline at the end.
	return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}

func (m *metric) Serialize() []byte {
	tmp := make([]byte, m.Len())
	i := 0
	i += copy(tmp[i:], m.name)
	i += copy(tmp[i:], m.tags)
	tmp[i] = ' '
	i++
	i += copy(tmp[i:], m.fields)
	tmp[i] = ' '
	i++
	i += copy(tmp[i:], m.t)
	tmp[i] = '\n'
	return tmp
}

func (m *metric) Split(maxSize int) []telegraf.Metric {
	if m.Len() < maxSize {
		return []telegraf.Metric{m}
	}
	var out []telegraf.Metric

	// constant number of bytes for each metric (in addition to field bytes)
	constant := len(m.name) + len(m.tags) + len(m.t) + 3
	// currently selected fields
	fields := make([]byte, 0, maxSize)

	i := 0
	for {
		if i >= len(m.fields) {
			// hit the end of the field byte slice
			if len(fields) > 0 {
				out = append(out, copyWith(m.name, m.tags, fields, m.t))
			}
			break
		}

		// find the end of the next field
		j := indexUnescapedByte(m.fields[i:], ',')
		if j == -1 {
			j = len(m.fields)
		} else {
			j += i
		}

		// if true, then we need to create a metric _not_ including the currently
		// selected field
		if len(m.fields[i:j])+len(fields)+constant > maxSize {
			// if false, then we'll create a metric including the currently
			// selected field anyways. This means that the given maxSize is too
			// small for a single field to fit.
			if len(fields) > 0 {
				out = append(out, copyWith(m.name, m.tags, fields, m.t))
			}

			fields = make([]byte, 0, maxSize)
		}
		if len(fields) > 0 {
			fields = append(fields, ',')
		}
		fields = append(fields, m.fields[i:j]...)

		i = j + 1
	}
	return out
}

func (m *metric) Fields() map[string]interface{} {
	fieldMap := map[string]interface{}{}
	i := 0
	for {
		if i >= len(m.fields) {
			break
		}
		// end index of field key
		i1 := indexUnescapedByte(m.fields[i:], '=')
		if i1 == -1 {
			break
		}
		// start index of field value
		i2 := i1 + 1

		// end index of field value
		var i3 int
		if m.fields[i:][i2] == '"' {
			i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
			if i3 == -1 {
				i3 = len(m.fields[i:])
			}
			i3 += i2 + 2 // increment index to the comma
		} else {
			i3 = indexUnescapedByte(m.fields[i:], ',')
			if i3 == -1 {
				i3 = len(m.fields[i:])
			}
		}

		switch m.fields[i:][i2] {
		case '"':
			// string field
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			// number field
			switch m.fields[i:][i3-1] {
			case 'i':
				// integer field
				n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
				if err == nil {
					fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
				} else {
					// TODO handle error or just ignore field silently?
				}
			default:
				// float field
				n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
				if err == nil {
					fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
				} else {
					// TODO handle error or just ignore field silently?
				}
			}
		case 'T', 't':
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
		case 'F', 'f':
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
		default:
			// TODO handle unsupported field type
		}

		i += i3 + 1
	}

	return fieldMap
}

func (m *metric) Tags() map[string]string {
	tagMap := map[string]string{}
	if len(m.tags) == 0 {
		return tagMap
	}

	i := 0
	for {
		// start index of tag key
		i0 := indexUnescapedByte(m.tags[i:], ',') + 1
		if i0 == 0 {
			// didn't find a tag start
			break
		}
		// end index of tag key
		i1 := indexUnescapedByte(m.tags[i:], '=')
		// start index of tag value
		i2 := i1 + 1
		// end index of tag value (starting from i2)
		i3 := indexUnescapedByte(m.tags[i+i2:], ',')
		if i3 == -1 {
			tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
			break
		}
		tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
		// increment start index for the next tag
		i += i2 + i3
	}

	return tagMap
}

func (m *metric) Name() string {
	return unescape(string(m.name), "name")
}

func (m *metric) Time() time.Time {
	// assume metric has been verified already and ignore error:
	if m.nsec == 0 {
		m.nsec, _ = parseIntBytes(m.t, 10, 64)
	}
	return time.Unix(0, m.nsec)
}

func (m *metric) UnixNano() int64 {
	// assume metric has been verified already and ignore error:
	if m.nsec == 0 {
		m.nsec, _ = parseIntBytes(m.t, 10, 64)
	}
	return m.nsec
}

func (m *metric) SetName(name string) {
	m.hashID = 0
	m.name = []byte(nameEscaper.Replace(name))
}

func (m *metric) SetPrefix(prefix string) {
	m.hashID = 0
	m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}

func (m *metric) SetSuffix(suffix string) {
	m.hashID = 0
	m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}

func (m *metric) AddTag(key, value string) {
	m.RemoveTag(key)
	m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}

func (m *metric) HasTag(key string) bool {
	i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return false
	}
	return true
}

func (m *metric) RemoveTag(key string) {
	m.hashID = 0

	i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return
	}

	tmp := m.tags[0 : i-1]
	j := indexUnescapedByte(m.tags[i:], ',')
	if j != -1 {
		tmp = append(tmp, m.tags[i+j:]...)
	}
	m.tags = tmp
	return
}

func (m *metric) AddField(key string, value interface{}) {
	m.fields = append(m.fields, ',')
	m.fields = appendField(m.fields, key, value)
}

func (m *metric) HasField(key string) bool {
	i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return false
	}
	return true
}

func (m *metric) RemoveField(key string) error {
	i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return nil
	}

	var tmp []byte
	if i != 0 {
		tmp = m.fields[0 : i-1]
	}
	j := indexUnescapedByte(m.fields[i:], ',')
	if j != -1 {
		tmp = append(tmp, m.fields[i+j:]...)
	}

	if len(tmp) == 0 {
		return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
	}

	m.fields = tmp
	return nil
}

func (m *metric) Copy() telegraf.Metric {
	return copyWith(m.name, m.tags, m.fields, m.t)
}

func copyWith(name, tags, fields, t []byte) telegraf.Metric {
	out := metric{
		name:   make([]byte, len(name)),
		tags:   make([]byte, len(tags)),
		fields: make([]byte, len(fields)),
		t:      make([]byte, len(t)),
	}
	copy(out.name, name)
	copy(out.tags, tags)
	copy(out.fields, fields)
	copy(out.t, t)
	return &out
}

func (m *metric) HashID() uint64 {
	if m.hashID == 0 {
		h := fnv.New64a()
		h.Write(m.name)

		tags := m.Tags()
		tmp := make([]string, len(tags))
		i := 0
		for k, v := range tags {
			tmp[i] = k + v
			i++
		}
		sort.Strings(tmp)

		for _, s := range tmp {
			h.Write([]byte(s))
		}

		m.hashID = h.Sum64()
	}
	return m.hashID
}

func appendField(b []byte, k string, v interface{}) []byte {
	if v == nil {
		return b
	}
	b = append(b, []byte(escape(k, "tagkey")+"=")...)

	// check popular types first
	switch v := v.(type) {
	case float64:
		b = strconv.AppendFloat(b, v, 'f', -1, 64)
	case int64:
		b = strconv.AppendInt(b, v, 10)
		b = append(b, 'i')
	case string:
		b = append(b, '"')
		b = append(b, []byte(escape(v, "fieldval"))...)
		b = append(b, '"')
	case bool:
		b = strconv.AppendBool(b, v)
	case int32:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int16:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int8:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint64:
		// Cap uints above the maximum int value
		var intv int64
		if v <= uint64(MaxInt) {
			intv = int64(v)
		} else {
			intv = int64(MaxInt)
		}
		b = strconv.AppendInt(b, intv, 10)
		b = append(b, 'i')
	case uint32:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint16:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint8:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint:
		// Cap uints above the maximum int value
		var intv int64
		if v <= uint(MaxInt) {
			intv = int64(v)
		} else {
			intv = int64(MaxInt)
		}
		b = strconv.AppendInt(b, intv, 10)
		b = append(b, 'i')
	case float32:
		b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
	case []byte:
		b = append(b, v...)
	default:
		// Can't determine the type, so convert to string
		b = append(b, '"')
		b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
		b = append(b, '"')
	}

	return b
}
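Taken together, the metric type caches its name, tags, fields, and timestamp as pre-escaped line-protocol byte slices, so String() and Serialize() are plain concatenation. A hedged usage sketch of the public API (assuming the package is imported as github.com/influxdata/telegraf/metric; this example is illustrative and not part of the diff):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric" // assumed import path for this new package
)

func main() {
	m, err := metric.New(
		"cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"usage_idle": float64(99)},
		time.Unix(0, 1480614053000000000),
	)
	if err != nil {
		panic(err)
	}
	// String() concatenates the cached byte slices:
	// name + tags + " " + fields + " " + timestamp + "\n"
	fmt.Print(m.String())
	// prints: cpu,host=localhost usage_idle=99 1480614053000000000
}
```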
148 metric/metric_benchmark_test.go Normal file
@@ -0,0 +1,148 @@
package metric

import (
	"fmt"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
)

// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
	s      string
	I      interface{}
	tags   map[string]string
	fields map[string]interface{}
)

func BenchmarkNewMetric(b *testing.B) {
	var mt telegraf.Metric
	for n := 0; n < b.N; n++ {
		mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
	}
	s = string(mt.String())
}

func BenchmarkAddTag(b *testing.B) {
	var mt telegraf.Metric
	mt = &metric{
		name:   []byte("cpu"),
		tags:   []byte(",host=localhost"),
		fields: []byte("a=101"),
		t:      []byte("1480614053000000000"),
	}
	for n := 0; n < b.N; n++ {
		mt.AddTag("foo", "bar")
	}
	s = string(mt.String())
}

func BenchmarkSplit(b *testing.B) {
	var mt telegraf.Metric
	mt = &metric{
		name:   []byte("cpu"),
		tags:   []byte(",host=localhost"),
		fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
		t:      []byte("1480614053000000000"),
	}
	var metrics []telegraf.Metric
	for n := 0; n < b.N; n++ {
		metrics = mt.Split(60)
	}
	s = string(metrics[0].String())
}

func BenchmarkTags(b *testing.B) {
	for n := 0; n < b.N; n++ {
		var mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
		tags = mt.Tags()
	}
	s = fmt.Sprint(tags)
}

func BenchmarkFields(b *testing.B) {
	for n := 0; n < b.N; n++ {
		var mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
		fields = mt.Fields()
	}
	s = fmt.Sprint(fields)
}

func BenchmarkString(b *testing.B) {
	mt, _ := New("test_metric",
		map[string]string{
			"test_tag_1": "tag_value_1",
			"test_tag_2": "tag_value_2",
			"test_tag_3": "tag_value_3",
		},
		map[string]interface{}{
			"string_field": "string",
			"int_field":    int64(1000),
			"float_field":  float64(2.1),
		},
		time.Now(),
	)
	var S string
	for n := 0; n < b.N; n++ {
		S = mt.String()
	}
	s = S
}

func BenchmarkSerialize(b *testing.B) {
	mt, _ := New("test_metric",
		map[string]string{
			"test_tag_1": "tag_value_1",
			"test_tag_2": "tag_value_2",
			"test_tag_3": "tag_value_3",
		},
		map[string]interface{}{
			"string_field": "string",
			"int_field":    int64(1000),
			"float_field":  float64(2.1),
		},
		time.Now(),
	)
	var B []byte
	for n := 0; n < b.N; n++ {
		B = mt.Serialize()
	}
	s = string(B)
}
646 metric/metric_test.go Normal file
@@ -0,0 +1,646 @@
package metric

import (
	"fmt"
	"math"
	"regexp"
	"testing"
	"time"

	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Untyped, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewErrors(t *testing.T) {
	// creating a metric with an empty name produces an error:
	m, err := New(
		"",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	assert.Error(t, err)
	assert.Nil(t, m)

	// creating a metric with empty fields produces an error:
	m, err = New(
		"foobar",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{},
		time.Now(),
	)
	assert.Error(t, err)
	assert.Nil(t, m)
}

func TestNewMetric_Tags(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.True(t, m.HasTag("host"))
	assert.True(t, m.HasTag("datacenter"))

	m.AddTag("newtag", "foo")
	assert.True(t, m.HasTag("newtag"))

	m.RemoveTag("host")
	assert.False(t, m.HasTag("host"))
	assert.True(t, m.HasTag("newtag"))
	assert.True(t, m.HasTag("datacenter"))

	m.RemoveTag("datacenter")
	assert.False(t, m.HasTag("datacenter"))
	assert.True(t, m.HasTag("newtag"))
	assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())

	m.RemoveTag("newtag")
	assert.False(t, m.HasTag("newtag"))
	assert.Equal(t, map[string]string{}, m.Tags())

	assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
}

func TestSerialize(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t,
		[]byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
		m.Serialize())

	m.RemoveTag("datacenter")
	assert.Equal(t,
		[]byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
		m.Serialize())
}

func TestHashID(t *testing.T) {
	m, _ := New(
		"cpu",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	hash := m.HashID()

	// adding a field doesn't change the hash:
	m.AddField("foo", int64(100))
	assert.Equal(t, hash, m.HashID())

	// removing a non-existent tag doesn't change the hash:
	m.RemoveTag("no-op")
	assert.Equal(t, hash, m.HashID())

	// adding a tag does change it:
	m.AddTag("foo", "bar")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()

	// removing a tag also changes it:
	m.RemoveTag("mytag")
	assert.NotEqual(t, hash, m.HashID())
}

func TestHashID_Consistency(t *testing.T) {
	m, _ := New(
		"cpu",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	hash := m.HashID()

	for i := 0; i < 1000; i++ {
		m2, _ := New(
			"cpu",
			map[string]string{
				"datacenter": "us-east-1",
				"mytag":      "foo",
				"another":    "tag",
			},
			map[string]interface{}{
				"value": float64(1),
			},
			time.Now(),
		)
		assert.Equal(t, hash, m2.HashID())
	}
}

func TestNewMetric_NameModifiers(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	hash := m.HashID()
	suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
	assert.Equal(t, "cpu"+suffix, m.String())

	m.SetPrefix("pre_")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()
	assert.Equal(t, "pre_cpu"+suffix, m.String())

	m.SetSuffix("_post")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()
	assert.Equal(t, "pre_cpu_post"+suffix, m.String())

	m.SetName("mem")
	assert.NotEqual(t, hash, m.HashID())
	assert.Equal(t, "mem"+suffix, m.String())
}

func TestNewMetric_FieldModifiers(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.True(t, m.HasField("value"))
	assert.False(t, m.HasField("foo"))

	m.AddField("newfield", "foo")
	assert.True(t, m.HasField("newfield"))

	assert.NoError(t, m.RemoveField("newfield"))
	assert.False(t, m.HasField("newfield"))

	// don't allow user to remove all fields:
	assert.Error(t, m.RemoveField("value"))

	m.AddField("value2", int64(101))
	assert.NoError(t, m.RemoveField("value"))
	assert.False(t, m.HasField("value"))
}

func TestNewMetric_Fields(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(1),
		"int":    int64(1),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, fields, m.Fields())
}

func TestNewMetric_Time(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(1),
		"int":    int64(1),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	m = m.Copy()
	m2 := m.Copy()

	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m2.UnixNano())
}

func TestNewMetric_Copy(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"float": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	m2 := m.Copy()

	assert.Equal(t,
		fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
		m.String())
	m.AddTag("host", "localhost")
	assert.Equal(t,
		fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
		m.String())

	assert.Equal(t,
		fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
		m2.String())
}

func TestNewMetric_AllTypes(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"float64":     float64(1),
		"float32":     float32(1),
		"int64":       int64(1),
		"int32":       int32(1),
		"int16":       int16(1),
		"int8":        int8(1),
		"int":         int(1),
		"uint64":      uint64(1),
		"uint32":      uint32(1),
		"uint16":      uint16(1),
		"uint8":       uint8(1),
		"uint":        uint(1),
		"bytes":       []byte("foo"),
		"nil":         nil,
		"maxuint64":   uint64(MaxInt) + 10,
		"maxuint":     uint(MaxInt) + 10,
		"unsupported": []int{1, 2},
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Contains(t, m.String(), "float64=1")
	assert.Contains(t, m.String(), "float32=1")
	assert.Contains(t, m.String(), "int64=1i")
	assert.Contains(t, m.String(), "int32=1i")
	assert.Contains(t, m.String(), "int16=1i")
	assert.Contains(t, m.String(), "int8=1i")
	assert.Contains(t, m.String(), "int=1i")
	assert.Contains(t, m.String(), "uint64=1i")
	assert.Contains(t, m.String(), "uint32=1i")
	assert.Contains(t, m.String(), "uint16=1i")
	assert.Contains(t, m.String(), "uint8=1i")
	assert.Contains(t, m.String(), "uint=1i")
	assert.NotContains(t, m.String(), "nil")
	assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
	assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}

func TestIndexUnescapedByte(t *testing.T) {
	tests := []struct {
		in       []byte
		b        byte
		expected int
	}{
		{
			in:       []byte(`foobar`),
			b:        'b',
			expected: 3,
		},
		{
			in:       []byte(`foo\bar`),
			b:        'b',
			expected: -1,
		},
		{
			in:       []byte(`foo\\bar`),
			b:        'b',
			expected: 5,
		},
		{
			in:       []byte(`foobar`),
			b:        'f',
			expected: 0,
		},
		{
			in:       []byte(`foobar`),
			b:        'r',
			expected: 5,
		},
		{
			in:       []byte(`\foobar`),
			b:        'f',
			expected: -1,
		},
	}

	for _, test := range tests {
		got := indexUnescapedByte(test.in, test.b)
		assert.Equal(t, test.expected, got)
	}
}

func TestNewGaugeMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now, telegraf.Gauge)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Gauge, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now, telegraf.Counter)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Counter, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

// test splitting metric into various max lengths
func TestSplitMetric(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split80 := m.Split(80)
	assert.Len(t, split80, 2)

	split70 := m.Split(70)
	assert.Len(t, split70, 3)

	split60 := m.Split(60)
	assert.Len(t, split60, 4)
}

// test splitting metric into various max lengths
// use a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"foo":     float64(98934259085),
		"bar":     float64(19385292),
		"number":  float64(19385292),
		"another": float64(19385292),
		"n":       float64(19385292),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	// verification regex
	re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)

	split90 := m.Split(90)
	assert.Len(t, split90, 2)
	for _, splitM := range split90 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}

	split70 := m.Split(70)
	assert.Len(t, split70, 3)
	for _, splitM := range split70 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}

	split20 := m.Split(20)
	assert.Len(t, split20, 5)
	for _, splitM := range split20 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}
}

// test splitting metric even when given length is shorter than
// shortest possible length
// Split should split metric as short as possible, ie, 1 field per metric
func TestSplitMetric_TooShort(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(10)
	assert.Len(t, split, 5)
	strings := make([]string, 5)
	for i, splitM := range split {
		strings[i] = splitM.String()
	}

	assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}

func TestSplitMetric_NoOp(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, m, split[0])
}

func TestSplitMetric_OneField(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float": float64(100001),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(1)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(40)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}

func TestNewMetricAggregate(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.False(t, m.IsAggregate())
	m.SetAggregate(true)
	assert.True(t, m.IsAggregate())
}

func TestNewMetricPoint(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	p := m.Point()

	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, fields, p.Fields())
	assert.Equal(t, "cpu", p.Name())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
}
627 metric/parse.go Normal file
@@ -0,0 +1,627 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrInvalidNumber = errors.New("invalid number")
|
||||
)
|
||||
|
||||
const (
|
||||
// the number of characters for the largest possible int64 (9223372036854775807)
|
||||
maxInt64Digits = 19
|
||||
|
||||
// the number of characters for the smallest possible int64 (-9223372036854775808)
|
||||
minInt64Digits = 20
|
||||
|
||||
// the number of characters required for the largest float64 before a range check
|
||||
// would occur during parsing
|
||||
maxFloat64Digits = 25
|
||||
|
||||
// the number of characters required for smallest float64 before a range check occur
|
||||
// would occur during parsing
|
||||
minFloat64Digits = 27
|
||||
|
||||
MaxKeyLength = 65535
|
||||
)
|
||||
|
||||
// The following constants allow us to specify which state to move to
|
||||
// next, when scanning sections of a Point.
|
||||
const (
|
||||
tagKeyState = iota
|
||||
tagValueState
|
||||
fieldsState
|
||||
)
|
||||
|
||||
func Parse(buf []byte) ([]telegraf.Metric, error) {
|
||||
return ParseWithDefaultTime(buf, time.Now())
|
||||
}
|
||||
|
||||
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
|
||||
if len(buf) <= 6 {
|
||||
return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
|
||||
}
|
||||
metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
|
||||
var errStr string
|
||||
i := 0
|
||||
for {
|
||||
j := bytes.IndexByte(buf[i:], '\n')
|
||||
if j == -1 {
|
||||
break
|
||||
}
|
||||
if len(buf[i:i+j]) < 2 {
|
||||
i += j + 1 // increment i past the previous newline
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := parseMetric(buf[i:i+j], t)
|
||||
if err != nil {
|
||||
i += j + 1 // increment i past the previous newline
|
||||
errStr += " " + err.Error()
|
||||
continue
|
||||
}
|
||||
i += j + 1 // increment i past the previous newline
|
||||
|
||||
metrics = append(metrics, m)
|
||||
}
|
||||
|
||||
if len(errStr) > 0 {
|
||||
return metrics, fmt.Errorf(errStr)
|
||||
}
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
|
||||
var dTime string
|
||||
// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
|
||||
pos, key, err := scanKey(buf, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// measurement name is required
|
||||
if len(key) == 0 {
|
||||
return nil, fmt.Errorf("missing measurement")
|
||||
}
|
||||
|
||||
if len(key) > MaxKeyLength {
|
||||
return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
|
||||
}
|
||||
|
||||
// scan the second block is which is field1=value1[,field2=value2,...]
|
||||
pos, fields, err := scanFields(buf, pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// at least one field is required
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("missing fields")
|
||||
}
|
||||
|
||||
// scan the last block which is an optional integer timestamp
|
||||
pos, ts, err := scanTime(buf, pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := &metric{
|
||||
fields: fields,
|
||||
t: ts,
|
||||
}
|
||||
|
||||
// parse out the measurement name
|
||||
// namei is the index at which the "name" ends
|
||||
namei := indexUnescapedByte(key, ',')
|
||||
if namei < 1 {
|
||||
// no tags
|
||||
m.name = key
|
||||
} else {
|
||||
m.name = key[0:namei]
|
||||
m.tags = key[namei:]
|
||||
}
|
||||
|
||||
if len(m.t) == 0 {
|
||||
if len(dTime) == 0 {
|
||||
dTime = fmt.Sprint(defaultTime.UnixNano())
|
||||
}
|
||||
// use default time
|
||||
m.t = []byte(dTime)
|
||||
}
|
||||
|
||||
// here we copy on return because this allows us to later call
|
||||
// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
|
||||
// modifying 'tag' bytes having an affect on 'field' bytes, for example.
|
||||
return m.Copy(), nil
|
||||
}
|
||||
|
||||
// scanKey scans buf starting at i for the measurement and tag portion of the point.
|
||||
// It returns the ending position and the byte slice of key within buf. If there
|
||||
// are tags, they will be sorted if they are not already.
|
||||
func scanKey(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
|
||||
// First scan the Point's measurement.
|
||||
state, i, err := scanMeasurement(buf, i)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
|
||||
// Optionally scan tags if needed.
|
||||
if state == tagKeyState {
|
||||
i, err = scanTags(buf, i)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
}
|
||||
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
// scanMeasurement examines the measurement part of a Point, returning
|
||||
// the next state to move to, and the current location in the buffer.
|
||||
func scanMeasurement(buf []byte, i int) (int, int, error) {
|
||||
// Check first byte of measurement, anything except a comma is fine.
|
||||
// It can't be a space, since whitespace is stripped prior to this
|
||||
// function call.
|
||||
if i >= len(buf) || buf[i] == ',' {
|
||||
return -1, i, makeError("missing measurement", buf, i)
|
||||
}
|
||||
|
||||
for {
|
||||
i++
|
||||
if i >= len(buf) {
|
||||
// cpu
|
||||
return -1, i, makeError("missing fields", buf, i)
|
||||
}
|
||||
|
||||
if buf[i-1] == '\\' {
|
||||
// Skip character (it's escaped).
|
||||
continue
|
||||
}
|
||||
|
||||
// Unescaped comma; move onto scanning the tags.
|
||||
if buf[i] == ',' {
|
||||
return tagKeyState, i + 1, nil
|
||||
}
|
||||
|
||||
// Unescaped space; move onto scanning the fields.
|
||||
if buf[i] == ' ' {
|
||||
// cpu value=1.0
|
||||
return fieldsState, i, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTags examines all the tags in a Point, keeping track of and
|
||||
// returning the updated indices slice, number of commas and location
|
||||
// in buf where to start examining the Point fields.
|
||||
func scanTags(buf []byte, i int) (int, error) {
|
||||
var (
|
||||
err error
|
||||
state = tagKeyState
|
||||
)
|
||||
|
||||
for {
|
||||
switch state {
|
||||
case tagKeyState:
|
||||
i, err = scanTagsKey(buf, i)
|
||||
state = tagValueState // tag value always follows a tag key
|
||||
case tagValueState:
|
||||
state, i, err = scanTagsValue(buf, i)
|
||||
case fieldsState:
|
||||
return i, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTagsKey scans each character in a tag key.
|
||||
func scanTagsKey(buf []byte, i int) (int, error) {
|
||||
// First character of the key.
|
||||
if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
|
||||
// cpu,{'', ' ', ',', '='}
|
||||
return i, makeError("missing tag key", buf, i)
|
||||
}
|
||||
|
||||
// Examine each character in the tag key until we hit an unescaped
|
||||
// equals (the tag value), or we hit an error (i.e., unescaped
|
||||
// space or comma).
|
||||
for {
|
||||
i++
|
||||
|
||||
// Either we reached the end of the buffer or we hit an
|
||||
// unescaped comma or space.
|
||||
if i >= len(buf) ||
|
||||
((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
|
||||
// cpu,tag{'', ' ', ','}
|
||||
return i, makeError("missing tag value", buf, i)
|
||||
}
|
||||
|
||||
if buf[i] == '=' && buf[i-1] != '\\' {
|
||||
// cpu,tag=
|
||||
return i + 1, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTagsValue scans each character in a tag value.
|
||||
func scanTagsValue(buf []byte, i int) (int, int, error) {
|
||||
// Tag value cannot be empty.
|
||||
if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
|
||||
// cpu,tag={',', ' '}
|
||||
return -1, i, makeError("missing tag value", buf, i)
|
||||
}
|
||||
|
||||
// Examine each character in the tag value until we hit an unescaped
|
||||
// comma (move onto next tag key), an unescaped space (move onto
|
||||
// fields), or we error out.
|
||||
for {
|
||||
i++
|
||||
if i >= len(buf) {
|
||||
// cpu,tag=value
|
||||
return -1, i, makeError("missing fields", buf, i)
|
||||
}
|
||||
|
||||
// An unescaped equals sign is an invalid tag value.
|
||||
if buf[i] == '=' && buf[i-1] != '\\' {
|
||||
// cpu,tag={'=', 'fo=o'}
|
||||
return -1, i, makeError("invalid tag format", buf, i)
|
||||
}
|
||||
|
||||
if buf[i] == ',' && buf[i-1] != '\\' {
|
||||
// cpu,tag=foo,
|
||||
return tagKeyState, i + 1, nil
|
||||
}
|
||||
|
||||
// cpu,tag=foo value=1.0
|
||||
// cpu, tag=foo\= value=1.0
|
||||
if buf[i] == ' ' && buf[i-1] != '\\' {
|
||||
return fieldsState, i, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanFields scans buf, starting at i for the fields section of a point. It returns
|
||||
// the ending position and the byte slice of the fields within buf
|
||||
func scanFields(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
quoted := false
|
||||
|
||||
// tracks how many '=' we've seen
|
||||
equals := 0
|
||||
|
||||
// tracks how many commas we've seen
|
||||
commas := 0
|
||||
|
||||
for {
|
||||
// reached the end of buf?
|
||||
if i >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
// escaped characters?
|
||||
if buf[i] == '\\' && i+1 < len(buf) {
|
||||
i += 2
|
||||
continue
|
||||
}
|
||||
|
||||
// If the value is quoted, scan until we get to the end quote
|
||||
// Only quote values in the field value since quotes are not significant
|
||||
// in the field key
|
||||
if buf[i] == '"' && equals > commas {
|
||||
quoted = !quoted
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// If we see an =, ensure that there is at least on char before and after it
|
||||
if buf[i] == '=' && !quoted {
|
||||
equals++
|
||||
|
||||
// check for "... =123" but allow "a\ =123"
|
||||
if buf[i-1] == ' ' && buf[i-2] != '\\' {
|
||||
return i, buf[start:i], makeError("missing field key", buf, i)
|
||||
}
|
||||
|
||||
// check for "...a=123,=456" but allow "a=123,a\,=456"
|
||||
if buf[i-1] == ',' && buf[i-2] != '\\' {
|
||||
return i, buf[start:i], makeError("missing field key", buf, i)
|
||||
}
|
||||
|
||||
// check for "... value="
|
||||
if i+1 >= len(buf) {
|
||||
return i, buf[start:i], makeError("missing field value", buf, i)
|
||||
}
|
||||
|
||||
// check for "... value=,value2=..."
|
||||
if buf[i+1] == ',' || buf[i+1] == ' ' {
|
||||
return i, buf[start:i], makeError("missing field value", buf, i)
|
||||
}
|
||||
|
||||
if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
|
||||
var err error
|
||||
i, err = scanNumber(buf, i+1)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// If next byte is not a double-quote, the value must be a boolean
|
||||
if buf[i+1] != '"' {
|
||||
var err error
|
||||
i, _, err = scanBoolean(buf, i+1)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if buf[i] == ',' && !quoted {
|
||||
commas++
|
||||
}
|
||||
|
||||
// reached end of block?
|
||||
if buf[i] == ' ' && !quoted {
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if quoted {
|
||||
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
|
||||
}
|
||||
|
||||
// check that all field sections had key and values (e.g. prevent "a=1,b"
|
||||
if equals == 0 || commas != equals-1 {
|
||||
return i, buf[start:i], makeError("invalid field format", buf, i)
|
||||
}
|
||||
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
// scanTime scans buf, starting at i for the time section of a point. It
|
||||
// returns the ending position and the byte slice of the timestamp within buf
|
||||
// and and error if the timestamp is not in the correct numeric format.
|
||||
func scanTime(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
|
||||
for {
|
||||
// reached the end of buf?
|
||||
if i >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
// Reached end of block or trailing whitespace?
|
||||
if buf[i] == '\n' || buf[i] == ' ' {
|
||||
break
|
||||
}
|
||||
|
||||
// Handle negative timestamps
|
||||
if i == start && buf[i] == '-' {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// Timestamps should be integers, make sure they are so we don't need
|
||||
// to actually parse the timestamp until needed.
|
||||
if buf[i] < '0' || buf[i] > '9' {
|
||||
return i, buf[start:i], makeError("invalid timestamp", buf, i)
|
||||
}
|
||||
i++
|
||||
}
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
func isNumeric(b byte) bool {
|
||||
return (b >= '0' && b <= '9') || b == '.'
|
||||
}
|
||||
|
||||
// scanNumber returns the end position within buf, start at i after
|
||||
// scanning over buf for an integer, or float. It returns an
|
||||
// error if a invalid number is scanned.
|
||||
func scanNumber(buf []byte, i int) (int, error) {
|
||||
start := i
|
||||
var isInt bool
|
||||
|
||||
// Is negative number?
|
||||
if i < len(buf) && buf[i] == '-' {
|
||||
i++
|
||||
// There must be more characters now, as just '-' is illegal.
|
||||
if i == len(buf) {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
}
|
||||
|
||||
// how many decimal points we've see
|
||||
decimal := false
|
||||
|
||||
// indicates the number is float in scientific notation
|
||||
scientific := false
|
||||
|
||||
for {
|
||||
if i >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
if buf[i] == ',' || buf[i] == ' ' {
|
||||
break
|
||||
}
|
||||
|
||||
if buf[i] == 'i' && i > start && !isInt {
|
||||
isInt = true
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
if buf[i] == '.' {
|
||||
// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
|
||||
if decimal {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
decimal = true
|
||||
}
|
||||
|
||||
// `e` is valid for floats but not as the first char
|
||||
if i > start && (buf[i] == 'e' || buf[i] == 'E') {
|
||||
scientific = true
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// + and - are only valid at this point if they follow an e (scientific notation)
|
||||
if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// NaN is an unsupported value
|
||||
if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
|
||||
if !isNumeric(buf[i]) {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if isInt && (decimal || scientific) {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
|
||||
numericDigits := i - start
|
||||
if isInt {
|
||||
numericDigits--
|
||||
}
|
||||
if decimal {
|
||||
numericDigits--
|
||||
}
|
||||
if buf[start] == '-' {
|
||||
numericDigits--
|
||||
}
|
||||
|
||||
if numericDigits == 0 {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
|
||||
// It's more common that numbers will be within min/max range for their type but we need to prevent
|
||||
// out or range numbers from being parsed successfully. This uses some simple heuristics to decide
|
||||
// if we should parse the number to the actual type. It does not do it all the time because it incurs
|
||||
// extra allocations and we end up converting the type again when writing points to disk.
|
||||
if isInt {
|
||||
// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
|
||||
if buf[i-1] != 'i' {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
// Parse the int to check bounds the number of digits could be larger than the max range
|
||||
// We subtract 1 from the index to remove the `i` from our tests
|
||||
if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
|
||||
if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
|
||||
return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
|
||||
if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
|
||||
if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
|
||||
return i, makeError("invalid float", buf, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}

// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error
// if an invalid boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
		return i, buf[start:i], makeError("invalid value", buf, i)
	}

	i++
	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}
		i++
	}

	// Single char bool (t, T, f, F) is ok
	if i-start == 1 {
		return i, buf[start:i], nil
	}

	// length must be 4 for true or TRUE
	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// length must be 5 for false or FALSE
	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// Otherwise, the exact spelling must match one of the accepted forms.
	valid := false
	switch buf[start] {
	case 't':
		valid = bytes.Equal(buf[start:i], []byte("true"))
	case 'f':
		valid = bytes.Equal(buf[start:i], []byte("false"))
	case 'T':
		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
	case 'F':
		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
	}

	if !valid {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	return i, buf[start:i], nil
}

// skipWhitespace returns the end position within buf, starting at i, after
// scanning over spaces in tags.
func skipWhitespace(buf []byte, i int) int {
	for i < len(buf) {
		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
			break
		}
		i++
	}
	return i
}

// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
	return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
		reason, buf, i)
}
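
To see the scanners above end to end, here is a minimal sketch using the exported `Parse` entry point that the test file below exercises (assuming a standard Go module setup; the printed values follow from the checks defined above):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// "True" passes scanBoolean's length check and the bytes.Equal match.
	ms, err := metric.Parse([]byte("booltest b=True\n"))
	fmt.Println(len(ms), err) // 1 <nil>

	// "tru" fails the length-must-be-4 check, so Parse surfaces makeError's
	// message: "metric parsing error, reason: [invalid boolean], ...".
	_, err = metric.Parse([]byte("booltest b=tru\n"))
	fmt.Println(err != nil) // true
}
```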

355 metric/parse_test.go Normal file
@@ -0,0 +1,355 @@
package metric

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`

const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`

const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`

const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`

const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`

// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParse(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(sevenMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 7)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"idle": float64(99),
				"busy": int64(1),
				"b":    true,
				"s":    "string",
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseErrors(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(someInvalid))
	assert.Error(t, err)
	assert.Len(t, metrics, 4)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage_idle": float64(99),
				"usage_busy": float64(1),
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseWithTimestamps(t *testing.T) {
	metrics, err := Parse([]byte(withTimestamps))
	assert.NoError(t, err)
	assert.Len(t, metrics, 4)

	expectedTimestamps := []time.Time{
		time.Unix(0, 1480595849000000000),
		time.Unix(0, 1480595850000000000),
		time.Unix(0, 1480595851700030000),
		time.Unix(0, 1480595852000000300),
	}

	// each metric should carry the timestamp it was parsed with.
	for i, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage": float64(99),
			},
			m.Fields(),
		)
		assert.True(t, m.Time().Equal(expectedTimestamps[i]))
	}
}

func TestParseEscapes(t *testing.T) {
	metrics, err := Parse([]byte(withEscapes))
	assert.NoError(t, err)
	assert.Len(t, metrics, 6)

	tests := []struct {
		name   string
		fields map[string]interface{}
		tags   map[string]string
	}{
		{
			name:   `w, eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `w,eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{"location": `us,midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temp=rature`: float64(82)},
			tags:   map[string]string{"location": `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{`location place`: `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temperature`: `too"hot"`},
			tags:   map[string]string{"location": `us-midwest`},
		},
	}

	for i, test := range tests {
		assert.Equal(t, test.name, metrics[i].Name())
		assert.Equal(t, test.fields, metrics[i].Fields())
		assert.Equal(t, test.tags, metrics[i].Tags())
	}
}

func TestParseTrueBooleans(t *testing.T) {
	metrics, err := Parse([]byte(trues))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, true, metric.Fields()["b"])
	}
}

func TestParseFalseBooleans(t *testing.T) {
	metrics, err := Parse([]byte(falses))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, false, metric.Fields()["b"])
	}
}

func TestParsePointBadNumber(t *testing.T) {
	for _, tt := range []string{
		"cpu v=- ",
		"cpu v=-i ",
		"cpu v=-. ",
		"cpu v=. ",
		"cpu v=1.0i ",
		"cpu v=1ii ",
		"cpu v=1a ",
		"cpu v=-e-e-e ",
		"cpu v=42+3 ",
		"cpu v= ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseTagsMissingParts(t *testing.T) {
	for _, tt := range []string{
		`cpu,host`,
		`cpu,host,`,
		`cpu,host=`,
		`cpu,f=oo=bar value=1`,
		`cpu,host value=1i`,
		`cpu,host=serverA,region value=1i`,
		`cpu,host=serverA,region= value=1i`,
		`cpu,host=serverA,region=,zone=us-west value=1i`,
		`cpu, value=1`,
		`cpu, ,,`,
		`cpu,,,`,
		`cpu,host=serverA,=us-east value=1i`,
		`cpu,host=serverAa\,,=us-east value=1i`,
		`cpu,host=serverA\,,=us-east value=1i`,
		`cpu, =serverA value=1i`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointWhitespace(t *testing.T) {
	for _, tt := range []string{
		`cpu value=1.0 1257894000000000000`,
		`cpu value=1.0 1257894000000000000`,
		`cpu value=1.0 1257894000000000000`,
		`cpu value=1.0 1257894000000000000 `,
	} {
		m, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.Equal(t, "cpu", m[0].Name())
		assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
	}
}

func TestParsePointInvalidFields(t *testing.T) {
	for _, tt := range []string{
		"test,foo=bar a=101,=value",
		"test,foo=bar =value",
		"test,foo=bar a=101,key=",
		"test,foo=bar key=",
		`test,foo=bar a=101,b="foo`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointNoFields(t *testing.T) {
	for _, tt := range []string{
		"cpu_load_short,host=server01,region=us-west",
		"very_long_measurement_name",
		"cpu,host==",
		"============",
		"cpu",
		"cpu\n\n\n\n\n\n\n",
		" ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

// a b=1 << this is the shortest possible metric;
// anything shorter is just ignored.
func TestParseBufTooShort(t *testing.T) {
	for _, tt := range []string{
		"",
		"a",
		"a ",
		"a b=",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidBooleans(t *testing.T) {
	for _, tt := range []string{
		"test b=tru",
		"test b=fals",
		"test b=faLse",
		"test q=foo",
		"test b=lambchops",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidNumbers(t *testing.T) {
	for _, tt := range []string{
		"test b=-",
		"test b=1.1.1",
		"test b=nan",
		"test b=9i10",
		"test b=9999999999999999999i",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseNegativeTimestamps(t *testing.T) {
	for _, tt := range []string{
		"test foo=101 -1257894000000000000",
	} {
		metrics, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
	}
}

func TestParseMaxKeyLength(t *testing.T) {
	key := ""
	for {
		if len(key) > MaxKeyLength {
			break
		}
		key += "test"
	}

	_, err := Parse([]byte(key + " value=1\n"))
	assert.Error(t, err)
}
@@ -1,83 +0,0 @@
package telegraf

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.Unix())
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricStringNoTime(t *testing.T) {
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := NewMetric("cpu", tags, fields, now)
	assert.Error(t, err)
}

5 plugins/aggregators/all/all.go Normal file
@@ -0,0 +1,5 @@
package all

import (
	_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

42 plugins/aggregators/minmax/README.md Normal file
@@ -0,0 +1,42 @@
# MinMax Aggregator Plugin

The minmax aggregator plugin aggregates min & max values of each field it sees,
emitting the aggregate every `period` seconds.

### Configuration:

```toml
# Keep the aggregate min/max of each metric passing through.
[[aggregators.minmax]]
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
```

### Measurements & Fields:

- measurement1
    - field1_max
    - field1_min

### Tags:

No tags are applied by this aggregator.

### Example Output:

```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1.72 1475583980000000000
system,host=tars load1=1.6 1475583990000000000
system,host=tars load1=1.66 1475584000000000000
system,host=tars load1=1.63 1475584010000000000
system,host=tars load1_max=1.72,load1_min=1.6 1475584010000000000
system,host=tars load1=1.46 1475584020000000000
system,host=tars load1=1.39 1475584030000000000
system,host=tars load1=1.41 1475584040000000000
system,host=tars load1_max=1.46,load1_min=1.39 1475584040000000000
```

119 plugins/aggregators/minmax/minmax.go Normal file
@@ -0,0 +1,119 @@
package minmax

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"
)

type MinMax struct {
	cache map[uint64]aggregate
}

func NewMinMax() telegraf.Aggregator {
	mm := &MinMax{}
	mm.Reset()
	return mm
}

type aggregate struct {
	fields map[string]minmax
	name   string
	tags   map[string]string
}

type minmax struct {
	min float64
	max float64
}

var sampleConfig = `
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
`

func (m *MinMax) SampleConfig() string {
	return sampleConfig
}

func (m *MinMax) Description() string {
	return "Keep the aggregate min/max of each metric passing through."
}

func (m *MinMax) Add(in telegraf.Metric) {
	id := in.HashID()
	if _, ok := m.cache[id]; !ok {
		// hit an uncached metric, create caches for first time:
		a := aggregate{
			name:   in.Name(),
			tags:   in.Tags(),
			fields: make(map[string]minmax),
		}
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				a.fields[k] = minmax{
					min: fv,
					max: fv,
				}
			}
		}
		m.cache[id] = a
	} else {
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				if _, ok := m.cache[id].fields[k]; !ok {
					// hit an uncached field of a cached metric
					m.cache[id].fields[k] = minmax{
						min: fv,
						max: fv,
					}
					continue
				}
				if fv < m.cache[id].fields[k].min {
					tmp := m.cache[id].fields[k]
					tmp.min = fv
					m.cache[id].fields[k] = tmp
				} else if fv > m.cache[id].fields[k].max {
					tmp := m.cache[id].fields[k]
					tmp.max = fv
					m.cache[id].fields[k] = tmp
				}
			}
		}
	}
}

func (m *MinMax) Push(acc telegraf.Accumulator) {
	for _, aggregate := range m.cache {
		fields := map[string]interface{}{}
		for k, v := range aggregate.fields {
			fields[k+"_min"] = v.min
			fields[k+"_max"] = v.max
		}
		acc.AddFields(aggregate.name, fields, aggregate.tags)
	}
}

func (m *MinMax) Reset() {
	m.cache = make(map[uint64]aggregate)
}

func convert(in interface{}) (float64, bool) {
	switch v := in.(type) {
	case float64:
		return v, true
	case int64:
		return float64(v), true
	default:
		return 0, false
	}
}

func init() {
	aggregators.Add("minmax", func() telegraf.Aggregator {
		return NewMinMax()
	})
}

162 plugins/aggregators/minmax/minmax_test.go Normal file
@@ -0,0 +1,162 @@
package minmax

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/testutil"
)

var m1, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a": int64(1),
		"b": int64(1),
		"c": int64(1),
		"d": int64(1),
		"e": int64(1),
		"f": float64(2),
		"g": float64(2),
		"h": float64(2),
		"i": float64(2),
		"j": float64(3),
	},
	time.Now(),
)
var m2, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a":        int64(1),
		"b":        int64(3),
		"c":        int64(3),
		"d":        int64(3),
		"e":        int64(3),
		"f":        float64(1),
		"g":        float64(1),
		"h":        float64(1),
		"i":        float64(1),
		"j":        float64(1),
		"k":        float64(200),
		"ignoreme": "string",
		"andme":    true,
	},
	time.Now(),
)

func BenchmarkApply(b *testing.B) {
	minmax := NewMinMax()

	for n := 0; n < b.N; n++ {
		minmax.Add(m1)
		minmax.Add(m2)
	}
}

// Test two metrics getting added.
func TestMinMaxWithPeriod(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewMinMax()

	minmax.Add(m1)
	minmax.Add(m2)
	minmax.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_max": float64(1),
		"a_min": float64(1),
		"b_max": float64(3),
		"b_min": float64(1),
		"c_max": float64(3),
		"c_min": float64(1),
		"d_max": float64(3),
		"d_min": float64(1),
		"e_max": float64(3),
		"e_min": float64(1),
		"f_max": float64(2),
		"f_min": float64(1),
		"g_max": float64(2),
		"g_min": float64(1),
		"h_max": float64(2),
		"h_min": float64(1),
		"i_max": float64(2),
		"i_min": float64(1),
		"j_max": float64(3),
		"j_min": float64(1),
		"k_max": float64(200),
		"k_min": float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods).
func TestMinMaxDifferentPeriods(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewMinMax()

	minmax.Add(m1)
	minmax.Push(&acc)
	expectedFields := map[string]interface{}{
		"a_max": float64(1),
		"a_min": float64(1),
		"b_max": float64(1),
		"b_min": float64(1),
		"c_max": float64(1),
		"c_min": float64(1),
		"d_max": float64(1),
		"d_min": float64(1),
		"e_max": float64(1),
		"e_min": float64(1),
		"f_max": float64(2),
		"f_min": float64(2),
		"g_max": float64(2),
		"g_min": float64(2),
		"h_max": float64(2),
		"h_min": float64(2),
		"i_max": float64(2),
		"i_min": float64(2),
		"j_max": float64(3),
		"j_min": float64(3),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)

	acc.ClearMetrics()
	minmax.Reset()
	minmax.Add(m2)
	minmax.Push(&acc)
	expectedFields = map[string]interface{}{
		"a_max": float64(1),
		"a_min": float64(1),
		"b_max": float64(3),
		"b_min": float64(3),
		"c_max": float64(3),
		"c_min": float64(3),
		"d_max": float64(3),
		"d_min": float64(3),
		"e_max": float64(3),
		"e_min": float64(3),
		"f_max": float64(1),
		"f_min": float64(1),
		"g_max": float64(1),
		"g_min": float64(1),
		"h_max": float64(1),
		"h_min": float64(1),
		"i_max": float64(1),
		"i_min": float64(1),
		"j_max": float64(1),
		"j_min": float64(1),
		"k_max": float64(200),
		"k_min": float64(200),
	}
	expectedTags = map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

11 plugins/aggregators/registry.go Normal file
@@ -0,0 +1,11 @@
package aggregators

import "github.com/influxdata/telegraf"

type Creator func() telegraf.Aggregator

var Aggregators = map[string]Creator{}

func Add(name string, creator Creator) {
	Aggregators[name] = creator
}
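
This registry is what `plugins/aggregators/all` above populates through blank imports. A hypothetical consumer (the real config loader is not part of this diff) would resolve a plugin name to a constructor like so:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"
	// The blank import runs minmax's init(), which calls aggregators.Add.
	_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

// newAggregator is a hypothetical helper showing how a name from the
// config file maps to a fresh aggregator instance.
func newAggregator(name string) (telegraf.Aggregator, error) {
	creator, ok := aggregators.Aggregators[name]
	if !ok {
		return nil, fmt.Errorf("undefined aggregator: %s", name)
	}
	return creator(), nil
}

func main() {
	agg, err := newAggregator("minmax")
	fmt.Println(agg != nil, err) // true <nil>
}
```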
@@ -27,11 +27,17 @@ The example plugin gathers metrics about example things
    - tag2
- measurement2 has the following tags:
    - tag3

### Sample Queries:

These are some useful queries (for dashboards or other monitoring) to run against data from this plugin:

```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
```

### Example Output:

Give an example `-test` output here

```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
```
File diff suppressed because one or more lines are too long
@@ -1,104 +1,21 @@
package aerospike

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"errors"
	"log"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/errchan"
	"github.com/influxdata/telegraf/plugins/inputs"

	as "github.com/aerospike/aerospike-client-go"
)

const (
	MSG_HEADER_SIZE = 8
	MSG_TYPE        = 1 // Info is 1
	MSG_VERSION     = 2
)

var (
	STATISTICS_COMMAND = []byte("statistics\n")
	NAMESPACES_COMMAND = []byte("namespaces\n")
)

type aerospikeMessageHeader struct {
	Version uint8
	Type    uint8
	DataLen [6]byte
}

type aerospikeMessage struct {
	aerospikeMessageHeader
	Data []byte
}

// Taken from aerospike-client-go/types/message.go
func (msg *aerospikeMessage) Serialize() []byte {
	msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
	buf := bytes.NewBuffer([]byte{})
	binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
	binary.Write(buf, binary.BigEndian, msg.Data[:])
	return buf.Bytes()
}

type aerospikeInfoCommand struct {
	msg *aerospikeMessage
}

// Taken from aerospike-client-go/info.go
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
	responses := make(map[string]string)
	offset := int64(0)
	begin := int64(0)

	dataLen := int64(len(nfo.msg.Data))

	// Create reusable StringBuilder for performance.
	for offset < dataLen {
		b := nfo.msg.Data[offset]

		if b == '\t' {
			name := nfo.msg.Data[begin:offset]
			offset++
			begin = offset

			// Parse field value.
			for offset < dataLen {
				if nfo.msg.Data[offset] == '\n' {
					break
				}
				offset++
			}

			if offset > begin {
				value := nfo.msg.Data[begin:offset]
				responses[string(name)] = string(value)
			} else {
				responses[string(name)] = ""
			}
			offset++
			begin = offset
		} else if b == '\n' {
			if offset > begin {
				name := nfo.msg.Data[begin:offset]
				responses[string(name)] = ""
			}
			offset++
			begin = offset
		} else {
			offset++
		}
	}

	if offset > begin {
		name := nfo.msg.Data[begin:offset]
		responses[string(name)] = ""
	}
	return responses, nil
}

type Aerospike struct {
	Servers []string
}
@@ -115,7 +32,7 @@ func (a *Aerospike) SampleConfig() string {
}

func (a *Aerospike) Description() string {
	return "Read stats from an aerospike server"
	return "Read stats from aerospike server(s)"
}

func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
@@ -124,214 +41,114 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
	}

	var wg sync.WaitGroup

	var outerr error

	errChan := errchan.New(len(a.Servers))
	wg.Add(len(a.Servers))
	for _, server := range a.Servers {
		wg.Add(1)
		go func(server string) {
		go func(serv string) {
			defer wg.Done()
			outerr = a.gatherServer(server, acc)
			errChan.C <- a.gatherServer(serv, acc)
		}(server)
	}

	wg.Wait()
	return outerr
	return errChan.Error()
}

func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
	aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
	host, port, err := net.SplitHostPort(hostport)
	if err != nil {
		return fmt.Errorf("Aerospike info failed: %s", err)
		return err
	}
	readAerospikeStats(aerospikeInfo, acc, host, "")
	namespaces, err := getList(NAMESPACES_COMMAND, host)

	iport, err := strconv.Atoi(port)
	if err != nil {
		return fmt.Errorf("Aerospike namespace list failed: %s", err)
		iport = 3000
	}
	for ix := range namespaces {
		nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
		if err != nil {
			return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)

	c, err := as.NewClient(host, iport)
	if err != nil {
		return err
	}
	defer c.Close()

	nodes := c.GetNodes()
	for _, n := range nodes {
		tags := map[string]string{
			"aerospike_host": hostport,
		}
		fields := map[string]interface{}{
			"node_name": n.GetName(),
		}
		stats, err := as.RequestNodeStats(n)
		if err != nil {
			return err
		}
		for k, v := range stats {
			val, err := parseValue(v)
			if err == nil {
				fields[strings.Replace(k, "-", "_", -1)] = val
			} else {
				log.Printf("I! skipping aerospike field %v with int64 overflow", k)
			}
		}
		acc.AddFields("aerospike_node", fields, tags, time.Now())

		info, err := as.RequestNodeInfo(n, "namespaces")
		if err != nil {
			return err
		}
		namespaces := strings.Split(info["namespaces"], ";")

		for _, namespace := range namespaces {
			nTags := map[string]string{
				"aerospike_host": hostport,
			}
			nTags["namespace"] = namespace
			nFields := map[string]interface{}{
				"node_name": n.GetName(),
			}
			info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
			if err != nil {
				continue
			}
			stats := strings.Split(info["namespace/"+namespace], ";")
			for _, stat := range stats {
				parts := strings.Split(stat, "=")
				if len(parts) < 2 {
					continue
				}
				val, err := parseValue(parts[1])
				if err == nil {
					nFields[strings.Replace(parts[0], "-", "_", -1)] = val
				} else {
					log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
				}
			}
			acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
		}
		readAerospikeStats(nsInfo, acc, host, namespaces[ix])
	}
	return nil
}

func getMap(key []byte, host string) (map[string]string, error) {
	data, err := get(key, host)
	if err != nil {
		return nil, fmt.Errorf("Failed to get data: %s", err)
func parseValue(v string) (interface{}, error) {
	if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
		return parsed, nil
	} else if _, err := strconv.ParseUint(v, 10, 64); err == nil {
		// int64 overflow, yet valid uint64
		return nil, errors.New("Number is too large")
	} else if parsed, err := strconv.ParseBool(v); err == nil {
		return parsed, nil
	} else {
		return v, nil
	}
	parsed, err := unmarshalMapInfo(data, string(key))
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
	}

	return parsed, nil
}

func getList(key []byte, host string) ([]string, error) {
	data, err := get(key, host)
	if err != nil {
		return nil, fmt.Errorf("Failed to get data: %s", err)
func copyTags(m map[string]string) map[string]string {
	out := make(map[string]string)
	for k, v := range m {
		out[k] = v
	}
	parsed, err := unmarshalListInfo(data, string(key))
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
	}

	return parsed, nil
}

func get(key []byte, host string) (map[string]string, error) {
	var err error
	var data map[string]string

	asInfo := &aerospikeInfoCommand{
		msg: &aerospikeMessage{
			aerospikeMessageHeader: aerospikeMessageHeader{
				Version: uint8(MSG_VERSION),
				Type:    uint8(MSG_TYPE),
				DataLen: msgLenToBytes(int64(len(key))),
			},
			Data: key,
		},
	}

	cmd := asInfo.msg.Serialize()
	addr, err := net.ResolveTCPAddr("tcp", host)
	if err != nil {
		return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
	}

	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
	}
	defer conn.Close()

	_, err = conn.Write(cmd)
	if err != nil {
		return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
	}

	msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
	_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
	if err != nil {
		return data, fmt.Errorf("Failed to read header: %s", err)
	}
	err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
	if err != nil {
		return data, fmt.Errorf("Failed to unmarshal header: %s", err)
	}

	msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)

	if int64(len(asInfo.msg.Data)) != msgLen {
		asInfo.msg.Data = make([]byte, msgLen)
	}

	_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
	if err != nil {
		return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
	}

	data, err = asInfo.parseMultiResponse()
	if err != nil {
		return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
	}

	return data, err
}

func readAerospikeStats(
	stats map[string]string,
	acc telegraf.Accumulator,
	host string,
	namespace string,
) {
	fields := make(map[string]interface{})
	tags := map[string]string{
		"aerospike_host": host,
		"namespace":      "_service",
	}

	if namespace != "" {
		tags["namespace"] = namespace
	}
	for key, value := range stats {
		// We are going to ignore all string based keys
		val, err := strconv.ParseInt(value, 10, 64)
		if err == nil {
			if strings.Contains(key, "-") {
				key = strings.Replace(key, "-", "_", -1)
			}
			fields[key] = val
		}
	}
	acc.AddFields("aerospike", fields, tags)
}

func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
	key = strings.TrimSuffix(key, "\n")
	res := map[string]string{}

	v, exists := infoMap[key]
	if !exists {
		return res, fmt.Errorf("Key '%s' missing from info", key)
	}

	values := strings.Split(v, ";")
	for i := range values {
		kv := strings.Split(values[i], "=")
		if len(kv) > 1 {
			res[kv[0]] = kv[1]
		}
	}

	return res, nil
}

func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
	key = strings.TrimSuffix(key, "\n")

	v, exists := infoMap[key]
	if !exists {
		return []string{}, fmt.Errorf("Key '%s' missing from info", key)
	}

	values := strings.Split(v, ";")
	return values, nil
}

func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
	var r int
	for total < length {
		r, err = c.Read(buffer[total:length])
		total += r
		if err != nil {
			break
		}
	}
	return
}

// Taken from aerospike-client-go/types/message.go
func msgLenToBytes(DataLen int64) [6]byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(DataLen))
	res := [6]byte{}
	copy(res[:], b[2:])
	return res
}

// Taken from aerospike-client-go/types/message.go
func msgLenFromBytes(buf [6]byte) int64 {
	nbytes := append([]byte{0, 0}, buf[:]...)
	DataLen := binary.BigEndian.Uint64(nbytes)
	return int64(DataLen)
	return out
}

func init() {
@@ -1,7 +1,6 @@
package aerospike

import (
	"reflect"
	"testing"

	"github.com/influxdata/telegraf/testutil"
@@ -11,7 +10,7 @@ import (

func TestAerospikeStatistics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
		t.Skip("Skipping aerospike integration tests.")
	}

	a := &Aerospike{
@@ -23,96 +22,46 @@ func TestAerospikeStatistics(t *testing.T) {
	err := a.Gather(&acc)
	require.NoError(t, err)

	// Only use a few of the metrics
	asMetrics := []string{
		"transactions",
		"stat_write_errs",
		"stat_read_reqs",
		"stat_write_reqs",
	}

	for _, metric := range asMetrics {
		assert.True(t, acc.HasIntField("aerospike", metric), metric)
	}

	assert.True(t, acc.HasMeasurement("aerospike_node"))
	assert.True(t, acc.HasMeasurement("aerospike_namespace"))
	assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
}

func TestAerospikeMsgLenFromToBytes(t *testing.T) {
	var i int64 = 8
	assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
}
func TestAerospikeStatisticsPartialErr(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping aerospike integration tests.")
	}

	a := &Aerospike{
		Servers: []string{
			testutil.GetLocalHost() + ":3000",
			testutil.GetLocalHost() + ":9999",
		},
	}

func TestReadAerospikeStatsNoNamespace(t *testing.T) {
	// Also test for re-writing
	var acc testutil.Accumulator
	stats := map[string]string{
		"stat-write-errs": "12345",
		"stat_read_reqs":  "12345",
	}
	readAerospikeStats(stats, &acc, "host1", "")

	fields := map[string]interface{}{
		"stat_write_errs": int64(12345),
		"stat_read_reqs":  int64(12345),
	}
	tags := map[string]string{
		"aerospike_host": "host1",
		"namespace":      "_service",
	}
	acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
	err := a.Gather(&acc)
	require.Error(t, err)

	assert.True(t, acc.HasMeasurement("aerospike_node"))
	assert.True(t, acc.HasMeasurement("aerospike_namespace"))
	assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
}

func TestReadAerospikeStatsNamespace(t *testing.T) {
	var acc testutil.Accumulator
	stats := map[string]string{
		"stat_write_errs": "12345",
		"stat_read_reqs":  "12345",
	}
	readAerospikeStats(stats, &acc, "host1", "test")
func TestAerospikeParseValue(t *testing.T) {
	// uint64 with value bigger than int64 max
	val, err := parseValue("18446744041841121751")
	assert.Nil(t, val)
	assert.Error(t, err)

	fields := map[string]interface{}{
		"stat_write_errs": int64(12345),
		"stat_read_reqs":  int64(12345),
	}
	tags := map[string]string{
		"aerospike_host": "host1",
		"namespace":      "test",
	}
	acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}

func TestAerospikeUnmarshalList(t *testing.T) {
	i := map[string]string{
		"test": "one;two;three",
	}

	expected := []string{"one", "two", "three"}

	list, err := unmarshalListInfo(i, "test2")
	assert.True(t, err != nil)

	list, err = unmarshalListInfo(i, "test")
	assert.True(t, err == nil)
	equal := true
	for ix := range expected {
		if list[ix] != expected[ix] {
			equal = false
			break
		}
	}
	assert.True(t, equal)
}

func TestAerospikeUnmarshalMap(t *testing.T) {
	i := map[string]string{
		"test": "key1=value1;key2=value2",
	}

	expected := map[string]string{
		"key1": "value1",
		"key2": "value2",
	}
	m, err := unmarshalMapInfo(i, "test")
	assert.True(t, err == nil)
	assert.True(t, reflect.DeepEqual(m, expected))
	// int values
	val, err = parseValue("42")
	assert.NoError(t, err)
	assert.Equal(t, val, int64(42), "must be parsed as int")

	// string values
	val, err = parseValue("BB977942A2CA502")
	assert.NoError(t, err)
	assert.Equal(t, val, `BB977942A2CA502`, "must be left as string")
}
@@ -4,6 +4,14 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
	_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
	_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
	_ "github.com/influxdata/telegraf/plugins/inputs/chrony"
	_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
	_ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
	_ "github.com/influxdata/telegraf/plugins/inputs/consul"
	_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
	_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
	_ "github.com/influxdata/telegraf/plugins/inputs/disque"
	_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
@@ -11,13 +19,22 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
	_ "github.com/influxdata/telegraf/plugins/inputs/exec"
	_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
	_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
	_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
	_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
	_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
	_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
	_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
	_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
	_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
	_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
	_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
	_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
	_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
	_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
@@ -29,10 +46,14 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
	_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
	_ "github.com/influxdata/telegraf/plugins/inputs/ping"
	_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
	_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
	_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
	_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
@@ -44,11 +65,18 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/riak"
	_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
	_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
	_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
	_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/system"
	_ "github.com/influxdata/telegraf/plugins/inputs/tail"
	_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/trig"
	_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
	_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
	_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
	_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
	_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
@@ -1,7 +1,17 @@
# Telegraf plugin: Apache

#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from.
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: how long the HTTP connection will wait for a response. Default is 5 seconds ("5s")

##### Optional SSL Config

- **ssl_ca** string: the full path for the SSL CA certificate
- **ssl_cert** string: the full path for the SSL certificate
- **ssl_key** string: the full path for the key file
- **insecure_skip_verify** bool: if true, the HTTP client will skip all SSL verification related to peer and host. Defaults to false

#### Description
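
For reference, a complete `[[inputs.apache]]` block wiring these arguments together; values are illustrative and mirror the plugin's sample config shown later in this diff:

```toml
[[inputs.apache]]
  ## An array of Apache status URIs to gather stats from.
  urls = ["http://localhost/server-status?auto"]

  ## Credentials for basic HTTP authentication.
  username = "myuser"
  password = "mypassword"

  ## Maximum time to wait for the complete connection and response.
  response_timeout = "5s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```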
@@ -8,20 +8,45 @@ import (
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Apache struct {
	Urls []string
	Urls            []string
	Username        string
	Password        string
	ResponseTimeout internal.Duration
	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool
}

var sampleConfig = `
  ## An array of Apache status URI to gather stats.
  ## Default is "http://localhost/server-status?auto".
  urls = ["http://localhost/server-status?auto"]
  ## user credentials for basic HTTP authentication
  username = "myuser"
  password = "mypassword"

  ## Timeout for the complete connection and response time in seconds
  response_timeout = "25s" ## default to 5 seconds

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

func (n *Apache) SampleConfig() string {
@@ -33,8 +58,15 @@ func (n *Apache) Description() string {
}

func (n *Apache) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup
	if len(n.Urls) == 0 {
		n.Urls = []string{"http://localhost/server-status?auto"}
	}
	if n.ResponseTimeout.Duration < time.Second {
		n.ResponseTimeout.Duration = time.Second * 5
	}

	var outerr error
	var errch = make(chan error)

	for _, u := range n.Urls {
		addr, err := url.Parse(u)
@@ -42,30 +74,61 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
			return fmt.Errorf("Unable to parse address '%s': %s", u, err)
		}

		wg.Add(1)
		go func(addr *url.URL) {
			defer wg.Done()
			outerr = n.gatherUrl(addr, acc)
			errch <- n.gatherUrl(addr, acc)
		}(addr)
	}

	wg.Wait()
	// Drain channel, waiting for all requests to finish and save last error.
	for range n.Urls {
		if err := <-errch; err != nil {
			outerr = err
		}
	}

	return outerr
}

var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{Transport: tr}

func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
	resp, err := client.Get(addr.String())

	var tr *http.Transport

	if addr.Scheme == "https" {
		tlsCfg, err := internal.GetTLSConfig(
			n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
		if err != nil {
			return err
		}
		tr = &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
			TLSClientConfig:       tlsCfg,
		}
	} else {
		tr = &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
		}
	}

	client := &http.Client{
		Transport: tr,
		Timeout:   n.ResponseTimeout.Duration,
	}

	req, err := http.NewRequest("GET", addr.String(), nil)
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
		return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
	}

	if len(n.Username) != 0 && len(n.Password) != 0 {
		req.SetBasicAuth(n.Username, n.Password)
	}

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
	}
@@ -36,7 +36,8 @@ func TestHTTPApache(t *testing.T) {
	defer ts.Close()

	a := Apache{
		Urls: []string{ts.URL},
		// Fetch it 2 times to catch possible data races.
		Urls: []string{ts.URL, ts.URL},
	}

	var acc testutil.Accumulator

125 plugins/inputs/cassandra/README.md Normal file
@@ -0,0 +1,125 @@
# Telegraf plugin: Cassandra
|
||||
|
||||
#### Plugin arguments:
|
||||
- **context** string: Context root used for jolokia url
|
||||
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
|
||||
- **metrics** []string: List of Jmx paths that identify mbeans attributes
|
||||
|
||||
#### Description
|
||||
|
||||
The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured.
|
||||
|
||||
See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
|
||||
|
||||
# Measurements:
|
||||
Cassandra plugin produces one or more measurements for each metric configured, adding Server's name as `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name.
|
||||
|
||||
Given a configuration like:
|
||||
|
||||
```toml
|
||||
[[inputs.cassandra]]
|
||||
context = "/jolokia/read"
|
||||
servers = [":8778"]
|
||||
metrics = ["/java.lang:type=Memory/HeapMemoryUsage"]
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
|
||||
```
|
||||
javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084
|
||||
```
|
||||
|
||||
# Useful Metrics:
|
||||
|
||||
Here is a list of metrics that might be useful to monitor your cassandra cluster. This was put together from multiple sources on the web.
|
||||
|
||||
- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics)
|
||||
- [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
|
||||
|
||||
#### measurement = javaGarbageCollector

- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount

#### measurement = javaMemory

- /java.lang:type=Memory/HeapMemoryUsage
- /java.lang:type=Memory/NonHeapMemoryUsage

#### measurement = cassandraCache

- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity

#### measurement = cassandraClient

- /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients

#### measurement = cassandraClientRequest

- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures

#### measurement = cassandraCommitLog

- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize

#### measurement = cassandraCompaction

- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted

#### measurement = cassandraStorage

- /org.apache.cassandra.metrics:type=Storage,name=Load
- /org.apache.cassandra.metrics:type=Storage,name=Exceptions

#### measurement = cassandraTable

Using wildcards for "keyspace" and "scope" can create a lot of series, since metrics are reported for every table and keyspace, including internal system tables. Specify a keyspace name and/or a table name to limit them.

- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency

#### measurement = cassandraThreadPools

- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
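
To make the mapping above concrete, here is a minimal, self-contained sketch of how one of these JMX paths is tokenized into a measurement name and a tag set. `tokenize` is a hypothetical, condensed helper written for illustration only; the plugin's real parsing lives in `parseJmxMetricRequest`/`addTokensToTags` in cassandra.go below.

```
package main

import (
    "fmt"
    "strings"
)

// tokenize splits an mbean such as the entries listed above: the JMX domain
// before ":" selects the measurement family ("java" or "cassandra"), the
// "type" pair completes the measurement name, "name" becomes the "mname"
// tag, and every remaining key=value pair becomes a tag.
func tokenize(mbean string) (measurement string, tags map[string]string) {
    domainAndPairs := strings.SplitN(mbean, ":", 2)
    class := map[string]string{
        "org.apache.cassandra.metrics": "cassandra",
        "java.lang":                    "java",
    }[domainAndPairs[0]]
    tags = map[string]string{}
    for _, pair := range strings.Split(domainAndPairs[1], ",") {
        kv := strings.SplitN(pair, "=", 2)
        switch kv[0] {
        case "type":
            measurement = class + kv[1] // e.g. "cassandraTable"
        case "name":
            tags["mname"] = kv[1]
        default:
            tags[kv[0]] = kv[1]
        }
    }
    return measurement, tags
}

func main() {
    m, tags := tokenize("org.apache.cassandra.metrics:type=Table,keyspace=ks1,scope=t1,name=ReadLatency")
    fmt.Println(m, tags)
    // cassandraTable map[keyspace:ks1 mname:ReadLatency scope:t1]
}
```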
308 plugins/inputs/cassandra/cassandra.go Normal file
@@ -0,0 +1,308 @@
package cassandra

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
    "net/url"
    "strings"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type JolokiaClient interface {
    MakeRequest(req *http.Request) (*http.Response, error)
}

type JolokiaClientImpl struct {
    client *http.Client
}

func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error) {
    return c.client.Do(req)
}

type Cassandra struct {
    jClient JolokiaClient
    Context string
    Servers []string
    Metrics []string
}

type javaMetric struct {
    host   string
    metric string
    acc    telegraf.Accumulator
}

type cassandraMetric struct {
    host   string
    metric string
    acc    telegraf.Accumulator
}

type jmxMetric interface {
    addTagsFields(out map[string]interface{})
}

func newJavaMetric(host string, metric string,
    acc telegraf.Accumulator) *javaMetric {
    return &javaMetric{host: host, metric: metric, acc: acc}
}

func newCassandraMetric(host string, metric string,
    acc telegraf.Accumulator) *cassandraMetric {
    return &cassandraMetric{host: host, metric: metric, acc: acc}
}

func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
    mname string) {
    for k, v := range values {
        if v != nil {
            fields[mname+"_"+k] = v
        }
    }
}

func parseJmxMetricRequest(mbean string) map[string]string {
    tokens := make(map[string]string)
    classAndPairs := strings.Split(mbean, ":")
    if classAndPairs[0] == "org.apache.cassandra.metrics" {
        tokens["class"] = "cassandra"
    } else if classAndPairs[0] == "java.lang" {
        tokens["class"] = "java"
    } else {
        return tokens
    }
    pairs := strings.Split(classAndPairs[1], ",")
    for _, pair := range pairs {
        p := strings.Split(pair, "=")
        tokens[p[0]] = p[1]
    }
    return tokens
}

func addTokensToTags(tokens map[string]string, tags map[string]string) {
    for k, v := range tokens {
        if k == "name" {
            tags["mname"] = v // "name" seems to be a reserved word in InfluxDB
        } else if k == "class" || k == "type" {
            continue // class and type are used in the metric name
        } else {
            tags[k] = v
        }
    }
}

func (j javaMetric) addTagsFields(out map[string]interface{}) {
    tags := make(map[string]string)
    fields := make(map[string]interface{})

    a := out["request"].(map[string]interface{})
    attribute := a["attribute"].(string)
    mbean := a["mbean"].(string)

    tokens := parseJmxMetricRequest(mbean)
    addTokensToTags(tokens, tags)
    tags["cassandra_host"] = j.host

    if _, ok := tags["mname"]; !ok {
        // Queries for a single value will not return a "name" tag in the response.
        tags["mname"] = attribute
    }

    if values, ok := out["value"]; ok {
        switch t := values.(type) {
        case map[string]interface{}:
            addValuesAsFields(values.(map[string]interface{}), fields, attribute)
        case interface{}:
            fields[attribute] = t
        }
        j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
    } else {
        fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
            j.metric, out)
    }
}

func addCassandraMetric(mbean string, c cassandraMetric,
    values map[string]interface{}) {

    tags := make(map[string]string)
    fields := make(map[string]interface{})
    tokens := parseJmxMetricRequest(mbean)
    addTokensToTags(tokens, tags)
    tags["cassandra_host"] = c.host
    addValuesAsFields(values, fields, tags["mname"])
    c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
}

func (c cassandraMetric) addTagsFields(out map[string]interface{}) {

    r := out["request"]

    tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))
    // Requests with wildcards for keyspace or table names will return nested
    // maps in the JSON response.
    if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" ||
        tokens["scope"] == "*") {
        if valuesMap, ok := out["value"]; ok {
            for k, v := range valuesMap.(map[string]interface{}) {
                addCassandraMetric(k, c, v.(map[string]interface{}))
            }
        } else {
            fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
                c.metric, out)
            return
        }
    } else {
        if values, ok := out["value"]; ok {
            addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
                c, values.(map[string]interface{}))
        } else {
            fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
                c.metric, out)
            return
        }
    }
}

func (j *Cassandra) SampleConfig() string {
    return `
  ## This is the context root used to compose the jolokia url
  context = "/jolokia/read"
  ## List of cassandra servers exposing jolokia read service
  servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
  ## List of metrics collected on above servers
  ## Each metric consists of a jmx path.
  ## This will collect all heap memory usage metrics from the jvm and
  ## ReadLatency metrics for all keyspaces and tables.
  ## "type=Table" in the query works with Cassandra 3.0. Older versions might
  ## need to use "type=ColumnFamily"
  metrics  = [
    "/java.lang:type=Memory/HeapMemoryUsage",
    "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
  ]
`
}

func (j *Cassandra) Description() string {
    return "Read Cassandra metrics through Jolokia"
}

func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
    // Create + send request
    req, err := http.NewRequest("GET", requestUrl.String(), nil)
    if err != nil {
        return nil, err
    }

    resp, err := j.jClient.MakeRequest(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    // Process response
    if resp.StatusCode != http.StatusOK {
        err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
            requestUrl,
            resp.StatusCode,
            http.StatusText(resp.StatusCode),
            http.StatusOK,
            http.StatusText(http.StatusOK))
        return nil, err
    }

    // Read body
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }

    // Unmarshal json
    var jsonOut map[string]interface{}
    if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
        return nil, errors.New("Error decoding JSON response")
    }

    return jsonOut, nil
}

func parseServerTokens(server string) map[string]string {
    serverTokens := make(map[string]string)

    hostAndUser := strings.Split(server, "@")
    hostPort := ""
    userPasswd := ""
    if len(hostAndUser) == 2 {
        hostPort = hostAndUser[1]
        userPasswd = hostAndUser[0]
    } else {
        hostPort = hostAndUser[0]
    }
    hostTokens := strings.Split(hostPort, ":")
    serverTokens["host"] = hostTokens[0]
    serverTokens["port"] = hostTokens[1]

    if userPasswd != "" {
        userTokens := strings.Split(userPasswd, ":")
        serverTokens["user"] = userTokens[0]
        serverTokens["passwd"] = userTokens[1]
    }
    return serverTokens
}

func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
    context := c.Context
    servers := c.Servers
    metrics := c.Metrics

    for _, server := range servers {
        for _, metric := range metrics {
            serverTokens := parseServerTokens(server)

            var m jmxMetric
            if strings.HasPrefix(metric, "/java.lang:") {
                m = newJavaMetric(serverTokens["host"], metric, acc)
            } else if strings.HasPrefix(metric,
                "/org.apache.cassandra.metrics:") {
                m = newCassandraMetric(serverTokens["host"], metric, acc)
            } else {
                // Unsupported metric type
                log.Printf("I! Unsupported Cassandra metric [%s], skipping",
                    metric)
                continue
            }

            // Prepare URL
            requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
                serverTokens["port"] + context + metric)
            if err != nil {
                return err
            }
            if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
                requestUrl.User = url.UserPassword(serverTokens["user"],
                    serverTokens["passwd"])
            }

            out, err := c.getAttr(requestUrl)
            if err != nil {
                // The request itself failed; skip this metric rather than
                // dereferencing a nil response below.
                log.Printf("E! Error fetching '%s' from '%s': %v", metric, server, err)
                continue
            }
            if out["status"] != 200.0 {
                fmt.Printf("URL returned with status %v\n", out["status"])
                continue
            }
            m.addTagsFields(out)
        }
    }
    return nil
}

func init() {
    inputs.Add("cassandra", func() telegraf.Input {
        return &Cassandra{jClient: &JolokiaClientImpl{client: &http.Client{}}}
    })
}
286 plugins/inputs/cassandra/cassandra_test.go Normal file
@@ -0,0 +1,286 @@
package cassandra

import (
    _ "fmt"
    "io/ioutil"
    "net/http"
    "strings"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    _ "github.com/stretchr/testify/require"
)

const validJavaMultiValueJSON = `
{
  "request": {
    "mbean": "java.lang:type=Memory",
    "attribute": "HeapMemoryUsage",
    "type": "read"
  },
  "value": {
    "init": 67108864, "committed": 456130560, "max": 477626368, "used": 203288528
  },
  "timestamp": 1446129191,
  "status": 200
}`

const validCassandraMultiValueJSON = `
{
  "request": {
    "mbean": "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table,type=Table",
    "type": "read"},
  "status": 200,
  "timestamp": 1458089229,
  "value": {
    "999thPercentile": 20.0, "99thPercentile": 10.0, "Count": 400,
    "DurationUnit": "microseconds", "Max": 30.0, "Mean": null, "MeanRate": 3.0,
    "Min": 1.0, "RateUnit": "events/second", "StdDev": null
  }
}`

const validCassandraNestedMultiValueJSON = `
{
  "request": {
    "mbean": "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=*,type=Table",
    "type": "read"},
  "status": 200,
  "timestamp": 1458089184,
  "value": {
    "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table1,type=Table":
      { "999thPercentile": 1.0, "Count": 100, "DurationUnit": "microseconds",
        "OneMinuteRate": 1.0, "RateUnit": "events/second", "StdDev": null },
    "org.apache.cassandra.metrics:keyspace=test_keyspace2,name=ReadLatency,scope=test_table2,type=Table":
      { "999thPercentile": 2.0, "Count": 200, "DurationUnit": "microseconds",
        "OneMinuteRate": 2.0, "RateUnit": "events/second", "StdDev": null }
  }
}`

const validSingleValueJSON = `
{
  "request": {
    "path": "used",
    "mbean": "java.lang:type=Memory",
    "attribute": "HeapMemoryUsage",
    "type": "read"
  },
  "value": 209274376,
  "timestamp": 1446129256,
  "status": 200
}`

const validJavaMultiTypeJSON = `
{
  "request": {
    "mbean": "java.lang:name=ConcurrentMarkSweep,type=GarbageCollector",
    "attribute": "CollectionCount",
    "type": "read"
  },
  "value": 1,
  "timestamp": 1459316570,
  "status": 200
}`

const invalidJSON = "I don't think this is JSON"

const empty = ""

var Servers = []string{"10.10.10.10:8778"}
var AuthServers = []string{"user:passwd@10.10.10.10:8778"}
var MultipleServers = []string{"10.10.10.10:8778", "10.10.10.11:8778"}
var HeapMetric = "/java.lang:type=Memory/HeapMemoryUsage"
var ReadLatencyMetric = "/org.apache.cassandra.metrics:type=Table,keyspace=test_keyspace1,scope=test_table,name=ReadLatency"
var NestedReadLatencyMetric = "/org.apache.cassandra.metrics:type=Table,keyspace=test_keyspace1,scope=*,name=ReadLatency"
var GarbageCollectorMetric1 = "/java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount"
var GarbageCollectorMetric2 = "/java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime"
var Context = "/jolokia/read"

type jolokiaClientStub struct {
    responseBody string
    statusCode   int
}

func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
    resp := http.Response{}
    resp.StatusCode = c.statusCode
    resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
    return &resp, nil
}

// Generates a pointer to a Cassandra object that uses a mock HTTP client.
// Parameters:
//     response  : Body of the response that the mock HTTP client should return
//     statusCode: HTTP status code the mock HTTP client should return
//
// Returns:
//     *Cassandra: Pointer to a Cassandra object that uses the generated mock HTTP client
func genJolokiaClientStub(response string, statusCode int, servers []string, metrics []string) *Cassandra {
    return &Cassandra{
        jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
        Context: Context,
        Servers: servers,
        Metrics: metrics,
    }
}

// Test that the proper values are ignored or collected for class=Java
func TestHttpJsonJavaMultiValue(t *testing.T) {
    cassandra := genJolokiaClientStub(validJavaMultiValueJSON, 200,
        MultipleServers, []string{HeapMetric})

    var acc testutil.Accumulator
    acc.SetDebug(true)
    err := cassandra.Gather(&acc)

    assert.Nil(t, err)
    assert.Equal(t, 2, len(acc.Metrics))

    fields := map[string]interface{}{
        "HeapMemoryUsage_init":      67108864.0,
        "HeapMemoryUsage_committed": 456130560.0,
        "HeapMemoryUsage_max":       477626368.0,
        "HeapMemoryUsage_used":      203288528.0,
    }
    tags1 := map[string]string{
        "cassandra_host": "10.10.10.10",
        "mname":          "HeapMemoryUsage",
    }

    tags2 := map[string]string{
        "cassandra_host": "10.10.10.11",
        "mname":          "HeapMemoryUsage",
    }
    acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags1)
    acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags2)
}

func TestHttpJsonJavaMultiType(t *testing.T) {
    cassandra := genJolokiaClientStub(validJavaMultiTypeJSON, 200, AuthServers,
        []string{GarbageCollectorMetric1, GarbageCollectorMetric2})

    var acc testutil.Accumulator
    acc.SetDebug(true)
    err := cassandra.Gather(&acc)

    assert.Nil(t, err)
    assert.Equal(t, 2, len(acc.Metrics))

    fields := map[string]interface{}{
        "CollectionCount": 1.0,
    }

    tags := map[string]string{
        "cassandra_host": "10.10.10.10",
        "mname":          "ConcurrentMarkSweep",
    }
    acc.AssertContainsTaggedFields(t, "javaGarbageCollector", fields, tags)
}

// Test that no metrics are collected on a 404 response
func TestHttpJsonOn404(t *testing.T) {

    jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
        []string{HeapMetric})

    var acc testutil.Accumulator
    err := jolokia.Gather(&acc)

    assert.Nil(t, err)
    assert.Equal(t, 0, len(acc.Metrics))
}

// Test that the proper values are ignored or collected for class=Cassandra
func TestHttpJsonCassandraMultiValue(t *testing.T) {
    cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})

    var acc testutil.Accumulator
    err := cassandra.Gather(&acc)

    assert.Nil(t, err)
    assert.Equal(t, 1, len(acc.Metrics))

    fields := map[string]interface{}{
        "ReadLatency_999thPercentile": 20.0,
        "ReadLatency_99thPercentile":  10.0,
        "ReadLatency_Count":           400.0,
        "ReadLatency_DurationUnit":    "microseconds",
        "ReadLatency_Max":             30.0,
        "ReadLatency_MeanRate":        3.0,
        "ReadLatency_Min":             1.0,
        "ReadLatency_RateUnit":        "events/second",
    }

    tags := map[string]string{
        "cassandra_host": "10.10.10.10",
        "mname":          "ReadLatency",
        "keyspace":       "test_keyspace1",
        "scope":          "test_table",
    }
    acc.AssertContainsTaggedFields(t, "cassandraTable", fields, tags)
}

// Test that the proper values are ignored or collected for class=Cassandra with
// nested values
func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
    cassandra := genJolokiaClientStub(validCassandraNestedMultiValueJSON, 200, Servers, []string{NestedReadLatencyMetric})

    var acc testutil.Accumulator
    acc.SetDebug(true)
    err := cassandra.Gather(&acc)

    assert.Nil(t, err)
    assert.Equal(t, 2, len(acc.Metrics))

    fields1 := map[string]interface{}{
        "ReadLatency_999thPercentile": 1.0,
        "ReadLatency_Count":           100.0,
        "ReadLatency_DurationUnit":    "microseconds",
        "ReadLatency_OneMinuteRate":   1.0,
        "ReadLatency_RateUnit":        "events/second",
    }

    fields2 := map[string]interface{}{
        "ReadLatency_999thPercentile": 2.0,
        "ReadLatency_Count":           200.0,
        "ReadLatency_DurationUnit":    "microseconds",
        "ReadLatency_OneMinuteRate":   2.0,
        "ReadLatency_RateUnit":        "events/second",
    }

    tags1 := map[string]string{
        "cassandra_host": "10.10.10.10",
        "mname":          "ReadLatency",
        "keyspace":       "test_keyspace1",
        "scope":          "test_table1",
    }

    tags2 := map[string]string{
        "cassandra_host": "10.10.10.10",
        "mname":          "ReadLatency",
        "keyspace":       "test_keyspace2",
        "scope":          "test_table2",
    }

    acc.AssertContainsTaggedFields(t, "cassandraTable", fields1, tags1)
    acc.AssertContainsTaggedFields(t, "cassandraTable", fields2, tags2)
}
222 plugins/inputs/ceph/README.md Normal file
@@ -0,0 +1,222 @@
# Ceph Storage Input Plugin

Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.

*Admin Socket Stats*

This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**.

The resulting JSON is parsed and grouped into collections, based on the top-level key. Top-level keys are
used as collection tags, and all sub-keys are flattened. For example:

```
{
  "paxos": {
    "refresh": 9363435,
    "refresh_latency": {
      "avgcount": 9363435,
      "sum": 5378.794002000
    }
  }
}
```

would be parsed into the following metrics, all of which would be tagged with collection=paxos:

- refresh = 9363435
- refresh_latency.avgcount = 9363435
- refresh_latency.sum = 5378.794002000
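
A minimal sketch of that flattening, assuming only the standard library; `flatten` here is an illustration of the transformation, not the plugin's implementation (which lives in ceph.go further down):

```
package main

import (
    "encoding/json"
    "fmt"
)

// flatten joins nested keys with "." and keeps only numeric leaves,
// mirroring how the example above becomes refresh_latency.avgcount etc.
func flatten(prefix string, v interface{}, out map[string]float64) {
    switch val := v.(type) {
    case float64:
        out[prefix] = val
    case map[string]interface{}:
        for k, child := range val {
            key := k
            if prefix != "" {
                key = prefix + "." + k
            }
            flatten(key, child, out)
        }
    }
}

func main() {
    raw := `{"paxos":{"refresh":9363435,"refresh_latency":{"avgcount":9363435,"sum":5378.794002}}}`
    var data map[string]interface{}
    if err := json.Unmarshal([]byte(raw), &data); err != nil {
        panic(err)
    }
    for collection, sub := range data {
        fields := map[string]float64{}
        flatten("", sub, fields)
        fmt.Println(collection, fields) // collection becomes the tag, fields the metrics
    }
}
```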
*Cluster Stats*

This gatherer works by invoking ceph commands against the cluster, and thus only requires the ceph client, a valid
ceph configuration, and an access key to function (the ceph_config and ceph_user configuration variables work
in conjunction to specify these prerequisites). It may be run on any server with access to
the cluster. The currently supported commands are:

* ceph status
* ceph df
* ceph osd pool stats

### Configuration:

```
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]]
  ## This is the recommended interval to poll. Too frequent and you will lose
  ## data points due to timeouts during rebalancing and recovery
  interval = '1m'

  ## All configuration values are optional, defaults are shown below

  ## location of ceph binary
  ceph_binary = "/usr/bin/ceph"

  ## directory in which to look for socket files
  socket_dir = "/var/run/ceph"

  ## prefix of MON and OSD socket files, used to determine socket type
  mon_prefix = "ceph-mon"
  osd_prefix = "ceph-osd"

  ## suffix used to identify socket files
  socket_suffix = "asok"

  ## Ceph user to authenticate as. Ceph will search for the corresponding keyring,
  ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
  ## client section of ceph.conf, for example:
  ##
  ##     [client.telegraf]
  ##         keyring = /etc/ceph/client.telegraf.keyring
  ##
  ## Consult the ceph documentation for more detail on keyring generation.
  ceph_user = "client.admin"

  ## Ceph configuration to use to locate the cluster
  ceph_config = "/etc/ceph/ceph.conf"

  ## Whether to gather statistics via the admin socket
  gather_admin_socket_stats = true

  ## Whether to gather statistics via ceph commands; requires ceph_user and
  ## ceph_config to be specified
  gather_cluster_stats = false
```
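
Given the defaults above, the cluster-stats path boils down to running the ceph CLI with JSON output and decoding the result. A minimal sketch, using the binary, config, and user from the sample configuration (error handling reduced to prints):

```
package main

import (
    "encoding/json"
    "fmt"
    "os/exec"
)

func main() {
    // Equivalent of "ceph df" the way the plugin invokes it.
    out, err := exec.Command("/usr/bin/ceph",
        "--conf", "/etc/ceph/ceph.conf",
        "--name", "client.admin",
        "--format", "json",
        "df").Output()
    if err != nil {
        fmt.Println("ceph df failed:", err)
        return
    }
    var data map[string]interface{}
    if err := json.Unmarshal(out, &data); err != nil {
        fmt.Println("unexpected output:", err)
        return
    }
    fmt.Println(data["stats"]) // global utilization; feeds the ceph_usage measurement
}
```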
### Measurements & Fields:

*Admin Socket Stats*

All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.

*Cluster Stats*

* ceph\_osdmap
  * epoch (float)
  * full (boolean)
  * nearfull (boolean)
  * num\_in\_osds (float)
  * num\_osds (float)
  * num\_remapped\_pgs (float)
  * num\_up\_osds (float)

* ceph\_pgmap
  * bytes\_avail (float)
  * bytes\_total (float)
  * bytes\_used (float)
  * data\_bytes (float)
  * num\_pgs (float)
  * op\_per\_sec (float)
  * read\_bytes\_sec (float)
  * version (float)
  * write\_bytes\_sec (float)
  * recovering\_bytes\_per\_sec (float)
  * recovering\_keys\_per\_sec (float)
  * recovering\_objects\_per\_sec (float)

* ceph\_pgmap\_state
  * state name e.g. active+clean (float)

* ceph\_usage
  * bytes\_used (float)
  * kb\_used (float)
  * max\_avail (float)
  * objects (float)

* ceph\_pool\_usage
  * bytes\_used (float)
  * kb\_used (float)
  * max\_avail (float)
  * objects (float)

* ceph\_pool\_stats
  * op\_per\_sec (float)
  * read\_bytes\_sec (float)
  * write\_bytes\_sec (float)
  * recovering\_objects\_per\_sec (float)
  * recovering\_bytes\_per\_sec (float)
  * recovering\_keys\_per\_sec (float)

### Tags:

*Admin Socket Stats*

All measurements will have the following tags:

- type: either 'osd' or 'mon' to indicate which type of node was queried
- id: a unique string identifier, parsed from the socket file name for the node
- collection: the top-level key under which these fields were reported. Possible values are:
  - for MON nodes:
    - cluster
    - leveldb
    - mon
    - paxos
    - throttle-mon_client_bytes
    - throttle-mon_daemon_bytes
    - throttle-msgr_dispatch_throttler-mon
  - for OSD nodes:
    - WBThrottle
    - filestore
    - leveldb
    - mutex-FileJournal::completions_lock
    - mutex-FileJournal::finisher_lock
    - mutex-FileJournal::write_lock
    - mutex-FileJournal::writeq_lock
    - mutex-JOS::ApplyManager::apply_lock
    - mutex-JOS::ApplyManager::com_lock
    - mutex-JOS::SubmitManager::lock
    - mutex-WBThrottle::lock
    - objecter
    - osd
    - recoverystate_perf
    - throttle-filestore_bytes
    - throttle-filestore_ops
    - throttle-msgr_dispatch_throttler-client
    - throttle-msgr_dispatch_throttler-cluster
    - throttle-msgr_dispatch_throttler-hb_back_server
    - throttle-msgr_dispatch_throttler-hb_front_server
    - throttle-msgr_dispatch_throttler-hbclient
    - throttle-msgr_dispatch_throttler-ms_objecter
    - throttle-objecter_bytes
    - throttle-objecter_ops
    - throttle-osd_client_bytes
    - throttle-osd_client_messages

*Cluster Stats*

* ceph\_pgmap\_state has the following tags:
  * state (state for which the value applies, e.g. active+clean, active+remapped+backfill)
* ceph\_pool\_usage has the following tags:
  * id
  * name
* ceph\_pool\_stats has the following tags:
  * id
  * name

### Example Output:

*Admin Socket Stats*

<pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
* Plugin: ceph, Collection 1
> ceph,collection=paxos,id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
</pre>

*Cluster Stats*

<pre>
> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
</pre>
489 plugins/inputs/ceph/ceph.go Normal file
@@ -0,0 +1,489 @@
package ceph

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "os/exec"
    "path/filepath"
    "strings"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

const (
    measurement = "ceph"
    typeMon     = "monitor"
    typeOsd     = "osd"
    osdPrefix   = "ceph-osd"
    monPrefix   = "ceph-mon"
    sockSuffix  = "asok"
)

type Ceph struct {
    CephBinary             string
    OsdPrefix              string
    MonPrefix              string
    SocketDir              string
    SocketSuffix           string
    CephUser               string
    CephConfig             string
    GatherAdminSocketStats bool
    GatherClusterStats     bool
}

func (c *Ceph) Description() string {
    return "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster."
}

var sampleConfig = `
  ## This is the recommended interval to poll. Too frequent and you will lose
  ## data points due to timeouts during rebalancing and recovery
  interval = '1m'

  ## All configuration values are optional, defaults are shown below

  ## location of ceph binary
  ceph_binary = "/usr/bin/ceph"

  ## directory in which to look for socket files
  socket_dir = "/var/run/ceph"

  ## prefix of MON and OSD socket files, used to determine socket type
  mon_prefix = "ceph-mon"
  osd_prefix = "ceph-osd"

  ## suffix used to identify socket files
  socket_suffix = "asok"

  ## Ceph user to authenticate as
  ceph_user = "client.admin"

  ## Ceph configuration to use to locate the cluster
  ceph_config = "/etc/ceph/ceph.conf"

  ## Whether to gather statistics via the admin socket
  gather_admin_socket_stats = true

  ## Whether to gather statistics via ceph commands
  gather_cluster_stats = false
`

func (c *Ceph) SampleConfig() string {
    return sampleConfig
}

func (c *Ceph) Gather(acc telegraf.Accumulator) error {
    if c.GatherAdminSocketStats {
        if err := c.gatherAdminSocketStats(acc); err != nil {
            return err
        }
    }

    if c.GatherClusterStats {
        if err := c.gatherClusterStats(acc); err != nil {
            return err
        }
    }

    return nil
}

func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
    sockets, err := findSockets(c)
    if err != nil {
        return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
    }

    for _, s := range sockets {
        dump, err := perfDump(c.CephBinary, s)
        if err != nil {
            log.Printf("E! error reading from socket '%s': %v", s.socket, err)
            continue
        }
        data, err := parseDump(dump)
        if err != nil {
            log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
            continue
        }
        for tag, metrics := range *data {
            acc.AddFields(measurement,
                map[string]interface{}(metrics),
                map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
        }
    }
    return nil
}

func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error {
    jobs := []struct {
        command string
        parser  func(telegraf.Accumulator, string) error
    }{
        {"status", decodeStatus},
        {"df", decodeDf},
        {"osd pool stats", decodeOsdPoolStats},
    }

    // For each job, execute against the cluster, parse and accumulate the data points
    for _, job := range jobs {
        output, err := c.exec(job.command)
        if err != nil {
            return fmt.Errorf("error executing command: %v", err)
        }
        err = job.parser(acc, output)
        if err != nil {
            return fmt.Errorf("error parsing output: %v", err)
        }
    }

    return nil
}

func init() {
    c := Ceph{
        CephBinary:             "/usr/bin/ceph",
        OsdPrefix:              osdPrefix,
        MonPrefix:              monPrefix,
        SocketDir:              "/var/run/ceph",
        SocketSuffix:           sockSuffix,
        CephUser:               "client.admin",
        CephConfig:             "/etc/ceph/ceph.conf",
        GatherAdminSocketStats: true,
        GatherClusterStats:     false,
    }

    inputs.Add(measurement, func() telegraf.Input { return &c })
}

var perfDump = func(binary string, socket *socket) (string, error) {
    cmdArgs := []string{"--admin-daemon", socket.socket}
    if socket.sockType == typeOsd {
        cmdArgs = append(cmdArgs, "perf", "dump")
    } else if socket.sockType == typeMon {
        cmdArgs = append(cmdArgs, "perfcounters_dump")
    } else {
        return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType)
    }

    cmd := exec.Command(binary, cmdArgs...)
    var out bytes.Buffer
    cmd.Stdout = &out
    err := cmd.Run()
    if err != nil {
        return "", fmt.Errorf("error running ceph dump: %s", err)
    }

    return out.String(), nil
}

var findSockets = func(c *Ceph) ([]*socket, error) {
    listing, err := ioutil.ReadDir(c.SocketDir)
    if err != nil {
        return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err)
    }
    sockets := make([]*socket, 0, len(listing))
    for _, info := range listing {
        f := info.Name()
        var sockType string
        var sockPrefix string
        if strings.HasPrefix(f, c.MonPrefix) {
            sockType = typeMon
            sockPrefix = monPrefix
        }
        if strings.HasPrefix(f, c.OsdPrefix) {
            sockType = typeOsd
            sockPrefix = osdPrefix
        }
        if sockType == typeOsd || sockType == typeMon {
            path := filepath.Join(c.SocketDir, f)
            sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path})
        }
    }
    return sockets, nil
}

func parseSockId(fname, prefix, suffix string) string {
    s := fname
    s = strings.TrimPrefix(s, prefix)
    s = strings.TrimSuffix(s, suffix)
    s = strings.Trim(s, ".-_")
    return s
}

type socket struct {
    sockId   string
    sockType string
    socket   string
}

type metric struct {
    pathStack []string // LIFO stack of name components
    value     float64
}

// Pops names off pathStack to build the flattened name for a metric
func (m *metric) name() string {
    buf := bytes.Buffer{}
    for i := len(m.pathStack) - 1; i >= 0; i-- {
        if buf.Len() > 0 {
            buf.WriteString(".")
        }
        buf.WriteString(m.pathStack[i])
    }
    return buf.String()
}

type metricMap map[string]interface{}

type taggedMetricMap map[string]metricMap

// Parses a raw JSON string into a taggedMetricMap.
// Delegates the actual parsing to newTaggedMetricMap(..)
func parseDump(dump string) (*taggedMetricMap, error) {
    data := make(map[string]interface{})
    err := json.Unmarshal([]byte(dump), &data)
    if err != nil {
        return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
    }

    return newTaggedMetricMap(data), nil
}

// Builds a taggedMetricMap out of a generic string map.
// The top-level key is used as a tag and all sub-keys are flattened into metrics
func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
    tmm := make(taggedMetricMap)
    for tag, datapoints := range data {
        mm := make(metricMap)
        for _, m := range flatten(datapoints) {
            mm[m.name()] = m.value
        }
        tmm[tag] = mm
    }
    return &tmm
}

// Recursively flattens any k-v hierarchy present in data.
// Nested keys are flattened into ordered slices associated with a metric value.
// The key slices are treated as stacks, and are expected to be reversed and concatenated
// when passed as metrics to the accumulator. (see (*metric).name())
func flatten(data interface{}) []*metric {
    var metrics []*metric

    switch val := data.(type) {
    case float64:
        metrics = []*metric{&metric{make([]string, 0, 1), val}}
    case map[string]interface{}:
        metrics = make([]*metric, 0, len(val))
        for k, v := range val {
            for _, m := range flatten(v) {
                m.pathStack = append(m.pathStack, k)
                metrics = append(metrics, m)
            }
        }
    default:
        log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
    }

    return metrics
}

func (c *Ceph) exec(command string) (string, error) {
    cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"}
    cmdArgs = append(cmdArgs, strings.Split(command, " ")...)

    cmd := exec.Command(c.CephBinary, cmdArgs...)

    var out bytes.Buffer
    cmd.Stdout = &out
    err := cmd.Run()
    if err != nil {
        return "", fmt.Errorf("error running ceph %v: %s", command, err)
    }

    output := out.String()

    // Ceph doesn't sanitize its output, and may return invalid JSON. Patch this
    // up for them, as having some inaccurate data is better than none.
    output = strings.Replace(output, "-inf", "0", -1)
    output = strings.Replace(output, "inf", "0", -1)

    return output, nil
}

func decodeStatus(acc telegraf.Accumulator, input string) error {
    data := make(map[string]interface{})
    err := json.Unmarshal([]byte(input), &data)
    if err != nil {
        return fmt.Errorf("failed to parse json: '%s': %v", input, err)
    }

    err = decodeStatusOsdmap(acc, data)
    if err != nil {
        return err
    }

    err = decodeStatusPgmap(acc, data)
    if err != nil {
        return err
    }

    err = decodeStatusPgmapState(acc, data)
    if err != nil {
        return err
    }

    return nil
}

func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error {
    osdmap, ok := data["osdmap"].(map[string]interface{})
    if !ok {
        return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
    }
    fields, ok := osdmap["osdmap"].(map[string]interface{})
    if !ok {
        return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
    }
    acc.AddFields("ceph_osdmap", fields, map[string]string{})
    return nil
}

func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error {
    pgmap, ok := data["pgmap"].(map[string]interface{})
    if !ok {
        return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
    }
    fields := make(map[string]interface{})
    for key, value := range pgmap {
        switch value.(type) {
        case float64:
            fields[key] = value
        }
    }
    acc.AddFields("ceph_pgmap", fields, map[string]string{})
    return nil
}

func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
    pgmap, ok := data["pgmap"].(map[string]interface{})
    if !ok {
        return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
    }
    fields := make(map[string]interface{})
    for key, value := range pgmap {
        switch value.(type) {
        case []interface{}:
            if key != "pgs_by_state" {
                continue
            }
            for _, state := range value.([]interface{}) {
                state_map, ok := state.(map[string]interface{})
                if !ok {
                    return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
                }
                state_name, ok := state_map["state_name"].(string)
                if !ok {
                    return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
                }
                state_count, ok := state_map["count"].(float64)
                if !ok {
                    return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
                }
                fields[state_name] = state_count
            }
        }
    }
    acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
    return nil
}

func decodeDf(acc telegraf.Accumulator, input string) error {
    data := make(map[string]interface{})
    err := json.Unmarshal([]byte(input), &data)
    if err != nil {
        return fmt.Errorf("failed to parse json: '%s': %v", input, err)
    }

    // ceph.usage: records global utilization and number of objects
    stats_fields, ok := data["stats"].(map[string]interface{})
    if !ok {
        return fmt.Errorf("WARNING %s - unable to decode df stats", measurement)
    }
    acc.AddFields("ceph_usage", stats_fields, map[string]string{})

    // ceph.pool.usage: records per-pool utilization and number of objects
    pools, ok := data["pools"].([]interface{})
    if !ok {
        return fmt.Errorf("WARNING %s - unable to decode df pools", measurement)
    }

    for _, pool := range pools {
        pool_map, ok := pool.(map[string]interface{})
        if !ok {
            return fmt.Errorf("WARNING %s - unable to decode df pool", measurement)
        }
        pool_name, ok := pool_map["name"].(string)
        if !ok {
            return fmt.Errorf("WARNING %s - unable to decode df pool name", measurement)
        }
        fields, ok := pool_map["stats"].(map[string]interface{})
        if !ok {
            return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement)
        }
        tags := map[string]string{
            "name": pool_name,
        }
        acc.AddFields("ceph_pool_usage", fields, tags)
    }

    return nil
}

func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error {
    data := make([]map[string]interface{}, 0)
    err := json.Unmarshal([]byte(input), &data)
    if err != nil {
        return fmt.Errorf("failed to parse json: '%s': %v", input, err)
    }

    // ceph.pool.stats: records per-pool IO and recovery throughput
    for _, pool := range data {
        pool_name, ok := pool["pool_name"].(string)
        if !ok {
            return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement)
        }
        // Note: the 'recovery' object looks broken (in hammer), so it's omitted
        objects := []string{
            "client_io_rate",
            "recovery_rate",
        }
        fields := make(map[string]interface{})
        for _, object := range objects {
            perfdata, ok := pool[object].(map[string]interface{})
            if !ok {
                return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement)
            }
            for key, value := range perfdata {
                fields[key] = value
            }
        }
        tags := map[string]string{
            "name": pool_name,
        }
        acc.AddFields("ceph_pool_stats", fields, tags)
    }

    return nil
}
687 plugins/inputs/ceph/ceph_test.go Normal file
@@ -0,0 +1,687 @@
|
||||
package ceph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
epsilon = float64(0.00000001)
|
||||
)
|
||||
|
||||
func TestParseSockId(t *testing.T) {
|
||||
s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix)
|
||||
assert.Equal(t, s, "1")
|
||||
}
|
||||
|
||||
func TestParseMonDump(t *testing.T) {
|
||||
dump, err := parseDump(monPerfDump)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
|
||||
assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
|
||||
}
|
||||
|
||||
func TestParseOsdDump(t *testing.T) {
|
||||
dump, err := parseDump(osdPerfDump)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
|
||||
assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
|
||||
}
|
||||
|
||||
func TestGather(t *testing.T) {
|
||||
saveFind := findSockets
|
||||
saveDump := perfDump
|
||||
defer func() {
|
||||
findSockets = saveFind
|
||||
perfDump = saveDump
|
||||
}()
|
||||
|
||||
findSockets = func(c *Ceph) ([]*socket, error) {
|
||||
return []*socket{&socket{"osd.1", typeOsd, ""}}, nil
|
||||
}
|
||||
|
||||
perfDump = func(binary string, s *socket) (string, error) {
|
||||
return osdPerfDump, nil
|
||||
}
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
c := &Ceph{}
|
||||
c.Gather(acc)
|
||||
|
||||
}
|
||||
|
||||
func TestFindSockets(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "socktest")
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.Remove(tmpdir)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
c := &Ceph{
|
||||
CephBinary: "foo",
|
||||
OsdPrefix: "ceph-osd",
|
||||
MonPrefix: "ceph-mon",
|
||||
SocketDir: tmpdir,
|
||||
SocketSuffix: "asok",
|
||||
CephUser: "client.admin",
|
||||
CephConfig: "/etc/ceph/ceph.conf",
|
||||
GatherAdminSocketStats: true,
|
||||
GatherClusterStats: false,
|
||||
}
|
||||
|
||||
for _, st := range sockTestParams {
|
||||
createTestFiles(tmpdir, st)
|
||||
|
||||
sockets, err := findSockets(c)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 1; i <= st.osds; i++ {
|
||||
assertFoundSocket(t, tmpdir, typeOsd, i, sockets)
|
||||
}
|
||||
|
||||
for i := 1; i <= st.mons; i++ {
|
||||
assertFoundSocket(t, tmpdir, typeMon, i, sockets)
|
||||
}
|
||||
cleanupTestFiles(tmpdir, st)
|
||||
}
|
||||
}
|
||||
|
||||
func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*socket) {
|
||||
var prefix string
|
||||
if sockType == typeOsd {
|
||||
prefix = osdPrefix
|
||||
} else {
|
||||
prefix = monPrefix
|
||||
}
|
||||
expected := path.Join(dir, sockFile(prefix, i))
|
||||
found := false
|
||||
for _, s := range sockets {
|
||||
fmt.Printf("Checking %s\n", s.socket)
|
||||
if s.socket == expected {
|
||||
found = true
|
||||
assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s)
|
||||
assert.Equal(t, s.sockId, strconv.Itoa(i))
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Did not find socket: %s", expected)
|
||||
}
|
||||
|
||||
func sockFile(prefix string, i int) string {
|
||||
return strings.Join([]string{prefix, strconv.Itoa(i), sockSuffix}, ".")
|
||||
}
|
||||
|
||||
func createTestFiles(dir string, st *SockTest) {
|
||||
writeFile := func(prefix string, i int) {
|
||||
f := sockFile(prefix, i)
|
||||
fpath := path.Join(dir, f)
|
||||
ioutil.WriteFile(fpath, []byte(""), 0777)
|
||||
}
|
||||
tstFileApply(st, writeFile)
|
||||
}
|
||||
|
||||
func cleanupTestFiles(dir string, st *SockTest) {
|
||||
rmFile := func(prefix string, i int) {
|
||||
f := sockFile(prefix, i)
|
||||
fpath := path.Join(dir, f)
|
||||
err := os.Remove(fpath)
|
||||
if err != nil {
|
||||
fmt.Printf("Error removing test file %s: %v\n", fpath, err)
|
||||
}
|
||||
}
|
||||
tstFileApply(st, rmFile)
|
||||
}
|
||||
|
||||
func tstFileApply(st *SockTest, fn func(prefix string, i int)) {
|
||||
for i := 1; i <= st.osds; i++ {
|
||||
fn(osdPrefix, i)
|
||||
}
|
||||
for i := 1; i <= st.mons; i++ {
|
||||
fn(monPrefix, i)
|
||||
}
|
||||
}
|
||||
|
||||
type SockTest struct {
|
||||
osds int
|
||||
mons int
|
||||
}
|
||||
|
||||
var sockTestParams = []*SockTest{
|
||||
&SockTest{
|
||||
osds: 2,
|
||||
mons: 2,
|
||||
},
|
||||
&SockTest{
|
||||
mons: 1,
|
||||
},
|
||||
&SockTest{
|
||||
osds: 1,
|
||||
},
|
||||
&SockTest{},
|
||||
}
|
||||
|
||||
var monPerfDump = `
|
||||
{ "cluster": { "num_mon": 2,
|
||||
"num_mon_quorum": 2,
|
||||
"num_osd": 26,
|
||||
"num_osd_up": 26,
|
||||
"num_osd_in": 26,
|
||||
"osd_epoch": 3306,
|
||||
"osd_kb": 11487846448,
|
||||
"osd_kb_used": 5678670180,
|
||||
"osd_kb_avail": 5809176268,
|
||||
"num_pool": 12,
|
||||
"num_pg": 768,
|
||||
"num_pg_active_clean": 768,
|
||||
"num_pg_active": 768,
|
||||
"num_pg_peering": 0,
|
||||
"num_object": 397616,
|
||||
"num_object_degraded": 0,
|
||||
"num_object_unfound": 0,
|
||||
"num_bytes": 2917848227467,
|
||||
"num_mds_up": 0,
|
||||
"num_mds_in": 0,
|
||||
"num_mds_failed": 0,
|
||||
"mds_epoch": 1},
|
||||
"leveldb": { "leveldb_get": 321950312,
|
||||
"leveldb_transaction": 18729922,
|
||||
"leveldb_compact": 0,
|
||||
"leveldb_compact_range": 74141,
|
||||
"leveldb_compact_queue_merge": 0,
|
||||
"leveldb_compact_queue_len": 0},
|
||||
"mon": {},
|
||||
"paxos": { "start_leader": 0,
|
||||
"start_peon": 1,
|
||||
"restart": 4,
|
||||
"refresh": 9363435,
|
||||
"refresh_latency": { "avgcount": 9363435,
|
||||
"sum": 5378.794002000},
|
||||
"begin": 9363435,
|
||||
"begin_keys": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"begin_bytes": { "avgcount": 9363435,
|
||||
"sum": 110468605489},
|
||||
"begin_latency": { "avgcount": 9363435,
|
||||
"sum": 5850.060682000},
|
||||
"commit": 9363435,
|
||||
"commit_keys": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"commit_bytes": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"commit_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"collect": 1,
|
||||
"collect_keys": { "avgcount": 1,
|
||||
"sum": 1},
|
||||
"collect_bytes": { "avgcount": 1,
|
||||
"sum": 24},
|
||||
"collect_latency": { "avgcount": 1,
|
||||
"sum": 0.000280000},
|
||||
"collect_uncommitted": 0,
|
||||
"collect_timeout": 0,
|
||||
"accept_timeout": 0,
|
||||
"lease_ack_timeout": 0,
|
||||
"lease_timeout": 0,
|
||||
"store_state": 9363435,
|
||||
"store_state_keys": { "avgcount": 9363435,
|
||||
"sum": 176572789},
|
||||
"store_state_bytes": { "avgcount": 9363435,
|
||||
"sum": 216355887217},
|
||||
"store_state_latency": { "avgcount": 9363435,
|
||||
"sum": 6866.540527000},
|
||||
"share_state": 0,
|
||||
"share_state_keys": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"share_state_bytes": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"new_pn": 0,
|
||||
"new_pn_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-mon_client_bytes": { "val": 246,
|
||||
"max": 104857600,
|
||||
"get": 896030,
|
||||
"get_sum": 45854374,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 896026,
|
||||
"put_sum": 45854128,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-mon_daemon_bytes": { "val": 0,
|
||||
"max": 419430400,
|
||||
"get": 2773768,
|
||||
"get_sum": 3627676976,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 2773768,
|
||||
"put_sum": 3627676976,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-mon": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 34504949,
|
||||
"get_sum": 226860281124,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 34504949,
|
||||
"put_sum": 226860281124,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}}}
|
||||
`
|
||||
|
||||
var osdPerfDump = `
{ "WBThrottle": { "bytes_dirtied": 28405539, "bytes_wb": 0,
    "ios_dirtied": 93, "ios_wb": 0,
    "inodes_dirtied": 86, "inodes_wb": 0},
  "filestore": { "journal_queue_max_ops": 0, "journal_queue_ops": 0,
    "journal_ops": 1108008,
    "journal_queue_max_bytes": 0, "journal_queue_bytes": 0,
    "journal_bytes": 73233416196,
    "journal_latency": { "avgcount": 1108008, "sum": 290.981036000},
    "journal_wr": 1091866,
    "journal_wr_bytes": { "avgcount": 1091866, "sum": 74925682688},
    "journal_full": 0,
    "committing": 0, "commitcycle": 110389,
    "commitcycle_interval": { "avgcount": 110389, "sum": 552132.109360000},
    "commitcycle_latency": { "avgcount": 110389, "sum": 178.657804000},
    "op_queue_max_ops": 50, "op_queue_ops": 0, "ops": 1108008,
    "op_queue_max_bytes": 104857600, "op_queue_bytes": 0, "bytes": 73226768148,
    "apply_latency": { "avgcount": 1108008, "sum": 947.742722000},
    "queue_transaction_latency_avg": { "avgcount": 1108008, "sum": 0.511327000}},
  "leveldb": { "leveldb_get": 4361221, "leveldb_transaction": 4351276,
    "leveldb_compact": 0, "leveldb_compact_range": 0,
    "leveldb_compact_queue_merge": 0, "leveldb_compact_queue_len": 0},
  "mutex-FileJournal::completions_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-FileJournal::finisher_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-FileJournal::write_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-FileJournal::writeq_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-JOS::ApplyManager::apply_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-JOS::ApplyManager::com_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-JOS::SubmitManager::lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-WBThrottle::lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "objecter": { "op_active": 0, "op_laggy": 0,
    "op_send": 0, "op_send_bytes": 0, "op_resend": 0, "op_ack": 0, "op_commit": 0,
    "op": 0, "op_r": 0, "op_w": 0, "op_rmw": 0, "op_pg": 0,
    "osdop_stat": 0, "osdop_create": 0, "osdop_read": 0, "osdop_write": 0,
    "osdop_writefull": 0, "osdop_append": 0, "osdop_zero": 0, "osdop_truncate": 0,
    "osdop_delete": 0, "osdop_mapext": 0, "osdop_sparse_read": 0,
    "osdop_clonerange": 0, "osdop_getxattr": 0, "osdop_setxattr": 0,
    "osdop_cmpxattr": 0, "osdop_rmxattr": 0, "osdop_resetxattrs": 0,
    "osdop_tmap_up": 0, "osdop_tmap_put": 0, "osdop_tmap_get": 0,
    "osdop_call": 0, "osdop_watch": 0, "osdop_notify": 0,
    "osdop_src_cmpxattr": 0, "osdop_pgls": 0, "osdop_pgls_filter": 0,
    "osdop_other": 0,
    "linger_active": 0, "linger_send": 0, "linger_resend": 0,
    "poolop_active": 0, "poolop_send": 0, "poolop_resend": 0,
    "poolstat_active": 0, "poolstat_send": 0, "poolstat_resend": 0,
    "statfs_active": 0, "statfs_send": 0, "statfs_resend": 0,
    "command_active": 0, "command_send": 0, "command_resend": 0,
    "map_epoch": 3300, "map_full": 0, "map_inc": 3293,
    "osd_sessions": 0, "osd_session_open": 0, "osd_session_close": 0,
    "osd_laggy": 0},
  "osd": { "opq": 0, "op_wip": 0,
    "op": 23939, "op_in_bytes": 1245903961, "op_out_bytes": 29103083856,
    "op_latency": { "avgcount": 23939, "sum": 440.192015000},
    "op_process_latency": { "avgcount": 23939, "sum": 30.170685000},
    "op_r": 23112, "op_r_out_bytes": 29103056146,
    "op_r_latency": { "avgcount": 23112, "sum": 19.373526000},
    "op_r_process_latency": { "avgcount": 23112, "sum": 14.625928000},
    "op_w": 549, "op_w_in_bytes": 1245804358,
    "op_w_rlat": { "avgcount": 549, "sum": 17.022299000},
    "op_w_latency": { "avgcount": 549, "sum": 418.494610000},
    "op_w_process_latency": { "avgcount": 549, "sum": 13.316555000},
    "op_rw": 278, "op_rw_in_bytes": 99603, "op_rw_out_bytes": 27710,
    "op_rw_rlat": { "avgcount": 278, "sum": 2.213785000},
    "op_rw_latency": { "avgcount": 278, "sum": 2.323879000},
    "op_rw_process_latency": { "avgcount": 278, "sum": 2.228202000},
    "subop": 1074774, "subop_in_bytes": 26841811636,
    "subop_latency": { "avgcount": 1074774, "sum": 745.509160000},
    "subop_w": 0, "subop_w_in_bytes": 26841811636,
    "subop_w_latency": { "avgcount": 1074774, "sum": 745.509160000},
    "subop_pull": 0,
    "subop_pull_latency": { "avgcount": 0, "sum": 0.000000000},
    "subop_push": 0, "subop_push_in_bytes": 0,
    "subop_push_latency": { "avgcount": 0, "sum": 0.000000000},
    "pull": 0,
    "push": 28, "push_out_bytes": 103483392,
    "push_in": 0, "push_in_bytes": 0,
    "recovery_ops": 15,
    "loadavg": 202, "buffer_bytes": 0,
    "numpg": 18, "numpg_primary": 8, "numpg_replica": 10, "numpg_stray": 0,
    "heartbeat_to_peers": 10, "heartbeat_from_peers": 0,
    "map_messages": 7413, "map_message_epochs": 9792, "map_message_epoch_dups": 10105,
    "messages_delayed_for_map": 83,
    "stat_bytes": 102123175936, "stat_bytes_used": 49961820160, "stat_bytes_avail": 52161355776,
    "copyfrom": 0,
    "tier_promote": 0, "tier_flush": 0, "tier_flush_fail": 0,
    "tier_try_flush": 0, "tier_try_flush_fail": 0,
    "tier_evict": 0, "tier_whiteout": 0, "tier_dirty": 230, "tier_clean": 0, "tier_delay": 0,
    "agent_wake": 0, "agent_skip": 0, "agent_flush": 0, "agent_evict": 0},
  "recoverystate_perf": { "initial_latency": { "avgcount": 473, "sum": 0.027207000},
    "started_latency": { "avgcount": 1480, "sum": 9854902.397648000},
    "reset_latency": { "avgcount": 1953, "sum": 0.096206000},
    "start_latency": { "avgcount": 1953, "sum": 0.059947000},
    "primary_latency": { "avgcount": 765, "sum": 4688922.186935000},
    "peering_latency": { "avgcount": 704, "sum": 1668.652135000},
    "backfilling_latency": { "avgcount": 0, "sum": 0.000000000},
    "waitremotebackfillreserved_latency": { "avgcount": 0, "sum": 0.000000000},
    "waitlocalbackfillreserved_latency": { "avgcount": 0, "sum": 0.000000000},
    "notbackfilling_latency": { "avgcount": 0, "sum": 0.000000000},
    "repnotrecovering_latency": { "avgcount": 462, "sum": 5158922.114600000},
    "repwaitrecoveryreserved_latency": { "avgcount": 15, "sum": 0.008275000},
    "repwaitbackfillreserved_latency": { "avgcount": 1, "sum": 0.000095000},
    "RepRecovering_latency": { "avgcount": 16, "sum": 2274.944727000},
    "activating_latency": { "avgcount": 514, "sum": 261.008520000},
    "waitlocalrecoveryreserved_latency": { "avgcount": 20, "sum": 0.175422000},
    "waitremoterecoveryreserved_latency": { "avgcount": 20, "sum": 0.682778000},
    "recovering_latency": { "avgcount": 20, "sum": 0.697551000},
    "recovered_latency": { "avgcount": 511, "sum": 0.011038000},
    "clean_latency": { "avgcount": 503, "sum": 4686961.154278000},
    "active_latency": { "avgcount": 506, "sum": 4687223.640464000},
    "replicaactive_latency": { "avgcount": 446, "sum": 5161197.078966000},
    "stray_latency": { "avgcount": 794, "sum": 4805.105128000},
    "getinfo_latency": { "avgcount": 704, "sum": 1138.477937000},
    "getlog_latency": { "avgcount": 678, "sum": 0.036393000},
    "waitactingchange_latency": { "avgcount": 69, "sum": 59.172893000},
    "incomplete_latency": { "avgcount": 0, "sum": 0.000000000},
    "getmissing_latency": { "avgcount": 609, "sum": 0.012288000},
    "waitupthru_latency": { "avgcount": 576, "sum": 530.106999000}},
  "throttle-filestore_bytes": { "val": 0, "max": 0,
    "get": 0, "get_sum": 0, "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0, "put": 0, "put_sum": 0,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-filestore_ops": { "val": 0, "max": 0,
    "get": 0, "get_sum": 0, "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0, "put": 0, "put_sum": 0,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-client": { "val": 0, "max": 104857600,
    "get": 130730, "get_sum": 1246039872,
    "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0,
    "put": 130730, "put_sum": 1246039872,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-cluster": { "val": 0, "max": 104857600,
    "get": 1108033, "get_sum": 71277949992,
    "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0,
    "put": 1108033, "put_sum": 71277949992,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-hb_back_server": { "val": 0, "max": 104857600,
    "get": 18320575, "get_sum": 861067025,
    "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0,
    "put": 18320575, "put_sum": 861067025,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-hb_front_server": { "val": 0, "max": 104857600,
    "get": 18320575, "get_sum": 861067025,
    "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0,
    "put": 18320575, "put_sum": 861067025,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-hbclient": { "val": 0, "max": 104857600,
    "get": 40479394, "get_sum": 1902531518,
    "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0,
    "put": 40479394, "put_sum": 1902531518,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-ms_objecter": { "val": 0, "max": 104857600,
    "get": 0, "get_sum": 0, "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0, "put": 0, "put_sum": 0,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-objecter_bytes": { "val": 0, "max": 104857600,
    "get": 0, "get_sum": 0, "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0, "put": 0, "put_sum": 0,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-objecter_ops": { "val": 0, "max": 1024,
    "get": 0, "get_sum": 0, "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0, "put": 0, "put_sum": 0,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-osd_client_bytes": { "val": 0, "max": 524288000,
    "get": 24241, "get_sum": 1241992581,
    "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0,
    "put": 25958, "put_sum": 1241992581,
    "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-osd_client_messages": { "val": 0, "max": 100,
    "get": 49214, "get_sum": 49214,
    "get_or_fail_fail": 0, "get_or_fail_success": 0,
    "take": 0, "take_sum": 0,
    "put": 49214, "put_sum": 49214,
    "wait": { "avgcount": 0, "sum": 0.000000000}}}
`

62 plugins/inputs/cgroup/README.md Normal file
@@ -0,0 +1,62 @@
# CGroup Input Plugin For Telegraf Agent

This input plugin captures specific statistics per cgroup.

If you have a large number of cgroups, consider restricting the
configured paths to the set of cgroups you really want to monitor,
to avoid cardinality issues.

The following file formats are supported (a field-naming example
follows the list):

* Single value

```
VAL\n
```

* Newline-separated values

```
VAL0\n
VAL1\n
```

* Space-separated values

```
VAL0 VAL1 ...\n
```

* Newline-separated key/value pairs (space-delimited)

```
KEY0 VAL0\n
KEY1 VAL1\n
```
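
Each parsed value becomes a field on the `cgroup` measurement, named
after the file it came from. For illustration (the naming below follows
this plugin's tests; the exact files present depend on your kernel and
mounted controllers):

```
memory.stat containing "cache 100\nrss 200\n"
  -> fields memory.stat.cache=100, memory.stat.rss=200

cpuacct.usage_percpu containing "11 22 33 44\n"
  -> fields cpuacct.usage_percpu.0=11 ... cpuacct.usage_percpu.3=44
```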

### Tags:

All measurements have the following tags:

- path

### Configuration:

```
# [[inputs.cgroup]]
#   paths = [
#     "/cgroup/memory",           # root cgroup
#     "/cgroup/memory/child1",    # container cgroup
#     "/cgroup/memory/child2/*",  # all children cgroups under child2, but not child2 itself
#   ]
#   files = ["memory.*usage*", "memory.limit_in_bytes"]

# [[inputs.cgroup]]
#   paths = [
#     "/cgroup/cpu",              # root cgroup
#     "/cgroup/cpu/*",            # all container cgroups
#     "/cgroup/cpu/*/*",          # all children cgroups under each container cgroup
#   ]
#   files = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
```
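
With a config like the first block above, the emitted measurement would
look roughly like this in InfluxDB line protocol (an illustrative sample
using values from this plugin's testdata, not output from a real host):

```
cgroup,path=/cgroup/memory memory.limit_in_bytes=223372036854771712i,memory.usage_in_bytes=3513667584i
```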
38 plugins/inputs/cgroup/cgroup.go Normal file
@@ -0,0 +1,38 @@
package cgroup

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type CGroup struct {
	Paths []string `toml:"paths"`
	Files []string `toml:"files"`
}

var sampleConfig = `
  ## Directories in which to look for files, globs are supported.
  ## Consider restricting paths to the set of cgroups you really
  ## want to monitor if you have a large number of cgroups, to avoid
  ## any cardinality issues.
  # paths = [
  #   "/cgroup/memory",
  #   "/cgroup/memory/child1",
  #   "/cgroup/memory/child2/*",
  # ]
  ## cgroup stat fields, as file names, globs are supported.
  ## These file names are appended to each path from above.
  # files = ["memory.*usage*", "memory.limit_in_bytes"]
`

func (g *CGroup) SampleConfig() string {
	return sampleConfig
}

func (g *CGroup) Description() string {
	return "Read specific statistics per cgroup"
}

func init() {
	inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
}
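
For reference, the sample config above renders into a telegraf.conf
stanza like the following (a minimal sketch; the `/cgroup/...` mount
points are illustrative and depend on the host):

```
[[inputs.cgroup]]
  paths = ["/cgroup/memory", "/cgroup/memory/child1"]
  files = ["memory.*usage*", "memory.limit_in_bytes"]
```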
244 plugins/inputs/cgroup/cgroup_linux.go Normal file
@@ -0,0 +1,244 @@
// +build linux

package cgroup

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"strconv"

	"github.com/influxdata/telegraf"
)

const metricName = "cgroup"

func (g *CGroup) Gather(acc telegraf.Accumulator) error {
	list := make(chan pathInfo)
	go g.generateDirs(list)

	for dir := range list {
		if dir.err != nil {
			return dir.err
		}
		if err := g.gatherDir(dir.path, acc); err != nil {
			return err
		}
	}

	return nil
}

func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
	fields := make(map[string]interface{})

	list := make(chan pathInfo)
	go g.generateFiles(dir, list)

	for file := range list {
		if file.err != nil {
			return file.err
		}

		raw, err := ioutil.ReadFile(file.path)
		if err != nil {
			return err
		}
		if len(raw) == 0 {
			continue
		}

		fd := fileData{data: raw, path: file.path}
		if err := fd.parse(fields); err != nil {
			return err
		}
	}

	tags := map[string]string{"path": dir}

	acc.AddFields(metricName, fields, tags)

	return nil
}

// ======================================================================

type pathInfo struct {
	path string
	err  error
}

func isDir(path string) (bool, error) {
	result, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return result.IsDir(), nil
}

func (g *CGroup) generateDirs(list chan<- pathInfo) {
	for _, dir := range g.Paths {
		// get all directories that match the pattern 'dir'
		items, err := filepath.Glob(dir)
		if err != nil {
			list <- pathInfo{err: err}
			return
		}

		for _, item := range items {
			ok, err := isDir(item)
			if err != nil {
				list <- pathInfo{err: err}
				return
			}
			// supply only directories
			if ok {
				list <- pathInfo{path: item}
			}
		}
	}
	close(list)
}

func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) {
	for _, file := range g.Files {
		// get all file paths that match the pattern 'dir + file';
		// path.Base makes sure the file variable does not contain any path components
		items, err := filepath.Glob(path.Join(dir, path.Base(file)))
		if err != nil {
			list <- pathInfo{err: err}
			return
		}

		for _, item := range items {
			ok, err := isDir(item)
			if err != nil {
				list <- pathInfo{err: err}
				return
			}
			// supply only files, not directories
			if !ok {
				list <- pathInfo{path: item}
			}
		}
	}
	close(list)
}

// ======================================================================

type fileData struct {
	data []byte
	path string
}

func (fd *fileData) format() (*fileFormat, error) {
	for _, ff := range fileFormats {
		ok, err := ff.match(fd.data)
		if err != nil {
			return nil, err
		}
		if ok {
			return &ff, nil
		}
	}

	return nil, fmt.Errorf("%v: unknown file format", fd.path)
}

func (fd *fileData) parse(fields map[string]interface{}) error {
	format, err := fd.format()
	if err != nil {
		return err
	}

	format.parser(filepath.Base(fd.path), fields, fd.data)
	return nil
}

// ======================================================================

type fileFormat struct {
	name    string
	pattern string
	parser  func(measurement string, fields map[string]interface{}, b []byte)
}

const keyPattern = "[[:alpha:]_]+"
const valuePattern = "[\\d-]+"

var fileFormats = [...]fileFormat{
	// VAL\n
	fileFormat{
		name:    "Single value",
		pattern: "^" + valuePattern + "\n$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("^(" + valuePattern + ")\n$")
			matches := re.FindAllStringSubmatch(string(b), -1)
			fields[measurement] = numberOrString(matches[0][1])
		},
	},
	// VAL0\n
	// VAL1\n
	// ...
	fileFormat{
		name:    "New line separated values",
		pattern: "^(" + valuePattern + "\n){2,}$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("(" + valuePattern + ")\n")
			matches := re.FindAllStringSubmatch(string(b), -1)
			for i, v := range matches {
				fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
			}
		},
	},
	// VAL0 VAL1 ...\n
	fileFormat{
		name:    "Space separated values",
		pattern: "^(" + valuePattern + " )+\n$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("(" + valuePattern + ") ")
			matches := re.FindAllStringSubmatch(string(b), -1)
			for i, v := range matches {
				fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
			}
		},
	},
	// KEY0 VAL0\n
	// KEY1 VAL1\n
	// ...
	fileFormat{
		name:    "New line separated key-space-value's",
		pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n")
			matches := re.FindAllStringSubmatch(string(b), -1)
			for _, v := range matches {
				fields[measurement+"."+v[1]] = numberOrString(v[2])
			}
		},
	},
}

func numberOrString(s string) interface{} {
	i, err := strconv.Atoi(s)
	if err == nil {
		return i
	}

	return s
}

func (f fileFormat) match(b []byte) (bool, error) {
	ok, err := regexp.Match(f.pattern, b)
	if err != nil {
		return false, err
	}
	if ok {
		return true, nil
	}
	return false, nil
}
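
To make the parsing flow concrete, here is a small sketch (not part of
this diff) of how fileData.parse fills the fields map; the buffer below
is made up, and the field keys take the file's base name as a prefix:

```
// Hypothetical usage of the types defined above.
fields := make(map[string]interface{})
fd := fileData{
	data: []byte("cache 100\nrss 200\n"), // "KEY VAL" format
	path: "testdata/memory/memory.stat",
}
if err := fd.parse(fields); err != nil {
	panic(err)
}
// fields now contains "memory.stat.cache": 100 and "memory.stat.rss": 200.
```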
11 plugins/inputs/cgroup/cgroup_notlinux.go Normal file
@@ -0,0 +1,11 @@
// +build !linux

package cgroup

import (
	"github.com/influxdata/telegraf"
)

func (g *CGroup) Gather(acc telegraf.Accumulator) error {
	return nil
}
182 plugins/inputs/cgroup/cgroup_test.go Normal file
@@ -0,0 +1,182 @@
// +build linux

package cgroup

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

var cg1 = &CGroup{
	Paths: []string{"testdata/memory"},
	Files: []string{
		"memory.empty",
		"memory.max_usage_in_bytes",
		"memory.limit_in_bytes",
		"memory.stat",
		"memory.use_hierarchy",
		"notify_on_release",
	},
}

func TestCgroupStatistics_1(t *testing.T) {
	var acc testutil.Accumulator

	err := cg1.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"path": "testdata/memory",
	}
	fields := map[string]interface{}{
		"memory.stat.cache":           1739362304123123123,
		"memory.stat.rss":             1775325184,
		"memory.stat.rss_huge":        778043392,
		"memory.stat.mapped_file":     421036032,
		"memory.stat.dirty":           -307200,
		"memory.max_usage_in_bytes.0": 0,
		"memory.max_usage_in_bytes.1": -1,
		"memory.max_usage_in_bytes.2": 2,
		"memory.limit_in_bytes":       223372036854771712,
		"memory.use_hierarchy":        "12-781",
		"notify_on_release":           0,
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================

var cg2 = &CGroup{
	Paths: []string{"testdata/cpu"},
	Files: []string{"cpuacct.usage_percpu"},
}

func TestCgroupStatistics_2(t *testing.T) {
	var acc testutil.Accumulator

	err := cg2.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"path": "testdata/cpu",
	}
	fields := map[string]interface{}{
		"cpuacct.usage_percpu.0": -1452543795404,
		"cpuacct.usage_percpu.1": 1376681271659,
		"cpuacct.usage_percpu.2": 1450950799997,
		"cpuacct.usage_percpu.3": -1473113374257,
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================

var cg3 = &CGroup{
	Paths: []string{"testdata/memory/*"},
	Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_3(t *testing.T) {
	var acc testutil.Accumulator

	err := cg3.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"path": "testdata/memory/group_1",
	}
	fields := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

	tags = map[string]string{
		"path": "testdata/memory/group_2",
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================

var cg4 = &CGroup{
	Paths: []string{"testdata/memory/*/*", "testdata/memory/group_2"},
	Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_4(t *testing.T) {
	var acc testutil.Accumulator

	err := cg4.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"path": "testdata/memory/group_1/group_1_1",
	}
	fields := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

	tags = map[string]string{
		"path": "testdata/memory/group_1/group_1_2",
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

	tags = map[string]string{
		"path": "testdata/memory/group_2",
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================

var cg5 = &CGroup{
	Paths: []string{"testdata/memory/*/group_1_1"},
	Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_5(t *testing.T) {
	var acc testutil.Accumulator

	err := cg5.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"path": "testdata/memory/group_1/group_1_1",
	}
	fields := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

	tags = map[string]string{
		"path": "testdata/memory/group_2/group_1_1",
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================

var cg6 = &CGroup{
	Paths: []string{"testdata/memory"},
	Files: []string{"memory.us*", "*/memory.kmem.*"},
}

func TestCgroupStatistics_6(t *testing.T) {
	var acc testutil.Accumulator

	err := cg6.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"path": "testdata/memory",
	}
	fields := map[string]interface{}{
		"memory.usage_in_bytes":      3513667584,
		"memory.use_hierarchy":       "12-781",
		"memory.kmem.limit_in_bytes": 9223372036854771712,
	}
	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
1 plugins/inputs/cgroup/testdata/blkio/blkio.io_serviced vendored Normal file
@@ -0,0 +1 @@
Total 0
131 plugins/inputs/cgroup/testdata/blkio/blkio.throttle.io_serviced vendored Normal file
@@ -0,0 +1,131 @@
11:0 Read 0
11:0 Write 0
11:0 Sync 0
11:0 Async 0
11:0 Total 0
8:0 Read 49134
8:0 Write 216703
8:0 Sync 177906
8:0 Async 87931
8:0 Total 265837
7:7 Read 0
7:7 Write 0
7:7 Sync 0
7:7 Async 0
7:7 Total 0
7:6 Read 0
7:6 Write 0
7:6 Sync 0
7:6 Async 0
7:6 Total 0
7:5 Read 0
7:5 Write 0
7:5 Sync 0
7:5 Async 0
7:5 Total 0
7:4 Read 0
7:4 Write 0
7:4 Sync 0
7:4 Async 0
7:4 Total 0
7:3 Read 0
7:3 Write 0
7:3 Sync 0
7:3 Async 0
7:3 Total 0
7:2 Read 0
7:2 Write 0
7:2 Sync 0
7:2 Async 0
7:2 Total 0
7:1 Read 0
7:1 Write 0
7:1 Sync 0
7:1 Async 0
7:1 Total 0
7:0 Read 0
7:0 Write 0
7:0 Sync 0
7:0 Async 0
7:0 Total 0
1:15 Read 3
1:15 Write 0
1:15 Sync 0
1:15 Async 3
1:15 Total 3
1:14 Read 3
1:14 Write 0
1:14 Sync 0
1:14 Async 3
1:14 Total 3
1:13 Read 3
1:13 Write 0
1:13 Sync 0
1:13 Async 3
1:13 Total 3
1:12 Read 3
1:12 Write 0
1:12 Sync 0
1:12 Async 3
1:12 Total 3
1:11 Read 3
1:11 Write 0
1:11 Sync 0
1:11 Async 3
1:11 Total 3
1:10 Read 3
1:10 Write 0
1:10 Sync 0
1:10 Async 3
1:10 Total 3
1:9 Read 3
1:9 Write 0
1:9 Sync 0
1:9 Async 3
1:9 Total 3
1:8 Read 3
1:8 Write 0
1:8 Sync 0
1:8 Async 3
1:8 Total 3
1:7 Read 3
1:7 Write 0
1:7 Sync 0
1:7 Async 3
1:7 Total 3
1:6 Read 3
1:6 Write 0
1:6 Sync 0
1:6 Async 3
1:6 Total 3
1:5 Read 3
1:5 Write 0
1:5 Sync 0
1:5 Async 3
1:5 Total 3
1:4 Read 3
1:4 Write 0
1:4 Sync 0
1:4 Async 3
1:4 Total 3
1:3 Read 3
1:3 Write 0
1:3 Sync 0
1:3 Async 3
1:3 Total 3
1:2 Read 3
1:2 Write 0
1:2 Sync 0
1:2 Async 3
1:2 Total 3
1:1 Read 3
1:1 Write 0
1:1 Sync 0
1:1 Async 3
1:1 Total 3
1:0 Read 3
1:0 Write 0
1:0 Sync 0
1:0 Async 3
1:0 Total 3
Total 265885
1 plugins/inputs/cgroup/testdata/cpu/cpu.cfs_quota_us vendored Normal file
@@ -0,0 +1 @@
-1
1 plugins/inputs/cgroup/testdata/cpu/cpuacct.usage_percpu vendored Normal file
@@ -0,0 +1 @@
-1452543795404 1376681271659 1450950799997 -1473113374257
1 plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712
5 plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
1 plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712
5 plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
1 plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
9223372036854771712
Some files were not shown because too many files have changed in this diff.