Compare commits: v0.3.0-bet...1.5.0-rc2

1,806 commits

[Commit table (Author | SHA1 | Date) omitted: only abbreviated commit SHAs survived extraction, with no author, date, or message data, so the 1,806 rows are not reproduced here.]

.gitattributes (vendored, new file, +4)

@@ -0,0 +1,4 @@
+CHANGELOG.md merge=union
+README.md merge=union
+plugins/inputs/all/all.go merge=union
+plugins/outputs/all/all.go merge=union

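The `merge=union` attribute tells Git to resolve conflicts in these files by keeping the lines from both branches instead of emitting conflict markers, which suits append-only files such as the changelog and the generated plugin registries. As a minimal sketch (the plugin names are hypothetical), two branches that each register one input in `plugins/inputs/all/all.go` would union-merge into:

```go
package all

import (
    // A union merge keeps the registration lines added on both branches
    // side by side rather than raising a conflict. "bar" and "foo" are
    // hypothetical plugin names, for illustration only.
    _ "github.com/influxdata/telegraf/plugins/inputs/bar"
    _ "github.com/influxdata/telegraf/plugins/inputs/foo"
)
```

Union merging is line-based and keeps both sides blindly, so it is only safe for files like these where each line is independent.
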
.github/ISSUE_TEMPLATE.md (vendored, new file, +44)

@@ -0,0 +1,44 @@
+## Directions
+
+GitHub Issues are reserved for actionable bug reports and feature requests.
+General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
+
+Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
+If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
+Erase the other section and everything on and above this line.
+
+*Please note, the quickest way to fix a bug is to open a Pull Request.*
+
+## Bug report
+
+### Relevant telegraf.conf:
+
+### System info:
+
+[Include Telegraf version, operating system name, and other relevant details]
+
+### Steps to reproduce:
+
+1. ...
+2. ...
+
+### Expected behavior:
+
+### Actual behavior:
+
+### Additional info:
+
+[Include gist of relevant config, logs, etc.]
+
+
+## Feature Request
+
+Opening a feature request kicks off a discussion.
+
+### Proposal:
+
+### Current behavior:
+
+### Desired behavior:
+
+### Use case: [Why is this important (helps with prioritizing requests)]

.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, +5)

@@ -0,0 +1,5 @@
+### Required for all PRs:
+
+- [ ] Signed [CLA](https://influxdata.com/community/cla/).
+- [ ] Associated README.md updated.
+- [ ] Has appropriate unit tests.

.gitignore (vendored, 5 changes)

@@ -1,4 +1,7 @@
 build
 tivan
 .vagrant
-telegraf
+/telegraf
+.idea
+*~
+*#

CHANGELOG.md (1,489 changes)

File diff suppressed because it is too large.

CONTRIBUTING.md (501 changes)

@@ -1,103 +1,74 @@
+## Steps for Contributing:
+
+1. [Sign the CLA](http://influxdb.com/community/cla.html)
+1. Make changes or write plugin (see below for details)
+1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go`
+1. If your plugin requires a new Go package,
+[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
+1. Write a README for your plugin, if it's an input plugin, it should be structured
+like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
+Output plugins READMEs are less structured,
+but any information you can provide on how the data will look is appreciated.
+See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
+for a good example.
+1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
+1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
+
+## GoDoc
+
+Public interfaces for inputs, outputs, processors, aggregators, metrics,
+and the accumulator can be found on the GoDoc
+
+[](https://godoc.org/github.com/influxdata/telegraf)
+
 ## Sign the CLA
 
 Before we can merge a pull request, you will need to sign the CLA,
 which can be found [on our website](http://influxdb.com/community/cla.html)
 
-## Plugins
+## Adding a dependency
 
-This section is for developers who want to create new collection plugins.
+Assuming you can already build the project, run these in the telegraf directory:
+
+1. `go get github.com/sparrc/gdm`
+1. `gdm restore`
+1. `GOOS=linux gdm save`
+
+## Input Plugins
+
+This section is for developers who want to create new collection inputs.
 Telegraf is entirely plugin driven. This interface allows for operators to
-pick and chose what is gathered as well as makes it easy for developers
+pick and chose what is gathered and makes it easy for developers
 to create new ways of generating metrics.
 
 Plugin authorship is kept as simple as possible to promote people to develop
-and submit new plugins.
+and submit new inputs.
 
-### Plugin Guidelines
+### Input Plugin Guidelines
 
-* A plugin must conform to the `plugins.Plugin` interface.
-* Each generated metric automatically has the name of the plugin that generated
-it prepended. This is to keep plugins honest.
-* Plugins should call `plugins.Add` in their `init` function to register themselves.
+* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface.
+* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
 See below for a quick example.
-* To be available within Telegraf itself, plugins must add themselves to the
-`github.com/influxdb/telegraf/plugins/all/all.go` file.
+* Input Plugins must be added to the
+`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
 * The `SampleConfig` function should return valid toml that describes how the
-plugin can be configured. This is include in `telegraf -sample-config`.
+plugin can be configured. This is include in `telegraf config`.
 * The `Description` function should say in one line what this plugin does.
 
-### Plugin interface
-
-```go
-type Plugin interface {
-    SampleConfig() string
-    Description() string
-    Gather(Accumulator) error
-}
-
-type Accumulator interface {
-    Add(measurement string,
-        value interface{},
-        tags map[string]string,
-        timestamp ...time.Time)
-    AddFields(measurement string,
-        fields map[string]interface{},
-        tags map[string]string,
-        timestamp ...time.Time)
-}
-```
-
-### Accumulator
-
-The way that a plugin emits metrics is by interacting with the Accumulator.
-
-The `Add` function takes 3 arguments:
-* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
-* **value**: A value for the metric. This accepts 5 different types of value:
-  * **int**: The most common type. All int types are accepted but favor using `int64`
-    Useful for counters, etc.
-  * **float**: Favor `float64`, useful for gauges, percentages, etc.
-  * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
-  * **string**: Typically used to indicate a message, or some kind of freeform information.
-  * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
-* **tags**: This is a map of strings to strings to describe the where or who
-about the metric. For instance, the `net` plugin adds a tag named `"interface"`
-set to the name of the network interface, like `"eth0"`.
-
-The `AddFieldsWithTime` allows multiple values for a point to be passed. The values
-used are the same type profile as **value** above. The **timestamp** argument
-allows a point to be registered as having occurred at an arbitrary time.
-
-Let's say you've written a plugin that emits metrics about processes on the current host.
-
-```go
-
-type Process struct {
-    CPUTime float64
-    MemoryBytes int64
-    PID int
-}
-
-func Gather(acc plugins.Accumulator) error {
-    for _, process := range system.Processes() {
-        tags := map[string]string {
-            "pid": fmt.Sprintf("%d", process.Pid),
-        }
-
-        acc.Add("cpu", process.CPUTime, tags, time.Now())
-        acc.Add("memory", process.MemoryBytes, tags, time.Now())
-    }
-}
-```
-
-### Plugin Example
+Let's say you've written a plugin that emits metrics about processes on the
+current host.
+
+### Input Plugin Example
 
 ```go
 package simple
 
 // simple.go
 
-import "github.com/influxdb/telegraf/plugins"
+import (
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
+)
 
 type Simple struct {
     Ok bool

@@ -111,30 +82,83 @@ func (s *Simple) SampleConfig() string {
     return "ok = true # indicate if everything is fine"
 }
 
-func (s *Simple) Gather(acc plugins.Accumulator) error {
+func (s *Simple) Gather(acc telegraf.Accumulator) error {
     if s.Ok {
-        acc.Add("state", "pretty good", nil)
+        acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
     } else {
-        acc.Add("state", "not great", nil)
+        acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
     }
 
     return nil
 }
 
 func init() {
-    plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
+    inputs.Add("simple", func() telegraf.Input { return &Simple{} })
 }
 ```
 
-## Service Plugins
+## Adding Typed Metrics
+
+In addition the the `AddFields` function, the accumulator also supports an
+`AddGauge` and `AddCounter` function. These functions are for adding _typed_
+metrics. Metric types are ignored for the InfluxDB output, but can be used
+for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
+
+## Input Plugins Accepting Arbitrary Data Formats
+
+Some input plugins (such as
+[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec))
+accept arbitrary input data formats. An overview of these data formats can
+be found
+[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
+
+In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
+function on the plugin object (see the exec plugin for an example), as well as
+defining `parser` as a field of the object.
+
+You can then utilize the parser internally in your plugin, parsing data as you
+see fit. Telegraf's configuration layer will take care of instantiating and
+creating the `Parser` object.
+
+You should also add the following to your SampleConfig() return:
+
+```toml
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
+
+Below is the `Parser` interface.
+
+```go
+// Parser is an interface defining functions that a parser plugin must satisfy.
+type Parser interface {
+    // Parse takes a byte buffer separated by newlines
+    // ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
+    // and parses it into telegraf metrics
+    Parse(buf []byte) ([]telegraf.Metric, error)
+
+    // ParseLine takes a single string metric
+    // ie, "cpu.usage.idle 90"
+    // and parses it into a telegraf metric.
+    ParseLine(line string) (telegraf.Metric, error)
+}
+```
+
+And you can view the code
+[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go)
+
+## Service Input Plugins
 
 This section is for developers who want to create new "service" collection
-plugins. A service plugin differs from a regular plugin in that it operates
+inputs. A service plugin differs from a regular plugin in that it operates
 a background service while Telegraf is running. One example would be the `statsd`
 plugin, which operates a statsd server.
 
-Service Plugins are substantially more complicated than a regular plugin, as they
-will require threads and locks to verify data integrity. Service Plugins should
+Service Input Plugins are substantially more complicated than a regular plugin, as they
+will require threads and locks to verify data integrity. Service Input Plugins should
 be avoided unless there is no way to create their behavior with a regular plugin.
 
 Their interface is quite similar to a regular plugin, with the addition of `Start()`

@@ -143,49 +167,25 @@ and `Stop()` methods.
 ### Service Plugin Guidelines
 
 * Same as the `Plugin` guidelines, except that they must conform to the
-`plugins.ServicePlugin` interface.
+`inputs.ServiceInput` interface.
 
-### Service Plugin interface
-
-```go
-type ServicePlugin interface {
-    SampleConfig() string
-    Description() string
-    Gather(Accumulator) error
-    Start() error
-    Stop()
-}
-```
-
-## Outputs
+## Output Plugins
 
 This section is for developers who want to create a new output sink. Outputs
 are created in a similar manner as collection plugins, and their interface has
 similar constructs.
 
-### Output Guidelines
+### Output Plugin Guidelines
 
-* An output must conform to the `outputs.Output` interface.
+* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface.
 * Outputs should call `outputs.Add` in their `init` function to register themselves.
 See below for a quick example.
 * To be available within Telegraf itself, plugins must add themselves to the
-`github.com/influxdb/telegraf/outputs/all/all.go` file.
+`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
 * The `SampleConfig` function should return valid toml that describes how the
-output can be configured. This is include in `telegraf -sample-config`.
+output can be configured. This is include in `telegraf config`.
 * The `Description` function should say in one line what this output does.
 
-### Output interface
-
-```go
-type Output interface {
-    Connect() error
-    Close() error
-    Description() string
-    SampleConfig() string
-    Write(points []*client.Point) error
-}
-```
-
 ### Output Example
 
 ```go

@@ -193,7 +193,10 @@ package simpleoutput
 
 // simpleoutput.go
 
-import "github.com/influxdb/telegraf/outputs"
+import (
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/outputs"
+)
 
 type Simple struct {
     Ok bool

@@ -217,20 +220,47 @@ func (s *Simple) Close() error {
     return nil
 }
 
-func (s *Simple) Write(points []*client.Point) error {
-    for _, pt := range points {
-        // write `pt` to the output sink here
+func (s *Simple) Write(metrics []telegraf.Metric) error {
+    for _, metric := range metrics {
+        // write `metric` to the output sink here
     }
     return nil
 }
 
 func init() {
-    outputs.Add("simpleoutput", func() outputs.Output { return &Simple{} })
+    outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
 }
 
 ```
 
-## Service Outputs
+## Output Plugins Writing Arbitrary Data Formats
+
+Some output plugins (such as
+[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
+can write arbitrary output data formats. An overview of these data formats can
+be found
+[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
+
+In order to enable this, you must specify a
+`SetSerializer(serializer serializers.Serializer)`
+function on the plugin object (see the file plugin for an example), as well as
+defining `serializer` as a field of the object.
+
+You can then utilize the serializer internally in your plugin, serializing data
+before it's written. Telegraf's configuration layer will take care of
+instantiating and creating the `Serializer` object.
+
+You should also add the following to your SampleConfig() return:
+
+```toml
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+```
+
+## Service Output Plugins
 
 This section is for developers who want to create new "service" output. A
 service output differs from a regular output in that it operates a background service

@@ -243,51 +273,212 @@ and `Stop()` methods.
|
||||
### Service Output Guidelines
|
||||
|
||||
* Same as the `Output` guidelines, except that they must conform to the
|
||||
`plugins.ServiceOutput` interface.
|
||||
`output.ServiceOutput` interface.
|
||||
|
||||
### Service Output interface
|
||||
## Processor Plugins
|
||||
|
||||
This section is for developers who want to create a new processor plugin.
|
||||
|
||||
### Processor Plugin Guidelines
|
||||
|
||||
* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface.
|
||||
* Processors should call `processors.Add` in their `init` function to register themselves.
|
||||
See below for a quick example.
|
||||
* To be available within Telegraf itself, plugins must add themselves to the
|
||||
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
|
||||
* The `SampleConfig` function should return valid toml that describes how the
|
||||
processor can be configured. This is include in the output of `telegraf config`.
|
||||
* The `Description` function should say in one line what this processor does.
|
||||
|
||||
### Processor Example
|
||||
|
||||
```go
|
||||
type ServiceOutput interface {
|
||||
Connect() error
|
||||
Close() error
|
||||
Description() string
|
||||
SampleConfig() string
|
||||
Write(points []*client.Point) error
|
||||
Start() error
|
||||
Stop()
|
||||
package printer
|
||||
|
||||
// printer.go
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/processors"
|
||||
)
|
||||
|
||||
type Printer struct {
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
`
|
||||
|
||||
func (p *Printer) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (p *Printer) Description() string {
|
||||
return "Print all metrics that pass through this filter."
|
||||
}
|
||||
|
||||
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
||||
for _, metric := range in {
|
||||
fmt.Println(metric.String())
|
||||
}
|
||||
return in
|
||||
}
|
||||
|
||||
func init() {
|
||||
processors.Add("printer", func() telegraf.Processor {
|
||||
return &Printer{}
|
||||
})
|
||||
}
|
||||
```

## Aggregator Plugins

This section is for developers who want to create a new aggregator plugin.

### Aggregator Plugin Guidelines

* An aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface.
* Aggregators should call `aggregators.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this aggregator does.
* The aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
metric.
* When the `Reset()` function is called, all caches should be cleared.

### Aggregator Example

```go
package min

// min.go

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"
)

type Min struct {
	// caches for metric fields, names, and tags
	fieldCache map[uint64]map[string]float64
	nameCache  map[uint64]string
	tagCache   map[uint64]map[string]string
}

func NewMin() telegraf.Aggregator {
	m := &Min{}
	m.Reset()
	return m
}

var sampleConfig = `
  ## period is the flush & clear interval of the aggregator.
  period = "30s"
  ## If true drop_original will drop the original metrics and
  ## only send aggregates.
  drop_original = false
`

func (m *Min) SampleConfig() string {
	return sampleConfig
}

func (m *Min) Description() string {
	return "Keep the aggregate min of each metric passing through."
}

func (m *Min) Add(in telegraf.Metric) {
	id := in.HashID()
	if _, ok := m.nameCache[id]; !ok {
		// hit an uncached metric, create caches for first time:
		m.nameCache[id] = in.Name()
		m.tagCache[id] = in.Tags()
		m.fieldCache[id] = make(map[string]float64)
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				m.fieldCache[id][k] = fv
			}
		}
	} else {
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				if _, ok := m.fieldCache[id][k]; !ok {
					// hit an uncached field of a cached metric
					m.fieldCache[id][k] = fv
					continue
				}
				if fv < m.fieldCache[id][k] {
					// set new minimum
					m.fieldCache[id][k] = fv
				}
			}
		}
	}
}

func (m *Min) Push(acc telegraf.Accumulator) {
	for id := range m.nameCache {
		fields := map[string]interface{}{}
		for k, v := range m.fieldCache[id] {
			fields[k+"_min"] = v
		}
		acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
	}
}

func (m *Min) Reset() {
	m.fieldCache = make(map[uint64]map[string]float64)
	m.nameCache = make(map[uint64]string)
	m.tagCache = make(map[uint64]map[string]string)
}

func convert(in interface{}) (float64, bool) {
	switch v := in.(type) {
	case float64:
		return v, true
	case int64:
		return float64(v), true
	default:
		return 0, false
	}
}

func init() {
	aggregators.Add("min", func() telegraf.Aggregator {
		return NewMin()
	})
}
```
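
Once registered, the aggregator is configured like any other plugin; a sketch
of the configuration matching the sample above:

```toml
[[aggregators.min]]
  ## period is the flush & clear interval of the aggregator.
  period = "30s"
  ## If true drop_original will drop the original metrics and
  ## only send aggregates.
  drop_original = false
```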

## Unit Tests

Before opening a pull request you should run the linter checks and
the short tests.

### Execute linter

execute `make lint`

### Execute short tests

execute `make test-short`

### Execute integration tests

As Telegraf collects metrics from several third-party services, it becomes a
difficult task to mock each service, as some of them have complicated protocols
which would take some time to replicate. To overcome this situation we use
docker containers to provide a fast and reproducible environment to test those
services which require it. For other situations
(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go )
a simple mock will suffice.

Running the integration tests requires several docker containers to be
running ([install docker](https://docs.docker.com/installation/) first).
You can start the containers with:
```
make docker-run
```
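
Individual integration tests are usually guarded so that the short suite can
skip them; a sketch of the common pattern (the package and test names are
hypothetical):

```go
package redis

import "testing"

func TestRedisGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	// ...connect to the redis container started by `make docker-run`...
}
```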

And run the full test suite with:
```
make test-all
```

Use `make docker-kill` to stop the containers.

### Unit test troubleshooting

Try cleaning up your test environment by executing `make docker-kill` and
re-running.

Godeps
@@ -1,52 +1,92 @@
|
||||
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
|
||||
github.com/Shopify/sarama 159e9990b0796511607dd0d7aaa3eb37d1829d16
|
||||
github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81
|
||||
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
|
||||
github.com/armon/go-metrics 06b60999766278efd6d2b5d8418a58c3d5b99e87
|
||||
github.com/aws/aws-sdk-go 999b1591218c36d5050d1ba7266eba956e65965f
|
||||
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
|
||||
github.com/boltdb/bolt b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0
|
||||
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
|
||||
github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e
|
||||
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
|
||||
github.com/eapache/go-resiliency f341fb4dca45128e4aa86389fa6a675d55fe25e1
|
||||
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
|
||||
github.com/fsouza/go-dockerclient 7177a9e3543b0891a5d91dbf7051e0f71455c8ef
|
||||
github.com/go-ini/ini 9314fb0ef64171d6a3d0a4fa570dfa33441cba05
|
||||
github.com/go-sql-driver/mysql d512f204a577a4ab037a1816604c48c9c13210be
|
||||
github.com/gogo/protobuf e492fd34b12d0230755c45aa5fb1e1eea6a84aa9
|
||||
github.com/golang/protobuf 68415e7123da32b07eab49c96d2c4d6158360e9b
|
||||
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
|
||||
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
|
||||
github.com/hailocab/go-hostpool 0637eae892be221164aff5fcbccc57171aea6406
|
||||
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
|
||||
github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294
|
||||
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
|
||||
github.com/influxdb/influxdb 69a7664f2d4b75aec300b7cbfc7e57c971721f04
|
||||
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
|
||||
github.com/klauspost/crc32 0aff1ea9c20474c3901672b5b6ead0ac611156de
|
||||
github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9
|
||||
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
|
||||
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
|
||||
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
|
||||
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
|
||||
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
|
||||
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
|
||||
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
|
||||
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
|
||||
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
|
||||
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
|
||||
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
|
||||
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
|
||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
|
||||
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
|
||||
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
|
||||
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
||||
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
|
||||
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
|
||||
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
|
||||
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
|
||||
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
||||
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
|
||||
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
|
||||
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
|
||||
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
|
||||
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
|
||||
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
|
||||
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
|
||||
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
|
||||
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
||||
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
|
||||
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
|
||||
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
|
||||
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
|
||||
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
|
||||
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
||||
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
|
||||
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
|
||||
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
|
||||
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
|
||||
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
|
||||
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
|
||||
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
|
||||
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
||||
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
|
||||
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
|
||||
github.com/pborman/uuid cccd189d45f7ac3368a0d127efb7f4d08ae0b655
|
||||
github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4
|
||||
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
|
||||
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
|
||||
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
|
||||
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
|
||||
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
|
||||
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
|
||||
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
|
||||
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
|
||||
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
|
||||
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
|
||||
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
|
||||
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
|
||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
github.com/prometheus/common 56b90312e937d43b930f06a59bf0d6a4ae1944bc
|
||||
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
|
||||
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
|
||||
github.com/shirou/gopsutil fc932d9090f13a84fb4b3cb8baa124610cab184c
|
||||
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
||||
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
|
||||
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
|
||||
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
||||
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
|
||||
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
||||
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
|
||||
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
||||
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
|
||||
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
|
||||
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
|
||||
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
|
||||
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
|
||||
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
|
||||
github.com/stretchr/testify e3a8ff8ce36581f87a15341206f205b1da467059
|
||||
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
|
||||
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
||||
golang.org/x/crypto 7b85b097bf7527677d54d3220065e966a0e3b613
|
||||
golang.org/x/net 1796f9b8b7178e3c7587dff118d3bb9d37f9b0b3
|
||||
gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e
|
||||
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
|
||||
gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
|
||||
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
|
||||
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
|
||||
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
|
||||
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
|
||||
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
|
||||
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
|
||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
||||
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
|
||||
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
|
||||
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
||||
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
|
||||
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
|
||||
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
|
||||
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
|
||||
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
|
||||
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
|
||||
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
|
||||
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
|
||||
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
|
||||
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
# List
|
||||
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
|
||||
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
|
||||
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
|
||||
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
|
||||
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
|
||||
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
|
||||
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
|
||||
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
|
||||
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
|
||||
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
|
||||
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
|
||||
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
|
||||
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
|
||||
- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
|
||||
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
|
||||
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
|
||||
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
|
||||
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
|
||||
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
|
||||
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
|
||||
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
|
||||
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
|
||||
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
|
||||
- github.com/stretchr/objx [MIT LICENSE](https://github.com/stretchr/objx)
|
||||
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
|
||||
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
|
||||
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
||||
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
||||
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
||||
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
||||
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)
|
||||
|
||||
Makefile
@@ -1,98 +1,133 @@
|
||||
UNAME := $(shell sh -c 'uname')
|
||||
VERSION := $(shell sh -c 'git describe --always --tags')
|
||||
PREFIX := /usr/local
|
||||
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
|
||||
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
|
||||
COMMIT := $(shell git rev-parse --short HEAD)
|
||||
ifdef GOBIN
|
||||
PATH := $(GOBIN):$(PATH)
|
||||
else
|
||||
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
|
||||
endif
|
||||
|
||||
# Standard Telegraf build
|
||||
default: prepare build
|
||||
TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)
|
||||
|
||||
# Only run the build (no dependency grabbing)
|
||||
build:
|
||||
go build -o telegraf -ldflags \
|
||||
"-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
|
||||
ifdef VERSION
|
||||
LDFLAGS += -X main.version=$(VERSION)
|
||||
endif
|
||||
|
||||
# Build with race detector
|
||||
dev: prepare
|
||||
go build -race -o telegraf -ldflags \
|
||||
"-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
all:
|
||||
$(MAKE) deps
|
||||
$(MAKE) telegraf
|
||||
|
||||
# Build linux 64-bit, 32-bit and arm architectures
|
||||
build-linux-bins: prepare
|
||||
GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
|
||||
# Get dependencies and use gdm to checkout changesets
|
||||
prepare:
|
||||
go get ./...
|
||||
deps:
|
||||
go get github.com/sparrc/gdm
|
||||
gdm restore
|
||||
|
||||
# Run all docker containers necessary for unit tests
|
||||
telegraf:
|
||||
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
|
||||
|
||||
go-install:
|
||||
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
|
||||
|
||||
install: telegraf
|
||||
mkdir -p $(DESTDIR)$(PREFIX)/bin/
|
||||
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
|
||||
|
||||
test:
|
||||
go test -short ./...
|
||||
|
||||
test-windows:
|
||||
go test ./plugins/inputs/ping/...
|
||||
go test ./plugins/inputs/win_perf_counters/...
|
||||
go test ./plugins/inputs/win_services/...
|
||||
|
||||
lint:
|
||||
go vet ./...
|
||||
|
||||
test-all: lint
|
||||
go test ./...
|
||||
|
||||
package:
|
||||
./scripts/build.py --package --platform=all --arch=all
|
||||
|
||||
clean:
|
||||
-rm -f telegraf
|
||||
-rm -f telegraf.exe
|
||||
|
||||
docker-image:
|
||||
./scripts/build.py --package --platform=linux --arch=amd64
|
||||
cp build/telegraf*$(COMMIT)*.deb .
|
||||
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
|
||||
|
||||
# Run all docker containers necessary for integration tests
|
||||
docker-run:
|
||||
ifeq ($(UNAME), Darwin)
|
||||
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
|
||||
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
|
||||
docker run --name kafka \
|
||||
-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
|
||||
-e ADVERTISED_PORT=9092 \
|
||||
-p "2181:2181" -p "9092:9092" \
|
||||
-d spotify/kafka
|
||||
endif
|
||||
ifeq ($(UNAME), Linux)
|
||||
docker run --name kafka \
|
||||
-e ADVERTISED_HOST=localhost \
|
||||
-e ADVERTISED_PORT=9092 \
|
||||
-p "2181:2181" -p "9092:9092" \
|
||||
-d spotify/kafka
|
||||
endif
|
||||
--link zookeeper:zookeeper \
|
||||
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
|
||||
-e KAFKA_ADVERTISED_PORT=9092 \
|
||||
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
|
||||
-e KAFKA_CREATE_TOPICS="test:1:1" \
|
||||
-p "9092:9092" \
|
||||
-d wurstmeister/kafka
|
||||
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
|
||||
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
|
||||
docker run --name memcached -p "11211:11211" -d memcached
|
||||
docker run --name postgres -p "5432:5432" -d postgres
|
||||
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
|
||||
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
||||
docker run --name redis -p "6379:6379" -d redis
|
||||
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
|
||||
docker run --name nats -p "4222:4222" -d nats
|
||||
docker run --name openldap \
|
||||
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
|
||||
-e SLAPD_CONFIG_ROOTPW="secret" \
|
||||
-p "389:389" -p "636:636" \
|
||||
-d cobaugh/openldap-alpine
|
||||
docker run --name cratedb \
|
||||
-p "6543:5432" \
|
||||
-d crate crate \
|
||||
-Cnetwork.host=0.0.0.0 \
|
||||
-Ctransport.host=localhost \
|
||||
-Clicense.enterprise=false
|
||||
|
||||
# Run docker containers necessary for CircleCI unit tests
|
||||
# Run docker containers necessary for integration tests; skipping services provided
|
||||
# by CircleCI
|
||||
docker-run-circle:
|
||||
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
|
||||
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
|
||||
docker run --name kafka \
|
||||
-e ADVERTISED_HOST=localhost \
|
||||
-e ADVERTISED_PORT=9092 \
|
||||
-p "2181:2181" -p "9092:9092" \
|
||||
-d spotify/kafka
|
||||
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
||||
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||
--link zookeeper:zookeeper \
|
||||
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
|
||||
-e KAFKA_ADVERTISED_PORT=9092 \
|
||||
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
|
||||
-e KAFKA_CREATE_TOPICS="test:1:1" \
|
||||
-p "9092:9092" \
|
||||
-d wurstmeister/kafka
|
||||
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
|
||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
|
||||
docker run --name nats -p "4222:4222" -d nats
|
||||
docker run --name openldap \
|
||||
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
|
||||
-e SLAPD_CONFIG_ROOTPW="secret" \
|
||||
-p "389:389" -p "636:636" \
|
||||
-d cobaugh/openldap-alpine
|
||||
docker run --name cratedb \
|
||||
-p "6543:5432" \
|
||||
-d crate crate \
|
||||
-Cnetwork.host=0.0.0.0 \
|
||||
-Ctransport.host=localhost \
|
||||
-Clicense.enterprise=false
|
||||
|
||||
# Kill all docker containers, ignore errors
|
||||
docker-kill:
|
||||
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||
-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
|
||||
openldap postgres rabbitmq redis riemann zookeeper cratedb
|
||||
-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
|
||||
openldap postgres rabbitmq redis riemann zookeeper cratedb
|
||||
|
||||
# Run full unit tests using docker containers (includes setup and teardown)
|
||||
test: docker-kill docker-run
|
||||
# Sleeping for kafka leadership election, TSDB setup, etc.
|
||||
sleep 60
|
||||
# SUCCESS, running tests
|
||||
go test -race ./...
|
||||
|
||||
# Run "short" unit tests
|
||||
test-short:
|
||||
go test -short ./...
|
||||
|
||||
.PHONY: test
|
||||
.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
|
||||
package clean docker-run docker-run-circle docker-kill docker-image
|
||||
|
||||
README.md
@@ -1,294 +1,297 @@
|
||||
# Telegraf - A native agent for InfluxDB [](https://circleci.com/gh/influxdb/telegraf)
|
||||
# Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)
|
||||
|
||||
Telegraf is an agent written in Go for collecting metrics from the system it's
|
||||
running on, or from other services, and writing them into InfluxDB.
|
||||
Telegraf is an agent written in Go for collecting, processing, aggregating,
|
||||
and writing metrics.
|
||||
|
||||
Design goals are to have a minimal memory footprint with a plugin system so
|
||||
that developers in the community can easily add support for collecting metrics
|
||||
from well known services (like Hadoop, Postgres, or Redis) and third party
|
||||
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
|
||||
from local or remote services.
|
||||
|
||||
We'll eagerly accept pull requests for new plugins and will manage the set of
|
||||
plugins that Telegraf supports. See the
|
||||
[contributing guide](CONTRIBUTING.md) for instructions on
|
||||
writing new plugins.
|
||||
Telegraf is plugin-driven and has the concept of four distinct plugin types:
|
||||
|
||||
## Installation:
|
||||
1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
|
||||
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
|
||||
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
|
||||
4. [Output Plugins](#output-plugins) write metrics to various destinations
|
||||
|
||||
### Linux deb and rpm packages:
|
||||
For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).
|
||||
|
||||
Latest:
|
||||
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
|
||||
|
||||
##### Package instructions:
|
||||
|
||||
* Telegraf binary is installed in `/opt/telegraf/telegraf`
|
||||
* Telegraf daemon configuration file is in `/etc/opt/telegraf/telegraf.conf`
|
||||
* On sysv systems, the telegraf daemon can be controlled via
|
||||
`service telegraf [action]`
|
||||
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
|
||||
controlled via `systemctl [action] telegraf`
|
||||
|
||||
### Linux binaries:
|
||||
|
||||
Latest:
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
|
||||
|
||||
##### Binary instructions:
|
||||
|
||||
These are standalone binaries that can be unpacked and executed on any linux
|
||||
system. They can be unpacked and renamed in a location such as
|
||||
`/usr/local/bin` for convenience. A config file will need to be generated,
|
||||
see "How to use it" below.
|
||||
|
||||
### OSX via Homebrew:
|
||||
|
||||
```
|
||||
brew update
|
||||
brew install telegraf
|
||||
```
|
||||
|
||||
### From Source:
|
||||
|
||||
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
|
||||
which gets installed via the Makefile
|
||||
if you don't have it already. You also must build with golang version 1.4+.
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install)
|
||||
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
||||
3. Run `go get github.com/influxdb/telegraf`
|
||||
4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
|
||||
5. Run `make`
|
||||
|
||||
### How to use it:
|
||||
|
||||
* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration.
|
||||
* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`
to create a config file with only CPU and memory plugins defined, and InfluxDB
output defined.
|
||||
* Edit the configuration to match your needs.
|
||||
* Run `telegraf -config telegraf.conf -test` to output one full measurement
|
||||
sample to STDOUT. NOTE: you may want to run as the telegraf user if you are using
|
||||
the linux packages: `sudo -u telegraf telegraf -config telegraf.conf -test`
|
||||
* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
|
||||
* Run `telegraf -config telegraf.conf -filter system:swap`
to run telegraf with only the system & swap plugins defined in the config.
|
||||
|
||||
## Telegraf Options
|
||||
|
||||
Telegraf has a few options you can configure under the `agent` section of the
|
||||
config.
|
||||
|
||||
* **hostname**: The hostname is passed as a tag. By default this will be
|
||||
the value returned by `hostname` on the machine running Telegraf.
|
||||
You can override that value here.
|
||||
* **interval**: How often to gather metrics. Uses a simple number +
|
||||
unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
|
||||
* **debug**: Set to true to gather and send metrics to STDOUT as well as
|
||||
InfluxDB.
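
For example, these options all live under the `[agent]` table; a minimal
sketch with illustrative values:

```toml
[agent]
  interval = "10s"
  debug = false
  # hostname overrides the value returned by `hostname` if set
  hostname = "my-host"
```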
|
||||
|
||||
## Plugin Options
|
||||
|
||||
There are 5 configuration options that are configurable per plugin:
|
||||
|
||||
* **pass**: An array of strings that is used to filter metrics generated by the
|
||||
current plugin. Each string in the array is tested as a glob match against metric names
|
||||
and if it matches, the metric is emitted.
|
||||
* **drop**: The inverse of pass, if a metric name matches, it is not emitted.
|
||||
* **tagpass**: tag names and arrays of strings that are used to filter metrics by the current plugin. Each string in the array is tested as a glob match against
|
||||
the tag name, and if it matches, the metric is emitted.
|
||||
* **tagdrop**: The inverse of tagpass. If a tag matches, the metric is not emitted.
|
||||
This is tested on metrics that have passed the tagpass test.
|
||||
* **interval**: How often to gather this metric. Normal plugins use a single
|
||||
global interval, but if one particular plugin should be run less or more often,
|
||||
you can configure that here.
|
||||
|
||||
### Plugin Configuration Examples
|
||||
|
||||
This is a full working config that will output CPU data to an InfluxDB instance
|
||||
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
|
||||
measurements at a 10s interval and will collect per-cpu data, dropping any
|
||||
measurements which begin with `cpu_time`.
|
||||
|
||||
```toml
|
||||
[tags]
|
||||
dc = "denver-1"
|
||||
|
||||
[agent]
|
||||
interval = "10s"
|
||||
|
||||
# OUTPUTS
|
||||
[outputs]
|
||||
[[outputs.influxdb]]
|
||||
url = "http://192.168.59.103:8086" # required.
|
||||
database = "telegraf" # required.
|
||||
precision = "s"
|
||||
|
||||
# PLUGINS
|
||||
[plugins]
|
||||
[[plugins.cpu]]
|
||||
percpu = true
|
||||
totalcpu = false
|
||||
drop = ["cpu_time*"]
|
||||
```
|
||||
|
||||
Below is how to configure `tagpass` and `tagdrop` parameters
|
||||
|
||||
```toml
|
||||
[plugins]
|
||||
[[plugins.cpu]]
|
||||
percpu = true
|
||||
totalcpu = false
|
||||
drop = ["cpu_time"]
|
||||
# Don't collect CPU data for cpu6 & cpu7
|
||||
[plugins.cpu.tagdrop]
|
||||
cpu = [ "cpu6", "cpu7" ]
|
||||
|
||||
[[plugins.disk]]
|
||||
[plugins.disk.tagpass]
|
||||
# tagpass conditions are OR, not AND.
|
||||
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
|
||||
# then the metric passes
|
||||
fstype = [ "ext4", "xfs" ]
|
||||
# Globs can also be used on the tag values
|
||||
path = [ "/opt", "/home*" ]
|
||||
```
|
||||
|
||||
Below is how to configure `pass` and `drop` parameters
|
||||
|
||||
```toml
|
||||
# Drop all metrics for guest CPU usage
|
||||
[[plugins.cpu]]
|
||||
drop = [ "cpu_usage_guest" ]
|
||||
|
||||
# Only store inode related metrics for disks
|
||||
[[plugins.disk]]
|
||||
pass = [ "disk_inodes*" ]
|
||||
```
|
||||
|
||||
|
||||
Additional plugins (or outputs) of the same type can be specified,
|
||||
just define more instances in the config file:
|
||||
|
||||
```toml
|
||||
[[plugins.cpu]]
|
||||
percpu = false
|
||||
totalcpu = true
|
||||
|
||||
[[plugins.cpu]]
|
||||
percpu = true
|
||||
totalcpu = false
|
||||
drop = ["cpu_time*"]
|
||||
```
|
||||
|
||||
## Supported Plugins
|
||||
|
||||
**You can view usage instructions for each plugin by running**
|
||||
`telegraf -usage <pluginname>`.
|
||||
|
||||
Telegraf currently has support for collecting metrics from:
|
||||
|
||||
* aerospike
|
||||
* apache
|
||||
* bcache
|
||||
* disque
|
||||
* elasticsearch
|
||||
* exec (generic JSON-emitting executable plugin)
|
||||
* haproxy
|
||||
* httpjson (generic JSON-emitting http service plugin)
|
||||
* influxdb
|
||||
* jolokia (remote JMX with JSON over HTTP)
|
||||
* leofs
|
||||
* lustre2
|
||||
* mailchimp
|
||||
* memcached
|
||||
* mongodb
|
||||
* mysql
|
||||
* nginx
|
||||
* phpfpm
|
||||
* ping
|
||||
* postgresql
|
||||
* procstat
|
||||
* prometheus
|
||||
* puppetagent
|
||||
* rabbitmq
|
||||
* redis
|
||||
* rethinkdb
|
||||
* twemproxy
|
||||
* zfs
|
||||
* zookeeper
|
||||
* system
|
||||
* cpu
|
||||
* mem
|
||||
* io
|
||||
* net
|
||||
* netstat
|
||||
* disk
|
||||
* swap
|
||||
|
||||
## Supported Service Plugins
|
||||
|
||||
Telegraf can collect metrics via the following services:
|
||||
|
||||
* statsd
|
||||
* kafka_consumer
|
||||
|
||||
We'll be adding support for many more over the coming months. Read on if you
|
||||
want to add support for another service or third-party API.
|
||||
|
||||
## Output options
|
||||
|
||||
Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.
|
||||
|
||||
Outputs also support the same configurable options as plugins
|
||||
(pass, drop, tagpass, tagdrop), added in 0.2.4
|
||||
|
||||
```toml
|
||||
[[outputs.influxdb]]
|
||||
urls = [ "http://localhost:8086" ]
|
||||
database = "telegraf"
|
||||
precision = "s"
|
||||
# Drop all measurements that start with "aerospike"
|
||||
drop = ["aerospike*"]
|
||||
|
||||
[[outputs.influxdb]]
|
||||
urls = [ "http://localhost:8086" ]
|
||||
database = "telegraf-aerospike-data"
|
||||
precision = "s"
|
||||
# Only accept aerospike data:
|
||||
pass = ["aerospike*"]
|
||||
|
||||
[[outputs.influxdb]]
|
||||
urls = [ "http://localhost:8086" ]
|
||||
database = "telegraf-cpu0-data"
|
||||
precision = "s"
|
||||
# Only store measurements where the tag "cpu" matches the value "cpu0"
|
||||
[outputs.influxdb.tagpass]
|
||||
cpu = ["cpu0"]
|
||||
```
|
||||
|
||||
|
||||
## Supported Outputs
|
||||
|
||||
* influxdb
|
||||
* nsq
|
||||
* kafka
|
||||
* datadog
|
||||
* opentsdb
|
||||
* amqp (rabbitmq)
|
||||
* mqtt
|
||||
* librato
|
||||
* prometheus
|
||||
* amon
|
||||
* riemann
|
||||
New plugins are designed to be easy to contribute; we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please see the
|
||||
[contributing guide](CONTRIBUTING.md)
|
||||
for details on contributing a plugin or output to Telegraf.
|
||||
There are many ways to contribute:
|
||||
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
|
||||
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
|
||||
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
|
||||
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
|
||||
- [Contribute plugins](CONTRIBUTING.md)
|
||||
|
||||
## Installation:
|
||||
|
||||
You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
|
||||
or from the [releases](https://github.com/influxdata/telegraf/releases) section.
|
||||
|
||||
### Ansible Role:
|
||||
|
||||
Ansible role: https://github.com/rossmcdonald/telegraf
|
||||
|
||||
### From Source:
|
||||
|
||||
Telegraf requires golang version 1.8+, the Makefile requires GNU make.
|
||||
|
||||
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
|
||||
which is installed by the Makefile if you don't have it already.
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install)
|
||||
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
||||
3. Run `go get -d github.com/influxdata/telegraf`
|
||||
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
|
||||
5. Run `make`
|
||||
|
||||
### Nightly Builds
|
||||
|
||||
These builds are generated from the master branch:
|
||||
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
|
||||
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
|
||||
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
|
||||
- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
|
||||
- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
|
||||
- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
|
||||
- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
|
||||
- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
|
||||
- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
|
||||
- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
|
||||
- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
|
||||
- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
|
||||
- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
|
||||
- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
|
||||
- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
|
||||
- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
|
||||
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
|
||||
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
|
||||
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
|
||||
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
|
||||
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
|
||||
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
|
||||
- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
|
||||
|
||||
## How to use it:
|
||||
|
||||
See usage with:
|
||||
|
||||
```
|
||||
./telegraf --help
|
||||
```
|
||||
|
||||
#### Generate a telegraf config file:
|
||||
|
||||
```
|
||||
./telegraf config > telegraf.conf
|
||||
```
|
||||
|
||||
#### Generate config with only cpu input & influxdb output plugins defined:
|
||||
|
||||
```
|
||||
./telegraf --input-filter cpu --output-filter influxdb config
|
||||
```
|
||||
|
||||
#### Run a single telegraf collection, outputting metrics to stdout:
|
||||
|
||||
```
|
||||
./telegraf --config telegraf.conf --test
|
||||
```
|
||||
|
||||
#### Run telegraf with all plugins defined in config file:
|
||||
|
||||
```
|
||||
./telegraf --config telegraf.conf
|
||||
```
|
||||
|
||||
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
|
||||
|
||||
```
|
||||
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
|
||||
```
|
||||
|
||||
|
||||
## Configuration
|
||||
|
||||
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
|
||||
configuration options.
|
||||
|
||||
## Input Plugins
|
||||
|
||||
* [aerospike](./plugins/inputs/aerospike)
|
||||
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
|
||||
* [apache](./plugins/inputs/apache)
|
||||
* [aws cloudwatch](./plugins/inputs/cloudwatch)
|
||||
* [bcache](./plugins/inputs/bcache)
|
||||
* [bond](./plugins/inputs/bond)
|
||||
* [cassandra](./plugins/inputs/cassandra)
|
||||
* [ceph](./plugins/inputs/ceph)
|
||||
* [cgroup](./plugins/inputs/cgroup)
|
||||
* [chrony](./plugins/inputs/chrony)
|
||||
* [consul](./plugins/inputs/consul)
|
||||
* [conntrack](./plugins/inputs/conntrack)
|
||||
* [couchbase](./plugins/inputs/couchbase)
|
||||
* [couchdb](./plugins/inputs/couchdb)
|
||||
* [DC/OS](./plugins/inputs/dcos)
|
||||
* [disque](./plugins/inputs/disque)
|
||||
* [dmcache](./plugins/inputs/dmcache)
|
||||
* [dns query time](./plugins/inputs/dns_query)
|
||||
* [docker](./plugins/inputs/docker)
|
||||
* [dovecot](./plugins/inputs/dovecot)
|
||||
* [elasticsearch](./plugins/inputs/elasticsearch)
|
||||
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
|
||||
* [fail2ban](./plugins/inputs/fail2ban)
|
||||
* [filestat](./plugins/inputs/filestat)
|
||||
* [fluentd](./plugins/inputs/fluentd)
|
||||
* [graylog](./plugins/inputs/graylog)
|
||||
* [haproxy](./plugins/inputs/haproxy)
|
||||
* [hddtemp](./plugins/inputs/hddtemp)
|
||||
* [http_response](./plugins/inputs/http_response)
|
||||
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
|
||||
* [internal](./plugins/inputs/internal)
|
||||
* [influxdb](./plugins/inputs/influxdb)
|
||||
* [interrupts](./plugins/inputs/interrupts)
|
||||
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
|
||||
* [iptables](./plugins/inputs/iptables)
|
||||
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
|
||||
* [jolokia2](./plugins/inputs/jolokia2)
|
||||
* [kapacitor](./plugins/inputs/kapacitor)
|
||||
* [kubernetes](./plugins/inputs/kubernetes)
|
||||
* [leofs](./plugins/inputs/leofs)
|
||||
* [lustre2](./plugins/inputs/lustre2)
|
||||
* [mailchimp](./plugins/inputs/mailchimp)
|
||||
* [memcached](./plugins/inputs/memcached)
|
||||
* [mesos](./plugins/inputs/mesos)
|
||||
* [minecraft](./plugins/inputs/minecraft)
|
||||
* [mongodb](./plugins/inputs/mongodb)
|
||||
* [mysql](./plugins/inputs/mysql)
|
||||
* [net_response](./plugins/inputs/net_response)
|
||||
* [nginx](./plugins/inputs/nginx)
|
||||
* [nginx_plus](./plugins/inputs/nginx_plus)
|
||||
* [nsq](./plugins/inputs/nsq)
|
||||
* [nstat](./plugins/inputs/nstat)
|
||||
* [ntpq](./plugins/inputs/ntpq)
|
||||
* [openldap](./plugins/inputs/openldap)
|
||||
* [opensmtpd](./plugins/inputs/opensmtpd)
|
||||
* [pf](./plugins/inputs/pf)
|
||||
* [phpfpm](./plugins/inputs/phpfpm)
|
||||
* [phusion passenger](./plugins/inputs/passenger)
|
||||
* [ping](./plugins/inputs/ping)
|
||||
* [postfix](./plugins/inputs/postfix)
|
||||
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
|
||||
* [postgresql](./plugins/inputs/postgresql)
|
||||
* [powerdns](./plugins/inputs/powerdns)
|
||||
* [procstat](./plugins/inputs/procstat)
|
||||
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
|
||||
* [puppetagent](./plugins/inputs/puppetagent)
|
||||
* [rabbitmq](./plugins/inputs/rabbitmq)
|
||||
* [raindrops](./plugins/inputs/raindrops)
|
||||
* [redis](./plugins/inputs/redis)
|
||||
* [rethinkdb](./plugins/inputs/rethinkdb)
|
||||
* [riak](./plugins/inputs/riak)
|
||||
* [salesforce](./plugins/inputs/salesforce)
|
||||
* [sensors](./plugins/inputs/sensors)
|
||||
* [smart](./plugins/inputs/smart)
|
||||
* [snmp](./plugins/inputs/snmp)
|
||||
* [snmp_legacy](./plugins/inputs/snmp_legacy)
|
||||
* [solr](./plugins/inputs/solr)
|
||||
* [sql server](./plugins/inputs/sqlserver) (microsoft)
|
||||
* [teamspeak](./plugins/inputs/teamspeak)
|
||||
* [tomcat](./plugins/inputs/tomcat)
|
||||
* [twemproxy](./plugins/inputs/twemproxy)
|
||||
* [unbound](./plugins/inputs/unbound)
|
||||
* [varnish](./plugins/inputs/varnish)
|
||||
* [zfs](./plugins/inputs/zfs)
|
||||
* [zookeeper](./plugins/inputs/zookeeper)
|
||||
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
|
||||
* [win_services](./plugins/inputs/win_services)
|
||||
* [sysstat](./plugins/inputs/sysstat)
|
||||
* [system](./plugins/inputs/system)
|
||||
* cpu
|
||||
* mem
|
||||
* net
|
||||
* netstat
|
||||
* disk
|
||||
* diskio
|
||||
* swap
|
||||
* processes
|
||||
* kernel (/proc/stat)
|
||||
* kernel_vmstat (/proc/vmstat)
|
||||
* linux_sysctl_fs (/proc/sys/fs)
|
||||
|
||||
Telegraf can also collect metrics via the following service plugins:
|
||||
|
||||
* [http_listener](./plugins/inputs/http_listener)
|
||||
* [kafka_consumer](./plugins/inputs/kafka_consumer)
|
||||
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
|
||||
* [nats_consumer](./plugins/inputs/nats_consumer)
|
||||
* [nsq_consumer](./plugins/inputs/nsq_consumer)
|
||||
* [logparser](./plugins/inputs/logparser)
|
||||
* [statsd](./plugins/inputs/statsd)
|
||||
* [socket_listener](./plugins/inputs/socket_listener)
|
||||
* [tail](./plugins/inputs/tail)
|
||||
* [tcp_listener](./plugins/inputs/socket_listener)
|
||||
* [udp_listener](./plugins/inputs/socket_listener)
|
||||
* [webhooks](./plugins/inputs/webhooks)
|
||||
* [filestack](./plugins/inputs/webhooks/filestack)
|
||||
* [github](./plugins/inputs/webhooks/github)
|
||||
* [mandrill](./plugins/inputs/webhooks/mandrill)
|
||||
* [papertrail](./plugins/inputs/webhooks/papertrail)
|
||||
* [particle](./plugins/inputs/webhooks/particle)
|
||||
* [rollbar](./plugins/inputs/webhooks/rollbar)
|
||||
* [zipkin](./plugins/inputs/zipkin)
|
||||
|
||||
Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option:
|
||||
|
||||
* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
|
||||
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
|
||||
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
|
||||
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
|
||||
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
|
||||
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
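
For example, any input that supports `data_format` can select one of these
parsers from its configuration; a sketch using the `exec` input (the command
path is hypothetical):

```toml
[[inputs.exec]]
  ## Hypothetical command that writes JSON to stdout
  commands = ["/usr/local/bin/mycollector"]
  data_format = "json"
```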
|
||||
|
||||
## Processor Plugins
|
||||
|
||||
* [printer](./plugins/processors/printer)
|
||||
|
||||
## Aggregator Plugins
|
||||
|
||||
* [basicstats](./plugins/aggregators/basicstats)
|
||||
* [minmax](./plugins/aggregators/minmax)
|
||||
* [histogram](./plugins/aggregators/histogram)
|
||||
|
||||
## Output Plugins
|
||||
|
||||
* [influxdb](./plugins/outputs/influxdb)
|
||||
* [amon](./plugins/outputs/amon)
|
||||
* [amqp](./plugins/outputs/amqp) (rabbitmq)
|
||||
* [aws kinesis](./plugins/outputs/kinesis)
|
||||
* [aws cloudwatch](./plugins/outputs/cloudwatch)
|
||||
* [cratedb](./plugins/outputs/cratedb)
|
||||
* [datadog](./plugins/outputs/datadog)
|
||||
* [discard](./plugins/outputs/discard)
|
||||
* [elasticsearch](./plugins/outputs/elasticsearch)
|
||||
* [file](./plugins/outputs/file)
|
||||
* [graphite](./plugins/outputs/graphite)
|
||||
* [graylog](./plugins/outputs/graylog)
|
||||
* [instrumental](./plugins/outputs/instrumental)
|
||||
* [kafka](./plugins/outputs/kafka)
|
||||
* [librato](./plugins/outputs/librato)
|
||||
* [mqtt](./plugins/outputs/mqtt)
|
||||
* [nats](./plugins/outputs/nats)
|
||||
* [nsq](./plugins/outputs/nsq)
|
||||
* [opentsdb](./plugins/outputs/opentsdb)
|
||||
* [prometheus](./plugins/outputs/prometheus_client)
|
||||
* [riemann](./plugins/outputs/riemann)
|
||||
* [riemann_legacy](./plugins/outputs/riemann_legacy)
|
||||
* [socket_writer](./plugins/outputs/socket_writer)
|
||||
* [tcp](./plugins/outputs/socket_writer)
|
||||
* [udp](./plugins/outputs/socket_writer)
|
||||
* [wavefront](./plugins/outputs/wavefront)
|
||||
|
||||
accumulator.go
@@ -1,154 +1,46 @@
|
||||
package telegraf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
import "time"
|
||||
|
||||
// Accumulator is an interface for "accumulating" metrics from plugin(s).
|
||||
// The metrics are sent down a channel shared between all plugins.
|
||||
type Accumulator interface {
|
||||
Add(measurement string, value interface{},
|
||||
tags map[string]string, t ...time.Time)
|
||||
AddFields(measurement string, fields map[string]interface{},
|
||||
tags map[string]string, t ...time.Time)
|
||||
// AddFields adds a metric to the accumulator with the given measurement
|
||||
// name, fields, and tags (and timestamp). If a timestamp is not provided,
|
||||
// then the accumulator sets it to "now".
|
||||
// Create a point with a value, decorating it with tags
|
||||
// NOTE: tags is expected to be owned by the caller, don't mutate
|
||||
// it after passing to Add.
|
||||
AddFields(measurement string,
|
||||
fields map[string]interface{},
|
||||
tags map[string]string,
|
||||
t ...time.Time)
|
||||
|
||||
SetDefaultTags(tags map[string]string)
|
||||
AddDefaultTag(key, value string)
|
||||
// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
|
||||
    AddGauge(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    Prefix() string
    SetPrefix(prefix string)
    // AddCounter is the same as AddFields, but will add the metric as a "Counter" type
    AddCounter(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    Debug() bool
    SetDebug(enabled bool)
}

func NewAccumulator(
    pluginConfig *config.PluginConfig,
    points chan *client.Point,
) Accumulator {
    acc := accumulator{}
    acc.points = points
    acc.pluginConfig = pluginConfig
    return &acc
}

type accumulator struct {
    sync.Mutex

    points chan *client.Point

    defaultTags map[string]string

    debug bool

    pluginConfig *config.PluginConfig

    prefix string
}

func (ac *accumulator) Add(
    measurement string,
    value interface{},
    tags map[string]string,
    t ...time.Time,
) {
    fields := make(map[string]interface{})
    fields["value"] = value
    ac.AddFields(measurement, fields, tags, t...)
}

func (ac *accumulator) AddFields(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    // Validate uint64 and float64 fields
    for k, v := range fields {
        switch val := v.(type) {
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                fields[k] = int64(val)
            } else {
                fields[k] = int64(9223372036854775807)
            }
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if ac.debug {
                    log.Printf("Measurement [%s] has a NaN or Inf field, skipping",
                        measurement)
                }
                return
            }
        }
    }

    if tags == nil {
        tags = make(map[string]string)
    }

    var timestamp time.Time
    if len(t) > 0 {
        timestamp = t[0]
    } else {
        timestamp = time.Now()
    }

    if ac.prefix != "" {
        measurement = ac.prefix + measurement
    }

    if ac.pluginConfig != nil {
        if !ac.pluginConfig.Filter.ShouldPass(measurement) || !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
            return
        }
    }

    for k, v := range ac.defaultTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    pt, err := client.NewPoint(measurement, tags, fields, timestamp)
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return
    }
    if ac.debug {
        fmt.Println("> " + pt.String())
    }
    ac.points <- pt
}

func (ac *accumulator) SetDefaultTags(tags map[string]string) {
    ac.defaultTags = tags
}

func (ac *accumulator) AddDefaultTag(key, value string) {
    ac.defaultTags[key] = value
}

func (ac *accumulator) Prefix() string {
    return ac.prefix
}

func (ac *accumulator) SetPrefix(prefix string) {
    ac.prefix = prefix
}

func (ac *accumulator) Debug() bool {
    return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
    ac.debug = debug
}

    // AddSummary is the same as AddFields, but will add the metric as a "Summary" type
    AddSummary(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    // AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
    AddHistogram(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    SetPrecision(precision, interval time.Duration)

    AddError(err error)
}
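Aside: the uint64 branch in the old AddFields above clamps rather than rejects. Values that fit in an int64 are converted; anything larger is saturated at the int64 maximum. A minimal, self-contained sketch of that rule (clampUint64 is a hypothetical helper name, not part of this diff):

package main

import (
    "fmt"
    "math"
)

// clampUint64 mirrors the old AddFields validation: InfluxDB has no uint64
// field type, so values are written as int64 and saturated at MaxInt64.
func clampUint64(v uint64) int64 {
    if v <= uint64(math.MaxInt64) { // 9223372036854775807
        return int64(v)
    }
    return math.MaxInt64
}

func main() {
    fmt.Println(clampUint64(42))             // 42
    fmt.Println(clampUint64(math.MaxUint64)) // 9223372036854775807
}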
agent.go (397 lines removed)
@@ -1,397 +0,0 @@
package telegraf

import (
    "crypto/rand"
    "fmt"
    "log"
    "math/big"
    "os"
    "sync"
    "time"

    "github.com/influxdb/telegraf/internal/config"
    "github.com/influxdb/telegraf/outputs"
    "github.com/influxdb/telegraf/plugins"

    "github.com/influxdb/influxdb/client/v2"
)

// Agent runs telegraf and collects data based on the given config
type Agent struct {
    Config *config.Config
}

// NewAgent returns an Agent struct based on the given Config
func NewAgent(config *config.Config) (*Agent, error) {
    a := &Agent{
        Config: config,
    }

    if a.Config.Agent.Hostname == "" {
        hostname, err := os.Hostname()
        if err != nil {
            return nil, err
        }

        a.Config.Agent.Hostname = hostname
    }

    config.Tags["host"] = a.Config.Agent.Hostname

    return a, nil
}

// Connect connects to all configured outputs
func (a *Agent) Connect() error {
    for _, o := range a.Config.Outputs {
        switch ot := o.Output.(type) {
        case outputs.ServiceOutput:
            if err := ot.Start(); err != nil {
                log.Printf("Service for output %s failed to start, exiting\n%s\n",
                    o.Name, err.Error())
                return err
            }
        }

        if a.Config.Agent.Debug {
            log.Printf("Attempting connection to output: %s\n", o.Name)
        }
        err := o.Output.Connect()
        if err != nil {
            log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name)
            time.Sleep(15 * time.Second)
            err = o.Output.Connect()
            if err != nil {
                return err
            }
        }
        if a.Config.Agent.Debug {
            log.Printf("Successfully connected to output: %s\n", o.Name)
        }
    }
    return nil
}

// Close closes the connection to all configured outputs
func (a *Agent) Close() error {
    var err error
    for _, o := range a.Config.Outputs {
        err = o.Output.Close()
        switch ot := o.Output.(type) {
        case outputs.ServiceOutput:
            ot.Stop()
        }
    }
    return err
}

// gatherParallel runs the plugins that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
    var wg sync.WaitGroup

    start := time.Now()
    counter := 0
    for _, plugin := range a.Config.Plugins {
        if plugin.Config.Interval != 0 {
            continue
        }

        wg.Add(1)
        counter++
        go func(plugin *config.RunningPlugin) {
            defer wg.Done()

            acc := NewAccumulator(plugin.Config, pointChan)
            acc.SetDebug(a.Config.Agent.Debug)
            acc.SetPrefix(plugin.Name + "_")
            acc.SetDefaultTags(a.Config.Tags)

            if err := plugin.Plugin.Gather(acc); err != nil {
                log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
            }

        }(plugin)
    }

    if counter == 0 {
        return nil
    }

    wg.Wait()

    elapsed := time.Since(start)
    log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n",
        a.Config.Agent.Interval, counter, elapsed)
    return nil
}

// gatherSeparate runs the plugins that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
    shutdown chan struct{},
    plugin *config.RunningPlugin,
    pointChan chan *client.Point,
) error {
    ticker := time.NewTicker(plugin.Config.Interval)

    for {
        var outerr error
        start := time.Now()

        acc := NewAccumulator(plugin.Config, pointChan)
        acc.SetDebug(a.Config.Agent.Debug)
        acc.SetPrefix(plugin.Name + "_")
        acc.SetDefaultTags(a.Config.Tags)

        if err := plugin.Plugin.Gather(acc); err != nil {
            log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
        }

        elapsed := time.Since(start)
        log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
            plugin.Config.Interval, plugin.Name, elapsed)

        if outerr != nil {
            return outerr
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

// Test verifies that we can 'Gather' from all plugins with their configured
// Config struct
func (a *Agent) Test() error {
    shutdown := make(chan struct{})
    defer close(shutdown)
    pointChan := make(chan *client.Point)

    // dummy receiver for the point channel
    go func() {
        for {
            select {
            case <-pointChan:
                // do nothing
            case <-shutdown:
                return
            }
        }
    }()

    for _, plugin := range a.Config.Plugins {
        acc := NewAccumulator(plugin.Config, pointChan)
        acc.SetDebug(true)
        acc.SetPrefix(plugin.Name + "_")

        fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name)
        if plugin.Config.Interval != 0 {
            fmt.Printf("* Interval: %s\n", plugin.Config.Interval)
        }

        if err := plugin.Plugin.Gather(acc); err != nil {
            return err
        }

        // Special instructions for some plugins. cpu, for example, needs to be
        // run twice in order to return cpu usage percentages.
        switch plugin.Name {
        case "cpu", "mongodb":
            time.Sleep(500 * time.Millisecond)
            fmt.Printf("* Plugin: %s, Collection 2\n", plugin.Name)
            if err := plugin.Plugin.Gather(acc); err != nil {
                return err
            }
        }
    }
    return nil
}

// writeOutput writes a list of points to a single output, with retries.
// Optionally takes a `done` channel to indicate that it is done writing.
func (a *Agent) writeOutput(
    points []*client.Point,
    ro *config.RunningOutput,
    shutdown chan struct{},
    wg *sync.WaitGroup,
) {
    defer wg.Done()
    if len(points) == 0 {
        return
    }
    retry := 0
    retries := a.Config.Agent.FlushRetries
    start := time.Now()

    for {
        filtered := ro.FilterPoints(points)
        err := ro.Output.Write(filtered)
        if err == nil {
            // Write successful
            elapsed := time.Since(start)
            log.Printf("Flushed %d metrics to output %s in %s\n",
                len(filtered), ro.Name, elapsed)
            return
        }

        select {
        case <-shutdown:
            return
        default:
            if retry >= retries {
                // No more retries
                msg := "FATAL: Write to output [%s] failed %d times, dropping" +
                    " %d metrics\n"
                log.Printf(msg, ro.Name, retries+1, len(points))
                return
            } else if err != nil {
                // Sleep for a retry
                log.Printf("Error in output [%s]: %s, retrying in %s",
                    ro.Name, err.Error(), a.Config.Agent.FlushInterval.Duration)
                time.Sleep(a.Config.Agent.FlushInterval.Duration)
            }
        }

        retry++
    }
}

// flush writes a list of points to all configured outputs
func (a *Agent) flush(
    points []*client.Point,
    shutdown chan struct{},
    wait bool,
) {
    var wg sync.WaitGroup
    for _, o := range a.Config.Outputs {
        wg.Add(1)
        go a.writeOutput(points, o, shutdown, &wg)
    }
    if wait {
        wg.Wait()
    }
}

// flusher monitors the points input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
    // Inelegant, but this sleep is to allow the Gather threads to run, so that
    // the flusher will flush after metrics are collected.
    time.Sleep(time.Millisecond * 100)

    ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
    points := make([]*client.Point, 0)

    for {
        select {
        case <-shutdown:
            log.Println("Hang on, flushing any cached points before shutdown")
            a.flush(points, shutdown, true)
            return nil
        case <-ticker.C:
            a.flush(points, shutdown, false)
            points = make([]*client.Point, 0)
        case pt := <-pointChan:
            points = append(points, pt)
        }
    }
}

// jitterInterval applies the interval jitter to the flush interval using
// the crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
    var jitter int64
    outinterval := ininterval
    if injitter.Nanoseconds() != 0 {
        maxjitter := big.NewInt(injitter.Nanoseconds())
        if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
            jitter = j.Int64()
        }
        outinterval = time.Duration(jitter + ininterval.Nanoseconds())
    }

    if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
        log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
        outinterval = time.Duration(500 * time.Millisecond)
    }

    return outinterval
}
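In other words, the contract of jitterInterval is: the result is never less than the input interval, strictly less than interval plus jitter (crypto/rand.Int draws from [0, max)), and never below the 500ms floor. A quick sketch of a check against that contract, assuming the function above is in scope:

for i := 0; i < 1000; i++ {
    d := jitterInterval(30*time.Second, 2*time.Second)
    if d < 30*time.Second || d >= 32*time.Second {
        panic("jitterInterval out of [30s, 32s) bounds")
    }
}
if jitterInterval(0, 0) != 500*time.Millisecond {
    panic("expected the 500ms floor")
}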
// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
    var wg sync.WaitGroup

    a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration,
        a.Config.Agent.FlushJitter.Duration)

    log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
        "Flush Interval:%s\n",
        a.Config.Agent.Interval, a.Config.Agent.Debug,
        a.Config.Agent.Hostname, a.Config.Agent.FlushInterval)

    // channel shared between all plugin threads for accumulating points
    pointChan := make(chan *client.Point, 1000)

    // Round collection to nearest interval by sleeping
    if a.Config.Agent.RoundInterval {
        i := int64(a.Config.Agent.Interval.Duration)
        time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
    }
    ticker := time.NewTicker(a.Config.Agent.Interval.Duration)

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := a.flusher(shutdown, pointChan); err != nil {
            log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
            close(shutdown)
        }
    }()

    for _, plugin := range a.Config.Plugins {

        // Start service of any ServicePlugins
        switch p := plugin.Plugin.(type) {
        case plugins.ServicePlugin:
            if err := p.Start(); err != nil {
                log.Printf("Service for plugin %s failed to start, exiting\n%s\n",
                    plugin.Name, err.Error())
                return err
            }
            defer p.Stop()
        }

        // Special handling for plugins that have their own collection interval
        // configured. Default intervals are handled below with gatherParallel
        if plugin.Config.Interval != 0 {
            wg.Add(1)
            go func(plugin *config.RunningPlugin) {
                defer wg.Done()
                if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil {
                    log.Printf(err.Error())
                }
            }(plugin)
        }
    }

    defer wg.Wait()

    for {
        if err := a.gatherParallel(pointChan); err != nil {
            log.Printf(err.Error())
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}
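The RoundInterval branch in Run is worth a second look: sleeping i - (now % i) nanoseconds aligns the first collection to the next round wall-clock boundary. A self-contained sketch of the same arithmetic, assuming a 10s interval:

package main

import (
    "fmt"
    "time"
)

func main() {
    interval := int64(10 * time.Second)
    now := time.Now().UnixNano()
    // now % interval is how far we are past the last boundary;
    // the difference is how long to sleep until the next one.
    sleep := time.Duration(interval - (now % interval))
    fmt.Printf("sleeping %s to align to the next 10s boundary\n", sleep)
}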
agent/accumulator.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package agent

import (
    "log"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/selfstat"
)

var (
    NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
)

type MetricMaker interface {
    Name() string
    MakeMetric(
        measurement string,
        fields map[string]interface{},
        tags map[string]string,
        mType telegraf.ValueType,
        t time.Time,
    ) telegraf.Metric
}

func NewAccumulator(
    maker MetricMaker,
    metrics chan telegraf.Metric,
) *accumulator {
    acc := accumulator{
        maker:     maker,
        metrics:   metrics,
        precision: time.Nanosecond,
    }
    return &acc
}

type accumulator struct {
    metrics chan telegraf.Metric

    maker MetricMaker

    precision time.Duration
}

func (ac *accumulator) AddFields(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

func (ac *accumulator) AddGauge(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

func (ac *accumulator) AddCounter(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

func (ac *accumulator) AddSummary(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

func (ac *accumulator) AddHistogram(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {
    if err == nil {
        return
    }
    NErrors.Incr(1)
    //TODO suppress/throttle consecutive duplicate errors?
    log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}

// SetPrecision takes two time.Duration objects. If the first is non-zero,
// it sets that as the precision. Otherwise, it takes the second argument
// as the order of time that the metrics should be rounded to, with the
// maximum being 1s.
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
    if precision > 0 {
        ac.precision = precision
        return
    }
    switch {
    case interval >= time.Second:
        ac.precision = time.Second
    case interval >= time.Millisecond:
        ac.precision = time.Millisecond
    case interval >= time.Microsecond:
        ac.precision = time.Microsecond
    default:
        ac.precision = time.Nanosecond
    }
}

func (ac accumulator) getTime(t []time.Time) time.Time {
    var timestamp time.Time
    if len(t) > 0 {
        timestamp = t[0]
    } else {
        timestamp = time.Now()
    }
    return timestamp.Round(ac.precision)
}
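To make the rounding concrete: with the timestamp used throughout the tests below (82,912,748ns past noon), time.Time.Round produces exactly the values the tests assert. A minimal sketch, independent of the accumulator:

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    fmt.Println(t.Round(time.Second).UnixNano())      // 1139572800000000000
    fmt.Println(t.Round(time.Millisecond).UnixNano()) // 1139572800083000000
    fmt.Println(t.Round(time.Microsecond).UnixNano()) // 1139572800082913000
}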
agent/accumulator_test.go (new file, 340 lines)
@@ -0,0 +1,340 @@
package agent

import (
    "bytes"
    "fmt"
    "log"
    "os"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/metric"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestAdd(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
        actual)
}

func TestAddFields(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    fields := map[string]interface{}{
        "usage": float64(99),
    }
    a.AddFields("acctest", fields, map[string]string{})
    a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
    a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest usage=99")

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test usage=99")

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
        actual)
}

func TestAccAddError(t *testing.T) {
    errBuf := bytes.NewBuffer(nil)
    log.SetOutput(errBuf)
    defer log.SetOutput(os.Stderr)

    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.AddError(fmt.Errorf("foo"))
    a.AddError(fmt.Errorf("bar"))
    a.AddError(fmt.Errorf("baz"))

    errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
    assert.EqualValues(t, int64(3), NErrors.Get())
    require.Len(t, errs, 4) // 4 because of trailing newline
    assert.Contains(t, string(errs[0]), "TestPlugin")
    assert.Contains(t, string(errs[0]), "foo")
    assert.Contains(t, string(errs[1]), "TestPlugin")
    assert.Contains(t, string(errs[1]), "bar")
    assert.Contains(t, string(errs[2]), "TestPlugin")
    assert.Contains(t, string(errs[2]), "baz")
}

func TestAddNoIntervalWithPrecision(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)
    a.SetPrecision(0, time.Second)

    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
        actual)
}

func TestAddDisablePrecision(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.SetPrecision(time.Nanosecond, 0)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
        actual)
}

func TestAddNoPrecisionWithInterval(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.SetPrecision(0, time.Second)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
        actual)
}

func TestDifferentPrecisions(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.SetPrecision(0, time.Second)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm := <-a.metrics
    actual := testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
        actual)

    a.SetPrecision(0, time.Millisecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
        actual)

    a.SetPrecision(0, time.Microsecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
        actual)

    a.SetPrecision(0, time.Nanosecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
        actual)
}

func TestAddGauge(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Gauge)
}

func TestAddCounter(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Counter)
}

type TestMetricMaker struct{}

func (tm *TestMetricMaker) Name() string {
    return "TestPlugin"
}

func (tm *TestMetricMaker) MakeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
    switch mType {
    case telegraf.Untyped:
        if m, err := metric.New(measurement, tags, fields, t); err == nil {
            return m
        }
    case telegraf.Counter:
        if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil {
            return m
        }
    case telegraf.Gauge:
        if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil {
            return m
        }
    }
    return nil
}
agent/agent.go (new file, 429 lines)
@@ -0,0 +1,429 @@
package agent

import (
    "fmt"
    "log"
    "os"
    "runtime"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/internal/models"
    "github.com/influxdata/telegraf/selfstat"
)

// Agent runs telegraf and collects data based on the given config
type Agent struct {
    Config *config.Config
}

// NewAgent returns an Agent struct based on the given Config
func NewAgent(config *config.Config) (*Agent, error) {
    a := &Agent{
        Config: config,
    }

    if !a.Config.Agent.OmitHostname {
        if a.Config.Agent.Hostname == "" {
            hostname, err := os.Hostname()
            if err != nil {
                return nil, err
            }

            a.Config.Agent.Hostname = hostname
        }

        config.Tags["host"] = a.Config.Agent.Hostname
    }

    return a, nil
}

// Connect connects to all configured outputs
func (a *Agent) Connect() error {
    for _, o := range a.Config.Outputs {
        switch ot := o.Output.(type) {
        case telegraf.ServiceOutput:
            if err := ot.Start(); err != nil {
                log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
                    o.Name, err.Error())
                return err
            }
        }

        log.Printf("D! Attempting connection to output: %s\n", o.Name)
        err := o.Output.Connect()
        if err != nil {
            log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
                "error was '%s'\n", o.Name, err)
            time.Sleep(15 * time.Second)
            err = o.Output.Connect()
            if err != nil {
                return err
            }
        }
        log.Printf("D! Successfully connected to output: %s\n", o.Name)
    }
    return nil
}

// Close closes the connection to all configured outputs
func (a *Agent) Close() error {
    var err error
    for _, o := range a.Config.Outputs {
        err = o.Output.Close()
        switch ot := o.Output.(type) {
        case telegraf.ServiceOutput:
            ot.Stop()
        }
    }
    return err
}

func panicRecover(input *models.RunningInput) {
    if err := recover(); err != nil {
        trace := make([]byte, 2048)
        runtime.Stack(trace, true)
        log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
            input.Name(), err, trace)
        log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
            "stack trace, configuration, and OS information: " +
            "https://github.com/influxdata/telegraf/issues/new")
    }
}

// gatherer runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherer(
    shutdown chan struct{},
    input *models.RunningInput,
    interval time.Duration,
    metricC chan telegraf.Metric,
) {
    defer panicRecover(input)

    GatherTime := selfstat.RegisterTiming("gather",
        "gather_time_ns",
        map[string]string{"input": input.Config.Name},
    )

    acc := NewAccumulator(input, metricC)
    acc.SetPrecision(a.Config.Agent.Precision.Duration,
        a.Config.Agent.Interval.Duration)

    ticker := time.NewTicker(interval)
    defer ticker.Stop()

    for {
        internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)

        start := time.Now()
        gatherWithTimeout(shutdown, input, acc, interval)
        elapsed := time.Since(start)

        GatherTime.Incr(elapsed.Nanoseconds())

        select {
        case <-shutdown:
            return
        case <-ticker.C:
            continue
        }
    }
}

// gatherWithTimeout gathers from the given input, with the given timeout.
// When the given timeout is reached, gatherWithTimeout logs an error message
// but continues waiting for it to return. This is to avoid leaving behind
// hung processes, and to prevent re-calling the same hung process over and
// over.
func gatherWithTimeout(
    shutdown chan struct{},
    input *models.RunningInput,
    acc *accumulator,
    timeout time.Duration,
) {
    ticker := time.NewTicker(timeout)
    defer ticker.Stop()
    done := make(chan error)
    go func() {
        done <- input.Input.Gather(acc)
    }()

    for {
        select {
        case err := <-done:
            if err != nil {
                acc.AddError(err)
            }
            return
        case <-ticker.C:
            err := fmt.Errorf("took longer to collect than collection interval (%s)",
                timeout)
            acc.AddError(err)
            continue
        case <-shutdown:
            return
        }
    }
}
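The shape of gatherWithTimeout — report an error on every tick, but never abandon or re-invoke the goroutine — generalizes to any call that may hang. A stripped-down, self-contained sketch of the pattern (slowCall is a hypothetical stand-in for input.Gather):

package main

import (
    "fmt"
    "time"
)

// slowCall stands in for a gather that overruns its interval.
func slowCall() error {
    time.Sleep(2500 * time.Millisecond)
    return nil
}

func main() {
    done := make(chan error, 1)
    go func() { done <- slowCall() }()

    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()
    for {
        select {
        case err := <-done:
            fmt.Println("finished:", err) // keep waiting until it returns
            return
        case <-ticker.C:
            // Complain, but do not re-call: re-invoking a hung collector
            // would only pile up stuck goroutines.
            fmt.Println("still running past the timeout")
        }
    }
}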
// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
    shutdown := make(chan struct{})
    defer close(shutdown)
    metricC := make(chan telegraf.Metric)

    // dummy receiver for the point channel
    go func() {
        for {
            select {
            case <-metricC:
                // do nothing
            case <-shutdown:
                return
            }
        }
    }()

    for _, input := range a.Config.Inputs {
        if _, ok := input.Input.(telegraf.ServiceInput); ok {
            fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
                input.Name())
            continue
        }

        acc := NewAccumulator(input, metricC)
        acc.SetPrecision(a.Config.Agent.Precision.Duration,
            a.Config.Agent.Interval.Duration)
        input.SetTrace(true)
        input.SetDefaultTags(a.Config.Tags)

        fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
        if input.Config.Interval != 0 {
            fmt.Printf("* Interval: %s\n", input.Config.Interval)
        }

        if err := input.Input.Gather(acc); err != nil {
            return err
        }

        // Special instructions for some inputs. cpu, for example, needs to be
        // run twice in order to return cpu usage percentages.
        switch input.Name() {
        case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
            time.Sleep(500 * time.Millisecond)
            fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
            if err := input.Input.Gather(acc); err != nil {
                return err
            }
        }
    }
    return nil
}
// flush writes a list of metrics to all configured outputs
func (a *Agent) flush() {
    var wg sync.WaitGroup

    wg.Add(len(a.Config.Outputs))
    for _, o := range a.Config.Outputs {
        go func(output *models.RunningOutput) {
            defer wg.Done()
            err := output.Write()
            if err != nil {
                log.Printf("E! Error writing to output [%s]: %s\n",
                    output.Name, err.Error())
            }
        }(o)
    }

    wg.Wait()
}

// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
    // Inelegant, but this sleep is to allow the Gather threads to run, so that
    // the flusher will flush after metrics are collected.
    time.Sleep(time.Millisecond * 300)

    // create an output metric channel and a goroutine that continuously passes
    // each metric onto the output plugins & aggregators.
    outMetricC := make(chan telegraf.Metric, 100)
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case <-shutdown:
                if len(outMetricC) > 0 {
                    // keep going until outMetricC is flushed
                    continue
                }
                return
            case m := <-outMetricC:
                // if dropOriginal is set to true, then we will only send this
                // metric to the aggregators, not the outputs.
                var dropOriginal bool
                if !m.IsAggregate() {
                    for _, agg := range a.Config.Aggregators {
                        if ok := agg.Add(m.Copy()); ok {
                            dropOriginal = true
                        }
                    }
                }
                if !dropOriginal {
                    for i, o := range a.Config.Outputs {
                        if i == len(a.Config.Outputs)-1 {
                            o.AddMetric(m)
                        } else {
                            o.AddMetric(m.Copy())
                        }
                    }
                }
            }
        }
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case <-shutdown:
                if len(aggC) > 0 {
                    // keep going until aggC is flushed
                    continue
                }
                return
            case metric := <-aggC:
                metrics := []telegraf.Metric{metric}
                for _, processor := range a.Config.Processors {
                    metrics = processor.Apply(metrics...)
                }
                for _, m := range metrics {
                    outMetricC <- m
                }
            }
        }
    }()

    ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
    semaphore := make(chan struct{}, 1)
    for {
        select {
        case <-shutdown:
            log.Println("I! Hang on, flushing any cached metrics before shutdown")
            // wait for outMetricC to get flushed before flushing outputs
            wg.Wait()
            a.flush()
            return nil
        case <-ticker.C:
            go func() {
                select {
                case semaphore <- struct{}{}:
                    internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
                    a.flush()
                    <-semaphore
                default:
                    // skipping this flush because one is already happening
                    log.Println("W! Skipping a scheduled flush because there is" +
                        " already a flush ongoing.")
                }
            }()
        case metric := <-metricC:
            // NOTE potential bottleneck here as we put each metric through the
            // processors serially.
            mS := []telegraf.Metric{metric}
            for _, processor := range a.Config.Processors {
                mS = processor.Apply(mS...)
            }
            for _, m := range mS {
                outMetricC <- m
            }
        }
    }
}
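The one-slot channel used as a semaphore in the ticker branch above is a common Go idiom for "skip this run if the previous one is still going". A self-contained sketch of just that idiom:

package main

import (
    "fmt"
    "time"
)

func main() {
    semaphore := make(chan struct{}, 1) // capacity 1: at most one flush at a time

    flush := func(id int) {
        select {
        case semaphore <- struct{}{}: // acquired the slot
            time.Sleep(150 * time.Millisecond) // simulated slow flush
            fmt.Println("flush", id, "done")
            <-semaphore // release
        default:
            fmt.Println("flush", id, "skipped: one already in progress")
        }
    }

    go flush(1)
    time.Sleep(10 * time.Millisecond)
    flush(2) // overlaps flush 1, so it is skipped
    time.Sleep(300 * time.Millisecond)
}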
// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
    var wg sync.WaitGroup

    log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
        "Flush Interval:%s\n",
        a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
        a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

    // channel shared between all input threads for accumulating metrics
    metricC := make(chan telegraf.Metric, 100)
    aggC := make(chan telegraf.Metric, 100)

    now := time.Now()

    // Start all ServicePlugins
    for _, input := range a.Config.Inputs {
        input.SetDefaultTags(a.Config.Tags)
        switch p := input.Input.(type) {
        case telegraf.ServiceInput:
            acc := NewAccumulator(input, metricC)
            // Service input plugins should set the precision of their own
            // metrics.
            acc.SetPrecision(time.Nanosecond, 0)
            if err := p.Start(acc); err != nil {
                log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
                    input.Name(), err.Error())
                return err
            }
            defer p.Stop()
        }
    }

    // Round collection to nearest interval by sleeping
    if a.Config.Agent.RoundInterval {
        i := int64(a.Config.Agent.Interval.Duration)
        time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
    }

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := a.flusher(shutdown, metricC, aggC); err != nil {
            log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
            close(shutdown)
        }
    }()

    wg.Add(len(a.Config.Aggregators))
    for _, aggregator := range a.Config.Aggregators {
        go func(agg *models.RunningAggregator) {
            defer wg.Done()
            acc := NewAccumulator(agg, aggC)
            acc.SetPrecision(a.Config.Agent.Precision.Duration,
                a.Config.Agent.Interval.Duration)
            agg.Run(acc, now, shutdown)
        }(aggregator)
    }

    wg.Add(len(a.Config.Inputs))
    for _, input := range a.Config.Inputs {
        interval := a.Config.Agent.Interval.Duration
        // overwrite global interval if this plugin has its own.
        if input.Config.Interval != 0 {
            interval = input.Config.Interval
        }
        go func(in *models.RunningInput, interv time.Duration) {
            defer wg.Done()
            a.gatherer(shutdown, in, interv, metricC)
        }(input, interval)
    }

    wg.Wait()
    a.Close()
    return nil
}
agent/agent_test.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package agent

import (
    "testing"

    "github.com/influxdata/telegraf/internal/config"

    // needing to load the plugins
    _ "github.com/influxdata/telegraf/plugins/inputs/all"
    // needing to load the outputs
    _ "github.com/influxdata/telegraf/plugins/outputs/all"

    "github.com/stretchr/testify/assert"
)

func TestAgent_OmitHostname(t *testing.T) {
    c := config.NewConfig()
    c.Agent.OmitHostname = true
    _, err := NewAgent(c)
    assert.NoError(t, err)
    assert.NotContains(t, c.Tags, "host")
}

func TestAgent_LoadPlugin(t *testing.T) {
    c := config.NewConfig()
    c.InputFilters = []string{"mysql"}
    err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ := NewAgent(c)
    assert.Equal(t, 1, len(a.Config.Inputs))

    c = config.NewConfig()
    c.InputFilters = []string{"foo"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 0, len(a.Config.Inputs))

    c = config.NewConfig()
    c.InputFilters = []string{"mysql", "foo"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 1, len(a.Config.Inputs))

    c = config.NewConfig()
    c.InputFilters = []string{"mysql", "redis"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Inputs))

    c = config.NewConfig()
    c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Inputs))
}

func TestAgent_LoadOutput(t *testing.T) {
    c := config.NewConfig()
    c.OutputFilters = []string{"influxdb"}
    err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ := NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"kafka"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 1, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"foo"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 0, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "foo"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "kafka"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    assert.Equal(t, 3, len(c.Outputs))
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))
}
agent_test.go (156 lines removed)
@@ -1,156 +0,0 @@
package telegraf

import (
    "github.com/stretchr/testify/assert"
    "testing"
    "time"

    "github.com/influxdb/telegraf/internal/config"

    // needing to load the plugins
    _ "github.com/influxdb/telegraf/plugins/all"
    // needing to load the outputs
    _ "github.com/influxdb/telegraf/outputs/all"
)

func TestAgent_LoadPlugin(t *testing.T) {
    c := config.NewConfig()
    c.PluginFilters = []string{"mysql"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ := NewAgent(c)
    assert.Equal(t, 1, len(a.Config.Plugins))

    c = config.NewConfig()
    c.PluginFilters = []string{"foo"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 0, len(a.Config.Plugins))

    c = config.NewConfig()
    c.PluginFilters = []string{"mysql", "foo"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 1, len(a.Config.Plugins))

    c = config.NewConfig()
    c.PluginFilters = []string{"mysql", "redis"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Plugins))

    c = config.NewConfig()
    c.PluginFilters = []string{"mysql", "foo", "redis", "bar"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Plugins))
}

func TestAgent_LoadOutput(t *testing.T) {
    c := config.NewConfig()
    c.OutputFilters = []string{"influxdb"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ := NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"foo"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 0, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "foo"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "kafka"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
    c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))
}

func TestAgent_ZeroJitter(t *testing.T) {
    flushinterval := jitterInterval(time.Duration(10*time.Second),
        time.Duration(0*time.Second))

    actual := flushinterval.Nanoseconds()
    exp := time.Duration(10 * time.Second).Nanoseconds()

    if actual != exp {
        t.Errorf("Actual %v, expected %v", actual, exp)
    }
}

func TestAgent_ZeroInterval(t *testing.T) {
    min := time.Duration(500 * time.Millisecond).Nanoseconds()
    max := time.Duration(5 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(0*time.Second),
            time.Duration(5*time.Second))
        actual := flushinterval.Nanoseconds()

        if actual > max {
            t.Errorf("Didn't expect interval %d to be > %d", actual, max)
            break
        }
        if actual < min {
            t.Errorf("Didn't expect interval %d to be < %d", actual, min)
            break
        }
    }
}

func TestAgent_ZeroBoth(t *testing.T) {
    flushinterval := jitterInterval(time.Duration(0*time.Second),
        time.Duration(0*time.Second))

    actual := flushinterval
    exp := time.Duration(500 * time.Millisecond)

    if actual != exp {
        t.Errorf("Actual %v, expected %v", actual, exp)
    }
}

func TestAgent_JitterMax(t *testing.T) {
    max := time.Duration(32 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(30*time.Second),
            time.Duration(2*time.Second))
        actual := flushinterval.Nanoseconds()
        if actual > max {
            t.Errorf("Didn't expect interval %d to be > %d", actual, max)
            break
        }
    }
}

func TestAgent_JitterMin(t *testing.T) {
    min := time.Duration(30 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(30*time.Second),
            time.Duration(2*time.Second))
        actual := flushinterval.Nanoseconds()
        if actual < min {
            t.Errorf("Didn't expect interval %d to be < %d", actual, min)
            break
        }
    }
}
aggregator.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package telegraf

// Aggregator is an interface for implementing an Aggregator plugin.
// The RunningAggregator wraps this interface and guarantees that
// Add, Push, and Reset cannot be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
    // SampleConfig returns the default configuration of the Aggregator.
    SampleConfig() string

    // Description returns a one-sentence description of the Aggregator.
    Description() string

    // Add the metric to the aggregator.
    Add(in Metric)

    // Push pushes the current aggregates to the accumulator.
    Push(acc Accumulator)

    // Reset resets the aggregator's caches and aggregates.
    Reset()
}
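For orientation, a minimal sketch of what an implementation of this interface looks like — a hypothetical aggregator that counts metrics between Push calls (the type and measurement names are illustrative, not from this diff):

package countagg

import "github.com/influxdata/telegraf"

// CountAgg counts the metrics added since the last Reset.
type CountAgg struct {
    count int64
}

func (c *CountAgg) SampleConfig() string { return "" }

func (c *CountAgg) Description() string { return "counts metrics per period" }

// Add runs without locking: RunningAggregator guarantees Add, Push, and
// Reset are never called concurrently.
func (c *CountAgg) Add(in telegraf.Metric) {
    c.count++
}

// Push emits the current aggregate to the accumulator.
func (c *CountAgg) Push(acc telegraf.Accumulator) {
    acc.AddFields("count_agg",
        map[string]interface{}{"count": c.count},
        map[string]string{})
}

// Reset clears the running state for the next aggregation period.
func (c *CountAgg) Reset() {
    c.count = 0
}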
appveyor.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
version: "{build}"

cache:
  - C:\Cache

clone_folder: C:\gopath\src\github.com\influxdata\telegraf

environment:
  GOPATH: C:\gopath

platform: x64

install:
  - IF NOT EXIST "C:\Cache" mkdir C:\Cache
  - IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
  - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
  - IF EXIST "C:\Go" rmdir /S /Q C:\Go
  - msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
  - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
  - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
  - go version
  - go env

build_script:
  - cmd: C:\GnuWin32\bin\make

test_script:
  - cmd: C:\GnuWin32\bin\make test-windows

artifacts:
  - path: telegraf.exe
circle.yml (13 lines changed)
@@ -1,17 +1,16 @@
machine:
  services:
    - docker
    - memcached
    - redis
    - rabbitmq-server
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.5.1 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.5.1.linux-amd64.tar.gz
    - sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.9.2.linux-amd64.tar.gz
    - go version

dependencies:
  cache_directories:
    - "~/telegraf-build/src"
  override:
    - docker info
@@ -4,149 +4,407 @@ import (
    "flag"
    "fmt"
    "log"
    "net/http"
    _ "net/http/pprof" // Comment this line to disable pprof endpoint.
    "os"
    "os/signal"
    "runtime"
    "strings"
    "syscall"

    "github.com/influxdb/telegraf"
    "github.com/influxdb/telegraf/internal/config"
    _ "github.com/influxdb/telegraf/outputs/all"
    _ "github.com/influxdb/telegraf/plugins/all"
    "github.com/influxdata/telegraf/agent"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/logger"
    _ "github.com/influxdata/telegraf/plugins/aggregators/all"
    "github.com/influxdata/telegraf/plugins/inputs"
    _ "github.com/influxdata/telegraf/plugins/inputs/all"
    "github.com/influxdata/telegraf/plugins/outputs"
    _ "github.com/influxdata/telegraf/plugins/outputs/all"
    _ "github.com/influxdata/telegraf/plugins/processors/all"
    "github.com/kardianos/service"
)

var fDebug = flag.Bool("debug", false,
    "show metrics as they're generated to stdout")
    "turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
    "pprof address to listen on; does not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
    "run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("configdirectory", "",
var fConfigDirectory = flag.String("config-directory", "",
    "directory containing additional *.conf files")
var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false,
    "print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fPLuginFilters = flag.String("filter", "",
    "filter the plugins to enable, separator is :")
var fOutputFilters = flag.String("outputfilter", "",
var fInputFilters = flag.String("input-filter", "",
    "filter the inputs to enable, separator is :")
var fInputList = flag.Bool("input-list", false,
    "print available input plugins.")
var fOutputFilters = flag.String("output-filter", "",
    "filter the outputs to enable, separator is :")
var fOutputList = flag.Bool("output-list", false,
    "print available output plugins.")
var fAggregatorFilters = flag.String("aggregator-filter", "",
    "filter the aggregators to enable, separator is :")
var fProcessorFilters = flag.String("processor-filter", "",
    "filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
    "print usage for a plugin, ie, 'telegraf -usage mysql'")
    "print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
    "operate on the service")

// Telegraf version
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string
var (
    nextVersion = "1.5.0"
    version     string
    commit      string
    branch      string
)

func main() {
    flag.Parse()

    var pluginFilters []string
    if *fPLuginFilters != "" {
        pluginsFilter := strings.TrimSpace(*fPLuginFilters)
        pluginFilters = strings.Split(":"+pluginsFilter+":", ":")
func init() {
    // If commit or branch are not set, make that clear.
    if commit == "" {
        commit = "unknown"
    }

    var outputFilters []string
    if *fOutputFilters != "" {
        outputFilter := strings.TrimSpace(*fOutputFilters)
        outputFilters = strings.Split(":"+outputFilter+":", ":")
    if branch == "" {
        branch = "unknown"
    }
}

    if *fVersion {
        v := fmt.Sprintf("Telegraf - Version %s", Version)
        fmt.Println(v)
        return
    }
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

    if *fSampleConfig {
        config.PrintSampleConfig(pluginFilters, outputFilters)
        return
    }
Usage:

    if *fUsage != "" {
        if err := config.PrintPluginConfig(*fUsage); err != nil {
            if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
                log.Fatalf("%s and %s", err, err2)
    telegraf [commands|flags]

The commands & flags are:

    config              print out full sample configuration to stdout
    version             print the version to stdout

    --config <file>     configuration file to load
    --test              gather metrics once, print them to stdout, and exit
    --config-directory  directory containing additional *.conf files
    --input-filter      filter the input plugins to enable, separator is :
    --output-filter     filter the output plugins to enable, separator is :
    --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
    --debug             print metrics as they're generated to stdout
    --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
    --quiet             run in quiet mode

Examples:

    # generate a telegraf config file:
    telegraf config > telegraf.conf

    # generate config with only cpu input & influxdb output plugins defined
    telegraf --input-filter cpu --output-filter influxdb config

    # run a single telegraf collection, outputting metrics to stdout
    telegraf --config telegraf.conf --test

    # run telegraf with all plugins defined in config file
    telegraf --config telegraf.conf

    # run telegraf, enabling the cpu & memory input, and influxdb output plugins
    telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

    # run telegraf with pprof
    telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}

func reloadLoop(
    stop chan struct{},
    inputFilters []string,
    outputFilters []string,
    aggregatorFilters []string,
    processorFilters []string,
) {
    reload := make(chan bool, 1)
    reload <- true
    for <-reload {
        reload <- false

        // If no other options are specified, load the config file and run.
        c := config.NewConfig()
        c.OutputFilters = outputFilters
        c.InputFilters = inputFilters
        err := c.LoadConfig(*fConfig)
        if err != nil {
            log.Fatal("E! " + err.Error())
        }

        if *fConfigDirectory != "" {
            err = c.LoadDirectory(*fConfigDirectory)
            if err != nil {
                log.Fatal("E! " + err.Error())
            }
        }
        if !*fTest && len(c.Outputs) == 0 {
            log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
|
||||
}
|
||||
if len(c.Inputs) == 0 {
|
||||
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
|
||||
}
|
||||
|
||||
if int64(c.Agent.Interval.Duration) <= 0 {
|
||||
log.Fatalf("E! Agent interval must be positive, found %s",
|
||||
c.Agent.Interval.Duration)
|
||||
}
|
||||
|
||||
if int64(c.Agent.FlushInterval.Duration) <= 0 {
|
||||
log.Fatalf("E! Agent flush_interval must be positive; found %s",
|
||||
c.Agent.Interval.Duration)
|
||||
}
|
||||
|
||||
ag, err := agent.NewAgent(c)
|
||||
if err != nil {
|
||||
log.Fatal("E! " + err.Error())
|
||||
}
|
||||
|
||||
// Setup logging
|
||||
logger.SetupLogging(
|
||||
ag.Config.Agent.Debug || *fDebug,
|
||||
ag.Config.Agent.Quiet || *fQuiet,
|
||||
ag.Config.Agent.Logfile,
|
||||
)
|
||||
|
||||
if *fTest {
|
||||
err = ag.Test()
|
||||
if err != nil {
|
||||
log.Fatal("E! " + err.Error())
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
err = ag.Connect()
|
||||
if err != nil {
|
||||
log.Fatal("E! " + err.Error())
|
||||
}
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
signals := make(chan os.Signal)
|
||||
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
|
||||
go func() {
|
||||
select {
|
||||
case sig := <-signals:
|
||||
if sig == os.Interrupt {
|
||||
close(shutdown)
|
||||
}
|
||||
if sig == syscall.SIGHUP {
|
||||
log.Printf("I! Reloading Telegraf config\n")
|
||||
<-reload
|
||||
reload <- true
|
||||
close(shutdown)
|
||||
}
|
||||
case <-stop:
|
||||
close(shutdown)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Printf("I! Starting Telegraf %s\n", displayVersion())
|
||||
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
||||
log.Printf("I! Tags enabled: %s", c.ListTags())
|
||||
|
||||
if *fPidfile != "" {
|
||||
f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
log.Printf("E! Unable to create pidfile: %s", err)
|
||||
} else {
|
||||
fmt.Fprintf(f, "%d\n", os.Getpid())
|
||||
|
||||
f.Close()
|
||||
|
||||
defer func() {
|
||||
err := os.Remove(*fPidfile)
|
||||
if err != nil {
|
||||
log.Printf("E! Unable to remove pidfile: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
ag.Run(shutdown)
|
||||
}
|
||||
}
|
||||
|
||||
func usageExit(rc int) {
|
||||
fmt.Println(usage)
|
||||
os.Exit(rc)
|
||||
}
|
||||
|
||||
type program struct {
|
||||
inputFilters []string
|
||||
outputFilters []string
|
||||
aggregatorFilters []string
|
||||
processorFilters []string
|
||||
}
|
||||
|
||||
func (p *program) Start(s service.Service) error {
|
||||
go p.run()
|
||||
return nil
|
||||
}
|
||||
func (p *program) run() {
|
||||
stop = make(chan struct{})
|
||||
reloadLoop(
|
||||
stop,
|
||||
p.inputFilters,
|
||||
p.outputFilters,
|
||||
p.aggregatorFilters,
|
||||
p.processorFilters,
|
||||
)
|
||||
}
|
||||
func (p *program) Stop(s service.Service) error {
|
||||
close(stop)
|
||||
return nil
|
||||
}
|
||||
|
||||
func displayVersion() string {
|
||||
if version == "" {
|
||||
return fmt.Sprintf("v%s~%s", nextVersion, commit)
|
||||
}
|
||||
return "v" + version
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = func() { usageExit(0) }
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
|
||||
inputFilters, outputFilters := []string{}, []string{}
|
||||
if *fInputFilters != "" {
|
||||
inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
|
||||
}
|
||||
if *fOutputFilters != "" {
|
||||
outputFilters = strings.Split(":"+strings.TrimSpace(*fOutputFilters)+":", ":")
|
||||
}
|
||||
|
||||
aggregatorFilters, processorFilters := []string{}, []string{}
|
||||
if *fAggregatorFilters != "" {
|
||||
aggregatorFilters = strings.Split(":"+strings.TrimSpace(*fAggregatorFilters)+":", ":")
|
||||
}
|
||||
if *fProcessorFilters != "" {
|
||||
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
|
||||
}
|
||||
|
||||
if *pprofAddr != "" {
|
||||
go func() {
|
||||
pprofHostPort := *pprofAddr
|
||||
parts := strings.Split(pprofHostPort, ":")
|
||||
if len(parts) == 2 && parts[0] == "" {
|
||||
pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
|
||||
}
|
||||
pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"
|
||||
|
||||
log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)
|
||||
|
||||
if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
|
||||
log.Fatal("E! " + err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if len(args) > 0 {
|
||||
switch args[0] {
|
||||
case "version":
|
||||
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
|
||||
return
|
||||
case "config":
|
||||
config.PrintSampleConfig(
|
||||
inputFilters,
|
||||
outputFilters,
|
||||
aggregatorFilters,
|
||||
processorFilters,
|
||||
)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// switch for flags which just do something and exit immediately
|
||||
switch {
|
||||
case *fOutputList:
|
||||
fmt.Println("Available Output Plugins:")
|
||||
for k, _ := range outputs.Outputs {
|
||||
fmt.Printf(" %s\n", k)
|
||||
}
|
||||
return
|
||||
case *fInputList:
|
||||
fmt.Println("Available Input Plugins:")
|
||||
for k, _ := range inputs.Inputs {
|
||||
fmt.Printf(" %s\n", k)
|
||||
}
|
||||
return
|
||||
case *fVersion:
|
||||
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
|
||||
return
|
||||
case *fSampleConfig:
|
||||
config.PrintSampleConfig(
|
||||
inputFilters,
|
||||
outputFilters,
|
||||
aggregatorFilters,
|
||||
processorFilters,
|
||||
)
|
||||
return
|
||||
case *fUsage != "":
|
||||
err := config.PrintInputConfig(*fUsage)
|
||||
err2 := config.PrintOutputConfig(*fUsage)
|
||||
if err != nil && err2 != nil {
|
||||
log.Fatalf("E! %s and %s", err, err2)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
c *config.Config
|
||||
err error
|
||||
)
|
||||
if runtime.GOOS == "windows" {
|
||||
svcConfig := &service.Config{
|
||||
Name: "telegraf",
|
||||
DisplayName: "Telegraf Data Collector Service",
|
||||
Description: "Collects data using a series of plugins and publishes it to" +
|
||||
"another series of plugins.",
|
||||
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
|
||||
}
|
||||
|
||||
if *fConfig != "" {
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = outputFilters
|
||||
c.PluginFilters = pluginFilters
|
||||
err = c.LoadConfig(*fConfig)
|
||||
prg := &program{
|
||||
inputFilters: inputFilters,
|
||||
outputFilters: outputFilters,
|
||||
aggregatorFilters: aggregatorFilters,
|
||||
processorFilters: processorFilters,
|
||||
}
|
||||
s, err := service.New(prg, svcConfig)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
log.Fatal("E! " + err.Error())
|
||||
}
|
||||
// Handle the -service flag here to prevent any issues with tooling that
|
||||
// may not have an interactive session, e.g. installing from Ansible.
|
||||
if *fService != "" {
|
||||
if *fConfig != "" {
|
||||
(*svcConfig).Arguments = []string{"-config", *fConfig}
|
||||
}
|
||||
if *fConfigDirectory != "" {
|
||||
(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
|
||||
}
|
||||
err := service.Control(s, *fService)
|
||||
if err != nil {
|
||||
log.Fatal("E! " + err.Error())
|
||||
}
|
||||
os.Exit(0)
|
||||
} else {
|
||||
err = s.Run()
|
||||
if err != nil {
|
||||
log.Println("E! " + err.Error())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Usage: Telegraf")
|
||||
flag.PrintDefaults()
|
||||
return
|
||||
stop = make(chan struct{})
|
||||
reloadLoop(
|
||||
stop,
|
||||
inputFilters,
|
||||
outputFilters,
|
||||
aggregatorFilters,
|
||||
processorFilters,
|
||||
)
|
||||
}
|
||||
|
||||
if *fConfigDirectory != "" {
|
||||
err = c.LoadDirectory(*fConfigDirectory)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
if len(c.Outputs) == 0 {
|
||||
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
||||
}
|
||||
if len(c.Plugins) == 0 {
|
||||
log.Fatalf("Error: no plugins found, did you provide a valid config file?")
|
||||
}
|
||||
|
||||
ag, err := telegraf.NewAgent(c)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if *fDebug {
|
||||
ag.Config.Agent.Debug = true
|
||||
}
|
||||
|
||||
if *fTest {
|
||||
err = ag.Test()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
err = ag.Connect()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
signals := make(chan os.Signal)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
go func() {
|
||||
<-signals
|
||||
close(shutdown)
|
||||
}()
|
||||
|
||||
log.Printf("Starting Telegraf (version %s)\n", Version)
|
||||
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||
log.Printf("Loaded plugins: %s", strings.Join(c.PluginNames(), " "))
|
||||
log.Printf("Tags enabled: %s", c.ListTags())
|
||||
|
||||
if *fPidfile != "" {
|
||||
f, err := os.Create(*fPidfile)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to create pidfile: %s", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, "%d\n", os.Getpid())
|
||||
|
||||
f.Close()
|
||||
}
|
||||
|
||||
ag.Run(shutdown)
|
||||
}
|
||||
|
||||
64 docs/AGGREGATORS_AND_PROCESSORS.md Normal file
@@ -0,0 +1,64 @@
|
||||
# Telegraf Aggregator & Processor Plugins
|
||||
|
||||
As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugins.
|
||||
|
||||
These plugins sit in-between Input & Output plugins, aggregating and processing
|
||||
metrics as they pass through Telegraf:
|
||||
|
||||
```
┌───────────┐
│           │
│    CPU    │───┐
│           │   │
└───────────┘   │
                │
┌───────────┐   │                                              ┌───────────┐
│           │   │                                              │           │
│  Memory   │───┤                                          ┌──▶│ InfluxDB  │
│           │   │                                          │   │           │
└───────────┘   │   ┌─────────────┐     ┌─────────────┐    │   └───────────┘
                │   │             │     │Aggregate    │    │
┌───────────┐   │   │Process      │     │ - mean      │    │   ┌───────────┐
│           │   │   │ - transform │     │ - quantiles │    │   │           │
│   MySQL   │───┼──▶│ - decorate  │────▶│ - min/max   │────┼──▶│   File    │
│           │   │   │ - filter    │     │ - count     │    │   │           │
└───────────┘   │   │             │     │             │    │   └───────────┘
                │   └─────────────┘     └─────────────┘    │
┌───────────┐   │                                          │   ┌───────────┐
│           │   │                                          │   │           │
│   SNMP    │───┤                                          └──▶│   Kafka   │
│           │   │                                              │           │
└───────────┘   │                                              └───────────┘
                │
┌───────────┐   │
│           │   │
│  Docker   │───┘
│           │
└───────────┘
```
|
||||
|
||||
Both Aggregators and Processors analyze metrics as they pass through Telegraf.
|
||||
|
||||
Use [measurement filtering](CONFIGURATION.md#measurement-filtering)
|
||||
to control which metrics are passed through a processor or aggregator. If a
metric is filtered out, it bypasses the plugin and is passed downstream
to the next plugin.
|
||||
|
||||
**Processor** plugins process metrics as they pass through and immediately emit
|
||||
results based on the values they process. For example, this could be printing
|
||||
all metrics or adding a tag to all metrics that pass through.
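
For illustration, a minimal sketch using the `printer` processor (shown in the
configuration docs), which simply prints every metric that passes through it:

```toml
# print every metric as it passes through (sketch)
[[processors.printer]]
```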
|
||||
|
||||
**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
|
||||
are typically for emitting new _aggregate_ metrics, such as a running mean,
|
||||
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
|
||||
plugins are configured with a `period`. The `period` is the size of the window
|
||||
of metrics that each _aggregate_ represents. In other words, the emitted
|
||||
_aggregate_ metric will be the aggregated value of the past `period` seconds.
|
||||
Since many users will only care about their aggregates and not every single metric
|
||||
gathered, there is also a `drop_original` argument, which tells Telegraf to only
|
||||
emit the aggregates and not the original metrics.
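
As a sketch, using the `minmax` aggregator from the configuration docs:

```toml
[[aggregators.minmax]]
  ## each emitted aggregate summarizes a 30s window of metrics
  period = "30s"
  ## emit only the aggregates, dropping the metrics they were computed from
  drop_original = true
```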
|
||||
|
||||
**NOTE** Since aggregators only aggregate metrics within their period, historical
data is not supported: if your metric timestamp is more
than `now() - period` in the past, it will not be aggregated. If this is a feature
that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992).
|
||||
403 docs/CONFIGURATION.md Normal file
@@ -0,0 +1,403 @@
|
||||
# Telegraf Configuration
|
||||
|
||||
You can see the latest config file with all available plugins here:
|
||||
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
|
||||
|
||||
## Generating a Configuration File
|
||||
|
||||
A default Telegraf config file can be auto-generated by telegraf:
|
||||
|
||||
```
|
||||
telegraf config > telegraf.conf
|
||||
```
|
||||
|
||||
To generate a file with specific inputs and outputs, you can use the
|
||||
--input-filter and --output-filter flags:
|
||||
|
||||
```
|
||||
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Environment variables can be used anywhere in the config file; simply prepend
them with `$`. For strings, the variable must be within quotes (e.g., `"$STR_VAR"`);
for numbers and booleans, it should be plain (e.g., `$INT_VAR`, `$BOOL_VAR`).
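
For example, a minimal sketch (assuming `INFLUX_URL` and `BATCH_SIZE` are
exported in the environment; both names are illustrative):

```toml
[agent]
  # numbers are substituted unquoted
  metric_batch_size = $BATCH_SIZE

[[outputs.influxdb]]
  # strings must be quoted
  urls = ["$INFLUX_URL"]
```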
|
||||
|
||||
When using the `.deb` or `.rpm` packages, you can define environment variables
|
||||
in the `/etc/default/telegraf` file.
|
||||
|
||||
## Configuration file locations
|
||||
|
||||
The location of the configuration file can be set via the `--config` command
|
||||
line flag.
|
||||
|
||||
When the `--config-directory` command line flag is used files ending with
|
||||
`.conf` in the specified directory will also be included in the Telegraf
|
||||
configuration.
|
||||
|
||||
On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
|
||||
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
|
||||
configuration files.
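
For example, to run with the default locations stated explicitly:

```
telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d
```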
|
||||
|
||||
## Global Tags
|
||||
|
||||
Global tags can be specified in the `[global_tags]` section of the config file
|
||||
in key="value" format. All metrics being gathered on this host will be tagged
|
||||
with the tags specified here.
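
For example (tag keys and values here are illustrative):

```toml
[global_tags]
  dc = "us-east-1"
  rack = "1a"
```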
|
||||
|
||||
## Agent Configuration
|
||||
|
||||
Telegraf has a few options you can configure under the `[agent]` section of the
|
||||
config.
|
||||
|
||||
* **interval**: Default data collection interval for all inputs
|
||||
* **round_interval**: Rounds collection interval to 'interval'
|
||||
ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||
* **metric_batch_size**: Telegraf will send metrics to outputs in batches of at
most metric_batch_size metrics.
|
||||
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
|
||||
for each output, and will flush this buffer on a successful write.
|
||||
This should be a multiple of metric_batch_size and should not be less
than 2 times metric_batch_size.
|
||||
* **collection_jitter**: Collection jitter is used to jitter
|
||||
the collection by a random amount.
|
||||
Each plugin will sleep for a random time within jitter before collecting.
|
||||
This can be used to avoid many plugins querying things like sysfs at the
|
||||
same time, which can have a measurable effect on the system.
|
||||
* **flush_interval**: Default data flushing interval for all outputs.
|
||||
You should not set this below
|
||||
interval. Maximum flush_interval will be flush_interval + flush_jitter
|
||||
* **flush_jitter**: Jitter the flush interval by a random amount.
|
||||
This is primarily to avoid
|
||||
large write spikes for users running a large number of telegraf instances.
|
||||
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
|
||||
* **precision**:
|
||||
By default or when set to "0s", precision is set to the same
order of magnitude as the collection interval, with a maximum of 1s.
|
||||
Precision will NOT be used for service inputs. It is up to each individual
|
||||
service input to set the timestamp at the appropriate precision.
|
||||
Valid time units are "ns", "us" (or "µs"), "ms", "s".
|
||||
|
||||
* **logfile**: Specify the log file name. The empty string means to log to stderr.
|
||||
* **debug**: Run telegraf in debug mode.
|
||||
* **quiet**: Run telegraf in quiet mode (error messages only).
|
||||
* **hostname**: Override default hostname, if empty use os.Hostname().
|
||||
* **omit_hostname**: If true, do not set the "host" tag in the telegraf agent.
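
Putting several of these options together, a sketch of an `[agent]` section
(the values shown are illustrative, not recommendations):

```toml
[agent]
  interval = "10s"
  round_interval = true
  metric_batch_size = 1000
  metric_buffer_limit = 10000
  collection_jitter = "0s"
  flush_interval = "10s"
  flush_jitter = "5s"
  precision = "0s"
  debug = false
  quiet = false
  logfile = "/var/log/telegraf/telegraf.log"
  hostname = ""
  omit_hostname = false
```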
|
||||
|
||||
## Input Configuration
|
||||
|
||||
The following config parameters are available for all inputs:
|
||||
|
||||
* **interval**: How often to gather this metric. Normal plugins use a single
|
||||
global interval, but if one particular input should be run less or more often,
|
||||
you can configure that here.
|
||||
* **name_override**: Override the base name of the measurement.
|
||||
(Default is the name of the input).
|
||||
* **name_prefix**: Specifies a prefix to attach to the measurement name.
|
||||
* **name_suffix**: Specifies a suffix to attach to the measurement name.
|
||||
* **tags**: A map of tags to apply to a specific input's measurements.
|
||||
|
||||
The [measurement filtering](#measurement-filtering) parameters can be used to
|
||||
limit what metrics are emitted from the input plugin.
|
||||
|
||||
## Output Configuration
|
||||
|
||||
The [measurement filtering](#measurement-filtering) parameters can be used to
|
||||
limit what metrics are emitted from the output plugin.
|
||||
|
||||
## Aggregator Configuration
|
||||
|
||||
The following config parameters are available for all aggregators:
|
||||
|
||||
* **period**: The period on which to flush & clear each aggregator. All metrics
|
||||
that are sent with timestamps outside of this period will be ignored by the
|
||||
aggregator.
|
||||
* **delay**: The delay before each aggregator is flushed. This is to control
|
||||
how long for aggregators to wait before receiving metrics from input plugins,
|
||||
in the case that aggregators are flushing and inputs are gathering on the
|
||||
same interval.
|
||||
* **drop_original**: If true, the original metric will be dropped by the
|
||||
aggregator and will not get sent to the output plugins.
|
||||
* **name_override**: Override the base name of the measurement.
|
||||
(Default is the name of the input).
|
||||
* **name_prefix**: Specifies a prefix to attach to the measurement name.
|
||||
* **name_suffix**: Specifies a suffix to attach to the measurement name.
|
||||
* **tags**: A map of tags to apply to a specific input's measurements.
|
||||
|
||||
The [measurement filtering](#measurement-filtering) parameters can be used to
|
||||
limit what metrics are handled by the aggregator. Excluded metrics are passed
|
||||
downstream to the next aggregator.
|
||||
|
||||
## Processor Configuration
|
||||
|
||||
The following config parameters are available for all processors:
|
||||
|
||||
* **order**: This is the order in which the processor(s) get executed. If this
|
||||
is not specified, processor execution order will be random (see the sketch below).
|
||||
|
||||
The [measurement filtering](#measurement-filtering) parameters may be used
|
||||
to limit what metrics are handled by the processor. Excluded metrics are
|
||||
passed downstream to the next processor.
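
For example, a sketch that pins a processor's position in the chain (using the
`printer` processor from the examples below):

```toml
[[processors.printer]]
  # run this processor first
  order = 1
```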
|
||||
|
||||
#### Measurement Filtering
|
||||
|
||||
Filters can be configured per input, output, processor, or aggregator,
|
||||
see below for examples.
|
||||
|
||||
* **namepass**:
|
||||
An array of glob pattern strings. Only points whose measurement name matches
|
||||
a pattern in this list are emitted.
|
||||
* **namedrop**:
|
||||
The inverse of `namepass`. If a match is found the point is discarded. This
|
||||
is tested on points after they have passed the `namepass` test.
|
||||
* **fieldpass**:
|
||||
An array of glob pattern strings. Only fields whose field key matches a
|
||||
pattern in this list are emitted. Not available for outputs.
|
||||
* **fielddrop**:
|
||||
The inverse of `fieldpass`. Fields with a field key matching one of the
|
||||
patterns will be discarded from the point. This is tested on points after
|
||||
they have passed the `fieldpass` test. Not available for outputs.
|
||||
* **tagpass**:
|
||||
A table mapping tag keys to arrays of glob pattern strings. Only points
|
||||
that contain a tag key in the table and a tag value matching one of its
|
||||
patterns are emitted.
|
||||
* **tagdrop**:
|
||||
The inverse of `tagpass`. If a match is found the point is discarded. This
|
||||
is tested on points after they have passed the `tagpass` test.
|
||||
* **taginclude**:
|
||||
An array of glob pattern strings. Only tags with a tag key matching one of
|
||||
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
|
||||
point based on its tag, `taginclude` removes all non-matching tags from the
|
||||
point. This filter can be used on both inputs & outputs, but it is
|
||||
_recommended_ to be used on inputs, as it is more efficient to filter out tags
|
||||
at the ingestion point.
|
||||
* **tagexclude**:
|
||||
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
|
||||
will be discarded from the point.
|
||||
|
||||
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
|
||||
must be defined at the _end_ of the plugin definition, otherwise subsequent
|
||||
plugin config options will be interpreted as part of the tagpass/tagdrop
|
||||
tables.
|
||||
|
||||
#### Input Configuration Examples
|
||||
|
||||
This is a full working config that will output CPU data to an InfluxDB instance
|
||||
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
|
||||
measurements at a 10s interval and will collect per-cpu data, dropping any
|
||||
fields which begin with `time_`.
|
||||
|
||||
```toml
|
||||
[global_tags]
|
||||
dc = "denver-1"
|
||||
|
||||
[agent]
|
||||
interval = "10s"
|
||||
|
||||
# OUTPUTS
|
||||
[[outputs.influxdb]]
|
||||
url = "http://192.168.59.103:8086" # required.
|
||||
database = "telegraf" # required.
|
||||
|
||||
# INPUTS
|
||||
[[inputs.cpu]]
|
||||
percpu = true
|
||||
totalcpu = false
|
||||
# filter all fields beginning with 'time_'
|
||||
fielddrop = ["time_*"]
|
||||
```
|
||||
|
||||
#### Input Config: tagpass and tagdrop
|
||||
|
||||
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
|
||||
the plugin definition, otherwise subsequent plugin config options will be
|
||||
interpreted as part of the tagpass/tagdrop map.
|
||||
|
||||
```toml
|
||||
[[inputs.cpu]]
|
||||
percpu = true
|
||||
totalcpu = false
|
||||
fielddrop = ["cpu_time"]
|
||||
# Don't collect CPU data for cpu6 & cpu7
|
||||
[inputs.cpu.tagdrop]
|
||||
cpu = [ "cpu6", "cpu7" ]
|
||||
|
||||
[[inputs.disk]]
|
||||
[inputs.disk.tagpass]
|
||||
# tagpass conditions are OR, not AND.
|
||||
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
|
||||
# then the metric passes
|
||||
fstype = [ "ext4", "xfs" ]
|
||||
# Globs can also be used on the tag values
|
||||
path = [ "/opt", "/home*" ]
|
||||
```
|
||||
|
||||
#### Input Config: fieldpass and fielddrop
|
||||
|
||||
```toml
|
||||
# Drop all metrics for guest & steal CPU usage
|
||||
[[inputs.cpu]]
|
||||
percpu = false
|
||||
totalcpu = true
|
||||
fielddrop = ["usage_guest", "usage_steal"]
|
||||
|
||||
# Only store inode related metrics for disks
|
||||
[[inputs.disk]]
|
||||
fieldpass = ["inodes*"]
|
||||
```
|
||||
|
||||
#### Input Config: namepass and namedrop
|
||||
|
||||
```toml
|
||||
# Drop all metrics about containers for kubelet
|
||||
[[inputs.prometheus]]
|
||||
urls = ["http://kube-node-1:4194/metrics"]
|
||||
namedrop = ["container_*"]
|
||||
|
||||
# Only store rest client related metrics for kubelet
|
||||
[[inputs.prometheus]]
|
||||
urls = ["http://kube-node-1:4194/metrics"]
|
||||
namepass = ["rest_client_*"]
|
||||
```
|
||||
|
||||
#### Input Config: taginclude and tagexclude
|
||||
|
||||
```toml
|
||||
# Only include the "cpu" tag in the measurements for the cpu plugin.
|
||||
[[inputs.cpu]]
|
||||
percpu = true
|
||||
totalcpu = true
|
||||
taginclude = ["cpu"]
|
||||
|
||||
# Exclude the "fstype" tag from the measurements for the disk plugin.
|
||||
[[inputs.disk]]
|
||||
tagexclude = ["fstype"]
|
||||
```
|
||||
|
||||
#### Input config: prefix, suffix, and override
|
||||
|
||||
This plugin will emit measurements with the name `cpu_total`
|
||||
|
||||
```toml
|
||||
[[inputs.cpu]]
|
||||
name_suffix = "_total"
|
||||
percpu = false
|
||||
totalcpu = true
|
||||
```
|
||||
|
||||
This will emit measurements with the name `foobar`
|
||||
|
||||
```toml
|
||||
[[inputs.cpu]]
|
||||
name_override = "foobar"
|
||||
percpu = false
|
||||
totalcpu = true
|
||||
```
|
||||
|
||||
#### Input config: tags
|
||||
|
||||
This plugin will emit measurements with two additional tags: `tag1=foo` and
|
||||
`tag2=bar`
|
||||
|
||||
NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the
|
||||
plugin definition.
|
||||
|
||||
```toml
|
||||
[[inputs.cpu]]
|
||||
percpu = false
|
||||
totalcpu = true
|
||||
[inputs.cpu.tags]
|
||||
tag1 = "foo"
|
||||
tag2 = "bar"
|
||||
```
|
||||
|
||||
#### Multiple inputs of the same type
|
||||
|
||||
Additional inputs (or outputs) of the same type can be specified;
just define more instances in the config file. It is highly recommended that
|
||||
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
|
||||
to avoid measurement collisions:
|
||||
|
||||
```toml
|
||||
[[inputs.cpu]]
|
||||
percpu = false
|
||||
totalcpu = true
|
||||
|
||||
[[inputs.cpu]]
|
||||
percpu = true
|
||||
totalcpu = false
|
||||
name_override = "percpu_usage"
|
||||
fielddrop = ["cpu_time*"]
|
||||
```
|
||||
|
||||
#### Output Configuration Examples:
|
||||
|
||||
```toml
|
||||
[[outputs.influxdb]]
|
||||
urls = [ "http://localhost:8086" ]
|
||||
database = "telegraf"
|
||||
# Drop all measurements that start with "aerospike"
|
||||
namedrop = ["aerospike*"]
|
||||
|
||||
[[outputs.influxdb]]
|
||||
urls = [ "http://localhost:8086" ]
|
||||
database = "telegraf-aerospike-data"
|
||||
# Only accept aerospike data:
|
||||
namepass = ["aerospike*"]
|
||||
|
||||
[[outputs.influxdb]]
|
||||
urls = [ "http://localhost:8086" ]
|
||||
database = "telegraf-cpu0-data"
|
||||
# Only store measurements where the tag "cpu" matches the value "cpu0"
|
||||
[outputs.influxdb.tagpass]
|
||||
cpu = ["cpu0"]
|
||||
```
|
||||
|
||||
#### Aggregator Configuration Examples:
|
||||
|
||||
This will collect and emit the min/max of the system load1 metric every
|
||||
30s, dropping the originals.
|
||||
|
||||
```toml
|
||||
[[inputs.system]]
|
||||
fieldpass = ["load1"] # collects system load1 metric.
|
||||
|
||||
[[aggregators.minmax]]
|
||||
period = "30s" # send & clear the aggregate every 30s.
|
||||
drop_original = true # drop the original metrics.
|
||||
|
||||
[[outputs.file]]
|
||||
files = ["stdout"]
|
||||
```
|
||||
|
||||
This will collect and emit the min/max of the swap metrics every
|
||||
30s, dropping the originals. The aggregator will not be applied
|
||||
to the system load metrics due to the `namepass` parameter.
|
||||
|
||||
```toml
|
||||
[[inputs.swap]]
|
||||
|
||||
[[inputs.system]]
|
||||
fieldpass = ["load1"] # collects system load1 metric.
|
||||
|
||||
[[aggregators.minmax]]
|
||||
period = "30s" # send & clear the aggregate every 30s.
|
||||
drop_original = true # drop the original metrics.
|
||||
namepass = ["swap"] # only "pass" swap metrics through the aggregator.
|
||||
|
||||
[[outputs.file]]
|
||||
files = ["stdout"]
|
||||
```
|
||||
|
||||
#### Processor Configuration Examples:
|
||||
|
||||
Print only the metrics with `cpu` as the measurement name; all metrics are
passed to the output:
|
||||
```toml
|
||||
[[processors.printer]]
|
||||
namepass = "cpu"
|
||||
|
||||
[[outputs.file]]
|
||||
files = ["/tmp/metrics.out"]
|
||||
```
|
||||
481 docs/DATA_FORMATS_INPUT.md Normal file
@@ -0,0 +1,481 @@
|
||||
# Telegraf Input Data Formats
|
||||
|
||||
Telegraf is able to parse the following input data formats into metrics:
|
||||
|
||||
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
|
||||
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
|
||||
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
|
||||
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), e.g. 45 or "booyah"
|
||||
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
|
||||
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
|
||||
|
||||
Telegraf metrics, like InfluxDB
|
||||
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
|
||||
are a combination of four basic parts:
|
||||
|
||||
1. Measurement Name
|
||||
1. Tags
|
||||
1. Fields
|
||||
1. Timestamp
|
||||
|
||||
These four parts are easily defined when using InfluxDB line-protocol as a
|
||||
data format. But there are other data formats that users may want to use which
|
||||
require more advanced configuration to create usable Telegraf metrics.
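
For reference, a single line-protocol point carrying all four parts looks like
(the values are illustrative):

```
cpu,host=server01 usage_idle=98.2 1455320660004257758
```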
|
||||
|
||||
Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now,
|
||||
these plugins were statically configured to parse just a single
|
||||
data format. `exec` mostly supported parsing JSON, and `kafka_consumer` only
supported data in InfluxDB line-protocol.
|
||||
|
||||
But now we are normalizing the parsing of various data formats across all
|
||||
plugins that can support it. You will be able to identify a plugin that supports
|
||||
different data formats by the presence of a `data_format` config option, for
|
||||
example, in the exec plugin:
|
||||
|
||||
```toml
|
||||
[[inputs.exec]]
|
||||
## Commands array
|
||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
||||
|
||||
## measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "json"
|
||||
|
||||
## Additional configuration options go here
|
||||
```
|
||||
|
||||
Each data_format has an additional set of configuration options available, which
|
||||
I'll go over below.
|
||||
|
||||
# Influx:
|
||||
|
||||
There are no additional configuration options for InfluxDB line-protocol. The
|
||||
metrics are parsed directly into Telegraf metrics.
|
||||
|
||||
#### Influx Configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.exec]]
|
||||
## Commands array
|
||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
||||
|
||||
## measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "influx"
|
||||
```
|
||||
|
||||
# JSON:
|
||||
|
||||
The JSON data format flattens JSON into metric _fields_.
|
||||
NOTE: Only numerical values are converted to fields, and they are converted
into a float. Strings are ignored unless specified as a tag_key (see below).
|
||||
|
||||
So for example, this JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"a": 5,
|
||||
"b": {
|
||||
"c": 6
|
||||
},
|
||||
"ignored": "I'm a string"
|
||||
}
|
||||
```
|
||||
|
||||
Would get translated into _fields_ of a measurement:
|
||||
|
||||
```
|
||||
myjsonmetric a=5,b_c=6
|
||||
```
|
||||
|
||||
The _measurement name_ is usually the name of the plugin,
|
||||
but can be overridden using the `name_override` config option.
|
||||
|
||||
#### JSON Configuration:
|
||||
|
||||
The JSON data format supports specifying "tag keys". If specified, keys
|
||||
will be searched for in the root-level of the JSON blob. If the key(s) exist,
|
||||
they will be applied as tags to the Telegraf metrics.
|
||||
|
||||
For example, if you had this configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.exec]]
|
||||
## Commands array
|
||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
||||
|
||||
## measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "json"
|
||||
|
||||
## List of tag names to extract from top-level of JSON server response
|
||||
tag_keys = [
|
||||
"my_tag_1",
|
||||
"my_tag_2"
|
||||
]
|
||||
```
|
||||
|
||||
with this JSON output from a command:
|
||||
|
||||
```json
|
||||
{
|
||||
"a": 5,
|
||||
"b": {
|
||||
"c": 6
|
||||
},
|
||||
"my_tag_1": "foo"
|
||||
}
|
||||
```
|
||||
|
||||
Your Telegraf metrics would get tagged with "my_tag_1"
|
||||
|
||||
```
|
||||
exec_mycollector,my_tag_1=foo a=5,b_c=6
|
||||
```
|
||||
|
||||
If the JSON data is an array, then each element of the array is parsed with the configured settings.
|
||||
Each resulting metric will be output with the same timestamp.
|
||||
|
||||
For example, given the following configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.exec]]
|
||||
## Commands array
|
||||
commands = ["/usr/bin/mycollector --foo=bar"]
|
||||
|
||||
## measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "json"
|
||||
|
||||
## List of tag names to extract from top-level of JSON server response
|
||||
tag_keys = [
|
||||
"my_tag_1",
|
||||
"my_tag_2"
|
||||
]
|
||||
```
|
||||
|
||||
with this JSON output from a command:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"a": 5,
|
||||
"b": {
|
||||
"c": 6
|
||||
},
|
||||
"my_tag_1": "foo",
|
||||
"my_tag_2": "baz"
|
||||
},
|
||||
{
|
||||
"a": 7,
|
||||
"b": {
|
||||
"c": 8
|
||||
},
|
||||
"my_tag_1": "bar",
|
||||
"my_tag_2": "baz"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"
|
||||
|
||||
```
|
||||
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
|
||||
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
|
||||
```
|
||||
|
||||
# Value:
|
||||
|
||||
The "value" data format translates single values into Telegraf metrics. This
|
||||
is done by assigning a measurement name and setting a single field ("value")
|
||||
as the parsed metric.
|
||||
|
||||
#### Value Configuration:
|
||||
|
||||
You **must** tell Telegraf what type of metric to collect by using the
|
||||
`data_type` configuration option. Available options are:
|
||||
|
||||
1. integer
|
||||
2. float or long
|
||||
3. string
|
||||
4. boolean
|
||||
|
||||
**Note:** It is also recommended that you set `name_override` to a measurement
|
||||
name that makes sense for your metric, otherwise it will just be set to the
|
||||
name of the plugin.
|
||||
|
||||
```toml
|
||||
[[inputs.exec]]
|
||||
## Commands array
|
||||
commands = ["cat /proc/sys/kernel/random/entropy_avail"]
|
||||
|
||||
## override the default metric name of "exec"
|
||||
name_override = "entropy_available"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "value"
|
||||
data_type = "integer" # required
|
||||
```
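
With the configuration above, a command output of, say, `1024` would become a
single metric named `entropy_available` with one field, `value=1024`
(illustrative; the exact rendering depends on the configured output).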
|
||||
|
||||
# Graphite:
|
||||
|
||||
The Graphite data format translates graphite _dot_ buckets directly into
|
||||
telegraf measurement names, with a single value field, and without any tags.
|
||||
By default, the separator is left as ".", but this can be changed using the
|
||||
"separator" argument. For more advanced options,
|
||||
Telegraf supports specifying "templates" to translate
|
||||
graphite buckets into Telegraf metrics.
|
||||
|
||||
Templates are of the form:
|
||||
|
||||
```
|
||||
"host.mytag.mytag.measurement.measurement.field*"
|
||||
```
|
||||
|
||||
Where the following keywords exist:
|
||||
|
||||
1. `measurement`: specifies that this section of the graphite bucket corresponds
|
||||
to the measurement name. This can be specified multiple times.
|
||||
2. `field`: specifies that this section of the graphite bucket corresponds
|
||||
to the field name. This can be specified multiple times.
|
||||
3. `measurement*`: specifies that all remaining elements of the graphite bucket
|
||||
correspond to the measurement name.
|
||||
4. `field*`: specifies that all remaining elements of the graphite bucket
|
||||
correspond to the field name.
|
||||
|
||||
Any part of the template that is not a keyword is treated as a tag key. This
|
||||
can also be specified multiple times.
|
||||
|
||||
NOTE: `field*` cannot be used in conjunction with `measurement*`!
|
||||
|
||||
#### Measurement & Tag Templates:
|
||||
|
||||
The most basic template is to specify a single transformation to apply to all
|
||||
incoming metrics. So the following template:
|
||||
|
||||
```toml
|
||||
templates = [
|
||||
"region.region.measurement*"
|
||||
]
|
||||
```
|
||||
|
||||
would result in the following Graphite -> Telegraf transformation.
|
||||
|
||||
```
|
||||
us.west.cpu.load 100
|
||||
=> cpu.load,region=us.west value=100
|
||||
```
|
||||
|
||||
Multiple templates can also be specified, but these should be differentiated
|
||||
using _filters_ (see below for more details):
|
||||
|
||||
```toml
|
||||
templates = [
|
||||
"*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
|
||||
"*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
|
||||
]
|
||||
```
|
||||
|
||||
#### Field Templates:
|
||||
|
||||
The field keyword tells Telegraf to give the metric that field name.
|
||||
So the following template:
|
||||
|
||||
```toml
|
||||
separator = "_"
|
||||
templates = [
|
||||
"measurement.measurement.field.field.region"
|
||||
]
|
||||
```
|
||||
|
||||
would result in the following Graphite -> Telegraf transformation.
|
||||
|
||||
```
|
||||
cpu.usage.idle.percent.eu-east 100
|
||||
=> cpu_usage,region=eu-east idle_percent=100
|
||||
```
|
||||
|
||||
The field key can also be derived from all remaining elements of the graphite
|
||||
bucket by specifying `field*`:
|
||||
|
||||
```toml
|
||||
separator = "_"
|
||||
templates = [
|
||||
"measurement.measurement.region.field*"
|
||||
]
|
||||
```
|
||||
|
||||
which would result in the following Graphite -> Telegraf transformation.
|
||||
|
||||
```
|
||||
cpu.usage.eu-east.idle.percentage 100
|
||||
=> cpu_usage,region=eu-east idle_percentage=100
|
||||
```
|
||||
|
||||
#### Filter Templates:
|
||||
|
||||
Users can also filter the template(s) to use based on the name of the bucket,
|
||||
using glob matching, like so:
|
||||
|
||||
```toml
|
||||
templates = [
|
||||
"cpu.* measurement.measurement.region",
|
||||
"mem.* measurement.measurement.host"
|
||||
]
|
||||
```
|
||||
|
||||
which would result in the following transformation:
|
||||
|
||||
```
|
||||
cpu.load.eu-east 100
|
||||
=> cpu_load,region=eu-east value=100
|
||||
|
||||
mem.cached.localhost 256
|
||||
=> mem_cached,host=localhost value=256
|
||||
```
|
||||
|
||||
#### Adding Tags:
|
||||
|
||||
Additional tags that don't exist on the received metric can be added.
You can add these tags by specifying them after the pattern.
|
||||
Tags have the same format as the line protocol.
|
||||
Multiple tags are separated by commas.
|
||||
|
||||
```toml
|
||||
templates = [
|
||||
"measurement.measurement.field.region datacenter=1a"
|
||||
]
|
||||
```
|
||||
|
||||
would result in the following Graphite -> Telegraf transformation.
|
||||
|
||||
```
|
||||
cpu.usage.idle.eu-east 100
|
||||
=> cpu_usage,region=eu-east,datacenter=1a idle=100
|
||||
```
|
||||
|
||||
There are many more options available;
[more details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates).
|
||||
|
||||
#### Graphite Configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.exec]]
|
||||
## Commands array
|
||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
||||
|
||||
## measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "graphite"
|
||||
|
||||
## This string will be used to join the matched values.
|
||||
separator = "_"
|
||||
|
||||
## Each template line requires a template pattern. It can have an optional
|
||||
## filter before the template, separated by spaces. It can also have optional extra
## tags following the template. Multiple tags should be separated by commas and no spaces,
## similar to the line protocol format. There can be only one default template.
|
||||
## Templates support below format:
|
||||
## 1. filter + template
|
||||
## 2. filter + template + extra tag(s)
|
||||
## 3. filter + template with field key
|
||||
## 4. default template
|
||||
templates = [
|
||||
"*.app env.service.resource.measurement",
|
||||
"stats.* .host.measurement* region=eu-east,agent=sensu",
|
||||
"stats2.* .host.measurement.field",
|
||||
"measurement*"
|
||||
]
|
||||
```
|
||||
|
||||
# Nagios:
|
||||
|
||||
There are no additional configuration options for Nagios line-protocol. The
|
||||
metrics are parsed directly into Telegraf metrics.
|
||||
|
||||
Note: the Nagios input data format is only supported by the `exec` input plugin.
|
||||
|
||||
#### Nagios Configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.exec]]
|
||||
## Commands array
|
||||
commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
|
||||
|
||||
## measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "nagios"
|
||||
```
|
||||
|
||||
# Collectd:
|
||||
|
||||
The collectd format parses the collectd binary network protocol. Tags are
|
||||
created for host, instance, type, and type instance. All collectd values are
|
||||
added as float64 fields.
|
||||
|
||||
For more information about the binary network protocol see
|
||||
[here](https://collectd.org/wiki/index.php/Binary_protocol).
|
||||
|
||||
You can control the cryptographic settings with parser options. Create an
|
||||
authentication file and set `collectd_auth_file` to the path of the file, then
|
||||
set the desired security level in `collectd_security_level`.
|
||||
|
||||
Additional information including client setup can be found
|
||||
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
|
||||
|
||||
You can also change the path to the typesdb or add additional typesdb using
|
||||
`collectd_typesdb`.
|
||||
|
||||
#### Collectd Configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.socket_listener]]
|
||||
service_address = "udp://127.0.0.1:25826"
|
||||
name_prefix = "collectd_"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "collectd"
|
||||
|
||||
## Authentication file for cryptographic security levels
|
||||
collectd_auth_file = "/etc/collectd/auth_file"
|
||||
## One of none (default), sign, or encrypt
|
||||
collectd_security_level = "encrypt"
|
||||
## Path to TypesDB specifications
|
||||
collectd_typesdb = ["/usr/share/collectd/types.db"]
|
||||
```
|
||||
163 docs/DATA_FORMATS_OUTPUT.md Normal file
@@ -0,0 +1,163 @@
|
||||
# Telegraf Output Data Formats
|
||||
|
||||
Telegraf is able to serialize metrics into the following output data formats:
|
||||
|
||||
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
|
||||
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
|
||||
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)
|
||||
|
||||
Telegraf metrics, like InfluxDB
|
||||
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
|
||||
are a combination of four basic parts:
|
||||
|
||||
1. Measurement Name
|
||||
1. Tags
|
||||
1. Fields
|
||||
1. Timestamp
|
||||
|
||||
In InfluxDB line protocol, these 4 parts are easily defined in textual form:
|
||||
|
||||
```
|
||||
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
|
||||
```
|
||||
|
||||
For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
|
||||
InfluxDB line protocol was originally the only available output format. But now
|
||||
we are normalizing telegraf metric "serializers" into a
|
||||
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
|
||||
across all output plugins that can support it.
|
||||
You will be able to identify a plugin that supports different data formats
|
||||
by the presence of a `data_format`
|
||||
config option, for example, in the `file` output plugin:
|
||||
|
||||
```toml
|
||||
[[outputs.file]]
|
||||
## Files to write to, "stdout" is a specially handled file.
|
||||
files = ["stdout"]
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "influx"
|
||||
|
||||
## Additional configuration options go here
|
||||
```
|
||||
|
||||
Each data_format has an additional set of configuration options available, which
|
||||
I'll go over below.
|
||||
|
||||
# Influx:
|
||||
|
||||
There are no additional configuration options for InfluxDB line-protocol. The
|
||||
metrics are serialized directly into InfluxDB line-protocol.
|
||||
|
||||
### Influx Configuration:
|
||||
|
||||
```toml
|
||||
[[outputs.file]]
|
||||
## Files to write to, "stdout" is a specially handled file.
|
||||
files = ["stdout", "/tmp/metrics.out"]
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "influx"
|
||||
```
|
||||
|
||||
# Graphite:
|
||||
|
||||
The Graphite data format translates Telegraf metrics into _dot_ buckets. A
|
||||
template can be specified for the output of Telegraf metrics into Graphite
|
||||
buckets. The default template is:
|
||||
|
||||
```
|
||||
template = "host.tags.measurement.field"
|
||||
```
|
||||
|
||||
In the above template, we have four parts:
|
||||
|
||||
1. _host_ is a tag key. This can be any tag key that is in the Telegraf
|
||||
metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
|
||||
tag value will be filled in.
|
||||
1. _tags_ is a special keyword that outputs all remaining tag values, separated
|
||||
by dots and in alphabetical order (by tag key). These will be filled after all
|
||||
tag keys are filled.
|
||||
1. _measurement_ is a special keyword that outputs the measurement name.
|
||||
1. _field_ is a special keyword that outputs the field name.
|
||||
|
||||
This means the following influx metric -> graphite conversion would happen:
|
||||
|
||||
```
|
||||
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
|
||||
=>
|
||||
tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
|
||||
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
|
||||
```
|
||||
|
||||
Fields with string values will be skipped. Boolean fields will be converted
|
||||
to 1 (true) or 0 (false).
|
||||
|
||||
### Graphite Configuration:
|
||||
|
||||
```toml
|
||||
[[outputs.file]]
|
||||
## Files to write to, "stdout" is a specially handled file.
|
||||
files = ["stdout", "/tmp/metrics.out"]
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "graphite"
|
||||
|
||||
# prefix each graphite bucket
|
||||
prefix = "telegraf"
|
||||
# graphite template
|
||||
template = "host.tags.measurement.field"
|
||||
```
|
||||
|
||||
# JSON:
|
||||
|
||||
The JSON data format serializes Telegraf metrics into JSON. The format is:
|
||||
|
||||
```json
|
||||
{
|
||||
"fields":{
|
||||
"field_1":30,
|
||||
"field_2":4,
|
||||
"field_N":59,
|
||||
"n_images":660
|
||||
},
|
||||
"name":"docker",
|
||||
"tags":{
|
||||
"host":"raynor"
|
||||
},
|
||||
"timestamp":1458229140
|
||||
}
|
||||
```
|
||||
|
||||
### JSON Configuration:
|
||||
|
||||
```toml
|
||||
[[outputs.file]]
|
||||
## Files to write to, "stdout" is a specially handled file.
|
||||
files = ["stdout", "/tmp/metrics.out"]
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "json"
|
||||
json_timestamp_units = "1ns"
|
||||
```
|
||||
|
||||
By default, the timestamp output with JSON-serialized Telegraf
metrics is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter is truncated to the nearest power of 10, so if `json_timestamp_units`
is set to `15ms`, timestamps for JSON-serialized Telegraf metrics will be
output in hundredths of a second (`10ms`).
|
||||
46 docs/FAQ.md Normal file
@@ -0,0 +1,46 @@
|
||||
# Frequently Asked Questions
|
||||
|
||||
### Q: How can I monitor the Docker Engine Host from within a container?
|
||||
|
||||
You will need to setup several volume mounts as well as some environment
|
||||
variables:
|
||||
```
|
||||
docker run --name telegraf \
  -v /:/hostfs:ro \
  -v /etc:/hostfs/etc:ro \
  -v /proc:/hostfs/proc:ro \
  -v /sys:/hostfs/sys:ro \
  -v /var/run/utmp:/var/run/utmp:ro \
  -e HOST_ETC=/hostfs/etc \
  -e HOST_PROC=/hostfs/proc \
  -e HOST_SYS=/hostfs/sys \
  -e HOST_MOUNT_PREFIX=/hostfs \
  telegraf
|
||||
```
|
||||
|
||||
|
||||
### Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve?
|
||||
|
||||
Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
|
||||
This resolver behaves differently than the C library functions but is more
|
||||
efficient when used with the Go runtime.
|
||||
|
||||
If you encounter problems or want to use more advanced name resolution methods
|
||||
that are unsupported by the pure Go resolver, you can switch to the cgo
|
||||
resolver.
|
||||
|
||||
If running manually, set:
|
||||
```
|
||||
export GODEBUG=netdns=cgo
|
||||
```
|
||||
|
||||
If running as a service, add the environment variable to `/etc/default/telegraf`:
|
||||
```
|
||||
GODEBUG=netdns=cgo
|
||||
```
|
||||
|
||||
### Q: When will the next version be released?
|
||||
|
||||
The latest release date estimate can be viewed on the
|
||||
[milestones](https://github.com/influxdata/telegraf/milestones) page.
|
||||
104
docs/LICENSE_OF_DEPENDENCIES.md
Normal file
@@ -0,0 +1,104 @@
# Licenses of dependencies

When distributed in a binary form, Telegraf may contain portions of the
following works:

- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)
24
docs/PROFILING.md
Normal file
@@ -0,0 +1,24 @@
# Telegraf profiling

Telegraf uses the standard Go package `net/http/pprof`, which serves runtime
profiling data over its HTTP server in the format expected by the pprof
visualization tool.

By default, profiling is turned off.

To enable profiling, specify an address with the `pprof-addr` parameter, for example:

```
telegraf --config telegraf.conf --pprof-addr localhost:6060
```

There are several paths to get different profiling information:

To look at the heap profile:

`go tool pprof http://localhost:6060/debug/pprof/heap`

or to look at a 30-second CPU profile:

`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`

To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.
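For context, enabling `net/http/pprof` in a Go program generally amounts to a blank import plus an HTTP listener; a minimal sketch of the standard-library pattern (not Telegraf's exact wiring):

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof/ handlers on the default mux
)

func main() {
	// Serve the profiling endpoints on the same address telegraf would use
	// with --pprof-addr localhost:6060.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
```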
45
docs/WINDOWS_SERVICE.md
Normal file
@@ -0,0 +1,45 @@
# Running Telegraf as a Windows Service

Telegraf natively supports running as a Windows Service. Outlined below are
the general steps to set it up.

1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
   location, simply specify the `-config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (if necessary, you can wrap any spaces in the file paths in double quotes ""):

```
> C:\"Program Files"\Telegraf\telegraf.exe --service install
```

5. Edit the configuration file to meet your needs
6. To check that it works, run:

```
> C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
```

7. To start collecting data, run:

```
> net start telegraf
```

## Other supported operations

Telegraf can manage its own service through the --service flag:

| Command                            | Effect                        |
|------------------------------------|-------------------------------|
| `telegraf.exe --service install`   | Install telegraf as a service |
| `telegraf.exe --service uninstall` | Remove the telegraf service   |
| `telegraf.exe --service start`     | Start the telegraf service    |
| `telegraf.exe --service stop`      | Stop the telegraf service     |

## Troubleshooting common error #1067

When installing as a Windows service, always double-check that you specify the
full path of the config file; otherwise, the Windows service will fail to start:

`--config C:\"Program Files"\Telegraf\telegraf.conf`
3173
etc/telegraf.conf
File diff suppressed because it is too large
266
etc/telegraf_windows.conf
Normal file
@@ -0,0 +1,266 @@
# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.

# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.

# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.

# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"

# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
  ## flush this buffer on a successful write.
  metric_buffer_limit = 1000
  ## Flush the buffer whenever full, regardless of flush_interval.
  flush_buffer_when_full = true

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Logging configuration:
  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Specify the log file name. The empty string means to log to stdout.
  logfile = "/Program Files/Telegraf/telegraf.log"

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""


###############################################################################
# OUTPUTS #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
  # Multiple urls can be specified but it is assumed that they are part of the same
  # cluster, this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
  urls = ["http://127.0.0.1:8086"] # required
  # The target database for metrics (telegraf will create it if not exists)
  database = "telegraf" # required
  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  # note: using second precision greatly helps InfluxDB compression
  precision = "s"

  ## Write timeout (for the InfluxDB client), formatted as a string.
  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
  timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512


###############################################################################
# INPUTS #
###############################################################################

# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on Windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes more system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters

[[inputs.win_perf_counters]]
  [[inputs.win_perf_counters.object]]
    # Processor usage, alternative to native collection; reports per-core metrics.
    ObjectName = "Processor"
    Instances = ["*"]
    Counters = [
      "% Idle Time",
      "% Interrupt Time",
      "% Privileged Time",
      "% User Time",
      "% Processor Time",
      "% DPC Time",
    ]
    Measurement = "win_cpu"
    # Set to true to include _Total instance when querying for all (*).
    IncludeTotal=true

  [[inputs.win_perf_counters.object]]
    # Disk times and queues
    ObjectName = "LogicalDisk"
    Instances = ["*"]
    Counters = [
      "% Idle Time",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
      "Current Disk Queue Length",
      "% Free Space",
      "Free Megabytes",
    ]
    Measurement = "win_disk"
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    ObjectName = "PhysicalDisk"
    Instances = ["*"]
    Counters = [
      "Disk Read Bytes/sec",
      "Disk Write Bytes/sec",
      "Current Disk Queue Length",
      "Disk Reads/sec",
      "Disk Writes/sec",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
    ]
    Measurement = "win_diskio"

  [[inputs.win_perf_counters.object]]
    ObjectName = "Network Interface"
    Instances = ["*"]
    Counters = [
      "Bytes Received/sec",
      "Bytes Sent/sec",
      "Packets Received/sec",
      "Packets Sent/sec",
      "Packets Received Discarded",
      "Packets Outbound Discarded",
      "Packets Received Errors",
      "Packets Outbound Errors",
    ]
    Measurement = "win_net"

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = [
      "Context Switches/sec",
      "System Calls/sec",
      "Processor Queue Length",
      "System Up Time",
    ]
    Instances = ["------"]
    Measurement = "win_system"
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Memory object.
    ObjectName = "Memory"
    Counters = [
      "Available Bytes",
      "Cache Faults/sec",
      "Demand Zero Faults/sec",
      "Page Faults/sec",
      "Pages/sec",
      "Transition Faults/sec",
      "Pool Nonpaged Bytes",
      "Pool Paged Bytes",
      "Standby Cache Reserve Bytes",
      "Standby Cache Normal Priority Bytes",
      "Standby Cache Core Bytes",
    ]
    # Use 6 x - to remove the Instance bit from the query.
    Instances = ["------"]
    Measurement = "win_mem"
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Paging File object.
    ObjectName = "Paging File"
    Counters = [
      "% Usage",
    ]
    Instances = ["_Total"]
    Measurement = "win_swap"

  [[inputs.win_perf_counters.object]]
    ObjectName = "Network Interface"
    Instances = ["*"]
    Counters = [
      "Bytes Sent/sec",
      "Bytes Received/sec",
      "Packets Sent/sec",
      "Packets Received/sec",
      "Packets Received Discarded",
      "Packets Received Errors",
      "Packets Outbound Discarded",
      "Packets Outbound Errors",
    ]


# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

# # Read metrics about cpu usage
# [[inputs.cpu]]
#   ## Whether to report per-cpu stats or not
#   percpu = true
#   ## Whether to report total system cpu stats or not
#   totalcpu = true
#   ## Comment this line if you want the raw CPU time metrics
#   fielddrop = ["time_*"]


# # Read metrics about disk usage by mount point
# [[inputs.disk]]
#   ## By default, telegraf gathers stats for all mountpoints.
#   ## Setting mountpoints will restrict the stats to the specified mountpoints.
#   ## mount_points=["/"]
#
#   ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
#   ## present on /run, /var/run, /dev/shm or /dev).
#   # ignore_fs = ["tmpfs", "devtmpfs"]


# # Read metrics about disk IO by device
# [[inputs.diskio]]
#   ## By default, telegraf will gather stats for all devices including
#   ## disk partitions.
#   ## Setting devices will restrict the stats to the specified devices.
#   ## devices = ["sda", "sdb"]
#   ## Uncomment the following line if you do not need disk serial numbers.
#   ## skip_serial_number = true


# # Read metrics about memory usage
# [[inputs.mem]]
#   # no configuration


# # Read metrics about swap memory usage
# [[inputs.swap]]
#   # no configuration
116
filter/filter.go
Normal file
@@ -0,0 +1,116 @@
package filter

import (
	"strings"

	"github.com/gobwas/glob"
)

type Filter interface {
	Match(string) bool
}

// Compile takes a list of string filters and returns a Filter interface
// for matching a given string against the filter list. The filter list
// supports glob matching too, ie:
//
//   f, _ := Compile([]string{"cpu", "mem", "net*"})
//   f.Match("cpu")     // true
//   f.Match("network") // true
//   f.Match("memory")  // false
//
func Compile(filters []string) (Filter, error) {
	// return if there is nothing to compile
	if len(filters) == 0 {
		return nil, nil
	}

	// check if we can compile a non-glob filter
	noGlob := true
	for _, filter := range filters {
		if hasMeta(filter) {
			noGlob = false
			break
		}
	}

	switch {
	case noGlob:
		// return non-globbing filter if not needed.
		return compileFilterNoGlob(filters), nil
	case len(filters) == 1:
		return glob.Compile(filters[0])
	default:
		return glob.Compile("{" + strings.Join(filters, ",") + "}")
	}
}

// hasMeta reports whether path contains any magic glob characters.
func hasMeta(s string) bool {
	return strings.IndexAny(s, "*?[") >= 0
}

type filter struct {
	m map[string]struct{}
}

func (f *filter) Match(s string) bool {
	_, ok := f.m[s]
	return ok
}

type filtersingle struct {
	s string
}

func (f *filtersingle) Match(s string) bool {
	return f.s == s
}

func compileFilterNoGlob(filters []string) Filter {
	if len(filters) == 1 {
		return &filtersingle{s: filters[0]}
	}
	out := filter{m: make(map[string]struct{})}
	for _, filter := range filters {
		out.m[filter] = struct{}{}
	}
	return &out
}

type IncludeExcludeFilter struct {
	include Filter
	exclude Filter
}

func NewIncludeExcludeFilter(
	include []string,
	exclude []string,
) (Filter, error) {
	in, err := Compile(include)
	if err != nil {
		return nil, err
	}

	ex, err := Compile(exclude)
	if err != nil {
		return nil, err
	}

	return &IncludeExcludeFilter{in, ex}, nil
}

func (f *IncludeExcludeFilter) Match(s string) bool {
	if f.include != nil {
		if !f.include.Match(s) {
			return false
		}
	}

	if f.exclude != nil {
		if f.exclude.Match(s) {
			return false
		}
	}
	return true
}
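A usage sketch of the include/exclude combinator above, written as a same-package example test (the filter values are invented for illustration):

```go
package filter

import "fmt"

func ExampleNewIncludeExcludeFilter() {
	// Pass anything matching cpu*, except cpu_guest.
	f, err := NewIncludeExcludeFilter([]string{"cpu*"}, []string{"cpu_guest"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("cpu_idle"), f.Match("cpu_guest"), f.Match("mem_free"))
	// Output: true false false
}
```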
96
filter/filter_test.go
Normal file
@@ -0,0 +1,96 @@
package filter

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCompile(t *testing.T) {
	f, err := Compile([]string{})
	assert.NoError(t, err)
	assert.Nil(t, f)

	f, err = Compile([]string{"cpu"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.False(t, f.Match("mem"))

	f, err = Compile([]string{"cpu*"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.True(t, f.Match("cpu0"))
	assert.False(t, f.Match("mem"))

	f, err = Compile([]string{"cpu", "mem"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.True(t, f.Match("mem"))

	f, err = Compile([]string{"cpu", "mem", "net*"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.True(t, f.Match("mem"))
	assert.True(t, f.Match("network"))
}

var benchbool bool

func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
	f, _ := Compile([]string{"cpu"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("network")
	}
	benchbool = tmp
}

func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
	f, _ := Compile([]string{"cpu"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("cpu")
	}
	benchbool = tmp
}

func BenchmarkFilter(b *testing.B) {
	f, _ := Compile([]string{"cpu", "mem", "net*"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("network")
	}
	benchbool = tmp
}

func BenchmarkFilterNoGlob(b *testing.B) {
	f, _ := Compile([]string{"cpu", "mem", "net"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("net")
	}
	benchbool = tmp
}

func BenchmarkFilter2(b *testing.B) {
	f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
		"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("network")
	}
	benchbool = tmp
}

func BenchmarkFilter2NoGlob(b *testing.B) {
	f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
		"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("net")
	}
	benchbool = tmp
}
31
input.go
Normal file
@@ -0,0 +1,31 @@
package telegraf

type Input interface {
	// SampleConfig returns the default configuration of the Input
	SampleConfig() string

	// Description returns a one-sentence description of the Input
	Description() string

	// Gather takes in an accumulator and adds the metrics that the Input
	// gathers. This is called every "interval"
	Gather(Accumulator) error
}

type ServiceInput interface {
	// SampleConfig returns the default configuration of the Input
	SampleConfig() string

	// Description returns a one-sentence description of the Input
	Description() string

	// Gather takes in an accumulator and adds the metrics that the Input
	// gathers. This is called every "interval"
	Gather(Accumulator) error

	// Start starts the ServiceInput's service, whatever that may be
	Start(Accumulator) error

	// Stop stops the services and closes any necessary channels and connections
	Stop()
}
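To make the contract concrete, here is a minimal hypothetical Input implementation (the plugin name, measurement, fields, and tags are invented for illustration; `AddFields` is the accumulator method plugins use to report metrics):

```go
package mockplugin

import "github.com/influxdata/telegraf"

// MockPlugin is a hypothetical, minimal Input.
type MockPlugin struct{}

func (m *MockPlugin) SampleConfig() string { return "" }

func (m *MockPlugin) Description() string { return "a minimal example input" }

// Gather is invoked once per collection interval.
func (m *MockPlugin) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("example",
		map[string]interface{}{"value": 42},
		map[string]string{"source": "mock"})
	return nil
}
```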
76
internal/buffer/buffer.go
Normal file
@@ -0,0 +1,76 @@
package buffer

import (
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/selfstat"
)

var (
	MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
	MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
)

// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
	buf chan telegraf.Metric

	mu sync.Mutex
}

// NewBuffer returns a Buffer.
// size is the maximum number of metrics that Buffer will cache. If Add is
// called when the buffer is full, then the oldest metric(s) will be dropped.
func NewBuffer(size int) *Buffer {
	return &Buffer{
		buf: make(chan telegraf.Metric, size),
	}
}

// IsEmpty returns true if Buffer is empty.
func (b *Buffer) IsEmpty() bool {
	return len(b.buf) == 0
}

// Len returns the current length of the buffer.
func (b *Buffer) Len() int {
	return len(b.buf)
}

// Add adds metrics to the buffer.
func (b *Buffer) Add(metrics ...telegraf.Metric) {
	for i := range metrics {
		MetricsWritten.Incr(1)
		select {
		case b.buf <- metrics[i]:
		default:
			b.mu.Lock()
			MetricsDropped.Incr(1)
			<-b.buf
			b.buf <- metrics[i]
			b.mu.Unlock()
		}
	}
}

// Batch returns a batch of metrics of size batchSize.
// The batch will be of maximum length batchSize. It can be less than batchSize,
// if the length of Buffer is less than batchSize.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
	b.mu.Lock()
	n := min(len(b.buf), batchSize)
	out := make([]telegraf.Metric, n)
	for i := 0; i < n; i++ {
		out[i] = <-b.buf
	}
	b.mu.Unlock()
	return out
}

func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
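The drop-oldest semantics can be seen in a small same-package example test (the metric names are invented; `testutil.TestMetric` is the helper the tests below also use):

```go
package buffer

import (
	"fmt"

	"github.com/influxdata/telegraf/testutil"
)

func ExampleBuffer() {
	// A buffer of size 2: adding a third metric evicts the oldest one.
	b := NewBuffer(2)
	b.Add(
		testutil.TestMetric(1, "m1"),
		testutil.TestMetric(2, "m2"),
		testutil.TestMetric(3, "m3"),
	)
	batch := b.Batch(10) // at most 10 metrics; only m2 and m3 remain
	fmt.Println(len(batch))
	// Output: 2
}
```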
100
internal/buffer/buffer_test.go
Normal file
@@ -0,0 +1,100 @@
package buffer

import (
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
)

var metricList = []telegraf.Metric{
	testutil.TestMetric(2, "mymetric1"),
	testutil.TestMetric(1, "mymetric2"),
	testutil.TestMetric(11, "mymetric3"),
	testutil.TestMetric(15, "mymetric4"),
	testutil.TestMetric(8, "mymetric5"),
}

func BenchmarkAddMetrics(b *testing.B) {
	buf := NewBuffer(10000)
	m := testutil.TestMetric(1, "mymetric")
	for n := 0; n < b.N; n++ {
		buf.Add(m)
	}
}

func TestNewBufferBasicFuncs(t *testing.T) {
	b := NewBuffer(10)
	MetricsDropped.Set(0)
	MetricsWritten.Set(0)

	assert.True(t, b.IsEmpty())
	assert.Zero(t, b.Len())
	assert.Zero(t, MetricsDropped.Get())
	assert.Zero(t, MetricsWritten.Get())

	m := testutil.TestMetric(1, "mymetric")
	b.Add(m)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 1)
	assert.Equal(t, int64(0), MetricsDropped.Get())
	assert.Equal(t, int64(1), MetricsWritten.Get())

	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 6)
	assert.Equal(t, int64(0), MetricsDropped.Get())
	assert.Equal(t, int64(6), MetricsWritten.Get())
}

func TestDroppingMetrics(t *testing.T) {
	b := NewBuffer(10)
	MetricsDropped.Set(0)
	MetricsWritten.Set(0)

	// Add up to the size of the buffer
	b.Add(metricList...)
	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 10)
	assert.Equal(t, int64(0), MetricsDropped.Get())
	assert.Equal(t, int64(10), MetricsWritten.Get())

	// Add 5 more and verify they were dropped
	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 10)
	assert.Equal(t, int64(5), MetricsDropped.Get())
	assert.Equal(t, int64(15), MetricsWritten.Get())
}

func TestGettingBatches(t *testing.T) {
	b := NewBuffer(20)
	MetricsDropped.Set(0)
	MetricsWritten.Set(0)

	// Verify that the buffer returned is smaller than requested when there are
	// not as many items as requested.
	b.Add(metricList...)
	batch := b.Batch(10)
	assert.Len(t, batch, 5)

	// Verify that the buffer is now empty
	assert.True(t, b.IsEmpty())
	assert.Zero(t, b.Len())
	assert.Zero(t, MetricsDropped.Get())
	assert.Equal(t, int64(5), MetricsWritten.Get())

	// Verify that the buffer returned is not more than the size requested
	b.Add(metricList...)
	batch = b.Batch(3)
	assert.Len(t, batch, 3)

	// Verify that buffer is not empty
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 2)
	assert.Equal(t, int64(0), MetricsDropped.Get())
	assert.Equal(t, int64(10), MetricsWritten.Get())
}
49
internal/config/aws/credentials.go
Normal file
@@ -0,0 +1,49 @@
package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

type CredentialConfig struct {
	Region    string
	AccessKey string
	SecretKey string
	RoleARN   string
	Profile   string
	Filename  string
	Token     string
}

func (c *CredentialConfig) Credentials() client.ConfigProvider {
	if c.RoleARN != "" {
		return c.assumeCredentials()
	}
	return c.rootCredentials()
}

func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
	config := &aws.Config{
		Region: aws.String(c.Region),
	}
	if c.AccessKey != "" || c.SecretKey != "" {
		config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
	} else if c.Profile != "" || c.Filename != "" {
		config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile)
	}

	return session.New(config)
}

func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
	rootCredentials := c.rootCredentials()
	config := &aws.Config{
		Region: aws.String(c.Region),
	}
	config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
	return session.New(config)
}
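Since this package lives under `internal/`, it is only importable from within the Telegraf tree; a plugin there could use it roughly as follows (the package and function names here are hypothetical, only `cloudwatch.New` and `CredentialConfig` come from real APIs):

```go
package cloudwatchplugin

import (
	"github.com/aws/aws-sdk-go/service/cloudwatch"

	internalaws "github.com/influxdata/telegraf/internal/config/aws"
)

// newCloudWatchClient sketches the intended call pattern: static keys win,
// shared credentials are the fallback, and a RoleARN switches to STS AssumeRole.
func newCloudWatchClient(region, profile string) *cloudwatch.CloudWatch {
	cc := internalaws.CredentialConfig{
		Region:  region,
		Profile: profile,
	}
	return cloudwatch.New(cc.Credentials())
}
```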
File diff suppressed because it is too large
@@ -1,48 +1,99 @@
package config

import (
"os"
"testing"
"time"

"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/exec"
"github.com/influxdb/telegraf/plugins/memcached"
"github.com/influxdb/telegraf/plugins/procstat"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/exec"
"github.com/influxdata/telegraf/plugins/inputs/memcached"
"github.com/influxdata/telegraf/plugins/inputs/procstat"
"github.com/influxdata/telegraf/plugins/parsers"

"github.com/stretchr/testify/assert"
)

func TestConfig_LoadSinglePlugin(t *testing.T) {
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
c := NewConfig()
err := os.Setenv("MY_TEST_SERVER", "192.168.1.1")
assert.NoError(t, err)
err = os.Setenv("TEST_INTERVAL", "10s")
assert.NoError(t, err)
c.LoadConfig("./testdata/single_plugin_env_vars.toml")

memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"192.168.1.1"}

filter := models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []models.TagFilter{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
}
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
Interval: 10 * time.Second,
}
mConfig.Tags = make(map[string]string)

assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
}

func TestConfig_LoadSingleInput(t *testing.T) {
c := NewConfig()
c.LoadConfig("./testdata/single_plugin.toml")

memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}

mConfig := &PluginConfig{
Name: "memcached",
Filter: Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []TagFilter{
TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
filter := models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
TagPass: []TagFilter{
TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
IsActive: true,
},
TagPass: []models.TagFilter{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
}
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
Interval: 5 * time.Second,
}
mConfig.Tags = make(map[string]string)

assert.Equal(t, memcached, c.Plugins[0].Plugin,
assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[0].Config,
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
}

@@ -57,240 +108,69 @@ func TestConfig_LoadDirectory(t *testing.T) {
t.Error(err)
}

memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}

mConfig := &PluginConfig{
Name: "memcached",
Filter: Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []TagFilter{
TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
filter := models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
TagPass: []TagFilter{
TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
IsActive: true,
},
TagPass: []models.TagFilter{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
}
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
Interval: 5 * time.Second,
}
assert.Equal(t, memcached, c.Plugins[0].Plugin,
mConfig.Tags = make(map[string]string)

assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[0].Config,
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")

ex := plugins.Plugins["exec"]().(*exec.Exec)
ex.Commands = []*exec.Command{
&exec.Command{
Command: "/usr/bin/myothercollector --foo=bar",
Name: "myothercollector",
},
ex := inputs.Inputs["exec"]().(*exec.Exec)
p, err := parsers.NewJSONParser("exec", nil, nil)
assert.NoError(t, err)
ex.SetParser(p)
ex.Command = "/usr/bin/myothercollector --foo=bar"
eConfig := &models.InputConfig{
Name: "exec",
MeasurementSuffix: "_myothercollector",
}
eConfig := &PluginConfig{Name: "exec"}
assert.Equal(t, ex, c.Plugins[1].Plugin,
eConfig.Tags = make(map[string]string)
assert.Equal(t, ex, c.Inputs[1].Input,
"Merged Testdata did not produce a correct exec struct.")
assert.Equal(t, eConfig, c.Plugins[1].Config,
assert.Equal(t, eConfig, c.Inputs[1].Config,
"Merged Testdata did not produce correct exec metadata.")

memcached.Servers = []string{"192.168.1.1"}
assert.Equal(t, memcached, c.Plugins[2].Plugin,
assert.Equal(t, memcached, c.Inputs[2].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[2].Config,
assert.Equal(t, mConfig, c.Inputs[2].Config,
"Testdata did not produce correct memcached metadata.")

pstat := plugins.Plugins["procstat"]().(*procstat.Procstat)
pstat.Specifications = []*procstat.Specification{
&procstat.Specification{
PidFile: "/var/run/grafana-server.pid",
},
&procstat.Specification{
PidFile: "/var/run/influxdb/influxd.pid",
},
}
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
pstat.PidFile = "/var/run/grafana-server.pid"

pConfig := &PluginConfig{Name: "procstat"}
pConfig := &models.InputConfig{Name: "procstat"}
pConfig.Tags = make(map[string]string)

assert.Equal(t, pstat, c.Plugins[3].Plugin,
assert.Equal(t, pstat, c.Inputs[3].Input,
"Merged Testdata did not produce a correct procstat struct.")
assert.Equal(t, pConfig, c.Plugins[3].Config,
assert.Equal(t, pConfig, c.Inputs[3].Config,
"Merged Testdata did not produce correct procstat metadata.")
}

func TestFilter_Empty(t *testing.T) {
f := Filter{}

measurements := []string{
"foo",
"bar",
"barfoo",
"foo_bar",
"foo.bar",
"foo-bar",
"supercalifradjulisticexpialidocious",
}

for _, measurement := range measurements {
if !f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
}

func TestFilter_Pass(t *testing.T) {
f := Filter{
Pass: []string{"foo*", "cpu_usage_idle"},
}

passes := []string{
"foo",
"foo_bar",
"foo.bar",
"foo-bar",
"cpu_usage_idle",
}

drops := []string{
"bar",
"barfoo",
"bar_foo",
"cpu_usage_busy",
}

for _, measurement := range passes {
if !f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}

for _, measurement := range drops {
if f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
}

func TestFilter_Drop(t *testing.T) {
f := Filter{
Drop: []string{"foo*", "cpu_usage_idle"},
}

drops := []string{
"foo",
"foo_bar",
"foo.bar",
"foo-bar",
"cpu_usage_idle",
}

passes := []string{
"bar",
"barfoo",
"bar_foo",
"cpu_usage_busy",
}

for _, measurement := range passes {
if !f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}

for _, measurement := range drops {
if f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
}

func TestFilter_TagPass(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
TagFilter{
Name: "mem",
Filter: []string{"mem_free"},
}}
f := Filter{
TagPass: filters,
}

passes := []map[string]string{
{"cpu": "cpu-total"},
{"cpu": "cpu-0"},
{"cpu": "cpu-1"},
{"cpu": "cpu-2"},
{"mem": "mem_free"},
}

drops := []map[string]string{
{"cpu": "cputotal"},
{"cpu": "cpu0"},
{"cpu": "cpu1"},
{"cpu": "cpu2"},
{"mem": "mem_used"},
}

for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}

for _, tags := range drops {
if f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
}

func TestFilter_TagDrop(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
TagFilter{
Name: "mem",
Filter: []string{"mem_free"},
}}
f := Filter{
TagDrop: filters,
}

drops := []map[string]string{
{"cpu": "cpu-total"},
{"cpu": "cpu-0"},
{"cpu": "cpu-1"},
{"cpu": "cpu-2"},
{"mem": "mem_free"},
}

passes := []map[string]string{
{"cpu": "cputotal"},
{"cpu": "cpu0"},
{"cpu": "cpu1"},
{"cpu": "cpu2"},
{"mem": "mem_used"},
}

for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}

for _, tags := range drops {
if f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
}
12
internal/config/testdata/single_plugin.toml
vendored
@@ -1,9 +1,11 @@
[[plugins.memcached]]
[[inputs.memcached]]
servers = ["localhost"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]
interval = "5s"
[plugins.memcached.tagpass]
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[plugins.memcached.tagdrop]
[inputs.memcached.tagdrop]
badtag = ["othertag"]
11
internal/config/testdata/single_plugin_env_vars.toml
vendored
Normal file
@@ -0,0 +1,11 @@
[[inputs.memcached]]
servers = ["$MY_TEST_SERVER"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]
interval = "$TEST_INTERVAL"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]
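The `$MY_TEST_SERVER` and `$TEST_INTERVAL` placeholders above are resolved from the environment before the TOML is parsed; a rough Go sketch of that substitution step (the file name matches the testdata above, the rest is a simplified stand-in for the loader's actual code):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	raw, err := ioutil.ReadFile("single_plugin_env_vars.toml")
	if err != nil {
		log.Fatal(err)
	}
	// Replace $VAR references with their environment values before parsing.
	expanded := os.Expand(string(raw), os.Getenv)
	fmt.Println(expanded)
}
```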
8
internal/config/testdata/subconfig/exec.conf
vendored
@@ -1,8 +1,4 @@
[[plugins.exec]]
# specify commands via an array of tables
[[plugins.exec.commands]]
[[inputs.exec]]
# the command to run
command = "/usr/bin/myothercollector --foo=bar"

# name of the command (used as a prefix for measurements)
name = "myothercollector"
name_suffix = "_myothercollector"

@@ -1,9 +1,11 @@
[[plugins.memcached]]
[[inputs.memcached]]
servers = ["192.168.1.1"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[plugins.memcached.tagpass]
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[plugins.memcached.tagdrop]
[inputs.memcached.tagdrop]
badtag = ["othertag"]

@@ -1,5 +1,2 @@
[[plugins.procstat]]
[[plugins.procstat.specifications]]
[[inputs.procstat]]
pid_file = "/var/run/grafana-server.pid"
[[plugins.procstat.specifications]]
pid_file = "/var/run/influxdb/influxd.pid"
154
internal/config/testdata/telegraf-agent.toml
vendored
154
internal/config/testdata/telegraf-agent.toml
vendored
@@ -1,7 +1,7 @@
|
||||
# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared plugins.
|
||||
# declared inputs.
|
||||
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
@@ -20,21 +20,14 @@
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
[tags]
|
||||
# dc = "us-east-1"
|
||||
[global_tags]
|
||||
dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
# Default data collection interval for all plugins
|
||||
interval = "10s"
|
||||
|
||||
# If utc = false, uses local time (utc is highly recommended)
|
||||
utc = true
|
||||
|
||||
# Precision of writes, valid values are n, u, ms, s, m, and h
|
||||
# note: using second precision greatly helps InfluxDB compression
|
||||
precision = "s"
|
||||
|
||||
# run telegraf in debug mode
|
||||
debug = false
|
||||
|
||||
@@ -46,8 +39,6 @@
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
[outputs]
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[[outputs.influxdb]]
|
||||
# The full HTTP endpoint URL for your InfluxDB instance
|
||||
@@ -58,17 +49,6 @@
|
||||
# The target database for metrics. This database must already exist
|
||||
database = "telegraf" # required.
|
||||
|
||||
# Connection timeout (for the connection with InfluxDB), formatted as a string.
|
||||
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
# If not provided, will default to 0 (no timeout)
|
||||
# timeout = "5s"
|
||||
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
|
||||
# Set the user agent for the POSTs (can be useful for log differentiation)
|
||||
# user_agent = "telegraf"
|
||||
|
||||
[[outputs.influxdb]]
|
||||
urls = ["udp://localhost:8089"]
|
||||
database = "udp-telegraf"
|
||||
@@ -80,7 +60,7 @@
|
||||
# Kafka topic for producer messages
|
||||
topic = "telegraf"
|
||||
# Telegraf tag to use as a routing key
|
||||
# ie, if this tag exists, it's value will be used as the routing key
|
||||
# ie, if this tag exists, its value will be used as the routing key
|
||||
routing_tag = "host"
|
||||
|
||||
|
||||
@@ -88,15 +68,13 @@
|
||||
# PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
[plugins]
|
||||
|
||||
# Read Apache status information (mod_status)
|
||||
[[plugins.apache]]
|
||||
# An array of Apache status URI to gather stats.
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
[[inputs.apache]]
|
||||
# An array of Apache status URI to gather stats.
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
|
||||
# Read metrics about cpu usage
|
||||
[[plugins.cpu]]
|
||||
[[inputs.cpu]]
|
||||
# Whether to report per-cpu stats or not
|
||||
percpu = true
|
||||
# Whether to report total system cpu stats or not
|
||||
@@ -105,11 +83,11 @@ urls = ["http://localhost/server-status?auto"]
|
||||
drop = ["cpu_time"]
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
[[plugins.disk]]
|
||||
[[inputs.diskio]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics from one or many disque servers
|
||||
[[plugins.disque]]
|
||||
[[inputs.disque]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
|
||||
# 10.0.0.1:10000, etc.
|
||||
@@ -118,7 +96,7 @@ urls = ["http://localhost/server-status?auto"]
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read stats from one or more Elasticsearch servers or clusters
|
||||
[[plugins.elasticsearch]]
|
||||
[[inputs.elasticsearch]]
|
||||
# specify a list of one or more Elasticsearch servers
|
||||
servers = ["http://localhost:9200"]
|
||||
|
||||
@@ -127,17 +105,13 @@ urls = ["http://localhost/server-status?auto"]
|
||||
local = true
|
||||
|
||||
# Read flattened metrics from one or more commands that output JSON to stdout
|
||||
[[plugins.exec]]
|
||||
# specify commands via an array of tables
|
||||
[[exec.commands]]
|
||||
[[inputs.exec]]
|
||||
# the command to run
|
||||
command = "/usr/bin/mycollector --foo=bar"
|
||||
|
||||
# name of the command (used as a prefix for measurements)
|
||||
name = "mycollector"
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
# Read metrics of haproxy, via socket or csv stats page
|
||||
[[plugins.haproxy]]
|
||||
[[inputs.haproxy]]
|
||||
# An array of addresses to gather stats about. Specify an ip or hostname
|
||||
# with optional port. ie localhost, 10.10.3.33:1936, etc.
|
||||
#
|
||||
@@ -147,46 +121,55 @@ urls = ["http://localhost/server-status?auto"]
|
||||
# servers = ["socket:/run/haproxy/admin.sock"]
|
||||
|
||||
# Read flattened metrics from one or more JSON HTTP endpoints
|
||||
[[plugins.httpjson]]
|
||||
# Specify services via an array of tables
|
||||
[[httpjson.services]]
|
||||
[[inputs.httpjson]]
|
||||
# a name for the service being polled
|
||||
name = "webserver_stats"
|
||||
|
||||
# URL of each server in the service's cluster
|
||||
servers = [
|
||||
"http://localhost:9999/stats/",
|
||||
"http://localhost:9998/stats/",
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
|
||||
# HTTP parameters (all values must be strings)
|
||||
[httpjson.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
[[plugins.io]]
|
||||
[[inputs.diskio]]
|
||||
# no configuration
|
||||
|
||||
# read metrics from a Kafka topic
|
||||
[[plugins.kafka_consumer]]
|
||||
# topic(s) to consume
|
||||
# read metrics from a Kafka 0.9+ topic
|
||||
[[inputs.kafka_consumer]]
|
||||
## kafka brokers
|
||||
brokers = ["localhost:9092"]
|
||||
## topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
## the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
## Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
|
||||
# read metrics from a Kafka legacy topic
|
||||
[[inputs.kafka_consumer_legacy]]
|
||||
## topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
# an array of Zookeeper connection strings
|
||||
zookeeper_peers = ["localhost:2181"]
|
||||
## the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
# Maximum number of points to buffer between collection intervals
|
||||
point_buffer = 100000
|
||||
## Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
|
||||
|
||||
# Read metrics from a LeoFS Server via SNMP
|
||||
[[plugins.leofs]]
|
||||
[[inputs.leofs]]
|
||||
# An array of URI to gather stats about LeoFS.
|
||||
# Specify an ip or hostname with port. ie 127.0.0.1:4020
|
||||
#
|
||||
@@ -194,7 +177,7 @@ urls = ["http://localhost/server-status?auto"]
|
||||
servers = ["127.0.0.1:4021"]
|
||||
|
||||
# Read metrics from local Lustre service on OST, MDS
|
||||
[[plugins.lustre2]]
|
||||
[[inputs.lustre2]]
|
||||
# An array of /proc globs to search for Lustre stats
|
||||
# If not specified, the default will work on Lustre 2.5.x
|
||||
#
|
||||
@@ -202,19 +185,28 @@ urls = ["http://localhost/server-status?auto"]
|
||||
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
|
||||
|
||||
# Read metrics about memory usage
|
||||
[[plugins.mem]]
|
||||
[[inputs.mem]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics from one or many memcached servers
|
||||
[[plugins.memcached]]
|
||||
[[inputs.memcached]]
|
||||
# An array of addresses to gather stats about. Specify an ip or hostname
|
||||
# with optional port. ie localhost, 10.0.0.1:11211, etc.
|
||||
#
|
||||
# If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
|
||||
# Telegraf plugin for gathering metrics from N Mesos masters
|
||||
[[inputs.mesos]]
|
||||
# Timeout, in ms.
|
||||
timeout = 100
|
||||
# A list of Mesos masters, default value is localhost:5050.
|
||||
masters = ["localhost:5050"]
|
||||
# Metrics groups to be collected, by default, all enabled.
|
||||
master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]
|
||||
|
||||
# Read metrics from one or many MongoDB servers
|
||||
[[plugins.mongodb]]
|
||||
[[inputs.mongodb]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
|
||||
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||
@@ -223,7 +215,7 @@ urls = ["http://localhost/server-status?auto"]
|
||||
servers = ["127.0.0.1:27017"]
|
||||
|
||||
# Read metrics from one or many mysql servers
|
||||
[[plugins.mysql]]
|
||||
[[inputs.mysql]]
|
||||
# specify servers via a url matching:
|
||||
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
# e.g.
|
||||
@@ -234,7 +226,7 @@ urls = ["http://localhost/server-status?auto"]
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics about network interface usage
|
||||
[[plugins.net]]
|
||||
[[inputs.net]]
|
||||
# By default, telegraf gathers stats from any up interface (excluding loopback)
|
||||
# Setting interfaces will tell it to gather these explicit interfaces,
|
||||
# regardless of status.
|
||||
@@ -242,12 +234,12 @@ urls = ["http://localhost/server-status?auto"]
|
||||
# interfaces = ["eth0", ... ]
|
||||
|
||||
# Read Nginx's basic status information (ngx_http_stub_status_module)
|
||||
[[plugins.nginx]]
|
||||
[[inputs.nginx]]
|
||||
# An array of Nginx stub_status URI to gather stats.
|
||||
urls = ["http://localhost/status"]
|
||||
|
||||
# Ping given url(s) and return statistics
|
||||
[[plugins.ping]]
|
||||
[[inputs.ping]]
|
||||
# urls to ping
|
||||
urls = ["www.google.com"] # required
|
||||
# number of pings to send (ping -c <COUNT>)
|
||||
@@ -260,10 +252,7 @@ urls = ["http://localhost/server-status?auto"]
|
||||
interface = ""
|
||||
|
||||
# Read metrics from one or many postgresql servers
|
||||
[[plugins.postgresql]]
|
||||
# specify servers via an array of tables
|
||||
[[postgresql.servers]]
|
||||
|
||||
[[inputs.postgresql]]
|
||||
# specify address via a url matching:
|
||||
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
|
||||
# or a simple string:
|
||||
@@ -290,14 +279,13 @@ urls = ["http://localhost/server-status?auto"]
|
||||
# address = "influx@remoteserver"
|
||||
|
||||
# Read metrics from one or many prometheus clients
|
||||
[[plugins.prometheus]]
|
||||
[[inputs.prometheus]]
|
||||
# An array of urls to scrape metrics from.
|
||||
urls = ["http://localhost:9100/metrics"]
|
||||
|
||||
# Read metrics from one or many RabbitMQ servers via the management API
|
||||
[[plugins.rabbitmq]]
|
||||
[[inputs.rabbitmq]]
|
||||
# Specify servers via an array of tables
|
||||
[[rabbitmq.servers]]
|
||||
# name = "rmq-server-1" # optional tag
|
||||
# url = "http://localhost:15672"
|
||||
# username = "guest"
|
||||
@@ -308,7 +296,7 @@ urls = ["http://localhost/server-status?auto"]
|
||||
# nodes = ["rabbit@node1", "rabbit@node2"]
|
||||
|
||||
# Read metrics from one or many redis servers
|
||||
[[plugins.redis]]
|
||||
[[inputs.redis]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie redis://localhost, redis://10.10.3.33:18832,
|
||||
# 10.0.0.1:10000, etc.
|
||||
@@ -317,7 +305,7 @@ urls = ["http://localhost/server-status?auto"]
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics from one or many RethinkDB servers
|
||||
[[plugins.rethinkdb]]
|
||||
[[inputs.rethinkdb]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
|
||||
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||
@@ -326,9 +314,9 @@ urls = ["http://localhost/server-status?auto"]
|
||||
servers = ["127.0.0.1:28015"]
|
||||
|
||||
# Read metrics about swap memory usage
|
||||
[[plugins.swap]]
|
||||
[[inputs.swap]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about system load & uptime
|
||||
[[plugins.system]]
|
||||
[[inputs.system]]
|
||||
# no configuration
|
||||
|
||||
116
internal/globpath/globpath.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package globpath
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/gobwas/glob"
|
||||
)
|
||||
|
||||
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
|
||||
|
||||
type GlobPath struct {
|
||||
path string
|
||||
hasMeta bool
|
||||
hasSuperMeta bool
|
||||
g glob.Glob
|
||||
root string
|
||||
}
|
||||
|
||||
func Compile(path string) (*GlobPath, error) {
|
||||
out := GlobPath{
|
||||
hasMeta: hasMeta(path),
|
||||
hasSuperMeta: hasSuperMeta(path),
|
||||
path: path,
|
||||
}
|
||||
|
||||
// if there are no glob meta characters in the path, don't bother compiling
|
||||
// a glob object or finding the root directory. (see short-circuit in Match)
|
||||
if !out.hasMeta || !out.hasSuperMeta {
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Get the root directory for this filepath
|
||||
out.root = findRootDir(path)
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (g *GlobPath) Match() map[string]os.FileInfo {
|
||||
if !g.hasMeta {
|
||||
out := make(map[string]os.FileInfo)
|
||||
info, err := os.Stat(g.path)
|
||||
if err == nil {
|
||||
out[g.path] = info
|
||||
}
|
||||
return out
|
||||
}
|
||||
if !g.hasSuperMeta {
|
||||
out := make(map[string]os.FileInfo)
|
||||
files, _ := filepath.Glob(g.path)
|
||||
for _, file := range files {
|
||||
info, err := os.Stat(file)
|
||||
if err == nil {
|
||||
out[file] = info
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
return walkFilePath(g.root, g.g)
|
||||
}
|
||||
|
||||
// walk the filepath from the given root and return a list of files that match
|
||||
// the given glob.
|
||||
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
|
||||
matchedFiles := make(map[string]os.FileInfo)
|
||||
walkfn := func(path string, info os.FileInfo, _ error) error {
|
||||
if g.Match(path) {
|
||||
matchedFiles[path] = info
|
||||
}
|
||||
return nil
|
||||
}
|
||||
filepath.Walk(root, walkfn)
|
||||
return matchedFiles
|
||||
}
|
||||
|
||||
// find the root dir of the given path (could include globs).
|
||||
// ie:
|
||||
// /var/log/telegraf.conf -> /var/log
|
||||
// /home/** -> /home
|
||||
// /home/*/** -> /home
|
||||
// /lib/share/*/*/**.txt -> /lib/share
|
||||
func findRootDir(path string) string {
|
||||
pathItems := strings.Split(path, sepStr)
|
||||
out := sepStr
|
||||
for i, item := range pathItems {
|
||||
if i == len(pathItems)-1 {
|
||||
break
|
||||
}
|
||||
if item == "" {
|
||||
continue
|
||||
}
|
||||
if hasMeta(item) {
|
||||
break
|
||||
}
|
||||
out += item + sepStr
|
||||
}
|
||||
if out != "/" {
|
||||
out = strings.TrimSuffix(out, "/")
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// hasMeta reports whether path contains any magic glob characters.
|
||||
func hasMeta(path string) bool {
|
||||
return strings.IndexAny(path, "*?[") >= 0
|
||||
}
|
||||
|
||||
// hasSuperMeta reports whether path contains any super magic glob characters (**).
|
||||
func hasSuperMeta(path string) bool {
|
||||
return strings.Index(path, "**") >= 0
|
||||
}
|
||||
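For orientation, a minimal sketch of how a caller might use this new globpath API (the pattern and path are invented for illustration; error handling reduced to the essentials):

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/globpath"
)

func main() {
	// Compile once; "**" engages the gobwas/glob walker, a single "*"
	// falls back to filepath.Glob, and a literal path is just os.Stat'd.
	g, err := globpath.Compile("/var/log/**.log")
	if err != nil {
		panic(err)
	}
	for path, info := range g.Match() {
		fmt.Println(path, info.Size())
	}
}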
90
internal/globpath/globpath_test.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package globpath
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCompileAndMatch(t *testing.T) {
|
||||
dir := getTestdataDir()
|
||||
// test super asterisk
|
||||
g1, err := Compile(dir + "/**")
|
||||
require.NoError(t, err)
|
||||
// test single asterisk
|
||||
g2, err := Compile(dir + "/*.log")
|
||||
require.NoError(t, err)
|
||||
// test no meta characters (file exists)
|
||||
g3, err := Compile(dir + "/log1.log")
|
||||
require.NoError(t, err)
|
||||
// test file that doesn't exist
|
||||
g4, err := Compile(dir + "/i_dont_exist.log")
|
||||
require.NoError(t, err)
|
||||
// test super asterisk that doesn't exist
|
||||
g5, err := Compile(dir + "/dir_doesnt_exist/**")
|
||||
require.NoError(t, err)
|
||||
|
||||
matches := g1.Match()
|
||||
assert.Len(t, matches, 6)
|
||||
matches = g2.Match()
|
||||
assert.Len(t, matches, 2)
|
||||
matches = g3.Match()
|
||||
assert.Len(t, matches, 1)
|
||||
matches = g4.Match()
|
||||
assert.Len(t, matches, 0)
|
||||
matches = g5.Match()
|
||||
assert.Len(t, matches, 0)
|
||||
}
|
||||
|
||||
func TestFindRootDir(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{"/var/log/telegraf.conf", "/var/log"},
|
||||
{"/home/**", "/home"},
|
||||
{"/home/*/**", "/home"},
|
||||
{"/lib/share/*/*/**.txt", "/lib/share"},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
actual := findRootDir(test.input)
|
||||
assert.Equal(t, test.output, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindNestedTextFile(t *testing.T) {
|
||||
dir := getTestdataDir()
|
||||
// test super asterisk
|
||||
g1, err := Compile(dir + "/**.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
matches := g1.Match()
|
||||
assert.Len(t, matches, 1)
|
||||
}
|
||||
|
||||
func getTestdataDir() string {
|
||||
_, filename, _, _ := runtime.Caller(1)
|
||||
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
|
||||
}
|
||||
|
||||
func TestMatch_ErrPermission(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected map[string]os.FileInfo
|
||||
}{
|
||||
{"/root/foo", map[string]os.FileInfo{}},
|
||||
{"/root/f*", map[string]os.FileInfo{}},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
glob, err := Compile(test.input)
|
||||
require.NoError(t, err)
|
||||
actual := glob.Match()
|
||||
require.Equal(t, test.expected, actual)
|
||||
}
|
||||
}
|
||||
0
internal/globpath/testdata/log1.log
vendored
Normal file
0
internal/globpath/testdata/log2.log
vendored
Normal file
0
internal/globpath/testdata/nested1/nested2/nested.txt
vendored
Normal file
5
internal/globpath/testdata/test.conf
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
# this is a fake testing config file
|
||||
# for testing the filestat plugin
|
||||
|
||||
option1 = "foo"
|
||||
option2 = "bar"
|
||||
@@ -2,10 +2,29 @@ package internal
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/big"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
|
||||
var (
|
||||
TimeoutErr = errors.New("Command timed out.")
|
||||
|
||||
NotImplementedError = errors.New("not implemented yet")
|
||||
)
|
||||
|
||||
// Duration just wraps time.Duration
|
||||
@@ -15,18 +34,39 @@ type Duration struct {
|
||||
|
||||
// UnmarshalTOML parses the duration from the TOML config file
|
||||
func (d *Duration) UnmarshalTOML(b []byte) error {
|
||||
	// (old implementation, removed in this diff: it only handled a quoted duration string)
	//	dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
	//	if err != nil {
	//		return err
	//	}
	//	d.Duration = dur

	var err error
	b = bytes.Trim(b, `'`)

	// see if we can directly convert it
	d.Duration, err = time.ParseDuration(string(b))
	if err == nil {
		return nil
	}
|
||||
// Parse string duration, ie, "1s"
|
||||
if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
|
||||
d.Duration, err = time.ParseDuration(uq)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// First try parsing as integer seconds
|
||||
sI, err := strconv.ParseInt(string(b), 10, 64)
|
||||
if err == nil {
|
||||
d.Duration = time.Second * time.Duration(sI)
|
||||
return nil
|
||||
}
|
||||
// Second try parsing as float seconds
|
||||
sF, err := strconv.ParseFloat(string(b), 64)
|
||||
if err == nil {
|
||||
d.Duration = time.Second * time.Duration(sF)
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
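To make the accepted shapes concrete, a short sketch of the three TOML forms the rewritten parser handles (mirroring the TestDuration cases further down; note a float is truncated by the time.Duration conversion):

var d Duration
d.UnmarshalTOML([]byte(`"1m30s"`)) // quoted duration string -> 90s
d.UnmarshalTOML([]byte(`10`))      // bare integer -> 10s
d.UnmarshalTOML([]byte(`1.5`))     // float -> 1s (truncated)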
|
||||
|
||||
// ReadLines reads contents from a file and splits them by new lines.
|
||||
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
||||
func ReadLines(filename string) ([]string, error) {
|
||||
@@ -62,58 +102,141 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Glob will test a string pattern, potentially containing globs, against a
// subject string. The result is a simple true/false, determining whether or
// not the glob pattern matched the subject text.
//
// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
// thanks Ryan Uber!
// (this function is the removed side of this diff)
func Glob(pattern, measurement string) bool {
	// Empty pattern can only match empty subject
	if pattern == "" {
		return measurement == pattern
	}

	// If the pattern _is_ a glob, it matches everything
	if pattern == "*" {
		return true
	}

	parts := strings.Split(pattern, "*")

	if len(parts) == 1 {
		// No globs in pattern, so test for match
		return pattern == measurement
	}

	leadingGlob := strings.HasPrefix(pattern, "*")
	trailingGlob := strings.HasSuffix(pattern, "*")
	end := len(parts) - 1

	for i, part := range parts {
		switch i {
		case 0:
			if leadingGlob {
				continue
			}
			if !strings.HasPrefix(measurement, part) {
				return false
			}
		case end:
			if len(measurement) > 0 {
				return trailingGlob || strings.HasSuffix(measurement, part)
			}
		default:
			if !strings.Contains(measurement, part) {
				return false
			}
			// Trim evaluated text from measurement as we loop over the pattern.
			idx := strings.Index(measurement, part) + len(part)
			measurement = measurement[idx:]
		}
	}

	// All parts of the pattern matched
	return true
}

// RandomString returns a random string of alpha-numeric characters
func RandomString(n int) string {
	var bytes = make([]byte, n)
	rand.Read(bytes)
	for i, b := range bytes {
		bytes[i] = alphanum[b%byte(len(alphanum))]
	}
	return string(bytes)
}

// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// you must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
func GetTLSConfig(
	SSLCert, SSLKey, SSLCA string,
	InsecureSkipVerify bool,
) (*tls.Config, error) {
	if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
		return nil, nil
	}

	t := &tls.Config{
		InsecureSkipVerify: InsecureSkipVerify,
	}

	if SSLCA != "" {
		caCert, err := ioutil.ReadFile(SSLCA)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
				err))
		}
		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		t.RootCAs = caCertPool
	}
|
||||
if SSLCert != "" && SSLKey != "" {
|
||||
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf(
|
||||
"Could not load TLS client key/certificate from %s:%s: %s",
|
||||
SSLKey, SSLCert, err))
|
||||
}
|
||||
|
||||
t.Certificates = []tls.Certificate{cert}
|
||||
t.BuildNameToCertificate()
|
||||
}
|
||||
|
||||
// will be nil by default if nothing is provided
|
||||
return t, nil
|
||||
}
|
||||
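A hedged sketch of how an output plugin would typically consume this helper (file paths are placeholders; assumes "net/http" and this internal package are imported):

func newHTTPSClient() (*http.Client, error) {
	tlsCfg, err := internal.GetTLSConfig(
		"/etc/telegraf/cert.pem", // ssl_cert (placeholder)
		"/etc/telegraf/key.pem",  // ssl_key
		"/etc/telegraf/ca.pem",   // ssl_ca
		false,                    // insecure_skip_verify
	)
	if err != nil {
		return nil, err
	}
	// tlsCfg is nil when no files are configured; http.Transport accepts nil.
	return &http.Client{Transport: &http.Transport{TLSClientConfig: tlsCfg}}, nil
}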
|
||||
// SnakeCase converts the given string to snake case following the Golang format:
|
||||
// acronyms are converted to lower-case and preceded by an underscore.
|
||||
func SnakeCase(in string) string {
|
||||
runes := []rune(in)
|
||||
length := len(runes)
|
||||
|
||||
var out []rune
|
||||
for i := 0; i < length; i++ {
|
||||
if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {
|
||||
out = append(out, '_')
|
||||
}
|
||||
out = append(out, unicode.ToLower(runes[i]))
|
||||
}
|
||||
|
||||
return string(out)
|
||||
}
|
||||
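For example, acronyms are lower-cased as a unit rather than split:

SnakeCase("APIResponse")   // "api_response"
SnakeCase("SnakeIDGoogle") // "snake_id_google"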
|
||||
// CombinedOutputTimeout runs the given command with the given timeout and
|
||||
// returns the combined output of stdout and stderr.
|
||||
// If the command times out, it attempts to kill the process.
|
||||
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
c.Stdout = &b
|
||||
c.Stderr = &b
|
||||
if err := c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err := WaitTimeout(c, timeout)
|
||||
return b.Bytes(), err
|
||||
}
|
||||
|
||||
// RunTimeout runs the given command with the given timeout.
|
||||
// If the command times out, it attempts to kill the process.
|
||||
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
|
||||
if err := c.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
return WaitTimeout(c, timeout)
|
||||
}
|
||||
|
||||
// WaitTimeout waits for the given command to finish with a timeout.
|
||||
// It assumes the command has already been started.
|
||||
// If the command times out, it attempts to kill the process.
|
||||
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
|
||||
timer := time.NewTimer(timeout)
|
||||
done := make(chan error)
|
||||
go func() { done <- c.Wait() }()
|
||||
select {
|
||||
case err := <-done:
|
||||
timer.Stop()
|
||||
return err
|
||||
case <-timer.C:
|
||||
if err := c.Process.Kill(); err != nil {
|
||||
log.Printf("E! FATAL error killing process: %s", err)
|
||||
return err
|
||||
}
|
||||
// wait for the command to return after killing it
|
||||
<-done
|
||||
return TimeoutErr
|
||||
}
|
||||
}
|
||||
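A brief usage sketch for the three timeout helpers above (the command and timeout are illustrative):

cmd := exec.Command("/usr/bin/mycollector", "--foo=bar")
out, err := CombinedOutputTimeout(cmd, 5*time.Second)
if err == TimeoutErr {
	// the process was killed; out holds whatever it wrote before that
	log.Printf("collector timed out, partial output: %s", out)
}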
|
||||
// RandomSleep will sleep for a random amount of time up to max.
|
||||
// If the shutdown channel is closed, it will return before it has finished
|
||||
// sleeping.
|
||||
func RandomSleep(max time.Duration, shutdown chan struct{}) {
|
||||
if max == 0 {
|
||||
return
|
||||
}
|
||||
maxSleep := big.NewInt(max.Nanoseconds())
|
||||
|
||||
var sleepns int64
|
||||
if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
|
||||
sleepns = j.Int64()
|
||||
}
|
||||
|
||||
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
|
||||
select {
|
||||
case <-t.C:
|
||||
return
|
||||
case <-shutdown:
|
||||
t.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
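And a sketch of RandomSleep's intended use, adding start-up jitter so many agents launched together do not flush at the same instant (the interval is an assumed example):

shutdown := make(chan struct{})
RandomSleep(10*time.Second, shutdown) // sleeps 0-10s, or returns early if shutdown closes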
|
||||
@@ -1,44 +1,157 @@
|
||||
package internal
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"os/exec"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
func testGlobMatch(t *testing.T, pattern, subj string) {
|
||||
if !Glob(pattern, subj) {
|
||||
t.Errorf("%s should match %s", pattern, subj)
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type SnakeTest struct {
|
||||
input string
|
||||
output string
|
||||
}
|
||||
|
||||
var tests = []SnakeTest{
|
||||
{"a", "a"},
|
||||
{"snake", "snake"},
|
||||
{"A", "a"},
|
||||
{"ID", "id"},
|
||||
{"MOTD", "motd"},
|
||||
{"Snake", "snake"},
|
||||
{"SnakeTest", "snake_test"},
|
||||
{"APIResponse", "api_response"},
|
||||
{"SnakeID", "snake_id"},
|
||||
{"SnakeIDGoogle", "snake_id_google"},
|
||||
{"LinuxMOTD", "linux_motd"},
|
||||
{"OMGWTFBBQ", "omgwtfbbq"},
|
||||
{"omg_wtf_bbq", "omg_wtf_bbq"},
|
||||
}
|
||||
|
||||
func TestSnakeCase(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
if SnakeCase(test.input) != test.output {
|
||||
t.Errorf(`SnakeCase("%s"), wanted "%s", got \%s"`, test.input, test.output, SnakeCase(test.input))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testGlobNoMatch(t *testing.T, pattern, subj string) {
|
||||
if Glob(pattern, subj) {
|
||||
t.Errorf("%s should not match %s", pattern, subj)
|
||||
var (
|
||||
sleepbin, _ = exec.LookPath("sleep")
|
||||
echobin, _ = exec.LookPath("echo")
|
||||
shell, _ = exec.LookPath("sh")
|
||||
)
|
||||
|
||||
func TestRunTimeout(t *testing.T) {
|
||||
if sleepbin == "" {
|
||||
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||
}
|
||||
cmd := exec.Command(sleepbin, "10")
|
||||
start := time.Now()
|
||||
err := RunTimeout(cmd, time.Millisecond*20)
|
||||
elapsed := time.Since(start)
|
||||
|
||||
assert.Equal(t, TimeoutErr, err)
|
||||
// Verify that command gets killed in 20ms, with some breathing room
|
||||
assert.True(t, elapsed < time.Millisecond*75)
|
||||
}
|
||||
|
||||
func TestEmptyPattern(t *testing.T) {
|
||||
testGlobMatch(t, "", "")
|
||||
testGlobNoMatch(t, "", "test")
|
||||
}
|
||||
|
||||
func TestPatternWithoutGlobs(t *testing.T) {
|
||||
testGlobMatch(t, "test", "test")
|
||||
}
|
||||
|
||||
func TestGlob(t *testing.T) {
|
||||
for _, pattern := range []string{
|
||||
"*test", // Leading glob
|
||||
"this*", // Trailing glob
|
||||
"*is*a*", // Lots of globs
|
||||
"**test**", // Double glob characters
|
||||
"**is**a***test*", // Varying number of globs
|
||||
} {
|
||||
testGlobMatch(t, pattern, "this_is_a_test")
|
||||
func TestCombinedOutputTimeout(t *testing.T) {
|
||||
if sleepbin == "" {
|
||||
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||
}
|
||||
cmd := exec.Command(sleepbin, "10")
|
||||
start := time.Now()
|
||||
_, err := CombinedOutputTimeout(cmd, time.Millisecond*20)
|
||||
elapsed := time.Since(start)
|
||||
|
||||
for _, pattern := range []string{
|
||||
"test*", // Implicit substring match should fail
|
||||
"*is", // Partial match should fail
|
||||
"*no*", // Globs without a match between them should fail
|
||||
} {
|
||||
testGlobNoMatch(t, pattern, "this_is_a_test")
|
||||
}
|
||||
assert.Equal(t, TimeoutErr, err)
|
||||
// Verify that command gets killed in 20ms, with some breathing room
|
||||
assert.True(t, elapsed < time.Millisecond*75)
|
||||
}
|
||||
|
||||
func TestCombinedOutput(t *testing.T) {
|
||||
if echobin == "" {
|
||||
t.Skip("'echo' binary not available on OS, skipping.")
|
||||
}
|
||||
cmd := exec.Command(echobin, "foo")
|
||||
out, err := CombinedOutputTimeout(cmd, time.Second)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "foo\n", string(out))
|
||||
}
|
||||
|
||||
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
|
||||
// the same output from a failed command.
|
||||
func TestCombinedOutputError(t *testing.T) {
|
||||
if shell == "" {
|
||||
t.Skip("'sh' binary not available on OS, skipping.")
|
||||
}
|
||||
cmd := exec.Command(shell, "-c", "false")
|
||||
expected, err := cmd.CombinedOutput()
|
||||
|
||||
cmd2 := exec.Command(shell, "-c", "false")
|
||||
actual, err := CombinedOutputTimeout(cmd2, time.Second)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestRunError(t *testing.T) {
|
||||
if shell == "" {
|
||||
t.Skip("'sh' binary not available on OS, skipping.")
|
||||
}
|
||||
cmd := exec.Command(shell, "-c", "false")
|
||||
err := RunTimeout(cmd, time.Second)
|
||||
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestRandomSleep(t *testing.T) {
|
||||
// test that zero max returns immediately
|
||||
s := time.Now()
|
||||
RandomSleep(time.Duration(0), make(chan struct{}))
|
||||
elapsed := time.Since(s)
|
||||
assert.True(t, elapsed < time.Millisecond)
|
||||
|
||||
// test that max sleep is respected
|
||||
s = time.Now()
|
||||
RandomSleep(time.Millisecond*50, make(chan struct{}))
|
||||
elapsed = time.Since(s)
|
||||
assert.True(t, elapsed < time.Millisecond*100)
|
||||
|
||||
// test that shutdown is respected
|
||||
s = time.Now()
|
||||
shutdown := make(chan struct{})
|
||||
go func() {
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
close(shutdown)
|
||||
}()
|
||||
RandomSleep(time.Second, shutdown)
|
||||
elapsed = time.Since(s)
|
||||
assert.True(t, elapsed < time.Millisecond*150)
|
||||
}
|
||||
|
||||
func TestDuration(t *testing.T) {
|
||||
var d Duration
|
||||
|
||||
d.UnmarshalTOML([]byte(`"1s"`))
|
||||
assert.Equal(t, time.Second, d.Duration)
|
||||
|
||||
d = Duration{}
|
||||
d.UnmarshalTOML([]byte(`1s`))
|
||||
assert.Equal(t, time.Second, d.Duration)
|
||||
|
||||
d = Duration{}
|
||||
d.UnmarshalTOML([]byte(`'1s'`))
|
||||
assert.Equal(t, time.Second, d.Duration)
|
||||
|
||||
d = Duration{}
|
||||
d.UnmarshalTOML([]byte(`10`))
|
||||
assert.Equal(t, 10*time.Second, d.Duration)
|
||||
|
||||
d = Duration{}
|
||||
d.UnmarshalTOML([]byte(`1.5`))
|
||||
assert.Equal(t, time.Second, d.Duration)
|
||||
}
|
||||
|
||||
59
internal/limiter/limiter.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package limiter
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewRateLimiter returns a rate limiter that will emit from the C
|
||||
// channel only 'n' times every 'rate' seconds.
|
||||
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
|
||||
r := &rateLimiter{
|
||||
C: make(chan bool),
|
||||
rate: rate,
|
||||
n: n,
|
||||
shutdown: make(chan bool),
|
||||
}
|
||||
r.wg.Add(1)
|
||||
go r.limiter()
|
||||
return r
|
||||
}
|
||||
|
||||
type rateLimiter struct {
|
||||
C chan bool
|
||||
rate time.Duration
|
||||
n int
|
||||
|
||||
shutdown chan bool
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
func (r *rateLimiter) Stop() {
|
||||
close(r.shutdown)
|
||||
r.wg.Wait()
|
||||
close(r.C)
|
||||
}
|
||||
|
||||
func (r *rateLimiter) limiter() {
|
||||
defer r.wg.Done()
|
||||
ticker := time.NewTicker(r.rate)
|
||||
defer ticker.Stop()
|
||||
counter := 0
|
||||
for {
|
||||
select {
|
||||
case <-r.shutdown:
|
||||
return
|
||||
case <-ticker.C:
|
||||
counter = 0
|
||||
default:
|
||||
if counter < r.n {
|
||||
select {
|
||||
case r.C <- true:
|
||||
counter++
|
||||
case <-r.shutdown:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
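A short sketch of the intended consumption pattern: receive a token from C before each unit of work, then Stop when finished (the loop bound is illustrative):

r := NewRateLimiter(5, time.Second) // at most 5 tokens per second
for i := 0; i < 20; i++ {
	<-r.C // blocks until a token is available
	// ... do one rate-limited unit of work ...
}
r.Stop() // stops the limiter goroutine and closes C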
54
internal/limiter/limiter_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package limiter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRateLimiter(t *testing.T) {
|
||||
r := NewRateLimiter(5, time.Second)
|
||||
ticker := time.NewTicker(time.Millisecond * 75)
|
||||
|
||||
// test that we can only get 5 receives from the rate limiter
|
||||
counter := 0
|
||||
outer:
|
||||
for {
|
||||
select {
|
||||
case <-r.C:
|
||||
counter++
|
||||
case <-ticker.C:
|
||||
break outer
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, 5, counter)
|
||||
r.Stop()
|
||||
// verify that the Stop function closes the channel.
|
||||
_, ok := <-r.C
|
||||
assert.False(t, ok)
|
||||
}
|
||||
|
||||
func TestRateLimiterMultipleIterations(t *testing.T) {
|
||||
r := NewRateLimiter(5, time.Millisecond*50)
|
||||
ticker := time.NewTicker(time.Millisecond * 250)
|
||||
|
||||
// test that we can get 15 receives from the rate limiter
|
||||
counter := 0
|
||||
outer:
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
break outer
|
||||
case <-r.C:
|
||||
counter++
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, counter > 10)
|
||||
r.Stop()
|
||||
// verify that the Stop function closes the channel.
|
||||
_, ok := <-r.C
|
||||
assert.False(t, ok)
|
||||
}
|
||||
263
internal/models/filter.go
Normal file
@@ -0,0 +1,263 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
)
|
||||
|
||||
// TagFilter is the name of a tag, and the values on which to filter
|
||||
type TagFilter struct {
|
||||
Name string
|
||||
Filter []string
|
||||
filter filter.Filter
|
||||
}
|
||||
|
||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
||||
type Filter struct {
|
||||
NameDrop []string
|
||||
nameDrop filter.Filter
|
||||
NamePass []string
|
||||
namePass filter.Filter
|
||||
|
||||
FieldDrop []string
|
||||
fieldDrop filter.Filter
|
||||
FieldPass []string
|
||||
fieldPass filter.Filter
|
||||
|
||||
TagDrop []TagFilter
|
||||
TagPass []TagFilter
|
||||
|
||||
TagExclude []string
|
||||
tagExclude filter.Filter
|
||||
TagInclude []string
|
||||
tagInclude filter.Filter
|
||||
|
||||
isActive bool
|
||||
}
|
||||
|
||||
// Compile all Filter lists into filter.Filter objects.
|
||||
func (f *Filter) Compile() error {
|
||||
if len(f.NameDrop) == 0 &&
|
||||
len(f.NamePass) == 0 &&
|
||||
len(f.FieldDrop) == 0 &&
|
||||
len(f.FieldPass) == 0 &&
|
||||
len(f.TagInclude) == 0 &&
|
||||
len(f.TagExclude) == 0 &&
|
||||
len(f.TagPass) == 0 &&
|
||||
len(f.TagDrop) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
f.isActive = true
|
||||
var err error
|
||||
f.nameDrop, err = filter.Compile(f.NameDrop)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'namedrop', %s", err)
|
||||
}
|
||||
f.namePass, err = filter.Compile(f.NamePass)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'namepass', %s", err)
|
||||
}
|
||||
|
||||
f.fieldDrop, err = filter.Compile(f.FieldDrop)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
|
||||
}
|
||||
f.fieldPass, err = filter.Compile(f.FieldPass)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
|
||||
}
|
||||
|
||||
f.tagExclude, err = filter.Compile(f.TagExclude)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
|
||||
}
|
||||
f.tagInclude, err = filter.Compile(f.TagInclude)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'taginclude', %s", err)
|
||||
}
|
||||
|
||||
for i := range f.TagDrop {
|
||||
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
|
||||
}
|
||||
}
|
||||
for i := range f.TagPass {
|
||||
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error compiling 'tagpass', %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Apply applies the filter to the given measurement name, fields map, and
|
||||
// tags map. It will return false if the metric should be "filtered out", and
|
||||
// true if the metric should "pass".
|
||||
// It will modify tags & fields in-place if they need to be deleted.
|
||||
func (f *Filter) Apply(
|
||||
measurement string,
|
||||
fields map[string]interface{},
|
||||
tags map[string]string,
|
||||
) bool {
|
||||
if !f.isActive {
|
||||
return true
|
||||
}
|
||||
|
||||
// check if the measurement name should pass
|
||||
if !f.shouldNamePass(measurement) {
|
||||
return false
|
||||
}
|
||||
|
||||
// check if the tags should pass
|
||||
if !f.shouldTagsPass(tags) {
|
||||
return false
|
||||
}
|
||||
|
||||
// filter fields
|
||||
for fieldkey := range fields {
|
||||
if !f.shouldFieldPass(fieldkey) {
|
||||
delete(fields, fieldkey)
|
||||
}
|
||||
}
|
||||
if len(fields) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// filter tags
|
||||
f.filterTags(tags)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// IsActive returns whether the filter is active
|
||||
func (f *Filter) IsActive() bool {
|
||||
return f.isActive
|
||||
}
|
||||
|
||||
// shouldNamePass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f *Filter) shouldNamePass(key string) bool {
|
||||
|
||||
pass := func(f *Filter) bool {
|
||||
if f.namePass.Match(key) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
drop := func(f *Filter) bool {
|
||||
if f.nameDrop.Match(key) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if f.namePass != nil && f.nameDrop != nil {
|
||||
return pass(f) && drop(f)
|
||||
} else if f.namePass != nil {
|
||||
return pass(f)
|
||||
} else if f.nameDrop != nil {
|
||||
return drop(f)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// shouldFieldPass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f *Filter) shouldFieldPass(key string) bool {
|
||||
|
||||
pass := func(f *Filter) bool {
|
||||
if f.fieldPass.Match(key) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
drop := func(f *Filter) bool {
|
||||
if f.fieldDrop.Match(key) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if f.fieldPass != nil && f.fieldDrop != nil {
|
||||
return pass(f) && drop(f)
|
||||
} else if f.fieldPass != nil {
|
||||
return pass(f)
|
||||
} else if f.fieldDrop != nil {
|
||||
return drop(f)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// shouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// based on the tagdrop/tagpass filter parameters
|
||||
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
|
||||
|
||||
pass := func(f *Filter) bool {
|
||||
for _, pat := range f.TagPass {
|
||||
if pat.filter == nil {
|
||||
continue
|
||||
}
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
if pat.filter.Match(tagval) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
drop := func(f *Filter) bool {
|
||||
for _, pat := range f.TagDrop {
|
||||
if pat.filter == nil {
|
||||
continue
|
||||
}
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
if pat.filter.Match(tagval) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Add additional logic in case where both parameters are set.
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
if f.TagPass != nil && f.TagDrop != nil {
|
||||
// return true only in case when tag pass and won't be dropped (true, true).
|
||||
// in case when the same tag should be passed and dropped it will be dropped (true, false).
|
||||
return pass(f) && drop(f)
|
||||
} else if f.TagPass != nil {
|
||||
return pass(f)
|
||||
} else if f.TagDrop != nil {
|
||||
return drop(f)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Apply TagInclude and TagExclude filters.
|
||||
// modifies the tags map in-place.
|
||||
func (f *Filter) filterTags(tags map[string]string) {
|
||||
if f.tagInclude != nil {
|
||||
for k := range tags {
|
||||
if !f.tagInclude.Match(k) {
|
||||
delete(tags, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if f.tagExclude != nil {
|
||||
for k := range tags {
|
||||
if f.tagExclude.Match(k) {
|
||||
delete(tags, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
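To make the pass/drop semantics concrete, a small sketch of building and applying a Filter (measurement, field, and tag names are invented):

f := Filter{
	NamePass:  []string{"cpu*"},
	FieldDrop: []string{"time_*"},
}
if err := f.Compile(); err != nil {
	panic(err)
}
fields := map[string]interface{}{"usage_idle": 99.5, "time_user": 1.0}
tags := map[string]string{"host": "localhost"}
ok := f.Apply("cpu", fields, tags) // true; "time_user" is deleted in place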
444
internal/models/filter_test.go
Normal file
@@ -0,0 +1,444 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFilter_ApplyEmpty(t *testing.T) {
|
||||
f := Filter{}
|
||||
require.NoError(t, f.Compile())
|
||||
assert.False(t, f.IsActive())
|
||||
|
||||
assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
|
||||
}
|
||||
|
||||
func TestFilter_ApplyTagsDontPass(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
}
|
||||
f := Filter{
|
||||
TagDrop: filters,
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
assert.True(t, f.IsActive())
|
||||
|
||||
assert.False(t, f.Apply("m",
|
||||
map[string]interface{}{"value": int64(1)},
|
||||
map[string]string{"cpu": "cpu-total"}))
|
||||
}
|
||||
|
||||
func TestFilter_ApplyDeleteFields(t *testing.T) {
|
||||
f := Filter{
|
||||
FieldDrop: []string{"value"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
assert.True(t, f.IsActive())
|
||||
|
||||
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
|
||||
assert.True(t, f.Apply("m", fields, nil))
|
||||
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
|
||||
}
|
||||
|
||||
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
|
||||
f := Filter{
|
||||
FieldDrop: []string{"value*"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
assert.True(t, f.IsActive())
|
||||
|
||||
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
|
||||
assert.False(t, f.Apply("m", fields, nil))
|
||||
}
|
||||
|
||||
func TestFilter_Empty(t *testing.T) {
|
||||
f := Filter{}
|
||||
|
||||
measurements := []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"barfoo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"supercalifradjulisticexpialidocious",
|
||||
}
|
||||
|
||||
for _, measurement := range measurements {
|
||||
if !f.shouldFieldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_NamePass(t *testing.T) {
|
||||
f := Filter{
|
||||
NamePass: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
passes := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.shouldNamePass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.shouldNamePass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_NameDrop(t *testing.T) {
|
||||
f := Filter{
|
||||
NameDrop: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
drops := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.shouldNamePass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.shouldNamePass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_FieldPass(t *testing.T) {
|
||||
f := Filter{
|
||||
FieldPass: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
passes := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.shouldFieldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.shouldFieldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_FieldDrop(t *testing.T) {
|
||||
f := Filter{
|
||||
FieldDrop: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
drops := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.shouldFieldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.shouldFieldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagPass(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagPass: filters,
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.shouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.shouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagDrop(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagDrop: filters,
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.shouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.shouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_FilterTagsNoMatches(t *testing.T) {
|
||||
pretags := map[string]string{
|
||||
"host": "localhost",
|
||||
"mytag": "foobar",
|
||||
}
|
||||
f := Filter{
|
||||
TagExclude: []string{"nomatch"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
f.filterTags(pretags)
|
||||
assert.Equal(t, map[string]string{
|
||||
"host": "localhost",
|
||||
"mytag": "foobar",
|
||||
}, pretags)
|
||||
|
||||
f = Filter{
|
||||
TagInclude: []string{"nomatch"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
f.filterTags(pretags)
|
||||
assert.Equal(t, map[string]string{}, pretags)
|
||||
}
|
||||
|
||||
func TestFilter_FilterTagsMatches(t *testing.T) {
|
||||
pretags := map[string]string{
|
||||
"host": "localhost",
|
||||
"mytag": "foobar",
|
||||
}
|
||||
f := Filter{
|
||||
TagExclude: []string{"ho*"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
f.filterTags(pretags)
|
||||
assert.Equal(t, map[string]string{
|
||||
"mytag": "foobar",
|
||||
}, pretags)
|
||||
|
||||
pretags = map[string]string{
|
||||
"host": "localhost",
|
||||
"mytag": "foobar",
|
||||
}
|
||||
f = Filter{
|
||||
TagInclude: []string{"my*"},
|
||||
}
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
f.filterTags(pretags)
|
||||
assert.Equal(t, map[string]string{
|
||||
"mytag": "foobar",
|
||||
}, pretags)
|
||||
}
|
||||
|
||||
// TestFilter_FilterNamePassAndDrop used for check case when
|
||||
// both parameters were defined
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
func TestFilter_FilterNamePassAndDrop(t *testing.T) {
|
||||
|
||||
inputData := []string{"name1", "name2", "name3", "name4"}
|
||||
expectedResult := []bool{false, true, false, false}
|
||||
|
||||
f := Filter{
|
||||
NamePass: []string{"name1", "name2"},
|
||||
NameDrop: []string{"name1", "name3"},
|
||||
}
|
||||
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
for i, name := range inputData {
|
||||
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilter_FilterFieldPassAndDrop used for check case when
|
||||
// both parameters were defined
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
|
||||
|
||||
inputData := []string{"field1", "field2", "field3", "field4"}
|
||||
expectedResult := []bool{false, true, false, false}
|
||||
|
||||
f := Filter{
|
||||
FieldPass: []string{"field1", "field2"},
|
||||
FieldDrop: []string{"field1", "field3"},
|
||||
}
|
||||
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
for i, field := range inputData {
|
||||
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
|
||||
}
|
||||
}
|
||||
|
||||
// TestFilter_FilterTagsPassAndDrop used for check case when
|
||||
// both parameters were defined
|
||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
|
||||
|
||||
inputData := []map[string]string{
|
||||
{"tag1": "1", "tag2": "3"},
|
||||
{"tag1": "1", "tag2": "2"},
|
||||
{"tag1": "2", "tag2": "1"},
|
||||
{"tag1": "4", "tag2": "1"},
|
||||
}
|
||||
|
||||
expectedResult := []bool{false, true, false, false}
|
||||
|
||||
filterPass := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "tag1",
|
||||
Filter: []string{"1", "4"},
|
||||
},
|
||||
}
|
||||
|
||||
filterDrop := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "tag1",
|
||||
Filter: []string{"4"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "tag2",
|
||||
Filter: []string{"3"},
|
||||
},
|
||||
}
|
||||
|
||||
f := Filter{
|
||||
TagDrop: filterDrop,
|
||||
TagPass: filterPass,
|
||||
}
|
||||
|
||||
require.NoError(t, f.Compile())
|
||||
|
||||
for i, tag := range inputData {
|
||||
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
|
||||
}
|
||||
|
||||
}
|
||||
166
internal/models/makemetric.go
Normal file
@@ -0,0 +1,166 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"log"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
)
|
||||
|
||||
// makemetric is used by both RunningAggregator & RunningInput
|
||||
// to make metrics.
|
||||
// nameOverride: override the name of the measurement being made.
|
||||
// namePrefix: add this prefix to each measurement name.
|
||||
// nameSuffix: add this suffix to each measurement name.
|
||||
// pluginTags: these are tags that are specific to this plugin.
|
||||
// daemonTags: these are daemon-wide global tags, and get applied after pluginTags.
|
||||
// filter: this is a filter to apply to each metric being made.
|
||||
// applyFilter: if false, the above filter is not applied to each metric.
|
||||
// This is used by Aggregators, because aggregators use filters
|
||||
// on incoming metrics instead of on created metrics.
|
||||
// TODO refactor this to not have such a huge func signature.
|
||||
func makemetric(
|
||||
measurement string,
|
||||
fields map[string]interface{},
|
||||
tags map[string]string,
|
||||
nameOverride string,
|
||||
namePrefix string,
|
||||
nameSuffix string,
|
||||
pluginTags map[string]string,
|
||||
daemonTags map[string]string,
|
||||
filter Filter,
|
||||
applyFilter bool,
|
||||
mType telegraf.ValueType,
|
||||
t time.Time,
|
||||
) telegraf.Metric {
|
||||
if len(fields) == 0 || len(measurement) == 0 {
|
||||
return nil
|
||||
}
|
||||
if tags == nil {
|
||||
tags = make(map[string]string)
|
||||
}
|
||||
|
||||
// Override measurement name if set
|
||||
if len(nameOverride) != 0 {
|
||||
measurement = nameOverride
|
||||
}
|
||||
// Apply measurement prefix and suffix if set
|
||||
if len(namePrefix) != 0 {
|
||||
measurement = namePrefix + measurement
|
||||
}
|
||||
if len(nameSuffix) != 0 {
|
||||
measurement = measurement + nameSuffix
|
||||
}
|
||||
|
||||
// Apply plugin-wide tags if set
|
||||
for k, v := range pluginTags {
|
||||
if _, ok := tags[k]; !ok {
|
||||
tags[k] = v
|
||||
}
|
||||
}
|
||||
// Apply daemon-wide tags if set
|
||||
for k, v := range daemonTags {
|
||||
if _, ok := tags[k]; !ok {
|
||||
tags[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the metric filter(s)
|
||||
// for aggregators, the filter does not get applied when the metric is made.
|
||||
// instead, the filter is applied to metric incoming into the plugin.
|
||||
// ie, it gets applied in the RunningAggregator.Apply function.
|
||||
if applyFilter {
|
||||
if ok := filter.Apply(measurement, fields, tags); !ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range tags {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
log.Printf("D! Measurement [%s] tag [%s] "+
|
||||
"ends with a backslash, skipping", measurement, k)
|
||||
delete(tags, k)
|
||||
continue
|
||||
} else if strings.HasSuffix(v, `\`) {
|
||||
log.Printf("D! Measurement [%s] tag [%s] has a value "+
|
||||
"ending with a backslash, skipping", measurement, k)
|
||||
delete(tags, k)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range fields {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
log.Printf("D! Measurement [%s] field [%s] "+
|
||||
"ends with a backslash, skipping", measurement, k)
|
||||
delete(fields, k)
|
||||
continue
|
||||
}
|
||||
// Validate uint64 and float64 fields
|
||||
// convert all int & uint types to int64
|
||||
switch val := v.(type) {
|
||||
case nil:
|
||||
// delete nil fields
|
||||
delete(fields, k)
|
||||
case uint:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint8:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint16:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint32:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int8:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int16:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int32:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint64:
|
||||
// InfluxDB does not support writing uint64
|
||||
if val < uint64(9223372036854775808) {
|
||||
fields[k] = int64(val)
|
||||
} else {
|
||||
fields[k] = int64(9223372036854775807)
|
||||
}
|
||||
continue
|
||||
case float32:
|
||||
fields[k] = float64(val)
|
||||
continue
|
||||
case float64:
|
||||
// NaNs are invalid values in influxdb, skip measurement
|
||||
if math.IsNaN(val) || math.IsInf(val, 0) {
|
||||
log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
|
||||
"field, skipping",
|
||||
measurement, k)
|
||||
delete(fields, k)
|
||||
continue
|
||||
}
|
||||
case string:
|
||||
fields[k] = v
|
||||
default:
|
||||
fields[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
m, err := metric.New(measurement, tags, fields, t, mType)
|
||||
if err != nil {
|
||||
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
|
||||
return nil
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
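Within this package, a call might look as follows (all values illustrative). Note applyFilter=false is the aggregator path, where filtering already happened on the incoming metrics:

m := makemetric(
	"cpu",
	map[string]interface{}{"usage_idle": 99.5},
	map[string]string{"host": "localhost"},
	"", "", "",       // nameOverride, namePrefix, nameSuffix
	nil, nil,         // pluginTags, daemonTags
	Filter{},         // filter
	true,             // applyFilter (inputs path)
	telegraf.Untyped, // mType
	time.Now(),
)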
166
internal/models/running_aggregator.go
Normal file
@@ -0,0 +1,166 @@
package models

import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/metric"
)

type RunningAggregator struct {
    a      telegraf.Aggregator
    Config *AggregatorConfig

    metrics chan telegraf.Metric

    periodStart time.Time
    periodEnd   time.Time
}

func NewRunningAggregator(
    a telegraf.Aggregator,
    conf *AggregatorConfig,
) *RunningAggregator {
    return &RunningAggregator{
        a:       a,
        Config:  conf,
        metrics: make(chan telegraf.Metric, 100),
    }
}

// AggregatorConfig contains the configuration parameters for the running
// aggregator plugin.
type AggregatorConfig struct {
    Name string

    DropOriginal      bool
    NameOverride      string
    MeasurementPrefix string
    MeasurementSuffix string
    Tags              map[string]string
    Filter            Filter

    Period time.Duration
    Delay  time.Duration
}

func (r *RunningAggregator) Name() string {
    return "aggregators." + r.Config.Name
}

func (r *RunningAggregator) MakeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
    m := makemetric(
        measurement,
        fields,
        tags,
        r.Config.NameOverride,
        r.Config.MeasurementPrefix,
        r.Config.MeasurementSuffix,
        r.Config.Tags,
        nil,
        r.Config.Filter,
        false,
        mType,
        t,
    )

    if m != nil {
        m.SetAggregate(true)
    }

    return m
}

// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Add returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
    if r.Config.Filter.IsActive() {
        // check if the aggregator should apply this metric
        name := in.Name()
        fields := in.Fields()
        tags := in.Tags()
        t := in.Time()
        if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
            // aggregator should not apply this metric
            return false
        }

        in, _ = metric.New(name, tags, fields, t)
    }

    r.metrics <- in
    return r.Config.DropOriginal
}

func (r *RunningAggregator) add(in telegraf.Metric) {
    r.a.Add(in)
}

func (r *RunningAggregator) push(acc telegraf.Accumulator) {
    r.a.Push(acc)
}

func (r *RunningAggregator) reset() {
    r.a.Reset()
}

// Run runs the running aggregator, listens for incoming metrics, and waits
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
    acc telegraf.Accumulator,
    now time.Time,
    shutdown chan struct{},
) {
    // The start of the period is truncated to the nearest second.
    //
    // Every metric then gets its timestamp checked and is dropped if it
    // is not within:
    //
    //   start < t < end + truncation + delay
    //
    // So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
    //   now        = 00:00.2
    //   start      = 00:00
    //   truncation = 0.2s
    //   end        = 00:10
    //   1st interval: 00:00 - 00:10.5
    //   2nd interval: 00:10 - 00:20.5
    //   etc.
    //
    r.periodStart = now.Truncate(time.Second)
    truncation := now.Sub(r.periodStart)
    r.periodEnd = r.periodStart.Add(r.Config.Period)
    time.Sleep(r.Config.Delay)
    periodT := time.NewTicker(r.Config.Period)
    defer periodT.Stop()

    for {
        select {
        case <-shutdown:
            if len(r.metrics) > 0 {
                // wait until metrics are flushed before exiting
                continue
            }
            return
        case m := <-r.metrics:
            if m.Time().Before(r.periodStart) ||
                m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
                // the metric is outside the current aggregation period, so
                // skip it.
                continue
            }
            r.add(m)
        case <-periodT.C:
            r.periodStart = r.periodEnd
            r.periodEnd = r.periodStart.Add(r.Config.Period)
            r.push(acc)
            r.reset()
        }
    }
}
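The window arithmetic in Run is the subtle part of this file. A self-contained sketch of the same bookkeeping, using the numbers from the comment above (the inWindow helper is illustrative, not Telegraf code):

package main

import (
    "fmt"
    "time"
)

func main() {
    // A 10s period, 0.3s delay, started at 00:00.2.
    now := time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC).Add(200 * time.Millisecond)
    period := 10 * time.Second
    delay := 300 * time.Millisecond

    periodStart := now.Truncate(time.Second) // 00:00
    truncation := now.Sub(periodStart)       // 200ms
    periodEnd := periodStart.Add(period)     // 00:10

    // A metric is kept iff it is not before periodStart and not after
    // periodEnd + truncation + delay (00:10.5 here), exactly as in Run.
    inWindow := func(t time.Time) bool {
        return !t.Before(periodStart) &&
            !t.After(periodEnd.Add(truncation).Add(delay))
    }

    fmt.Println(inWindow(periodStart.Add(5 * time.Second)))      // true
    fmt.Println(inWindow(periodEnd.Add(400 * time.Millisecond))) // true, within the 0.5s slack
    fmt.Println(inWindow(periodEnd.Add(time.Second)))            // false, past 00:10.5
}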
256 internal/models/running_aggregator_test.go Normal file
@@ -0,0 +1,256 @@
package models

import (
    "fmt"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
)

func TestAdd(t *testing.T) {
    a := &TestAggregator{}
    ra := NewRunningAggregator(a, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"*"},
        },
        Period: time.Millisecond * 500,
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    go ra.Run(&acc, time.Now(), make(chan struct{}))

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Millisecond*150),
    )
    assert.False(t, ra.Add(m))

    for {
        time.Sleep(time.Millisecond)
        if atomic.LoadInt64(&a.sum) > 0 {
            break
        }
    }
    assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}

func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
    a := &TestAggregator{}
    ra := NewRunningAggregator(a, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"*"},
        },
        Period: time.Millisecond * 500,
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    go ra.Run(&acc, time.Now(), make(chan struct{}))

    // metric before current period
    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(-time.Hour),
    )
    assert.False(t, ra.Add(m))

    // metric after current period
    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Hour),
    )
    assert.False(t, ra.Add(m))

    // "now" metric
    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Millisecond*50),
    )
    assert.False(t, ra.Add(m))

    for {
        time.Sleep(time.Millisecond)
        if atomic.LoadInt64(&a.sum) > 0 {
            break
        }
    }
    assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}

func TestAddAndPushOnePeriod(t *testing.T) {
    a := &TestAggregator{}
    ra := NewRunningAggregator(a, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"*"},
        },
        Period: time.Millisecond * 500,
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    shutdown := make(chan struct{})

    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        ra.Run(&acc, time.Now(), shutdown)
    }()

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Millisecond*100),
    )
    assert.False(t, ra.Add(m))

    for {
        time.Sleep(time.Millisecond)
        if acc.NMetrics() > 0 {
            break
        }
    }
    acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)})

    close(shutdown)
    wg.Wait()
}

func TestAddDropOriginal(t *testing.T) {
    ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"RI*"},
        },
        DropOriginal: true,
    })
    assert.NoError(t, ra.Config.Filter.Compile())

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now(),
    )
    assert.True(t, ra.Add(m))

    // this metric name doesn't match the filter, so Add will return false
    m2 := ra.MakeMetric(
        "foobar",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now(),
    )
    assert.False(t, ra.Add(m2))
}

// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
    now := time.Now()
    ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
        Name: "TestRunningAggregator",
    })
    assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Untyped,
    )

    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Counter,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Counter,
    )

    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Gauge,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Gauge,
    )
}

type TestAggregator struct {
    sum int64
}

func (t *TestAggregator) Description() string  { return "" }
func (t *TestAggregator) SampleConfig() string { return "" }
func (t *TestAggregator) Reset() {
    atomic.StoreInt64(&t.sum, 0)
}

func (t *TestAggregator) Push(acc telegraf.Accumulator) {
    acc.AddFields("TestMetric",
        map[string]interface{}{"sum": t.sum},
        map[string]string{},
    )
}

func (t *TestAggregator) Add(in telegraf.Metric) {
    for _, v := range in.Fields() {
        if vi, ok := v.(int64); ok {
            atomic.AddInt64(&t.sum, vi)
        }
    }
}
96 internal/models/running_input.go Normal file
@@ -0,0 +1,96 @@
package models

import (
    "fmt"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/selfstat"
)

var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})

type RunningInput struct {
    Input  telegraf.Input
    Config *InputConfig

    trace       bool
    defaultTags map[string]string

    MetricsGathered selfstat.Stat
}

func NewRunningInput(
    input telegraf.Input,
    config *InputConfig,
) *RunningInput {
    return &RunningInput{
        Input:  input,
        Config: config,
        MetricsGathered: selfstat.Register(
            "gather",
            "metrics_gathered",
            map[string]string{"input": config.Name},
        ),
    }
}

// InputConfig contains the name, interval, and filter of an input plugin.
type InputConfig struct {
    Name              string
    NameOverride      string
    MeasurementPrefix string
    MeasurementSuffix string
    Tags              map[string]string
    Filter            Filter
    Interval          time.Duration
}

func (r *RunningInput) Name() string {
    return "inputs." + r.Config.Name
}

// MakeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (r *RunningInput) MakeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
    m := makemetric(
        measurement,
        fields,
        tags,
        r.Config.NameOverride,
        r.Config.MeasurementPrefix,
        r.Config.MeasurementSuffix,
        r.Config.Tags,
        r.defaultTags,
        r.Config.Filter,
        true,
        mType,
        t,
    )

    if r.trace && m != nil {
        fmt.Print("> " + m.String())
    }

    r.MetricsGathered.Incr(1)
    GlobalMetricsGathered.Incr(1)
    return m
}

func (r *RunningInput) Trace() bool {
    return r.trace
}

func (r *RunningInput) SetTrace(trace bool) {
    r.trace = trace
}

func (r *RunningInput) SetDefaultTags(tags map[string]string) {
    r.defaultTags = tags
}
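The NameOverride/MeasurementPrefix/MeasurementSuffix config fields combine into the final measurement name. A minimal sketch of that resolution, matching what the tests below assert (the finalName helper is illustrative, not Telegraf code):

package main

import "fmt"

// finalName mirrors the name resolution exercised by the tests below:
// NameOverride wins outright; otherwise the prefix and suffix wrap the
// measurement name the plugin supplied.
func finalName(measurement, override, prefix, suffix string) string {
    if override != "" {
        return override
    }
    return prefix + measurement + suffix
}

func main() {
    fmt.Println(finalName("RITest", "foobar", "", ""))  // foobar
    fmt.Println(finalName("RITest", "", "foobar_", "")) // foobar_RITest
    fmt.Println(finalName("RITest", "", "", "_foobar")) // RITest_foobar
}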
463 internal/models/running_input_test.go Normal file
@@ -0,0 +1,463 @@
package models

import (
    "fmt"
    "math"
    "testing"
    "time"

    "github.com/influxdata/telegraf"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestMakeMetricNoFields(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Nil(t, m)
}

// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "value": int(101),
            "nil":   nil,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())
    assert.Equal(t, "inputs.TestRunningInput", ri.Name())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Untyped,
    )

    m = ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Counter,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Counter,
    )

    m = ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Gauge,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Gauge,
    )
}

func TestMakeMetricWithPluginTags(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
        Tags: map[string]string{
            "foo": "bar",
        },
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        nil,
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

func TestMakeMetricFilteredOut(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
        Tags: map[string]string{
            "foo": "bar",
        },
        Filter: Filter{NamePass: []string{"foobar"}},
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())
    assert.NoError(t, ri.Config.Filter.Compile())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        nil,
        telegraf.Untyped,
        now,
    )
    assert.Nil(t, m)
}

func TestMakeMetricWithDaemonTags(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })
    ri.SetDefaultTags(map[string]string{
        "foo": "bar",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

// Inf fields should get dropped, and the metric should still be created
func TestMakeMetricInfFields(t *testing.T) {
    inf := math.Inf(1)
    ninf := math.Inf(-1)
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "value": int(101),
            "inf":   inf,
            "ninf":  ninf,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

func TestMakeMetricAllFieldTypes(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "a": int(10),
            "b": int8(10),
            "c": int16(10),
            "d": int32(10),
            "e": uint(10),
            "f": uint8(10),
            "g": uint16(10),
            "h": uint32(10),
            "i": uint64(10),
            "j": float32(10),
            "k": uint64(9223372036854775810),
            "l": "foobar",
            "m": true,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Contains(t, m.String(), "a=10i")
    assert.Contains(t, m.String(), "b=10i")
    assert.Contains(t, m.String(), "c=10i")
    assert.Contains(t, m.String(), "d=10i")
    assert.Contains(t, m.String(), "e=10i")
    assert.Contains(t, m.String(), "f=10i")
    assert.Contains(t, m.String(), "g=10i")
    assert.Contains(t, m.String(), "h=10i")
    assert.Contains(t, m.String(), "i=10i")
    assert.Contains(t, m.String(), "j=10")
    assert.NotContains(t, m.String(), "j=10i")
    assert.Contains(t, m.String(), "k=9223372036854775807i")
    assert.Contains(t, m.String(), "l=\"foobar\"")
    assert.Contains(t, m.String(), "m=true")
}

func TestMakeMetricNameOverride(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name:         "TestRunningInput",
        NameOverride: "foobar",
    })

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

func TestMakeMetricNamePrefix(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name:              "TestRunningInput",
        MeasurementPrefix: "foobar_",
    })

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

func TestMakeMetricNameSuffix(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name:              "TestRunningInput",
        MeasurementSuffix: "_foobar",
    })

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

func TestMakeMetric_TrailingSlash(t *testing.T) {
    now := time.Now()

    tests := []struct {
        name                string
        measurement         string
        fields              map[string]interface{}
        tags                map[string]string
        expectedNil         bool
        expectedMeasurement string
        expectedFields      map[string]interface{}
        expectedTags        map[string]string
    }{
        {
            name:        "Measurement cannot have trailing slash",
            measurement: `cpu\`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
            tags:        map[string]string{},
            expectedNil: true,
        },
        {
            name:        "Field key with trailing slash dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
                `bad\`:  `xyzzy`,
            },
            tags:                map[string]string{},
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
            },
            expectedTags: map[string]string{},
        },
        {
            name:        "Field value with trailing slash okay",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
                "ok":    `xyzzy\`,
            },
            tags:                map[string]string{},
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
                "ok":    `xyzzy\`,
            },
            expectedTags: map[string]string{},
        },
        {
            name:        "Must have one field after dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "bad": math.NaN(),
            },
            tags:        map[string]string{},
            expectedNil: true,
        },
        {
            name:        "Tag key with trailing slash dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
            tags: map[string]string{
                `host\`: "localhost",
                "a":     "x",
            },
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
            },
            expectedTags: map[string]string{
                "a": "x",
            },
        },
        {
            name:        "Tag value with trailing slash dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
            tags: map[string]string{
                `host`: `localhost\`,
                "a":    "x",
            },
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
            },
            expectedTags: map[string]string{
                "a": "x",
            },
        },
    }

    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            m := ri.MakeMetric(
                tc.measurement,
                tc.fields,
                tc.tags,
                telegraf.Untyped,
                now)

            if tc.expectedNil {
                require.Nil(t, m)
            } else {
                require.NotNil(t, m)
                require.Equal(t, tc.expectedMeasurement, m.Name())
                require.Equal(t, tc.expectedFields, m.Fields())
                require.Equal(t, tc.expectedTags, m.Tags())
            }
        })
    }
}

type testInput struct{}

func (t *testInput) Description() string                   { return "" }
func (t *testInput) SampleConfig() string                  { return "" }
func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil }
194 internal/models/running_output.go Normal file
@@ -0,0 +1,194 @@
package models

import (
    "log"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/buffer"
    "github.com/influxdata/telegraf/metric"
    "github.com/influxdata/telegraf/selfstat"
)

const (
    // Default metric batch size.
    DEFAULT_METRIC_BATCH_SIZE = 1000

    // Default number of metrics kept. It should be a multiple of batch size.
    DEFAULT_METRIC_BUFFER_LIMIT = 10000
)

// RunningOutput contains the output configuration
type RunningOutput struct {
    Name              string
    Output            telegraf.Output
    Config            *OutputConfig
    MetricBufferLimit int
    MetricBatchSize   int

    MetricsFiltered selfstat.Stat
    MetricsWritten  selfstat.Stat
    BufferSize      selfstat.Stat
    BufferLimit     selfstat.Stat
    WriteTime       selfstat.Stat

    metrics     *buffer.Buffer
    failMetrics *buffer.Buffer

    // Guards against concurrent calls to the Output as described in #3009
    sync.Mutex
}

func NewRunningOutput(
    name string,
    output telegraf.Output,
    conf *OutputConfig,
    batchSize int,
    bufferLimit int,
) *RunningOutput {
    if bufferLimit == 0 {
        bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
    }
    if batchSize == 0 {
        batchSize = DEFAULT_METRIC_BATCH_SIZE
    }
    ro := &RunningOutput{
        Name:              name,
        metrics:           buffer.NewBuffer(batchSize),
        failMetrics:       buffer.NewBuffer(bufferLimit),
        Output:            output,
        Config:            conf,
        MetricBufferLimit: bufferLimit,
        MetricBatchSize:   batchSize,
        MetricsWritten: selfstat.Register(
            "write",
            "metrics_written",
            map[string]string{"output": name},
        ),
        MetricsFiltered: selfstat.Register(
            "write",
            "metrics_filtered",
            map[string]string{"output": name},
        ),
        BufferSize: selfstat.Register(
            "write",
            "buffer_size",
            map[string]string{"output": name},
        ),
        BufferLimit: selfstat.Register(
            "write",
            "buffer_limit",
            map[string]string{"output": name},
        ),
        WriteTime: selfstat.RegisterTiming(
            "write",
            "write_time_ns",
            map[string]string{"output": name},
        ),
    }
    ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
    return ro
}

// AddMetric adds a metric to the output. It also writes out a batch of
// cached metrics once the batch size is reached.
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
    if m == nil {
        return
    }
    // Filter any tagexclude/taginclude parameters before adding metric
    if ro.Config.Filter.IsActive() {
        // In order to filter out tags, we need to create a new metric, since
        // metrics are immutable once created.
        name := m.Name()
        tags := m.Tags()
        fields := m.Fields()
        t := m.Time()
        if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
            ro.MetricsFiltered.Incr(1)
            return
        }
        // error is not possible if creating from another metric, so ignore.
        m, _ = metric.New(name, tags, fields, t)
    }

    ro.metrics.Add(m)
    if ro.metrics.Len() == ro.MetricBatchSize {
        batch := ro.metrics.Batch(ro.MetricBatchSize)
        err := ro.write(batch)
        if err != nil {
            ro.failMetrics.Add(batch...)
        }
    }
}

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
    nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
    ro.BufferSize.Set(int64(nFails + nMetrics))
    log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
        ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
    var err error
    if !ro.failMetrics.IsEmpty() {
        // how many batches of failed writes we need to write.
        nBatches := nFails/ro.MetricBatchSize + 1
        batchSize := ro.MetricBatchSize

        for i := 0; i < nBatches; i++ {
            // If it's the last batch, only grab the metrics that have not had
            // a write attempt already (this is primarily to preserve order).
            if i == nBatches-1 {
                batchSize = nFails % ro.MetricBatchSize
            }
            batch := ro.failMetrics.Batch(batchSize)
            // If we've already failed previous writes, don't bother trying to
            // write to this output again. We are not exiting the loop just so
            // that we can rotate the metrics to preserve order.
            if err == nil {
                err = ro.write(batch)
            }
            if err != nil {
                ro.failMetrics.Add(batch...)
            }
        }
    }

    batch := ro.metrics.Batch(ro.MetricBatchSize)
    // see comment above about not trying to write to an already failed output.
    // if ro.failMetrics is empty then err will always be nil at this point.
    if err == nil {
        err = ro.write(batch)
    }

    if err != nil {
        ro.failMetrics.Add(batch...)
        return err
    }
    return nil
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
    nMetrics := len(metrics)
    if nMetrics == 0 {
        return nil
    }
    ro.Lock()
    defer ro.Unlock()
    start := time.Now()
    err := ro.Output.Write(metrics)
    elapsed := time.Since(start)
    if err == nil {
        log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
            ro.Name, nMetrics, elapsed)
        ro.MetricsWritten.Incr(int64(nMetrics))
        ro.WriteTime.Incr(elapsed.Nanoseconds())
    }
    return err
}

// OutputConfig contains the name and filter of an output plugin.
type OutputConfig struct {
    Name   string
    Filter Filter
}
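The retry loop in Write splits the failed buffer into full batches plus one short trailing batch, and the arithmetic is easy to misread. A minimal sketch of just that split, with no I/O (the failedBatchSizes helper is illustrative):

package main

import "fmt"

// failedBatchSizes mirrors the batching in RunningOutput.Write: full batches
// first, then a final batch of nFails % batchSize metrics so that metrics
// which have not yet had a write attempt stay in order.
func failedBatchSizes(nFails, batchSize int) []int {
    nBatches := nFails/batchSize + 1
    sizes := make([]int, 0, nBatches)
    for i := 0; i < nBatches; i++ {
        if i == nBatches-1 {
            sizes = append(sizes, nFails%batchSize)
        } else {
            sizes = append(sizes, batchSize)
        }
    }
    return sizes
}

func main() {
    fmt.Println(failedBatchSizes(12, 5)) // [5 5 2]
    fmt.Println(failedBatchSizes(5, 5))  // [5 0]; an empty final batch is a no-op in write()
}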
556 internal/models/running_output_test.go Normal file
@@ -0,0 +1,556 @@
package models

import (
    "fmt"
    "sync"
    "testing"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var first5 = []telegraf.Metric{
    testutil.TestMetric(101, "metric1"),
    testutil.TestMetric(101, "metric2"),
    testutil.TestMetric(101, "metric3"),
    testutil.TestMetric(101, "metric4"),
    testutil.TestMetric(101, "metric5"),
}

var next5 = []telegraf.Metric{
    testutil.TestMetric(101, "metric6"),
    testutil.TestMetric(101, "metric7"),
    testutil.TestMetric(101, "metric8"),
    testutil.TestMetric(101, "metric9"),
    testutil.TestMetric(101, "metric10"),
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &perfOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for n := 0; n < b.N; n++ {
        ro.AddMetric(testutil.TestMetric(101, "metric1"))
        ro.Write()
    }
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &perfOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for n := 0; n < b.N; n++ {
        ro.AddMetric(testutil.TestMetric(101, "metric1"))
        if n%100 == 0 {
            ro.Write()
        }
    }
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &perfOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for n := 0; n < b.N; n++ {
        ro.AddMetric(testutil.TestMetric(101, "metric1"))
    }
}

func TestAddingNilMetric(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(nil)
    ro.AddMetric(nil)
    ro.AddMetric(nil)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 0)
}

// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            NameDrop: []string{"metric1", "metric2"},
        },
    }
    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 8)
}

// Test that NameDrop filters without a match do nothing.
func TestRunningOutput_PassFilter(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            NameDrop: []string{"metric1000", "foo*"},
        },
    }
    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 10)
}

// Test that a TagInclude filter with no matching tags drops all tags.
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            TagInclude: []string{"nothing*"},
        },
    }
    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Empty(t, m.Metrics()[0].Tags())
}

// Test that a matching TagExclude filter strips the tag.
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            TagExclude: []string{"tag*"},
        },
    }
    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Len(t, m.Metrics()[0].Tags(), 0)
}

// Test that a TagExclude filter without a match keeps the tags.
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            TagExclude: []string{"nothing*"},
        },
    }
    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that a matching TagInclude filter keeps the tag.
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            TagInclude: []string{"tag*"},
        },
    }
    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 10)
}

// Test that the running output doesn't flush until a full batch has
// accumulated.
func TestRunningOutputFlushWhenFull(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 6, 10)

    // Fill buffer to 1 under limit
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // no flush yet
    assert.Len(t, m.Metrics(), 0)

    // add one more metric
    ro.AddMetric(next5[0])
    // now it flushed
    assert.Len(t, m.Metrics(), 6)

    // add one more metric and write it manually
    ro.AddMetric(next5[1])
    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 7)
}

// Test that the running output flushes a full batch twice in a row.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 4, 12)

    // Fill buffer past limit twice
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // flushed twice
    assert.Len(t, m.Metrics(), 8)
}

func TestRunningOutputWriteFail(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 4, 12)

    // Fill buffer to limit twice
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // manual write fails
    err := ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    m.failWrite = false
    err = ro.Write()
    require.NoError(t, err)

    assert.Len(t, m.Metrics(), 10)
}

// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 100, 1000)

    // add 5 metrics
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // Write fails
    err := ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    m.failWrite = false
    // add 5 more metrics
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    err = ro.Write()
    require.NoError(t, err)

    // Verify that 10 metrics were written
    assert.Len(t, m.Metrics(), 10)
    // Verify that they are in order
    expected := append(first5, next5...)
    assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 5, 100)

    // add 5 metrics
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err := ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // add 5 metrics
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err = ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // add 5 metrics
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err = ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // add 5 metrics
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err = ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    m.failWrite = false
    err = ro.Write()
    require.NoError(t, err)

    // Verify that 20 metrics were written
    assert.Len(t, m.Metrics(), 20)
    // Verify that they are in order
    expected := append(first5, next5...)
    expected = append(expected, first5...)
    expected = append(expected, next5...)
    assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved when there is a remainder
// of points for the batch.
//
// ie, with a batch size of 5:
//
//   1 2 3 4 5 6 <-- order, failed points
//   6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
//   1 2 3 4 5 6 <-- order, after 2nd write failure (6 was batch)
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 5, 1000)

    // add 5 metrics
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // Write fails
    err := ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // add and attempt to write a single metric:
    ro.AddMetric(next5[0])
    err = ro.Write()
    require.Error(t, err)

    // unset fail and write metrics
    m.failWrite = false
    err = ro.Write()
    require.NoError(t, err)

    // Verify that 6 metrics were written
    assert.Len(t, m.Metrics(), 6)
    // Verify that they are in order
    expected := append(first5, next5[0])
    assert.Equal(t, expected, m.Metrics())
}

type mockOutput struct {
    sync.Mutex

    metrics []telegraf.Metric

    // if true, mock a write failure
    failWrite bool
}

func (m *mockOutput) Connect() error {
    return nil
}

func (m *mockOutput) Close() error {
    return nil
}

func (m *mockOutput) Description() string {
    return ""
}

func (m *mockOutput) SampleConfig() string {
    return ""
}

func (m *mockOutput) Write(metrics []telegraf.Metric) error {
    m.Lock()
    defer m.Unlock()
    if m.failWrite {
        return fmt.Errorf("Failed Write!")
    }

    if m.metrics == nil {
        m.metrics = []telegraf.Metric{}
    }

    for _, metric := range metrics {
        m.metrics = append(m.metrics, metric)
    }
    return nil
}

func (m *mockOutput) Metrics() []telegraf.Metric {
    m.Lock()
    defer m.Unlock()
    return m.metrics
}

type perfOutput struct {
    // if true, mock a write failure
    failWrite bool
}

func (m *perfOutput) Connect() error {
    return nil
}

func (m *perfOutput) Close() error {
    return nil
}

func (m *perfOutput) Description() string {
    return ""
}

func (m *perfOutput) SampleConfig() string {
    return ""
}

func (m *perfOutput) Write(metrics []telegraf.Metric) error {
    if m.failWrite {
        return fmt.Errorf("Failed Write!")
    }
    return nil
}
51 internal/models/running_processor.go Normal file
@@ -0,0 +1,51 @@
package models

import (
    "sync"

    "github.com/influxdata/telegraf"
)

type RunningProcessor struct {
    Name string

    sync.Mutex
    Processor telegraf.Processor
    Config    *ProcessorConfig
}

type RunningProcessors []*RunningProcessor

func (rp RunningProcessors) Len() int           { return len(rp) }
func (rp RunningProcessors) Swap(i, j int)      { rp[i], rp[j] = rp[j], rp[i] }
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }

// ProcessorConfig contains the name, order, and filter of a processor plugin.
type ProcessorConfig struct {
    Name   string
    Order  int64
    Filter Filter
}

func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
    rp.Lock()
    defer rp.Unlock()

    ret := []telegraf.Metric{}

    for _, metric := range in {
        if rp.Config.Filter.IsActive() {
            // check whether the filter selects this metric for processing
            if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
                // the filter does not select this metric, so pass it
                // through unmodified.
                ret = append(ret, metric)
                continue
            }
        }
        // The metric is selected for processing, so call the processor's
        // Apply function and append the results to the output slice.
        ret = append(ret, rp.Processor.Apply(metric)...)
    }

    return ret
}
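RunningProcessors implements sort.Interface keyed on Config.Order, so a caller can order a processor chain with sort.Sort. A standalone sketch of the same pattern (the proc type, names, and orders here are made up for illustration):

package main

import (
    "fmt"
    "sort"
)

// proc mirrors the shape sorted above: each processor carries an Order that
// determines its position in the chain.
type proc struct {
    name  string
    order int64
}

type procs []proc

func (p procs) Len() int           { return len(p) }
func (p procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p procs) Less(i, j int) bool { return p[i].order < p[j].order }

func main() {
    ps := procs{{"printer", 3}, {"rename", 1}, {"override", 2}}
    sort.Sort(ps)   // same mechanism usable on RunningProcessors
    fmt.Println(ps) // [{rename 1} {override 2} {printer 3}]
}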
117 internal/models/running_processor_test.go Normal file
@@ -0,0 +1,117 @@
package models

import (
    "testing"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
)

type TestProcessor struct {
}

func (f *TestProcessor) SampleConfig() string { return "" }
func (f *TestProcessor) Description() string  { return "" }

// Apply renames:
//   "foo" to "fuz"
//   "bar" to "baz"
// And it also drops measurements named "dropme"
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
    out := make([]telegraf.Metric, 0)
    for _, m := range in {
        switch m.Name() {
        case "foo":
            out = append(out, testutil.TestMetric(1, "fuz"))
        case "bar":
            out = append(out, testutil.TestMetric(1, "baz"))
        case "dropme":
            // drop the metric!
        default:
            out = append(out, m)
        }
    }
    return out
}

func NewTestRunningProcessor() *RunningProcessor {
    out := &RunningProcessor{
        Name:      "test",
        Processor: &TestProcessor{},
        Config:    &ProcessorConfig{Filter: Filter{}},
    }
    return out
}

func TestRunningProcessor(t *testing.T) {
    inmetrics := []telegraf.Metric{
        testutil.TestMetric(1, "foo"),
        testutil.TestMetric(1, "bar"),
        testutil.TestMetric(1, "baz"),
    }

    expectedNames := []string{
        "fuz",
        "baz",
        "baz",
    }
    rfp := NewTestRunningProcessor()
    filteredMetrics := rfp.Apply(inmetrics...)

    actualNames := []string{
        filteredMetrics[0].Name(),
        filteredMetrics[1].Name(),
        filteredMetrics[2].Name(),
    }
    assert.Equal(t, expectedNames, actualNames)
}

func TestRunningProcessor_WithNameDrop(t *testing.T) {
    inmetrics := []telegraf.Metric{
        testutil.TestMetric(1, "foo"),
        testutil.TestMetric(1, "bar"),
        testutil.TestMetric(1, "baz"),
    }

    expectedNames := []string{
        "foo",
        "baz",
        "baz",
    }
    rfp := NewTestRunningProcessor()

    rfp.Config.Filter.NameDrop = []string{"foo"}
    assert.NoError(t, rfp.Config.Filter.Compile())

    filteredMetrics := rfp.Apply(inmetrics...)

    actualNames := []string{
        filteredMetrics[0].Name(),
        filteredMetrics[1].Name(),
        filteredMetrics[2].Name(),
    }
    assert.Equal(t, expectedNames, actualNames)
}

func TestRunningProcessor_DroppedMetric(t *testing.T) {
    inmetrics := []telegraf.Metric{
        testutil.TestMetric(1, "dropme"),
        testutil.TestMetric(1, "foo"),
        testutil.TestMetric(1, "bar"),
    }

    expectedNames := []string{
        "fuz",
        "baz",
    }
    rfp := NewTestRunningProcessor()
    filteredMetrics := rfp.Apply(inmetrics...)

    actualNames := []string{
        filteredMetrics[0].Name(),
        filteredMetrics[1].Name(),
    }
    assert.Equal(t, expectedNames, actualNames)
}
69 logger/logger.go Normal file
@@ -0,0 +1,69 @@
package logger

import (
    "io"
    "log"
    "os"
    "regexp"
    "time"

    "github.com/influxdata/wlog"
)

var prefixRegex = regexp.MustCompile("^[DIWE]!")

// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
    return &telegrafLog{
        writer: wlog.NewWriter(w),
    }
}

type telegrafLog struct {
    writer io.Writer
}

func (t *telegrafLog) Write(b []byte) (n int, err error) {
    var line []byte
    if !prefixRegex.Match(b) {
        line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
    } else {
        line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
    }
    return t.writer.Write(line)
}

// SetupLogging configures the logging output.
//   debug   will set the log level to DEBUG
//   quiet   will set the log level to ERROR
//   logfile will direct the logging output to a file. Empty string is
//           interpreted as stderr. If there is an error opening the file the
//           logger will fall back to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
    log.SetFlags(0)
    if debug {
        wlog.SetLevel(wlog.DEBUG)
    }
    if quiet {
        wlog.SetLevel(wlog.ERROR)
    }

    var oFile *os.File
    if logfile != "" {
        if _, err := os.Stat(logfile); os.IsNotExist(err) {
            if oFile, err = os.Create(logfile); err != nil {
                log.Printf("E! Unable to create %s (%s), using stderr", logfile, err)
                oFile = os.Stderr
            }
        } else {
            if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
                log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err)
                oFile = os.Stderr
            }
        }
    } else {
        oFile = os.Stderr
    }

    log.SetOutput(newTelegrafWriter(oFile))
}
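The writer above keys off single-letter level prefixes (D!, I!, W!, E!); lines without one default to I!, and the wlog level set in SetupLogging decides what is actually written, as the tests below demonstrate. A short usage sketch (the log file path is illustrative):

package main

import (
    "log"

    "github.com/influxdata/telegraf/logger"
)

func main() {
    // debug=false, quiet=false: INFO and above, written to a file
    // (the path is illustrative).
    logger.SetupLogging(false, false, "/tmp/telegraf.log")

    log.Printf("I! starting up")        // kept: info level
    log.Printf("D! verbose detail")     // suppressed unless debug=true
    log.Printf("no prefix, becomes I!") // the writer prepends "I! "
}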
75 logger/logger_test.go Normal file
@@ -0,0 +1,75 @@
package logger

import (
    "bytes"
    "io/ioutil"
    "log"
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestWriteLogToFile(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(false, false, tmpfile.Name())
    log.Printf("I! TEST")
    log.Printf("D! TEST") // <- should be ignored

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func TestDebugWriteLogToFile(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(true, false, tmpfile.Name())
    log.Printf("D! TEST")

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z D! TEST\n"))
}

func TestErrorWriteLogToFile(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(false, true, tmpfile.Name())
    log.Printf("E! TEST")
    log.Printf("I! TEST") // <- should be ignored

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}

func TestAddDefaultLogLevel(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(true, false, tmpfile.Name())
    log.Printf("TEST")

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func BenchmarkTelegrafLogWrite(b *testing.B) {
    var msg = []byte("test")
    var buf bytes.Buffer
    w := newTelegrafWriter(&buf)
    for i := 0; i < b.N; i++ {
        buf.Reset()
        w.Write(msg)
    }
}
64 metric.go Normal file
@@ -0,0 +1,64 @@
|
||||
package telegraf
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// ValueType is an enumeration of metric types that represent a simple value.
|
||||
type ValueType int
|
||||
|
||||
// Possible values for the ValueType enum.
|
||||
const (
|
||||
_ ValueType = iota
|
||||
Counter
|
||||
Gauge
|
||||
Untyped
|
||||
Summary
|
||||
Histogram
|
||||
)
|
||||
|
||||
type Metric interface {
|
||||
// Serialize serializes the metric into a line-protocol byte buffer,
|
||||
// including a newline at the end.
|
||||
Serialize() []byte
|
||||
// same as Serialize, but avoids an allocation.
|
||||
// returns number of bytes copied into dst.
|
||||
SerializeTo(dst []byte) int
|
||||
// String is the same as Serialize, but returns a string.
|
||||
String() string
|
||||
// Copy deep-copies the metric.
|
||||
Copy() Metric
|
||||
// Split will attempt to return multiple metrics with the same timestamp
|
||||
// whose string representations are no longer than maxSize.
|
||||
// Metrics with a single field may exceed the requested size.
|
||||
Split(maxSize int) []Metric
|
||||
|
||||
// Tag functions
|
||||
HasTag(key string) bool
|
||||
AddTag(key, value string)
|
||||
RemoveTag(key string)
|
||||
|
||||
// Field functions
|
||||
HasField(key string) bool
|
||||
AddField(key string, value interface{})
|
||||
RemoveField(key string) error
|
||||
|
||||
// Name functions
|
||||
SetName(name string)
|
||||
SetPrefix(prefix string)
|
||||
SetSuffix(suffix string)
|
||||
|
||||
// Getting data structure functions
|
||||
Name() string
|
||||
Tags() map[string]string
|
||||
Fields() map[string]interface{}
|
||||
Time() time.Time
|
||||
UnixNano() int64
|
||||
Type() ValueType
|
||||
Len() int // returns the length of the serialized metric, including newline
|
||||
HashID() uint64
|
||||
|
||||
// aggregator things:
|
||||
SetAggregate(bool)
|
||||
IsAggregate() bool
|
||||
}
|
||||
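A short usage sketch of this interface, constructing a metric through the metric.New constructor added later in this diff; the import path is an assumption and error handling is trimmed:

    package main

    import (
        "fmt"
        "time"

        "github.com/influxdata/telegraf/metric" // constructor defined further down in this diff
    )

    func main() {
        m, err := metric.New(
            "cpu",
            map[string]string{"host": "localhost"},
            map[string]interface{}{"usage_idle": 99.0},
            time.Now(),
        )
        if err != nil {
            panic(err)
        }

        m.AddTag("dc", "us-east-1")      // mutators invalidate the cached hash
        fmt.Print(m.String())            // line protocol, newline included
        fmt.Println(m.HashID(), m.Len()) // identity hash and serialized length
    }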
metric/escape.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package metric

import (
    "strings"
)

var (
    // escaper is for escaping:
    //   - tag keys
    //   - tag values
    //   - field keys
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
    unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

    // nameEscaper is for escaping measurement names only.
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    nameEscaper   = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
    nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)

    // stringFieldEscaper is for escaping string field values only.
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    stringFieldEscaper = strings.NewReplacer(
        `"`, `\"`,
        `\`, `\\`,
    )
    stringFieldUnEscaper = strings.NewReplacer(
        `\"`, `"`,
        `\\`, `\`,
    )
)

func escape(s string, t string) string {
    switch t {
    case "fieldkey", "tagkey", "tagval":
        return escaper.Replace(s)
    case "name":
        return nameEscaper.Replace(s)
    case "fieldval":
        return stringFieldEscaper.Replace(s)
    }
    return s
}

func unescape(s string, t string) string {
    switch t {
    case "fieldkey", "tagkey", "tagval":
        return unEscaper.Replace(s)
    case "name":
        return nameUnEscaper.Replace(s)
    case "fieldval":
        return stringFieldUnEscaper.Replace(s)
    }
    return s
}
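Because escape and unescape are unexported, any demonstration has to live inside the metric package itself; a hypothetical test-style round trip (not part of the diff) under that assumption:

    package metric

    import "testing"

    func TestEscapeRoundTrip(t *testing.T) {
        // Tag values must escape commas, equals signs, spaces, and double quotes.
        in := `us,east=1 "a"`
        esc := escape(in, "tagval")
        if esc != `us\,east\=1\ \"a\"` {
            t.Fatalf("unexpected escape result: %s", esc)
        }
        if got := unescape(esc, "tagval"); got != in {
            t.Fatalf("round trip failed: %q != %q", got, in)
        }
    }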
metric/inline_strconv_parse.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package metric

import (
    "reflect"
    "strconv"
    "unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
    s := unsafeBytesToString(b)
    return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
    s := unsafeBytesToString(b)
    return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
    return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
    src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
    dst := reflect.StringHeader{
        Data: src.Data,
        Len:  src.Len,
    }
    s := *(*string)(unsafe.Pointer(&dst))
    return s
}
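These wrappers matter because strconv.ParseInt(string(b), ...) would copy b on every call; aliasing the bytes as a string keeps the hot parse path allocation-free. The aliased string must not outlive b or observe mutation of it. A hypothetical in-package sketch (an assumption, not in the diff) of how that property could be checked:

    package metric

    import "testing"

    func TestParseIntBytesNoAlloc(t *testing.T) {
        buf := []byte("9223372036854775807")
        allocs := testing.AllocsPerRun(100, func() {
            // The unsafe string header points at buf, so no copy is made.
            if _, err := parseIntBytes(buf, 10, 64); err != nil {
                t.Fatal(err)
            }
        })
        if allocs != 0 {
            t.Fatalf("expected 0 allocations, got %v", allocs)
        }
    }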
metric/inline_strconv_parse_test.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package metric

import (
    "strconv"
    "testing"
    "testing/quick"
)

func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
    f := func(b []byte, base int, bitSize int) bool {
        exp, expErr := strconv.ParseInt(string(b), base, bitSize)
        got, gotErr := parseIntBytes(b, base, bitSize)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
    buf := []byte{}
    f := func(n int64) bool {
        buf = strconv.AppendInt(buf[:0], n, 10)

        exp, expErr := strconv.ParseInt(string(buf), 10, 64)
        got, gotErr := parseIntBytes(buf, 10, 64)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
    f := func(b []byte, bitSize int) bool {
        exp, expErr := strconv.ParseFloat(string(b), bitSize)
        got, gotErr := parseFloatBytes(b, bitSize)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
    buf := []byte{}
    f := func(n float64) bool {
        buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)

        exp, expErr := strconv.ParseFloat(string(buf), 64)
        got, gotErr := parseFloatBytes(buf, 64)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseBoolBytesEquivalence(t *testing.T) {
    var buf []byte
    for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
        buf = append(buf[:0], s...)

        exp, expErr := strconv.ParseBool(s)
        got, gotErr := parseBoolBytes(buf)

        if got != exp || !checkErrs(expErr, gotErr) {
            t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
        }
    }
}

func checkErrs(a, b error) bool {
    if (a == nil) != (b == nil) {
        return false
    }

    return a == nil || a.Error() == b.Error()
}
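The four fuzz-style tests above lean on testing/quick to compare the zero-alloc parsers against the stdlib on random inputs, with MaxCount: 10000 raising the sample count from the default of 100. They run with the ordinary test tooling, for example:

    go test ./metric -run 'Equivalence' -v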
metric/metric.go (new file, 623 lines)
@@ -0,0 +1,623 @@
package metric

import (
    "bytes"
    "fmt"
    "hash/fnv"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/influxdata/telegraf"
)

const MaxInt = int(^uint(0) >> 1)

func New(
    name string,
    tags map[string]string,
    fields map[string]interface{},
    t time.Time,
    mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
    if len(name) == 0 {
        return nil, fmt.Errorf("missing measurement name")
    }
    if len(fields) == 0 {
        return nil, fmt.Errorf("%s: must have one or more fields", name)
    }
    if strings.HasSuffix(name, `\`) {
        return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
    }

    var thisType telegraf.ValueType
    if len(mType) > 0 {
        thisType = mType[0]
    } else {
        thisType = telegraf.Untyped
    }

    m := &metric{
        name:  []byte(escape(name, "name")),
        t:     []byte(fmt.Sprint(t.UnixNano())),
        nsec:  t.UnixNano(),
        mType: thisType,
    }

    // pre-allocate the exact size of the tags slice
    taglen := 0
    for k, v := range tags {
        if strings.HasSuffix(k, `\`) {
            return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
        }
        if strings.HasSuffix(v, `\`) {
            return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
        }

        if len(k) == 0 || len(v) == 0 {
            continue
        }
        taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
    }
    m.tags = make([]byte, taglen)

    i := 0
    for k, v := range tags {
        if len(k) == 0 || len(v) == 0 {
            continue
        }
        m.tags[i] = ','
        i++
        i += copy(m.tags[i:], escape(k, "tagkey"))
        m.tags[i] = '='
        i++
        i += copy(m.tags[i:], escape(v, "tagval"))
    }

    // pre-allocate the capacity of the fields slice
    fieldlen := 0
    for k := range fields {
        if strings.HasSuffix(k, `\`) {
            return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
        }

        // 10 bytes is completely arbitrary, but will at least prevent some
        // amount of allocations. There's a small possibility this will create
        // slightly more allocations for a metric that has many short fields.
        fieldlen += len(k) + 10
    }
    m.fields = make([]byte, 0, fieldlen)

    i = 0
    for k, v := range fields {
        if i != 0 {
            m.fields = append(m.fields, ',')
        }
        m.fields = appendField(m.fields, k, v)
        i++
    }

    return m, nil
}

// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
    var keyi int
    for {
        i := bytes.IndexByte(buf[keyi:], b)
        if i == -1 {
            return -1
        } else if i == 0 {
            break
        }
        keyi += i
        if buf[keyi-1] != '\\' {
            break
        } else {
            keyi++
        }
    }
    return keyi
}

// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
    var keyi int
    for {
        i := bytes.IndexByte(buf[keyi:], b)
        if i == -1 {
            return -1
        } else if i == 0 {
            break
        }
        keyi += i
        if countBackslashes(buf, keyi-1)%2 == 0 {
            break
        } else {
            keyi++
        }
    }
    return keyi
}

// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
    var count int
    for {
        if index < 0 {
            return count
        }
        if buf[index] == '\\' {
            count++
            index--
        } else {
            break
        }
    }
    return count
}

type metric struct {
    name   []byte
    tags   []byte
    fields []byte
    t      []byte

    mType     telegraf.ValueType
    aggregate bool

    // cached values for reuse in "get" functions
    hashID uint64
    nsec   int64
}

func (m *metric) String() string {
    return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
}

func (m *metric) SetAggregate(b bool) {
    m.aggregate = b
}

func (m *metric) IsAggregate() bool {
    return m.aggregate
}

func (m *metric) Type() telegraf.ValueType {
    return m.mType
}

func (m *metric) Len() int {
    // 3 is for the 2 spaces surrounding the fields array + newline at the end.
    return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}

func (m *metric) Serialize() []byte {
    tmp := make([]byte, m.Len())
    i := 0
    i += copy(tmp[i:], m.name)
    i += copy(tmp[i:], m.tags)
    tmp[i] = ' '
    i++
    i += copy(tmp[i:], m.fields)
    tmp[i] = ' '
    i++
    i += copy(tmp[i:], m.t)
    tmp[i] = '\n'
    return tmp
}

func (m *metric) SerializeTo(dst []byte) int {
    i := 0
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.name)
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.tags)
    if i >= len(dst) {
        return i
    }

    dst[i] = ' '
    i++
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.fields)
    if i >= len(dst) {
        return i
    }

    dst[i] = ' '
    i++
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.t)
    if i >= len(dst) {
        return i
    }
    dst[i] = '\n'

    return i + 1
}

func (m *metric) Split(maxSize int) []telegraf.Metric {
    if m.Len() <= maxSize {
        return []telegraf.Metric{m}
    }
    var out []telegraf.Metric

    // constant number of bytes for each metric (in addition to field bytes)
    constant := len(m.name) + len(m.tags) + len(m.t) + 3
    // currently selected fields
    fields := make([]byte, 0, maxSize)

    i := 0
    for {
        if i >= len(m.fields) {
            // hit the end of the field byte slice
            if len(fields) > 0 {
                out = append(out, copyWith(m.name, m.tags, fields, m.t))
            }
            break
        }

        // find the end of the next field
        j := indexUnescapedByte(m.fields[i:], ',')
        if j == -1 {
            j = len(m.fields)
        } else {
            j += i
        }

        // if true, then we need to create a metric _not_ including the currently
        // selected field
        if len(m.fields[i:j])+len(fields)+constant >= maxSize {
            // if false, then we'll create a metric including the currently
            // selected field anyways. This means that the given maxSize is too
            // small for a single field to fit.
            if len(fields) > 0 {
                out = append(out, copyWith(m.name, m.tags, fields, m.t))
            }

            fields = make([]byte, 0, maxSize)
        }
        if len(fields) > 0 {
            fields = append(fields, ',')
        }
        fields = append(fields, m.fields[i:j]...)

        i = j + 1
    }
    return out
}

func (m *metric) Fields() map[string]interface{} {
    fieldMap := map[string]interface{}{}
    i := 0
    for {
        if i >= len(m.fields) {
            break
        }
        // end index of the field key
        i1 := indexUnescapedByte(m.fields[i:], '=')
        if i1 == -1 {
            break
        }
        // start index of the field value
        i2 := i1 + 1

        // end index of the field value
        var i3 int
        if m.fields[i:][i2] == '"' {
            i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
            if i3 == -1 {
                i3 = len(m.fields[i:])
            }
            i3 += i2 + 2 // increment index to the comma
        } else {
            i3 = indexUnescapedByte(m.fields[i:], ',')
            if i3 == -1 {
                i3 = len(m.fields[i:])
            }
        }

        switch m.fields[i:][i2] {
        case '"':
            // string field
            fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
        case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
            // number field
            switch m.fields[i:][i3-1] {
            case 'i':
                // integer field
                n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
                if err == nil {
                    fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
                } else {
                    // TODO handle the error, or just ignore the field silently?
                }
            default:
                // float field
                n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
                if err == nil {
                    fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
                } else {
                    // TODO handle the error, or just ignore the field silently?
                }
            }
        case 'T', 't':
            fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
        case 'F', 'f':
            fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
        default:
            // TODO handle unsupported field type
        }

        i += i3 + 1
    }

    return fieldMap
}

func (m *metric) Tags() map[string]string {
    tagMap := map[string]string{}
    if len(m.tags) == 0 {
        return tagMap
    }

    i := 0
    for {
        // start index of the tag key
        i0 := indexUnescapedByte(m.tags[i:], ',') + 1
        if i0 == 0 {
            // didn't find a tag start
            break
        }
        // end index of the tag key
        i1 := indexUnescapedByte(m.tags[i:], '=')
        // start index of the tag value
        i2 := i1 + 1
        // end index of the tag value (starting from i2)
        i3 := indexUnescapedByte(m.tags[i+i2:], ',')
        if i3 == -1 {
            tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
            break
        }
        tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
        // increment the start index for the next tag
        i += i2 + i3
    }

    return tagMap
}

func (m *metric) Name() string {
    return unescape(string(m.name), "name")
}

func (m *metric) Time() time.Time {
    // assume the metric has been verified already and ignore the error:
    if m.nsec == 0 {
        m.nsec, _ = parseIntBytes(m.t, 10, 64)
    }
    return time.Unix(0, m.nsec)
}

func (m *metric) UnixNano() int64 {
    // assume the metric has been verified already and ignore the error:
    if m.nsec == 0 {
        m.nsec, _ = parseIntBytes(m.t, 10, 64)
    }
    return m.nsec
}

func (m *metric) SetName(name string) {
    m.hashID = 0
    m.name = []byte(nameEscaper.Replace(name))
}

func (m *metric) SetPrefix(prefix string) {
    m.hashID = 0
    m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}

func (m *metric) SetSuffix(suffix string) {
    m.hashID = 0
    m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}

func (m *metric) AddTag(key, value string) {
    m.RemoveTag(key)
    m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}

func (m *metric) HasTag(key string) bool {
    i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return false
    }
    return true
}

func (m *metric) RemoveTag(key string) {
    m.hashID = 0

    i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return
    }

    tmp := m.tags[0 : i-1]
    j := indexUnescapedByte(m.tags[i:], ',')
    if j != -1 {
        tmp = append(tmp, m.tags[i+j:]...)
    }
    m.tags = tmp
    return
}

func (m *metric) AddField(key string, value interface{}) {
    m.fields = append(m.fields, ',')
    m.fields = appendField(m.fields, key, value)
}

func (m *metric) HasField(key string) bool {
    i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return false
    }
    return true
}

func (m *metric) RemoveField(key string) error {
    i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return nil
    }

    var tmp []byte
    if i != 0 {
        tmp = m.fields[0 : i-1]
    }
    j := indexUnescapedByte(m.fields[i:], ',')
    if j != -1 {
        tmp = append(tmp, m.fields[i+j:]...)
    }

    if len(tmp) == 0 {
        return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
    }

    m.fields = tmp
    return nil
}

func (m *metric) Copy() telegraf.Metric {
    return copyWith(m.name, m.tags, m.fields, m.t)
}

func copyWith(name, tags, fields, t []byte) telegraf.Metric {
    out := metric{
        name:   make([]byte, len(name)),
        tags:   make([]byte, len(tags)),
        fields: make([]byte, len(fields)),
        t:      make([]byte, len(t)),
    }
    copy(out.name, name)
    copy(out.tags, tags)
    copy(out.fields, fields)
    copy(out.t, t)
    return &out
}

func (m *metric) HashID() uint64 {
    if m.hashID == 0 {
        h := fnv.New64a()
        h.Write(m.name)

        tags := m.Tags()
        tmp := make([]string, len(tags))
        i := 0
        for k, v := range tags {
            tmp[i] = k + v
            i++
        }
        sort.Strings(tmp)

        for _, s := range tmp {
            h.Write([]byte(s))
        }

        m.hashID = h.Sum64()
    }
    return m.hashID
}

func appendField(b []byte, k string, v interface{}) []byte {
    if v == nil {
        return b
    }
    b = append(b, []byte(escape(k, "tagkey")+"=")...)

    // check popular types first
    switch v := v.(type) {
    case float64:
        b = strconv.AppendFloat(b, v, 'f', -1, 64)
    case int64:
        b = strconv.AppendInt(b, v, 10)
        b = append(b, 'i')
    case string:
        b = append(b, '"')
        b = append(b, []byte(escape(v, "fieldval"))...)
        b = append(b, '"')
    case bool:
        b = strconv.AppendBool(b, v)
    case int32:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case int16:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case int8:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case int:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint64:
        // Cap uints above the maximum int value
        var intv int64
        if v <= uint64(MaxInt) {
            intv = int64(v)
        } else {
            intv = int64(MaxInt)
        }
        b = strconv.AppendInt(b, intv, 10)
        b = append(b, 'i')
    case uint32:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint16:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint8:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint:
        // Cap uints above the maximum int value
        var intv int64
        if v <= uint(MaxInt) {
            intv = int64(v)
        } else {
            intv = int64(MaxInt)
        }
        b = strconv.AppendInt(b, intv, 10)
        b = append(b, 'i')
    case float32:
        b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
    case []byte:
        b = append(b, v...)
    default:
        // Can't determine the type, so convert to a string
        b = append(b, '"')
        b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
        b = append(b, '"')
    }

    return b
}
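Two behaviors of the implementation above are easy to miss: SerializeTo truncates silently when dst is too small (it returns the bytes written, never an error), and Split packs fields greedily under maxSize, falling back to one oversized metric per field when even a single field cannot fit. A hedged sketch, assuming the import path:

    package main

    import (
        "fmt"
        "time"

        "github.com/influxdata/telegraf/metric" // assumed import path
    )

    func main() {
        m, _ := metric.New(
            "cpu",
            map[string]string{"host": "localhost"},
            map[string]interface{}{"a": int64(1), "b": int64(2), "c": int64(3)},
            time.Unix(0, 1480940990034083306),
        )

        // A too-small destination is filled as far as it goes; no error.
        dst := make([]byte, 10)
        n := m.SerializeTo(dst)
        fmt.Println(n, string(dst[:n])) // 10 "cpu,host=l"

        // Each split metric repeats the name, tags, and timestamp.
        for _, part := range m.Split(40) {
            fmt.Print(part.String())
        }
    }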
metric/metric_benchmark_test.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package metric

import (
    "fmt"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
)

// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
    s      string
    I      interface{}
    tags   map[string]string
    fields map[string]interface{}
)

func BenchmarkNewMetric(b *testing.B) {
    var mt telegraf.Metric
    for n := 0; n < b.N; n++ {
        mt, _ = New("test_metric",
            map[string]string{
                "test_tag_1": "tag_value_1",
                "test_tag_2": "tag_value_2",
                "test_tag_3": "tag_value_3",
            },
            map[string]interface{}{
                "string_field": "string",
                "int_field":    int64(1000),
                "float_field":  float64(2.1),
            },
            time.Now(),
        )
    }
    s = string(mt.String())
}

func BenchmarkAddTag(b *testing.B) {
    var mt telegraf.Metric
    mt = &metric{
        name:   []byte("cpu"),
        tags:   []byte(",host=localhost"),
        fields: []byte("a=101"),
        t:      []byte("1480614053000000000"),
    }
    for n := 0; n < b.N; n++ {
        mt.AddTag("foo", "bar")
    }
    s = string(mt.String())
}

func BenchmarkSplit(b *testing.B) {
    var mt telegraf.Metric
    mt = &metric{
        name:   []byte("cpu"),
        tags:   []byte(",host=localhost"),
        fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
        t:      []byte("1480614053000000000"),
    }
    var metrics []telegraf.Metric
    for n := 0; n < b.N; n++ {
        metrics = mt.Split(60)
    }
    s = string(metrics[0].String())
}

func BenchmarkTags(b *testing.B) {
    for n := 0; n < b.N; n++ {
        var mt, _ = New("test_metric",
            map[string]string{
                "test_tag_1": "tag_value_1",
                "test_tag_2": "tag_value_2",
                "test_tag_3": "tag_value_3",
            },
            map[string]interface{}{
                "string_field": "string",
                "int_field":    int64(1000),
                "float_field":  float64(2.1),
            },
            time.Now(),
        )
        tags = mt.Tags()
    }
    s = fmt.Sprint(tags)
}

func BenchmarkFields(b *testing.B) {
    for n := 0; n < b.N; n++ {
        var mt, _ = New("test_metric",
            map[string]string{
                "test_tag_1": "tag_value_1",
                "test_tag_2": "tag_value_2",
                "test_tag_3": "tag_value_3",
            },
            map[string]interface{}{
                "string_field": "string",
                "int_field":    int64(1000),
                "float_field":  float64(2.1),
            },
            time.Now(),
        )
        fields = mt.Fields()
    }
    s = fmt.Sprint(fields)
}

func BenchmarkString(b *testing.B) {
    mt, _ := New("test_metric",
        map[string]string{
            "test_tag_1": "tag_value_1",
            "test_tag_2": "tag_value_2",
            "test_tag_3": "tag_value_3",
        },
        map[string]interface{}{
            "string_field": "string",
            "int_field":    int64(1000),
            "float_field":  float64(2.1),
        },
        time.Now(),
    )
    var S string
    for n := 0; n < b.N; n++ {
        S = mt.String()
    }
    s = S
}

func BenchmarkSerialize(b *testing.B) {
    mt, _ := New("test_metric",
        map[string]string{
            "test_tag_1": "tag_value_1",
            "test_tag_2": "tag_value_2",
            "test_tag_3": "tag_value_3",
        },
        map[string]interface{}{
            "string_field": "string",
            "int_field":    int64(1000),
            "float_field":  float64(2.1),
        },
        time.Now(),
    )
    var B []byte
    for n := 0; n < b.N; n++ {
        B = mt.Serialize()
    }
    s = string(B)
}
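The package-level s/I/tags/fields sinks above exist so the compiler cannot eliminate the benchmarked work as dead code. The benchmarks run with the standard Go tooling, for example:

    go test ./metric -bench . -benchmem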
metric/metric_test.go (new file, 737 lines)
@@ -0,0 +1,737 @@
package metric

import (
    "fmt"
    "math"
    "regexp"
    "testing"
    "time"

    "github.com/influxdata/telegraf"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestNewMetric(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":       "localhost",
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Equal(t, telegraf.Untyped, m.Type())
    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
    assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewErrors(t *testing.T) {
    // creating a metric with an empty name produces an error:
    m, err := New(
        "",
        map[string]string{
            "datacenter": "us-east-1",
            "mytag":      "foo",
            "another":    "tag",
        },
        map[string]interface{}{
            "value": float64(1),
        },
        time.Now(),
    )
    assert.Error(t, err)
    assert.Nil(t, m)

    // creating a metric with empty fields produces an error:
    m, err = New(
        "foobar",
        map[string]string{
            "datacenter": "us-east-1",
            "mytag":      "foo",
            "another":    "tag",
        },
        map[string]interface{}{},
        time.Now(),
    )
    assert.Error(t, err)
    assert.Nil(t, m)
}

func TestNewMetric_Tags(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "host":       "localhost",
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "value": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.True(t, m.HasTag("host"))
    assert.True(t, m.HasTag("datacenter"))

    m.AddTag("newtag", "foo")
    assert.True(t, m.HasTag("newtag"))

    m.RemoveTag("host")
    assert.False(t, m.HasTag("host"))
    assert.True(t, m.HasTag("newtag"))
    assert.True(t, m.HasTag("datacenter"))

    m.RemoveTag("datacenter")
    assert.False(t, m.HasTag("datacenter"))
    assert.True(t, m.HasTag("newtag"))
    assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())

    m.RemoveTag("newtag")
    assert.False(t, m.HasTag("newtag"))
    assert.Equal(t, map[string]string{}, m.Tags())

    assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
}

func TestSerialize(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "value": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Equal(t,
        []byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
        m.Serialize())

    m.RemoveTag("datacenter")
    assert.Equal(t,
        []byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
        m.Serialize())
}

func TestHashID(t *testing.T) {
    m, _ := New(
        "cpu",
        map[string]string{
            "datacenter": "us-east-1",
            "mytag":      "foo",
            "another":    "tag",
        },
        map[string]interface{}{
            "value": float64(1),
        },
        time.Now(),
    )
    hash := m.HashID()

    // adding a field doesn't change the hash:
    m.AddField("foo", int64(100))
    assert.Equal(t, hash, m.HashID())

    // removing a non-existent tag doesn't change the hash:
    m.RemoveTag("no-op")
    assert.Equal(t, hash, m.HashID())

    // adding a tag does change it:
    m.AddTag("foo", "bar")
    assert.NotEqual(t, hash, m.HashID())
    hash = m.HashID()

    // removing a tag also changes it:
    m.RemoveTag("mytag")
    assert.NotEqual(t, hash, m.HashID())
}

func TestHashID_Consistency(t *testing.T) {
    m, _ := New(
        "cpu",
        map[string]string{
            "datacenter": "us-east-1",
            "mytag":      "foo",
            "another":    "tag",
        },
        map[string]interface{}{
            "value": float64(1),
        },
        time.Now(),
    )
    hash := m.HashID()

    for i := 0; i < 1000; i++ {
        m2, _ := New(
            "cpu",
            map[string]string{
                "datacenter": "us-east-1",
                "mytag":      "foo",
                "another":    "tag",
            },
            map[string]interface{}{
                "value": float64(1),
            },
            time.Now(),
        )
        assert.Equal(t, hash, m2.HashID())
    }
}

func TestNewMetric_NameModifiers(t *testing.T) {
    now := time.Now()
    tags := map[string]string{}
    fields := map[string]interface{}{
        "value": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    hash := m.HashID()
    suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
    assert.Equal(t, "cpu"+suffix, m.String())

    m.SetPrefix("pre_")
    assert.NotEqual(t, hash, m.HashID())
    hash = m.HashID()
    assert.Equal(t, "pre_cpu"+suffix, m.String())

    m.SetSuffix("_post")
    assert.NotEqual(t, hash, m.HashID())
    hash = m.HashID()
    assert.Equal(t, "pre_cpu_post"+suffix, m.String())

    m.SetName("mem")
    assert.NotEqual(t, hash, m.HashID())
    assert.Equal(t, "mem"+suffix, m.String())
}

func TestNewMetric_FieldModifiers(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "value": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.True(t, m.HasField("value"))
    assert.False(t, m.HasField("foo"))

    m.AddField("newfield", "foo")
    assert.True(t, m.HasField("newfield"))

    assert.NoError(t, m.RemoveField("newfield"))
    assert.False(t, m.HasField("newfield"))

    // don't allow the user to remove all fields:
    assert.Error(t, m.RemoveField("value"))

    m.AddField("value2", int64(101))
    assert.NoError(t, m.RemoveField("value"))
    assert.False(t, m.HasField("value"))
}

func TestNewMetric_Fields(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":                  float64(1),
        "int":                    int64(1),
        "bool":                   true,
        "false":                  false,
        "string":                 "test",
        "quote_string":           `x"y`,
        "backslash_quote_string": `x\"y`,
        "backslash":              `x\y`,
        "ends_with_backslash":    `x\`,
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Equal(t, fields, m.Fields())
}

func TestNewMetric_Time(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(1),
        "int":    int64(1),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    m = m.Copy()
    m2 := m.Copy()

    assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
    assert.Equal(t, now.UnixNano(), m2.UnixNano())
}

func TestNewMetric_Copy(t *testing.T) {
    now := time.Now()
    tags := map[string]string{}
    fields := map[string]interface{}{
        "float": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    m2 := m.Copy()

    assert.Equal(t,
        fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
        m.String())
    m.AddTag("host", "localhost")
    assert.Equal(t,
        fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
        m.String())

    assert.Equal(t,
        fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
        m2.String())
}

func TestNewMetric_AllTypes(t *testing.T) {
    now := time.Now()
    tags := map[string]string{}
    fields := map[string]interface{}{
        "float64":     float64(1),
        "float32":     float32(1),
        "int64":       int64(1),
        "int32":       int32(1),
        "int16":       int16(1),
        "int8":        int8(1),
        "int":         int(1),
        "uint64":      uint64(1),
        "uint32":      uint32(1),
        "uint16":      uint16(1),
        "uint8":       uint8(1),
        "uint":        uint(1),
        "bytes":       []byte("foo"),
        "nil":         nil,
        "maxuint64":   uint64(MaxInt) + 10,
        "maxuint":     uint(MaxInt) + 10,
        "unsupported": []int{1, 2},
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Contains(t, m.String(), "float64=1")
    assert.Contains(t, m.String(), "float32=1")
    assert.Contains(t, m.String(), "int64=1i")
    assert.Contains(t, m.String(), "int32=1i")
    assert.Contains(t, m.String(), "int16=1i")
    assert.Contains(t, m.String(), "int8=1i")
    assert.Contains(t, m.String(), "int=1i")
    assert.Contains(t, m.String(), "uint64=1i")
    assert.Contains(t, m.String(), "uint32=1i")
    assert.Contains(t, m.String(), "uint16=1i")
    assert.Contains(t, m.String(), "uint8=1i")
    assert.Contains(t, m.String(), "uint=1i")
    assert.NotContains(t, m.String(), "nil")
    assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
    assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}

func TestIndexUnescapedByte(t *testing.T) {
    tests := []struct {
        in       []byte
        b        byte
        expected int
    }{
        {
            in:       []byte(`foobar`),
            b:        'b',
            expected: 3,
        },
        {
            in:       []byte(`foo\bar`),
            b:        'b',
            expected: -1,
        },
        {
            in:       []byte(`foo\\bar`),
            b:        'b',
            expected: -1,
        },
        {
            in:       []byte(`foobar`),
            b:        'f',
            expected: 0,
        },
        {
            in:       []byte(`foobar`),
            b:        'r',
            expected: 5,
        },
        {
            in:       []byte(`\foobar`),
            b:        'f',
            expected: -1,
        },
    }

    for _, test := range tests {
        got := indexUnescapedByte(test.in, test.b)
        assert.Equal(t, test.expected, got)
    }
}

func TestNewGaugeMetric(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":       "localhost",
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    m, err := New("cpu", tags, fields, now, telegraf.Gauge)
    assert.NoError(t, err)

    assert.Equal(t, telegraf.Gauge, m.Type())
    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
    assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":       "localhost",
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    m, err := New("cpu", tags, fields, now, telegraf.Counter)
    assert.NoError(t, err)

    assert.Equal(t, telegraf.Counter, m.Type())
    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
    assert.Equal(t, now.UnixNano(), m.UnixNano())
}

// test splitting the metric into various max lengths
func TestSplitMetric(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    split80 := m.Split(80)
    assert.Len(t, split80, 2)

    split70 := m.Split(70)
    assert.Len(t, split70, 3)

    split60 := m.Split(60)
    assert.Len(t, split60, 5)
}

// test splitting the metric into various max lengths,
// using a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "foo":     float64(98934259085),
        "bar":     float64(19385292),
        "number":  float64(19385292),
        "another": float64(19385292),
        "n":       float64(19385292),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    // verification regex
    re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)

    split90 := m.Split(90)
    assert.Len(t, split90, 2)
    for _, splitM := range split90 {
        assert.True(t, re.Match(splitM.Serialize()), splitM.String())
    }

    split70 := m.Split(70)
    assert.Len(t, split70, 3)
    for _, splitM := range split70 {
        assert.True(t, re.Match(splitM.Serialize()), splitM.String())
    }

    split20 := m.Split(20)
    assert.Len(t, split20, 5)
    for _, splitM := range split20 {
        assert.True(t, re.Match(splitM.Serialize()), splitM.String())
    }
}

// test splitting the metric even when the given length is shorter than the
// shortest possible length.
// Split should split the metric as short as possible, ie, 1 field per metric.
func TestSplitMetric_TooShort(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    split := m.Split(10)
    assert.Len(t, split, 5)
    strings := make([]string, 5)
    for i, splitM := range split {
        strings[i] = splitM.String()
    }

    assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}

func TestSplitMetric_NoOp(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    split := m.Split(1000)
    assert.Len(t, split, 1)
    assert.Equal(t, m, split[0])
}

func TestSplitMetric_OneField(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float": float64(100001),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())

    split := m.Split(1000)
    assert.Len(t, split, 1)
    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

    split = m.Split(1)
    assert.Len(t, split, 1)
    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

    split = m.Split(40)
    assert.Len(t, split, 1)
    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}

func TestSplitMetric_ExactSize(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    actual := m.Split(m.Len())
    // check that no copy was made
    require.Equal(t, &m, &actual[0])
}

func TestSplitMetric_NoRoomForNewline(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float": float64(100001),
        "int":   int64(100001),
        "bool":  true,
        "false": false,
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    actual := m.Split(m.Len() - 1)
    require.Equal(t, 2, len(actual))
}

func TestNewMetricAggregate(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.False(t, m.IsAggregate())
    m.SetAggregate(true)
    assert.True(t, m.IsAggregate())
}

func TestNewMetricString(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
        now.UnixNano())
    assert.Equal(t, lineProto, m.String())
}

func TestNewMetricFailNaN(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": math.NaN(),
    }

    _, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
}

func TestEmptyTagValueOrKey(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":     "localhost",
        "emptytag": "",
        "":         "valuewithoutkey",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := New("cpu", tags, fields, now)

    assert.True(t, m.HasTag("host"))
    assert.False(t, m.HasTag("emptytag"))
    assert.Equal(t,
        fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
        m.String())

    assert.NoError(t, err)
}

func TestNewMetric_TrailingSlash(t *testing.T) {
    now := time.Now()

    tests := []struct {
        name   string
        tags   map[string]string
        fields map[string]interface{}
    }{
        {
            name: `cpu\`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
        },
        {
            name: "cpu",
            fields: map[string]interface{}{
                `value\`: "x",
            },
        },
        {
            name: "cpu",
            tags: map[string]string{
                `host\`: "localhost",
            },
            fields: map[string]interface{}{
                "value": int64(42),
            },
        },
        {
            name: "cpu",
            tags: map[string]string{
                "host": `localhost\`,
            },
            fields: map[string]interface{}{
                "value": int64(42),
            },
        },
    }

    for _, tc := range tests {
        _, err := New(tc.name, tc.tags, tc.fields, now)
        assert.Error(t, err)
    }
}
680
metric/parse.go
Normal file
680
metric/parse.go
Normal file
@@ -0,0 +1,680 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrInvalidNumber = errors.New("invalid number")
|
||||
)
|
||||
|
||||
const (
|
||||
// the number of characters for the largest possible int64 (9223372036854775807)
|
||||
maxInt64Digits = 19
|
||||
|
||||
// the number of characters for the smallest possible int64 (-9223372036854775808)
|
||||
minInt64Digits = 20
|
||||
|
||||
// the number of characters required for the largest float64 before a range check
|
||||
// would occur during parsing
|
||||
maxFloat64Digits = 25
|
||||
|
||||
// the number of characters required for smallest float64 before a range check occur
|
||||
// would occur during parsing
|
||||
minFloat64Digits = 27
|
||||
|
||||
MaxKeyLength = 65535
|
||||
)
|
||||
|
||||
// The following constants allow us to specify which state to move to
|
||||
// next, when scanning sections of a Point.
|
||||
const (
|
||||
tagKeyState = iota
|
||||
tagValueState
|
||||
fieldsState
|
||||
)
|
||||
|
||||
func Parse(buf []byte) ([]telegraf.Metric, error) {
|
||||
return ParseWithDefaultTimePrecision(buf, time.Now(), "")
|
||||
}
|
||||
|
||||
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
|
||||
return ParseWithDefaultTimePrecision(buf, t, "")
|
||||
}
|
||||
|
||||
func ParseWithDefaultTimePrecision(
|
||||
buf []byte,
|
||||
t time.Time,
|
||||
precision string,
|
||||
) ([]telegraf.Metric, error) {
|
||||
if len(buf) == 0 {
|
||||
return []telegraf.Metric{}, nil
|
||||
}
|
||||
if len(buf) <= 6 {
|
||||
return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
|
||||
}
|
||||
metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
|
||||
var errStr string
|
||||
i := 0
|
||||
for {
|
||||
j := bytes.IndexByte(buf[i:], '\n')
|
||||
if j == -1 {
|
||||
break
|
||||
}
|
||||
if len(buf[i:i+j]) < 2 {
|
||||
i += j + 1 // increment i past the previous newline
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := parseMetric(buf[i:i+j], t, precision)
|
||||
if err != nil {
|
||||
i += j + 1 // increment i past the previous newline
|
||||
errStr += " " + err.Error()
|
||||
continue
|
||||
}
|
||||
i += j + 1 // increment i past the previous newline
|
||||
|
||||
metrics = append(metrics, m)
|
||||
}
|
||||
|
||||
if len(errStr) > 0 {
|
||||
return metrics, fmt.Errorf(errStr)
|
||||
}
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
func parseMetric(buf []byte,
|
||||
defaultTime time.Time,
|
||||
precision string,
|
||||
) (telegraf.Metric, error) {
|
||||
var dTime string
|
||||
// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
|
||||
pos, key, err := scanKey(buf, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// measurement name is required
|
||||
if len(key) == 0 {
|
||||
return nil, fmt.Errorf("missing measurement")
|
||||
}
|
||||
|
||||
if len(key) > MaxKeyLength {
|
||||
return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
|
||||
}
|
||||
|
||||
// scan the second block is which is field1=value1[,field2=value2,...]
|
||||
pos, fields, err := scanFields(buf, pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// at least one field is required
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("missing fields")
|
||||
}
|
||||
|
||||
// scan the last block which is an optional integer timestamp
|
||||
pos, ts, err := scanTime(buf, pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// apply precision multiplier
|
||||
var nsec int64
|
||||
multiplier := getPrecisionMultiplier(precision)
|
||||
if len(ts) > 0 && multiplier > 1 {
|
||||
tsint, err := parseIntBytes(ts, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nsec := multiplier * tsint
|
||||
ts = []byte(strconv.FormatInt(nsec, 10))
|
||||
}
|
||||
|
||||
m := &metric{
|
||||
fields: fields,
|
||||
t: ts,
|
||||
nsec: nsec,
|
||||
}
|
||||
|
||||
// parse out the measurement name
|
||||
// namei is the index at which the "name" ends
|
||||
namei := indexUnescapedByte(key, ',')
|
||||
if namei < 1 {
|
||||
// no tags
|
||||
m.name = key
|
||||
} else {
|
||||
m.name = key[0:namei]
|
||||
m.tags = key[namei:]
|
||||
}
|
||||
|
||||
if len(m.t) == 0 {
|
||||
if len(dTime) == 0 {
|
||||
dTime = fmt.Sprint(defaultTime.UnixNano())
|
||||
}
|
||||
// use default time
|
||||
m.t = []byte(dTime)
|
||||
}
|
||||
|
||||
// here we copy on return because this allows us to later call
|
||||
// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
|
||||
// modifying 'tag' bytes having an affect on 'field' bytes, for example.
|
||||
return m.Copy(), nil
|
||||
}
|
||||
|
||||
// scanKey scans buf starting at i for the measurement and tag portion of the point.
|
||||
// It returns the ending position and the byte slice of key within buf. If there
|
||||
// are tags, they will be sorted if they are not already.
|
||||
func scanKey(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
|
||||
// First scan the Point's measurement.
|
||||
state, i, err := scanMeasurement(buf, i)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
|
||||
// Optionally scan tags if needed.
|
||||
if state == tagKeyState {
|
||||
i, err = scanTags(buf, i)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
}
|
||||
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
// scanMeasurement examines the measurement part of a Point, returning
|
||||
// the next state to move to, and the current location in the buffer.
|
||||
func scanMeasurement(buf []byte, i int) (int, int, error) {
|
||||
// Check first byte of measurement, anything except a comma is fine.
|
||||
// It can't be a space, since whitespace is stripped prior to this
|
||||
// function call.
|
||||
if i >= len(buf) || buf[i] == ',' {
|
||||
return -1, i, makeError("missing measurement", buf, i)
|
||||
}
|
||||
|
||||
for {
|
||||
i++
|
||||
if i >= len(buf) {
|
||||
// cpu
|
||||
return -1, i, makeError("missing fields", buf, i)
|
||||
}
|
||||
|
||||
if buf[i-1] == '\\' {
|
||||
// Skip character (it's escaped).
|
||||
continue
|
||||
}
|
||||
|
||||
// Unescaped comma; move onto scanning the tags.
|
||||
if buf[i] == ',' {
|
||||
return tagKeyState, i + 1, nil
|
||||
}
|
||||
|
||||
// Unescaped space; move onto scanning the fields.
|
||||
if buf[i] == ' ' {
|
||||
// cpu value=1.0
|
||||
return fieldsState, i, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTags examines all the tags in a Point, keeping track of and
|
||||
// returning the updated indices slice, number of commas and location
|
||||
// in buf where to start examining the Point fields.
|
||||
func scanTags(buf []byte, i int) (int, error) {
|
||||
var (
|
||||
err error
|
||||
state = tagKeyState
|
||||
)
|
||||
|
||||
for {
|
||||
switch state {
|
||||
case tagKeyState:
|
||||
i, err = scanTagsKey(buf, i)
|
||||
state = tagValueState // tag value always follows a tag key
|
||||
case tagValueState:
|
||||
state, i, err = scanTagsValue(buf, i)
|
||||
case fieldsState:
|
||||
return i, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
	// First character of the key.
	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
		// cpu,{'', ' ', ',', '='}
		return i, makeError("missing tag key", buf, i)
	}

	// Examine each character in the tag key until we hit an unescaped
	// equals (the tag value), or we hit an error (i.e., an unescaped
	// space or comma).
	for {
		i++

		// Either we reached the end of the buffer or we hit an
		// unescaped comma or space.
		if i >= len(buf) ||
			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
			// cpu,tag{'', ' ', ','}
			return i, makeError("missing tag value", buf, i)
		}

		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag=
			return i + 1, nil
		}
	}
}

// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
	// Tag value cannot be empty.
	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
		// cpu,tag={',', ' '}
		return -1, i, makeError("missing tag value", buf, i)
	}

	// Examine each character in the tag value until we hit an unescaped
	// comma (move onto the next tag key), an unescaped space (move onto
	// fields), or we error out.
	for {
		i++
		if i >= len(buf) {
			// cpu,tag=value
			return -1, i, makeError("missing fields", buf, i)
		}

		// An unescaped equals sign is an invalid tag value.
		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag={'=', 'fo=o'}
			return -1, i, makeError("invalid tag format", buf, i)
		}

		if buf[i] == ',' && buf[i-1] != '\\' {
			// cpu,tag=foo,
			return tagKeyState, i + 1, nil
		}

		// cpu,tag=foo value=1.0
		// cpu, tag=foo\= value=1.0
		if buf[i] == ' ' && buf[i-1] != '\\' {
			return fieldsState, i, nil
		}
	}
}

// scanFields scans buf, starting at i, for the fields section of a point. It returns
// the ending position and the byte slice of the fields within buf.
func scanFields(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	// tracks how many '"' we've seen since the last '='
	quotes := 0

	// tracks how many '=' we've seen
	equals := 0

	// tracks how many commas we've seen
	commas := 0

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// escaped characters?
		if buf[i] == '\\' && i+1 < len(buf) {
			i += 2
			continue
		}

		// If the value is quoted, scan until we get to the end quote.
		// Only count quotes in the field value, since quotes are not
		// significant in the field key.
		if buf[i] == '"' && equals > commas {
			i++
			quotes++
			if quotes > 2 {
				break
			}
			continue
		}

		// If we see an =, ensure that there is at least one char before and after it.
		if buf[i] == '=' && quotes != 1 {
			quotes = 0
			equals++

			// check for "... =123" but allow "a\ =123"
			if buf[i-1] == ' ' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "...a=123,=456" but allow "a=123,a\,=456"
			if buf[i-1] == ',' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "... value="
			if i+1 >= len(buf) {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			// check for "... value=,value2=..."
			if buf[i+1] == ',' || buf[i+1] == ' ' {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
				var err error
				i, err = scanNumber(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
			// If the next byte is not a double-quote, the value must be a boolean.
			if buf[i+1] != '"' {
				var err error
				i, _, err = scanBoolean(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
		}

		if buf[i] == ',' && quotes != 1 {
			commas++
		}

		// reached end of block?
		if buf[i] == ' ' && quotes != 1 {
			break
		}
		i++
	}

	if quotes != 0 && quotes != 2 {
		return i, buf[start:i], makeError("unbalanced quotes", buf, i)
	}

	// check that all field sections had keys and values (e.g. prevent "a=1,b")
	if equals == 0 || commas != equals-1 {
		return i, buf[start:i], makeError("invalid field format", buf, i)
	}

	return i, buf[start:i], nil
}

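// Worked examples of the bookkeeping above (an illustrative sketch added for
// this edit, not part of the original source). scanFields accepts a fields
// section only when every '=' is paired with a key and a value, which the
// counters encode as commas == equals-1:
//
//   "a=1,b=2"     -> equals == 2, commas == 1: valid
//   "a=1,b"       -> equals == 1, commas == 1: rejected ("invalid field format")
//   `s="x,y",n=1` -> the comma inside the quotes is not counted, because
//                    quotes == 1 at that point, so the totals stay correct
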
// scanTime scans buf, starting at i, for the time section of a point. It
// returns the ending position and the byte slice of the timestamp within buf,
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// Reached end of block or trailing whitespace?
		if buf[i] == '\n' || buf[i] == ' ' {
			break
		}

		// Handle negative timestamps
		if i == start && buf[i] == '-' {
			i++
			continue
		}

		// Timestamps should be integers; make sure they are, so we don't need
		// to actually parse the timestamp until needed.
		if buf[i] < '0' || buf[i] > '9' {
			return i, buf[start:i], makeError("invalid timestamp", buf, i)
		}
		i++
	}
	return i, buf[start:i], nil
}

func isNumeric(b byte) bool {
	return (b >= '0' && b <= '9') || b == '.'
}

// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
	start := i
	var isInt bool

	// Is negative number?
	if i < len(buf) && buf[i] == '-' {
		i++
		// There must be more characters now, as just '-' is illegal.
		if i == len(buf) {
			return i, ErrInvalidNumber
		}
	}

	// tracks whether we've seen a decimal point
	decimal := false

	// indicates the number is a float in scientific notation
	scientific := false

	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}

		if buf[i] == 'i' && i > start && !isInt {
			isInt = true
			i++
			continue
		}

		if buf[i] == '.' {
			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
			if decimal {
				return i, ErrInvalidNumber
			}
			decimal = true
		}

		// `e` is valid for floats but not as the first char
		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
			scientific = true
			i++
			continue
		}

		// + and - are only valid at this point if they follow an e (scientific notation)
		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
			i++
			continue
		}

		// NaN is an unsupported value
		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
			return i, ErrInvalidNumber
		}

		if !isNumeric(buf[i]) {
			return i, ErrInvalidNumber
		}
		i++
	}

	if isInt && (decimal || scientific) {
		return i, ErrInvalidNumber
	}

	numericDigits := i - start
	if isInt {
		numericDigits--
	}
	if decimal {
		numericDigits--
	}
	if buf[start] == '-' {
		numericDigits--
	}

	if numericDigits == 0 {
		return i, ErrInvalidNumber
	}

	// It's more common that numbers will be within the min/max range for their type, but we need
	// to prevent out-of-range numbers from being parsed successfully. This uses some simple
	// heuristics to decide if we should parse the number to the actual type. It does not do it
	// all the time because it incurs extra allocations, and we end up converting the type again
	// when writing points to disk.
	if isInt {
		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
		if buf[i-1] != 'i' {
			return i, ErrInvalidNumber
		}
		// Parse the int to check bounds if the number of digits could be larger than the max range.
		// We subtract 1 from the index to remove the `i` from our tests.
		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
				return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
			}
		}
	} else {
		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range.
		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
			if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
				return i, makeError("invalid float", buf, i)
			}
		}
	}

	return i, nil
}

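// Illustrative inputs for scanNumber (a sketch added for this edit, mirroring
// the invalid-number test cases later in this diff):
//
//   accepted: "1", "-2", "1.5", "1e6", "-1.2e-3", "42i"
//   rejected: "-", ".", "1.1.1", "9i10" (chars after the 'i'),
//             "1.0i" (decimal on an integer), "nan" (NaN unsupported)
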
// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid
// boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
		return i, buf[start:i], makeError("invalid value", buf, i)
	}

	i++
	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}
		i++
	}

	// Single char bool (t, T, f, F) is ok
	if i-start == 1 {
		return i, buf[start:i], nil
	}

	// length must be 4 for true or TRUE
	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// length must be 5 for false or FALSE
	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// Otherwise
	valid := false
	switch buf[start] {
	case 't':
		valid = bytes.Equal(buf[start:i], []byte("true"))
	case 'f':
		valid = bytes.Equal(buf[start:i], []byte("false"))
	case 'T':
		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
	case 'F':
		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
	}

	if !valid {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	return i, buf[start:i], nil
}

// skipWhitespace returns the end position within buf, starting at i, after
// scanning over spaces in tags.
func skipWhitespace(buf []byte, i int) int {
	for i < len(buf) {
		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
			break
		}
		i++
	}
	return i
}

// makeError is a helper function for making a metric parsing error.
// reason is the reason why the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
	return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
		reason, buf, i)
}

// getPrecisionMultiplier returns the multiplier, in nanoseconds per unit,
// for the precision specified (defaulting to nanoseconds).
func getPrecisionMultiplier(precision string) int64 {
	d := time.Nanosecond
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	case "m":
		d = time.Minute
	case "h":
		d = time.Hour
	}
	return int64(d)
}
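
As a quick orientation (not part of the diff itself): the scanners above back the package's exported Parse, which the new tests below exercise. A minimal usage sketch, assuming the `metric` package path from this repository:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// One line of line protocol: measurement, one tag, one field, nanosecond timestamp.
	ms, err := metric.Parse([]byte("cpu,host=serverA usage=0.5 1480595849000000000\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(ms[0].Name(), ms[0].Tags(), ms[0].Fields())
}
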
metric/parse_test.go (new file, 413 lines)
@@ -0,0 +1,413 @@
package metric

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`

const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`

const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`

const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`

const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`

const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200
`

// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParse(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(sevenMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 7)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"idle": float64(99),
				"busy": int64(1),
				"b":    true,
				"s":    "string",
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseNegNumbers(t *testing.T) {
	metrics, err := Parse([]byte(negMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 1)

	assert.Equal(t,
		map[string]interface{}{
			"temp":       int64(-99),
			"temp_float": float64(-99.4),
		},
		metrics[0].Fields(),
	)
	assert.Equal(t,
		map[string]string{
			"host": "local",
		},
		metrics[0].Tags(),
	)
}

func TestParseErrors(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(someInvalid))
	assert.Error(t, err)
	assert.Len(t, metrics, 4)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage_idle": float64(99),
				"usage_busy": float64(1),
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseWithTimestamps(t *testing.T) {
	metrics, err := Parse([]byte(withTimestamps))
	assert.NoError(t, err)
	assert.Len(t, metrics, 4)

	expectedTimestamps := []time.Time{
		time.Unix(0, 1480595849000000000),
		time.Unix(0, 1480595850000000000),
		time.Unix(0, 1480595851700030000),
		time.Unix(0, 1480595852000000300),
	}

	// metrics parsed with explicit timestamps should keep those timestamps.
	for i, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage": float64(99),
			},
			m.Fields(),
		)
		assert.True(t, m.Time().Equal(expectedTimestamps[i]))
	}
}

func TestParseEscapes(t *testing.T) {
	metrics, err := Parse([]byte(withEscapes))
	assert.NoError(t, err)
	assert.Len(t, metrics, 6)

	tests := []struct {
		name   string
		fields map[string]interface{}
		tags   map[string]string
	}{
		{
			name:   `w, eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `w,eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{"location": `us,midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temp=rature`: float64(82)},
			tags:   map[string]string{"location": `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{`location place`: `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temperature`: `too"hot"`},
			tags:   map[string]string{"location": `us-midwest`},
		},
	}

	for i, test := range tests {
		assert.Equal(t, test.name, metrics[i].Name())
		assert.Equal(t, test.fields, metrics[i].Fields())
		assert.Equal(t, test.tags, metrics[i].Tags())
	}
}

func TestParseTrueBooleans(t *testing.T) {
	metrics, err := Parse([]byte(trues))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, true, metric.Fields()["b"])
	}
}

func TestParseFalseBooleans(t *testing.T) {
	metrics, err := Parse([]byte(falses))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, false, metric.Fields()["b"])
	}
}

func TestParsePointBadNumber(t *testing.T) {
	for _, tt := range []string{
		"cpu v=- ",
		"cpu v=-i ",
		"cpu v=-. ",
		"cpu v=. ",
		"cpu v=1.0i ",
		"cpu v=1ii ",
		"cpu v=1a ",
		"cpu v=-e-e-e ",
		"cpu v=42+3 ",
		"cpu v= ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseTagsMissingParts(t *testing.T) {
	for _, tt := range []string{
		`cpu,host`,
		`cpu,host,`,
		`cpu,host=`,
		`cpu,f=oo=bar value=1`,
		`cpu,host value=1i`,
		`cpu,host=serverA,region value=1i`,
		`cpu,host=serverA,region= value=1i`,
		`cpu,host=serverA,region=,zone=us-west value=1i`,
		`cpu, value=1`,
		`cpu, ,,`,
		`cpu,,,`,
		`cpu,host=serverA,=us-east value=1i`,
		`cpu,host=serverAa\,,=us-east value=1i`,
		`cpu,host=serverA\,,=us-east value=1i`,
		`cpu, =serverA value=1i`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointWhitespace(t *testing.T) {
	for _, tt := range []string{
		`cpu value=1.0 1257894000000000000`,
		`cpu value=1.0 1257894000000000000`,
		`cpu value=1.0 1257894000000000000`,
		`cpu value=1.0 1257894000000000000 `,
	} {
		m, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.Equal(t, "cpu", m[0].Name())
		assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
	}
}

func TestParsePointInvalidFields(t *testing.T) {
	for _, tt := range []string{
		"test,foo=bar a=101,=value",
		"test,foo=bar =value",
		"test,foo=bar a=101,key=",
		"test,foo=bar key=",
		`test,foo=bar a=101,b="foo`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointNoFields(t *testing.T) {
	for _, tt := range []string{
		"cpu_load_short,host=server01,region=us-west",
		"very_long_measurement_name",
		"cpu,host==",
		"============",
		"cpu",
		"cpu\n\n\n\n\n\n\n",
		" ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

// a b=1 << this is the shortest possible metric;
// any shorter is just ignored.
func TestParseBufTooShort(t *testing.T) {
	for _, tt := range []string{
		"",
		"a",
		"a ",
		"a b=",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidBooleans(t *testing.T) {
	for _, tt := range []string{
		"test b=tru",
		"test b=fals",
		"test b=faLse",
		"test q=foo",
		"test b=lambchops",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidNumbers(t *testing.T) {
	for _, tt := range []string{
		"test b=-",
		"test b=1.1.1",
		"test b=nan",
		"test b=9i10",
		"test b=9999999999999999999i",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseNegativeTimestamps(t *testing.T) {
	for _, tt := range []string{
		"test foo=101 -1257894000000000000",
	} {
		metrics, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
	}
}

func TestParsePrecision(t *testing.T) {
	for _, tt := range []struct {
		line      string
		precision string
		expected  int64
	}{
		{"test v=42 1491847420", "s", 1491847420000000000},
		{"test v=42 1491847420123", "ms", 1491847420123000000},
		{"test v=42 1491847420123456", "u", 1491847420123456000},
		{"test v=42 1491847420123456789", "ns", 1491847420123456789},

		{"test v=42 1491847420123456789", "1s", 1491847420123456789},
		{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
	} {
		metrics, err := ParseWithDefaultTimePrecision(
			[]byte(tt.line+"\n"), time.Now(), tt.precision)
		assert.NoError(t, err)
		assert.Equal(t, tt.expected, metrics[0].UnixNano())
	}
}

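// Note (added for this edit): the cases above are consistent with
// getPrecisionMultiplier in parse.go. An "s"-precision timestamp of
// 1491847420 is scaled by int64(time.Second) == 1e9 to
// 1491847420000000000 ns, while unrecognized strings such as "1s" or
// "asdf" fall through to the nanosecond default (multiplier 1), leaving
// the value unchanged.
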
func TestParsePrecisionUnsetTime(t *testing.T) {
	for _, tt := range []struct {
		line      string
		precision string
	}{
		{"test v=42", "s"},
		{"test v=42", "ns"},
	} {
		_, err := ParseWithDefaultTimePrecision(
			[]byte(tt.line+"\n"), time.Now(), tt.precision)
		assert.NoError(t, err)
	}
}

func TestParseMaxKeyLength(t *testing.T) {
	key := ""
	for {
		if len(key) > MaxKeyLength {
			break
		}
		key += "test"
	}

	_, err := Parse([]byte(key + " value=1\n"))
	assert.Error(t, err)
}

metric/reader.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package metric

import (
	"io"

	"github.com/influxdata/telegraf"
)

type state int

const (
	_ state = iota
	// normal state copies whole metrics into the given buffer until we can't
	// fit the next metric.
	normal
	// split state means that we have a metric that we were able to split, so
	// that we can fit it into multiple metrics (and calls to Read).
	split
	// overflow state means that we have a metric that didn't fit into a single
	// buffer, and needs to be split across multiple calls to Read.
	overflow
	// splitOverflow state means that a split metric didn't fit into a single
	// buffer, and needs to be split across multiple calls to Read.
	splitOverflow
	// done means we're done reading metrics, and now always return (0, io.EOF).
	done
)

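// State-transition sketch (added for orientation; derived from Read below):
//
//   normal -> normal          metric fits; copy it and advance
//   normal -> split           metric didn't fit but Split produced pieces
//   normal -> overflow        metric (or its first piece) is still too large
//   split  -> splitOverflow   a later, smaller buffer can't fit a piece
//   overflow -> normal/done   once r.buf is fully drained
//   splitOverflow -> split/normal/done  once r.buf is fully drained
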
type reader struct {
	metrics      []telegraf.Metric
	splitMetrics []telegraf.Metric
	buf          []byte
	state        state

	// metric index
	iM int
	// split metric index
	iSM int
	// buffer index
	iB int
}

func NewReader(metrics []telegraf.Metric) io.Reader {
	return &reader{
		metrics: metrics,
		state:   normal,
	}
}

func (r *reader) Read(p []byte) (n int, err error) {
	var i int
	switch r.state {
	case done:
		return 0, io.EOF
	case normal:
		for {
			// this for-loop is the sunny-day scenario, where we are given a
			// buffer that is large enough to hold at least a single metric.
			// all of the cases below it are edge-cases.
			if r.metrics[r.iM].Len() <= len(p[i:]) {
				i += r.metrics[r.iM].SerializeTo(p[i:])
			} else {
				break
			}
			r.iM++
			if r.iM == len(r.metrics) {
				r.state = done
				return i, io.EOF
			}
		}

		// if we haven't written any bytes, check if we can split the current
		// metric into multiple full metrics at a smaller size.
		if i == 0 {
			tmp := r.metrics[r.iM].Split(len(p))
			if len(tmp) > 1 {
				r.splitMetrics = tmp
				r.state = split
				if r.splitMetrics[0].Len() <= len(p) {
					i += r.splitMetrics[0].SerializeTo(p)
					r.iSM = 1
				} else {
					// splitting didn't quite work, so we'll drop down and
					// overflow the metric.
					r.state = normal
					r.iSM = 0
				}
			}
		}

		// if we haven't written any bytes and we're not at the end of the metrics
		// slice, then it means we have a single metric that is larger than the
		// provided buffer.
		if i == 0 {
			r.buf = r.metrics[r.iM].Serialize()
			i += copy(p, r.buf[r.iB:])
			r.iB += i
			r.state = overflow
		}

	case split:
		if r.splitMetrics[r.iSM].Len() <= len(p) {
			// write the current split metric
			i += r.splitMetrics[r.iSM].SerializeTo(p)
			r.iSM++
			if r.iSM >= len(r.splitMetrics) {
				// done writing the current split metrics
				r.iSM = 0
				r.iM++
				if r.iM == len(r.metrics) {
					r.state = done
					return i, io.EOF
				}
				r.state = normal
			}
		} else {
			// This would only happen if we split the metric, and then a
			// subsequent buffer was smaller than the initial one given,
			// so that our split metric no longer fits.
			r.buf = r.splitMetrics[r.iSM].Serialize()
			i += copy(p, r.buf[r.iB:])
			r.iB += i
			r.state = splitOverflow
		}

	case splitOverflow:
		i = copy(p, r.buf[r.iB:])
		r.iB += i
		if r.iB >= len(r.buf) {
			r.iB = 0
			r.iSM++
			if r.iSM == len(r.splitMetrics) {
				r.iM++
				if r.iM == len(r.metrics) {
					r.state = done
					return i, io.EOF
				}
				r.state = normal
			} else {
				r.state = split
			}
		}

	case overflow:
		i = copy(p, r.buf[r.iB:])
		r.iB += i
		if r.iB >= len(r.buf) {
			r.iB = 0
			r.iM++
			if r.iM == len(r.metrics) {
				r.state = done
				return i, io.EOF
			}
			r.state = normal
		}
	}

	return i, nil
}
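
A minimal usage sketch (added here for orientation; it assumes the `metric` package path and the New constructor exercised by the tests below): streaming serialized metrics through the reader with a deliberately small buffer forces the overflow path.

package main

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

func main() {
	m, _ := metric.New("cpu", map[string]string{"host": "a"},
		map[string]interface{}{"value": int64(1)}, time.Unix(1481032190, 0))
	r := metric.NewReader([]telegraf.Metric{m})

	// A 10-byte buffer forces the reader into its overflow state,
	// emitting the line protocol in chunks.
	buf := make([]byte, 10)
	for {
		n, err := r.Read(buf)
		fmt.Fprint(os.Stdout, string(buf[:n]))
		if err == io.EOF {
			break
		}
	}
}
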
metric/reader_test.go (new file, 713 lines)
@@ -0,0 +1,713 @@
package metric

import (
	"io"
	"io/ioutil"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func BenchmarkMetricReader(b *testing.B) {
	metrics := make([]telegraf.Metric, 10)
	for i := 0; i < 10; i++ {
		metrics[i], _ = New("foo", map[string]string{},
			map[string]interface{}{"value": int64(1)}, time.Now())
	}
	for n := 0; n < b.N; n++ {
		r := NewReader(metrics)
		io.Copy(ioutil.Discard, r)
	}
}

func TestMetricReader(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	metrics := make([]telegraf.Metric, 10)
	for i := 0; i < 10; i++ {
		metrics[i], _ = New("foo", map[string]string{},
			map[string]interface{}{"value": int64(1)}, ts)
	}

	r := NewReader(metrics)

	buf := make([]byte, 35)
	for i := 0; i < 10; i++ {
		n, err := r.Read(buf)
		if err != nil {
			assert.True(t, err == io.EOF, err.Error())
		}
		assert.Equal(t, 33, n)
		assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n]))
	}

	// reader should now be done, and always return 0, io.EOF
	for i := 0; i < 10; i++ {
		n, err := r.Read(buf)
		assert.True(t, err == io.EOF, err.Error())
		assert.Equal(t, 0, n)
	}
}

func TestMetricReader_OverflowMetric(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m, _ := New("foo", map[string]string{},
		map[string]interface{}{"value": int64(10)}, ts)
	metrics := []telegraf.Metric{m}

	r := NewReader(metrics)
	buf := make([]byte, 5)

	tests := []struct {
		exp string
		err error
		n   int
	}{
		{"foo v", nil, 5},
		{"alue=", nil, 5},
		{"10i 1", nil, 5},
		{"48103", nil, 5},
		{"21900", nil, 5},
		{"00000", nil, 5},
		{"000\n", io.EOF, 4},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		assert.Equal(t, test.exp, string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

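// Note the arithmetic in the table above (comment added for this edit): the
// serialized metric is 34 bytes ("foo value=10i 1481032190000000000\n"), so a
// 5-byte buffer yields six full chunks plus a 4-byte tail, and io.EOF arrives
// together with the final chunk rather than on a separate empty Read.
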
// Regression test for when a metric is the same size as the buffer.
//
// Previously EOF would not be set until the next call to Read.
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{"a": int64(1)}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, m1.Len())

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer is 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

// Regression test for when a metric requires a split and one of the
// split metrics is exactly the size of the buffer.
//
// Previously an empty string would be returned on the next Read without error,
// and then the next Read call would panic.
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 30)

	// foo a=1i,bb=2i 1481032190000000000\n // len 35
	//
	// Requires this specific split order:
	// foo a=1i 1481032190000000000\n  // len 29
	// foo bb=2i 1481032190000000000\n // len 30

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer is 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

// Regression test for when a metric requires a split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly, causing a panic.
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"a":   int64(1),
			"bbb": int64(2),
		}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 30)

	// foo a=1i,bbb=2i 1481032190000000000\n // len 36
	//
	// foo a=1i 1481032190000000000\n   // len 29
	// foo bbb=2i 1481032190000000000\n // len 31

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer is 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow-split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 29)

	// foo a=1i,b=2i 1481032190000000000\n // len 34
	//
	// foo a=1i 1481032190000000000\n // len 29
	// foo b=2i 1481032190000000000\n // len 29

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer is 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m, _ := New("foo", map[string]string{},
		map[string]interface{}{"value": int64(10)}, ts)
	metrics := []telegraf.Metric{m, m.Copy()}

	r := NewReader(metrics)
	buf := make([]byte, 10)

	tests := []struct {
		exp string
		err error
		n   int
	}{
		{"foo value=", nil, 10},
		{"10i 148103", nil, 10},
		{"2190000000", nil, 10},
		{"000\n", nil, 4},
		{"foo value=", nil, 10},
		{"10i 148103", nil, 10},
		{"2190000000", nil, 10},
		{"000\n", io.EOF, 4},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		assert.Equal(t, test.exp, string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test splitting a metric
func TestMetricReader_SplitMetric(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
			"value4": int64(10),
			"value5": int64(10),
			"value6": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 60)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, io.EOF, 57},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test an array with one split metric and one unsplit
func TestMetricReader_SplitMetric2(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
			"value4": int64(10),
			"value5": int64(10),
			"value6": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)
	buf := make([]byte, 60)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test a split that results in metrics that are still too long, which results in
// the reader falling back to regular overflow.
func TestMetricReader_SplitMetricTooLong(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 30)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i 1481`, nil, 30},
		{`032190000000000\n`, io.EOF, 16},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test a split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value\d=10i 148103219000000`, nil, 30, make([]byte, 30)},
		{`0000\n`, nil, 5, make([]byte, 30)},
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35, make([]byte, 36)},
		{"", io.EOF, 0, make([]byte, 36)},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}

// test a split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value\d=10i 148103219000000`, nil, 30, make([]byte, 30)},
		{`0000\n`, nil, 5, make([]byte, 30)},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35, make([]byte, 36)},
		{"", io.EOF, 0, make([]byte, 36)},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}

|
||||
func TestReader_Read(t *testing.T) {
|
||||
epoch := time.Unix(0, 0)
|
||||
|
||||
type args struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
fields map[string]interface{}
|
||||
t time.Time
|
||||
mType []telegraf.ValueType
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
expected []byte
|
||||
}{
|
||||
{
|
||||
name: "escape backslashes in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test\`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\\" 0`),
|
||||
},
|
||||
{
|
||||
name: "escape quote in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test"`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\"" 0`),
|
||||
},
|
||||
{
|
||||
name: "escape quote and backslash in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test\"`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\\\"" 0`),
|
||||
},
|
||||
{
|
||||
name: "escape multiple backslash in string field",
|
||||
args: args{
|
||||
name: "cpu",
|
||||
tags: map[string]string{},
|
||||
fields: map[string]interface{}{"value": `test\\`},
|
||||
t: epoch,
|
||||
},
|
||||
expected: []byte(`cpu value="test\\\\" 0`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
buf := make([]byte, 512)
|
||||
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := NewReader([]telegraf.Metric{m})
|
||||
num, err := r.Read(buf)
|
||||
if err != io.EOF {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
line := string(buf[:num])
|
||||
// This is done so that we can use raw strings in the test spec
|
||||
noeol := strings.TrimRight(line, "\n")
|
||||
require.Equal(t, string(tt.expected), noeol)
|
||||
require.Equal(t, len(tt.expected)+1, num)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=
24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
`
	metrics, err := Parse([]byte(lp))
	require.NoError(t, err)
	r := NewReader(metrics)
	buf := make([]byte, 128)
	_, err = r.Read(buf)
	require.NoError(t, err)
	metrics, err = Parse(buf)
	require.NoError(t, err)
}

@@ -1,8 +1,4 @@
-package outputs
+package telegraf
 
-import (
-	"github.com/influxdb/influxdb/client/v2"
-)
-
 type Output interface {
 	// Connect to the Output
@@ -14,7 +10,7 @@ type Output interface {
 	// SampleConfig returns the default configuration of the Output
 	SampleConfig() string
 	// Write takes in group of points to be written to the Output
-	Write(points []*client.Point) error
+	Write(metrics []Metric) error
 }
 
 type ServiceOutput interface {
@@ -27,17 +23,9 @@ type ServiceOutput interface {
 	// SampleConfig returns the default configuration of the Output
 	SampleConfig() string
 	// Write takes in group of points to be written to the Output
-	Write(points []*client.Point) error
+	Write(metrics []Metric) error
 	// Start the "service" that will provide an Output
 	Start() error
 	// Stop the "service" that will provide an Output
 	Stop()
 }
-
-type Creator func() Output
-
-var Outputs = map[string]Creator{}
-
-func Add(name string, creator Creator) {
-	Outputs[name] = creator
-}
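
For context (a hypothetical sketch, not part of this diff): after this change an output implements Write in terms of telegraf.Metric instead of client.Point. The exact method set is inferred from the hunks above and the plugins below, so treat names here as assumptions.

package example

import (
	"log"

	"github.com/influxdata/telegraf"
)

// Printer is a hypothetical output that logs each metric's name and fields.
type Printer struct{}

func (p *Printer) Connect() error       { return nil }
func (p *Printer) Close() error         { return nil }
func (p *Printer) Description() string  { return "print metrics to the log" }
func (p *Printer) SampleConfig() string { return "" }

func (p *Printer) Write(metrics []telegraf.Metric) error {
	for _, m := range metrics {
		log.Print(m.Name(), " ", m.Fields())
	}
	return nil
}
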
@@ -1,16 +0,0 @@
-package all
-
-import (
-	_ "github.com/influxdb/telegraf/outputs/amon"
-	_ "github.com/influxdb/telegraf/outputs/amqp"
-	_ "github.com/influxdb/telegraf/outputs/datadog"
-	_ "github.com/influxdb/telegraf/outputs/influxdb"
-	_ "github.com/influxdb/telegraf/outputs/kafka"
-	_ "github.com/influxdb/telegraf/outputs/kinesis"
-	_ "github.com/influxdb/telegraf/outputs/librato"
-	_ "github.com/influxdb/telegraf/outputs/mqtt"
-	_ "github.com/influxdb/telegraf/outputs/nsq"
-	_ "github.com/influxdb/telegraf/outputs/opentsdb"
-	_ "github.com/influxdb/telegraf/outputs/prometheus_client"
-	_ "github.com/influxdb/telegraf/outputs/riemann"
-)
@@ -1,9 +0,0 @@
-# AMQP Output Plugin
-
-This plugin writes to a AMQP exchange using tag, defined in configuration file
-as RoutingTag, as a routing key.
-
-If RoutingTag is empty, then empty routing key will be used.
-Metrics are grouped in batches by RoutingTag.
-
-This plugin doesn't bind exchange to a queue, so it should be done by consumer.
@@ -1,161 +0,0 @@
-package amqp
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-	"sync"
-	"time"
-
-	"github.com/influxdb/influxdb/client/v2"
-	"github.com/influxdb/telegraf/outputs"
-	"github.com/streadway/amqp"
-)
-
-type AMQP struct {
-	// AMQP brokers to send metrics to
-	URL string
-	// AMQP exchange
-	Exchange string
-	// Routing Key Tag
-	RoutingTag string `toml:"routing_tag"`
-	// InfluxDB database
-	Database string
-	// InfluxDB retention policy
-	RetentionPolicy string
-	// InfluxDB precision
-	Precision string
-
-	channel *amqp.Channel
-	sync.Mutex
-	headers amqp.Table
-}
-
-const (
-	DefaultRetentionPolicy = "default"
-	DefaultDatabase        = "telegraf"
-	DefaultPrecision       = "s"
-)
-
-var sampleConfig = `
-  # AMQP url
-  url = "amqp://localhost:5672/influxdb"
-  # AMQP exchange
-  exchange = "telegraf"
-  # Telegraf tag to use as a routing key
-  # ie, if this tag exists, it's value will be used as the routing key
-  routing_tag = "host"
-
-  # InfluxDB retention policy
-  #retention_policy = "default"
-  # InfluxDB database
-  #database = "telegraf"
-  # InfluxDB precision
-  #precision = "s"
-`
-
-func (q *AMQP) Connect() error {
-	q.Lock()
-	defer q.Unlock()
-
-	q.headers = amqp.Table{
-		"precision":        q.Precision,
-		"database":         q.Database,
-		"retention_policy": q.RetentionPolicy,
-	}
-
-	connection, err := amqp.Dial(q.URL)
-	if err != nil {
-		return err
-	}
-	channel, err := connection.Channel()
-	if err != nil {
-		return fmt.Errorf("Failed to open a channel: %s", err)
-	}
-
-	err = channel.ExchangeDeclare(
-		q.Exchange, // name
-		"topic",    // type
-		true,       // durable
-		false,      // delete when unused
-		false,      // internal
-		false,      // no-wait
-		nil,        // arguments
-	)
-	if err != nil {
-		return fmt.Errorf("Failed to declare an exchange: %s", err)
-	}
-	q.channel = channel
-	go func() {
-		log.Printf("Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error)))
-		log.Printf("Trying to reconnect")
-		for err := q.Connect(); err != nil; err = q.Connect() {
-			log.Println(err)
-			time.Sleep(10 * time.Second)
-		}
-
-	}()
-	return nil
-}
-
-func (q *AMQP) Close() error {
-	return q.channel.Close()
-}
-
-func (q *AMQP) SampleConfig() string {
-	return sampleConfig
-}
-
-func (q *AMQP) Description() string {
-	return "Configuration for the AMQP server to send metrics to"
-}
-
-func (q *AMQP) Write(points []*client.Point) error {
-	q.Lock()
-	defer q.Unlock()
-	if len(points) == 0 {
-		return nil
-	}
-	var outbuf = make(map[string][][]byte)
-
-	for _, p := range points {
-		// Combine tags from Point and BatchPoints and grab the resulting
-		// line-protocol output string to write to AMQP
-		var value, key string
-		value = p.String()
-
-		if q.RoutingTag != "" {
-			if h, ok := p.Tags()[q.RoutingTag]; ok {
-				key = h
-			}
-		}
-		outbuf[key] = append(outbuf[key], []byte(value))
-
-	}
-	for key, buf := range outbuf {
-		err := q.channel.Publish(
-			q.Exchange, // exchange
-			key,        // routing key
-			false,      // mandatory
-			false,      // immediate
-			amqp.Publishing{
-				Headers:     q.headers,
-				ContentType: "text/plain",
-				Body:        bytes.Join(buf, []byte("\n")),
-			})
-		if err != nil {
-			return fmt.Errorf("FAILED to send amqp message: %s", err)
-		}
-	}
-	return nil
-}
-
-func init() {
-	outputs.Add("amqp", func() outputs.Output {
-		return &AMQP{
-			Database:        DefaultDatabase,
-			Precision:       DefaultPrecision,
-			RetentionPolicy: DefaultRetentionPolicy,
-		}
-	})
-}
@@ -1,12 +0,0 @@
# InfluxDB Output Plugin

This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.

Required parameters:

* `urls`: List of strings; this is for InfluxDB clustering support. On each
flush interval, Telegraf will randomly choose one of the urls to write to.
Each URL should start with either `http://` or `udp://`.
* `database`: The name of the database to write to.
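The random-url selection described above amounts to trying the configured servers in a shuffled order and stopping at the first successful write. A minimal sketch of that behavior (the package, function name, and arguments are illustrative; the plugin's own Write method further down does essentially the same thing):

package example

import (
    "errors"
    "log"
    "math/rand"

    "github.com/influxdb/influxdb/client/v2"
)

// writeToCluster tries each connection in a random order and stops at the
// first successful write, mirroring the per-flush behavior described above.
func writeToCluster(conns []client.Client, batch client.BatchPoints) error {
    for _, n := range rand.Perm(len(conns)) {
        if err := conns[n].Write(batch); err != nil {
            log.Println("ERROR: " + err.Error())
            continue
        }
        return nil
    }
    return errors.New("could not write to any InfluxDB server in cluster")
}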
@@ -1,160 +0,0 @@
package influxdb

import (
    "errors"
    "fmt"
    "log"
    "math/rand"
    "net/url"
    "strings"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/outputs"
)

type InfluxDB struct {
    // URL is only for backwards compatibility
    URL        string
    URLs       []string `toml:"urls"`
    Username   string
    Password   string
    Database   string
    UserAgent  string
    Precision  string
    Timeout    internal.Duration
    UDPPayload int `toml:"udp_payload"`

    conns []client.Client
}

var sampleConfig = `
  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
  # Multiple urls can be specified but it is assumed that they are part of the same
  # cluster, this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://localhost:8086"] # required
  # The target database for metrics (telegraf will create it if it does not exist)
  database = "telegraf" # required
  # Precision of writes, valid values are n, u, ms, s, m, and h
  # note: using second precision greatly helps InfluxDB compression
  precision = "s"

  # Connection timeout (for the connection with InfluxDB), formatted as a string.
  # If not provided, will default to 0 (no timeout)
  # timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512
`

func (i *InfluxDB) Connect() error {
    var urls []string
    for _, u := range i.URLs {
        urls = append(urls, u)
    }

    // Backward-compatibility with single Influx URL config files
    // This could eventually be removed in favor of specifying the urls as a list
    if i.URL != "" {
        urls = append(urls, i.URL)
    }

    var conns []client.Client
    for _, u := range urls {
        switch {
        case strings.HasPrefix(u, "udp"):
            parsed_url, err := url.Parse(u)
            if err != nil {
                return err
            }

            if i.UDPPayload == 0 {
                i.UDPPayload = client.UDPPayloadSize
            }
            c, err := client.NewUDPClient(client.UDPConfig{
                Addr:        parsed_url.Host,
                PayloadSize: i.UDPPayload,
            })
            if err != nil {
                return err
            }
            conns = append(conns, c)
        default:
            // If the URL doesn't start with "udp", assume an HTTP client
            c, err := client.NewHTTPClient(client.HTTPConfig{
                Addr:      u,
                Username:  i.Username,
                Password:  i.Password,
                UserAgent: i.UserAgent,
                Timeout:   i.Timeout.Duration,
            })
            if err != nil {
                return err
            }

            // Create the database if it doesn't exist
            _, e := c.Query(client.Query{
                Command: fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", i.Database),
            })

            if e != nil {
                log.Println("Database creation failed: " + e.Error())
            }

            conns = append(conns, c)
        }
    }

    i.conns = conns
    return nil
}

func (i *InfluxDB) Close() error {
    // The InfluxDB client does not provide a Close() function
    return nil
}

func (i *InfluxDB) SampleConfig() string {
    return sampleConfig
}

func (i *InfluxDB) Description() string {
    return "Configuration for influxdb server to send metrics to"
}

// Choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful one. If all servers fail, return an error.
func (i *InfluxDB) Write(points []*client.Point) error {
    bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
        Database:  i.Database,
        Precision: i.Precision,
    })

    for _, point := range points {
        bp.AddPoint(point)
    }

    // This will get set to nil if a successful write occurs
    err := errors.New("Could not write to any InfluxDB server in cluster")

    p := rand.Perm(len(i.conns))
    for _, n := range p {
        if e := i.conns[n].Write(bp); e != nil {
            log.Println("ERROR: " + e.Error())
        } else {
            err = nil
            break
        }
    }
    return err
}

func init() {
    outputs.Add("influxdb", func() outputs.Output {
        return &InfluxDB{}
    })
}
@@ -1,41 +0,0 @@
package influxdb

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/influxdb/telegraf/testutil"

    "github.com/stretchr/testify/require"
)

func TestUDPInflux(t *testing.T) {
    i := InfluxDB{
        URLs: []string{"udp://localhost:8089"},
    }

    err := i.Connect()
    require.NoError(t, err)
    err = i.Write(testutil.MockBatchPoints().Points())
    require.NoError(t, err)
}

func TestHTTPInflux(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // Headers must be set before WriteHeader is called for them to take effect
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusOK)
        fmt.Fprintln(w, `{"results":[{}]}`)
    }))
    defer ts.Close()

    i := InfluxDB{
        URLs: []string{ts.URL},
    }

    err := i.Connect()
    require.NoError(t, err)
    err = i.Write(testutil.MockBatchPoints().Points())
    require.NoError(t, err)
}
@@ -1,85 +0,0 @@
package kafka

import (
    "fmt"

    "github.com/Shopify/sarama"
    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
)

type Kafka struct {
    // Kafka brokers to send metrics to
    Brokers []string
    // Kafka topic
    Topic string
    // Routing Key Tag
    RoutingTag string `toml:"routing_tag"`

    producer sarama.SyncProducer
}

var sampleConfig = `
  # URLs of kafka brokers
  brokers = ["localhost:9092"]
  # Kafka topic for producer messages
  topic = "telegraf"
  # Telegraf tag to use as a routing key
  # i.e., if this tag exists, its value will be used as the routing key
  routing_tag = "host"
`

func (k *Kafka) Connect() error {
    producer, err := sarama.NewSyncProducer(k.Brokers, nil)
    if err != nil {
        return err
    }
    k.producer = producer
    return nil
}

func (k *Kafka) Close() error {
    return k.producer.Close()
}

func (k *Kafka) SampleConfig() string {
    return sampleConfig
}

func (k *Kafka) Description() string {
    return "Configuration for the Kafka server to send metrics to"
}

func (k *Kafka) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    for _, p := range points {
        // Combine tags from Point and BatchPoints and grab the resulting
        // line-protocol output string to write to Kafka
        value := p.String()

        m := &sarama.ProducerMessage{
            Topic: k.Topic,
            Value: sarama.StringEncoder(value),
        }
        if h, ok := p.Tags()[k.RoutingTag]; ok {
            m.Key = sarama.StringEncoder(h)
        }

        _, _, err := k.producer.SendMessage(m)
        if err != nil {
            return fmt.Errorf("FAILED to send kafka message: %s\n", err)
        }
    }
    return nil
}

func init() {
    outputs.Add("kafka", func() outputs.Output {
        return &Kafka{}
    })
}
@@ -1,28 +0,0 @@
package kafka

import (
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

func TestConnectAndWrite(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    brokers := []string{testutil.GetLocalHost() + ":9092"}
    k := &Kafka{
        Brokers: brokers,
        Topic:   "Test",
    }

    // Verify that we can connect to the Kafka broker
    err := k.Connect()
    require.NoError(t, err)

    // Verify that we can successfully write data to the kafka broker
    err = k.Write(testutil.MockBatchPoints().Points())
    require.NoError(t, err)
}
@@ -1,179 +0,0 @@
package kinesis

import (
    "fmt"
    "log"
    "os"
    "sync/atomic"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kinesis"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
)

type KinesisOutput struct {
    Region       string `toml:"region"`
    StreamName   string `toml:"streamname"`
    PartitionKey string `toml:"partitionkey"`
    Format       string `toml:"format"`
    Debug        bool   `toml:"debug"`
    svc          *kinesis.Kinesis
}

var sampleConfig = `
  # Amazon REGION of kinesis endpoint.
  region = "ap-southeast-2"
  # Kinesis StreamName must exist prior to starting telegraf.
  streamname = "StreamName"
  # PartitionKey as used for sharding data.
  partitionkey = "PartitionKey"
  # format of the Data payload in the kinesis PutRecord, supported
  # String and Custom.
  format = "string"
  # debug will show upstream aws messages.
  debug = false
`

func (k *KinesisOutput) SampleConfig() string {
    return sampleConfig
}

func (k *KinesisOutput) Description() string {
    return "Configuration for the AWS Kinesis output."
}

func checkstream(l []*string, s string) bool {
    // Check if the StreamName exists in the slice returned from the ListStreams API request.
    for _, stream := range l {
        if *stream == s {
            return true
        }
    }
    return false
}

func (k *KinesisOutput) Connect() error {
    // We attempt first to create a session to Kinesis using an IAM role; if that fails
    // it will fall through to using environment variables, and then Shared Credentials.
    if k.Debug {
        log.Printf("kinesis: Establishing a connection to Kinesis in %+v", k.Region)
    }
    Config := &aws.Config{
        Region: aws.String(k.Region),
        Credentials: credentials.NewChainCredentials(
            []credentials.Provider{
                &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
                &credentials.EnvProvider{},
                &credentials.SharedCredentialsProvider{},
            }),
    }
    svc := kinesis.New(session.New(Config))

    KinesisParams := &kinesis.ListStreamsInput{
        Limit: aws.Int64(100),
    }

    resp, err := svc.ListStreams(KinesisParams)

    if err != nil {
        log.Printf("kinesis: Error in ListStreams API call : %+v \n", err)
    }

    if checkstream(resp.StreamNames, k.StreamName) {
        if k.Debug {
            log.Printf("kinesis: Stream Exists")
        }
        k.svc = svc
        return nil
    } else {
        log.Printf("kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName)
        os.Exit(1)
    }
    return err
}

func (k *KinesisOutput) Close() error {
    // There is no persistent connection to tear down for Kinesis
    return nil
}

func FormatMetric(k *KinesisOutput, point *client.Point) (string, error) {
    if k.Format == "string" {
        return point.String(), nil
    } else {
        m := fmt.Sprintf("%+v,%+v,%+v",
            point.Name(),
            point.Tags(),
            point.String())
        return m, nil
    }
}

func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Duration {
    start := time.Now()
    payload := &kinesis.PutRecordsInput{
        Records:    r,
        StreamName: aws.String(k.StreamName),
    }

    if k.Debug {
        resp, err := k.svc.PutRecords(payload)
        if err != nil {
            log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
        }
        log.Printf("%+v \n", resp)
    } else {
        _, err := k.svc.PutRecords(payload)
        if err != nil {
            log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
        }
    }
    return time.Since(start)
}

func (k *KinesisOutput) Write(points []*client.Point) error {
    var sz uint32 = 0

    if len(points) == 0 {
        return nil
    }

    r := []*kinesis.PutRecordsRequestEntry{}

    for _, p := range points {
        atomic.AddUint32(&sz, 1)

        metric, _ := FormatMetric(k, p)
        d := kinesis.PutRecordsRequestEntry{
            Data:         []byte(metric),
            PartitionKey: aws.String(k.PartitionKey),
        }
        r = append(r, &d)

        if sz == 500 {
            // Max Messages Per PutRecordRequest is 500
            elapsed := writekinesis(k, r)
            log.Printf("Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
            atomic.StoreUint32(&sz, 0)
            r = nil
        }
    }

    writekinesis(k, r)

    return nil
}

func init() {
    outputs.Add("kinesis", func() outputs.Output {
        return &KinesisOutput{}
    })
}
@@ -1,39 +0,0 @@
package kinesis

import (
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

func TestFormatMetric(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    k := &KinesisOutput{
        Format: "string",
    }

    p := testutil.MockBatchPoints().Points()[0]

    valid_string := "test1,tag1=value1 value=1 1257894000000000000"
    func_string, err := FormatMetric(k, p)

    if func_string != valid_string {
        t.Error("Expected ", valid_string)
    }
    require.NoError(t, err)

    k = &KinesisOutput{
        Format: "custom",
    }

    valid_custom := "test1,map[tag1:value1],test1,tag1=value1 value=1 1257894000000000000"
    func_custom, err := FormatMetric(k, p)

    if func_custom != valid_custom {
        t.Error("Expected ", valid_custom)
    }
    require.NoError(t, err)
}
@@ -1,165 +0,0 @@
package librato

import (
    "bytes"
    "encoding/json"
    "fmt"
    "log"
    "net/http"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/outputs"
)

type Librato struct {
    ApiUser   string
    ApiToken  string
    SourceTag string
    Timeout   internal.Duration

    apiUrl string
    client *http.Client
}

var sampleConfig = `
  # Librato API Docs
  # http://dev.librato.com/v1/metrics-authentication

  # Librato API user
  api_user = "telegraf@influxdb.com" # required.

  # Librato API token
  api_token = "my-secret-token" # required.

  # Tag Field to populate source attribute (optional)
  # This is typically the _hostname_ from which the metric was obtained.
  source_tag = "hostname"

  # Connection timeout.
  # timeout = "5s"
`

type Metrics struct {
    Gauges []*Gauge `json:"gauges"`
}

type Gauge struct {
    Name        string  `json:"name"`
    Value       float64 `json:"value"`
    Source      string  `json:"source"`
    MeasureTime int64   `json:"measure_time"`
}

const librato_api = "https://metrics-api.librato.com/v1/metrics"

func NewLibrato(apiUrl string) *Librato {
    return &Librato{
        apiUrl: apiUrl,
    }
}

func (l *Librato) Connect() error {
    if l.ApiUser == "" || l.ApiToken == "" {
        return fmt.Errorf("api_user and api_token are required fields for librato output")
    }
    l.client = &http.Client{
        Timeout: l.Timeout.Duration,
    }
    return nil
}

func (l *Librato) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }
    metrics := Metrics{}
    var tempGauges = make([]*Gauge, len(points))
    var acceptablePoints = 0
    for _, pt := range points {
        if gauge, err := l.buildGauge(pt); err == nil {
            tempGauges[acceptablePoints] = gauge
            acceptablePoints += 1
        } else {
            log.Printf("unable to build Gauge for %s, skipping\n", pt.Name())
        }
    }
    metrics.Gauges = make([]*Gauge, acceptablePoints)
    copy(metrics.Gauges, tempGauges[0:])
    metricsBytes, err := json.Marshal(metrics)
    if err != nil {
        return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
    }
    req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
    if err != nil {
        return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
    }
    req.Header.Add("Content-Type", "application/json")
    req.SetBasicAuth(l.ApiUser, l.ApiToken)

    resp, err := l.client.Do(req)
    if err != nil {
        return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
    }
    defer resp.Body.Close()

    if resp.StatusCode != 200 {
        return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
    }

    return nil
}

func (l *Librato) SampleConfig() string {
    return sampleConfig
}

func (l *Librato) Description() string {
    return "Configuration for Librato API to send metrics to."
}

func (l *Librato) buildGauge(pt *client.Point) (*Gauge, error) {
    gauge := &Gauge{
        Name:        pt.Name(),
        MeasureTime: pt.Time().Unix(),
    }
    if err := gauge.setValue(pt.Fields()["value"]); err != nil {
        return gauge, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error())
    }
    if l.SourceTag != "" {
        if source, ok := pt.Tags()[l.SourceTag]; ok {
            gauge.Source = source
        } else {
            return gauge, fmt.Errorf("undeterminable Source type from Field, %s\n", l.SourceTag)
        }
    }
    return gauge, nil
}

func (g *Gauge) setValue(v interface{}) error {
    switch d := v.(type) {
    case int:
        g.Value = float64(int(d))
    case int32:
        g.Value = float64(int32(d))
    case int64:
        g.Value = float64(int64(d))
    case float32:
        g.Value = float64(d)
    case float64:
        g.Value = float64(d)
    default:
        return fmt.Errorf("undeterminable type %+v", d)
    }
    return nil
}

func (l *Librato) Close() error {
    return nil
}

func init() {
    outputs.Add("librato", func() outputs.Output {
        return NewLibrato(librato_api)
    })
}
@@ -1,212 +0,0 @@
package librato

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/http/httptest"
    "reflect"
    "testing"
    "time"

    "github.com/influxdb/telegraf/testutil"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/stretchr/testify/require"
)

var (
    fakeUrl   = "http://test.librato.com"
    fakeUser  = "telegraf@influxdb.com"
    fakeToken = "123456"
)

func fakeLibrato() *Librato {
    l := NewLibrato(fakeUrl)
    l.ApiUser = fakeUser
    l.ApiToken = fakeToken
    return l
}

func TestUriOverride(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
    }))
    defer ts.Close()

    l := NewLibrato(ts.URL)
    l.ApiUser = "telegraf@influxdb.com"
    l.ApiToken = "123456"
    err := l.Connect()
    require.NoError(t, err)
    err = l.Write(testutil.MockBatchPoints().Points())
    require.NoError(t, err)
}

func TestBadStatusCode(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusServiceUnavailable)
        json.NewEncoder(w).Encode(`{
            "errors": {
                "system": [
                    "The API is currently down for maintenance. It'll be back shortly."
                ]
            }
        }`)
    }))
    defer ts.Close()

    l := NewLibrato(ts.URL)
    l.ApiUser = "telegraf@influxdb.com"
    l.ApiToken = "123456"
    err := l.Connect()
    require.NoError(t, err)
    err = l.Write(testutil.MockBatchPoints().Points())
    if err == nil {
        t.Errorf("error expected but none returned")
    } else {
        require.EqualError(t, fmt.Errorf("received bad status code, 503\n"), err.Error())
    }
}

func TestBuildGauge(t *testing.T) {
    var gaugeTests = []struct {
        ptIn     *client.Point
        outGauge *Gauge
        err      error
    }{
        {
            testutil.TestPoint(0.0, "test1"),
            &Gauge{
                Name:        "test1",
                MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       0.0,
            },
            nil,
        },
        {
            testutil.TestPoint(1.0, "test2"),
            &Gauge{
                Name:        "test2",
                MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       1.0,
            },
            nil,
        },
        {
            testutil.TestPoint(10, "test3"),
            &Gauge{
                Name:        "test3",
                MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       10.0,
            },
            nil,
        },
        {
            testutil.TestPoint(int32(112345), "test4"),
            &Gauge{
                Name:        "test4",
                MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       112345.0,
            },
            nil,
        },
        {
            testutil.TestPoint(int64(112345), "test5"),
            &Gauge{
                Name:        "test5",
                MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       112345.0,
            },
            nil,
        },
        {
            testutil.TestPoint(float32(11234.5), "test6"),
            &Gauge{
                Name:        "test6",
                MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       11234.5,
            },
            nil,
        },
        {
            testutil.TestPoint("11234.5", "test7"),
            &Gauge{
                Name:        "test7",
                MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       11234.5,
            },
            fmt.Errorf("unable to extract value from Fields, undeterminable type"),
        },
    }

    l := NewLibrato(fakeUrl)
    for _, gt := range gaugeTests {
        gauge, err := l.buildGauge(gt.ptIn)
        if err != nil && gt.err == nil {
            t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
        }
        if gt.err != nil && err == nil {
            t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
        }
        if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
            t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
        }
    }
}

func TestBuildGaugeWithSource(t *testing.T) {
    pt1, _ := client.NewPoint(
        "test1",
        map[string]string{"hostname": "192.168.0.1"},
        map[string]interface{}{"value": 0.0},
        time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
    )
    pt2, _ := client.NewPoint(
        "test2",
        map[string]string{"hostnam": "192.168.0.1"},
        map[string]interface{}{"value": 1.0},
        time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
    )
    var gaugeTests = []struct {
        ptIn     *client.Point
        outGauge *Gauge
        err      error
    }{
        {
            pt1,
            &Gauge{
                Name:        "test1",
                MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       0.0,
                Source:      "192.168.0.1",
            },
            nil,
        },
        {
            pt2,
            &Gauge{
                Name:        "test2",
                MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(),
                Value:       1.0,
            },
            fmt.Errorf("undeterminable Source type from Field, hostname"),
        },
    }

    l := NewLibrato(fakeUrl)
    l.SourceTag = "hostname"
    for _, gt := range gaugeTests {
        gauge, err := l.buildGauge(gt.ptIn)
        if err != nil && gt.err == nil {
            t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
        }
        if gt.err != nil && err == nil {
            t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
        }
        if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
            t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
        }
    }
}
@@ -1,190 +0,0 @@
package mqtt

import (
    "crypto/rand"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
    "strings"
    "sync"

    paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/outputs"
)

const MaxClientIdLen = 8
const MaxRetryCount = 3
const ClientIdPrefix = "telegraf"

type MQTT struct {
    Servers     []string `toml:"servers"`
    Username    string
    Password    string
    Database    string
    Timeout     internal.Duration
    TopicPrefix string

    Client *paho.Client
    Opts   *paho.ClientOptions
    sync.Mutex
}

var sampleConfig = `
  servers = ["localhost:1883"] # required.

  # MQTT outputs send metrics to this topic format
  # "<topic_prefix>/host/<hostname>/<pluginname>/"
  # ex: prefix/host/web01.example.com/mem/available
  # topic_prefix = "prefix"

  # username and password to connect to the MQTT server.
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
`

func (m *MQTT) Connect() error {
    var err error
    m.Lock()
    defer m.Unlock()

    m.Opts, err = m.CreateOpts()
    if err != nil {
        return err
    }

    m.Client = paho.NewClient(m.Opts)
    if token := m.Client.Connect(); token.Wait() && token.Error() != nil {
        return token.Error()
    }

    return nil
}

func (m *MQTT) Close() error {
    if m.Client.IsConnected() {
        m.Client.Disconnect(20)
    }
    return nil
}

func (m *MQTT) SampleConfig() string {
    return sampleConfig
}

func (m *MQTT) Description() string {
    return "Configuration for MQTT server to send metrics to"
}

func (m *MQTT) Write(points []*client.Point) error {
    m.Lock()
    defer m.Unlock()
    if len(points) == 0 {
        return nil
    }
    hostname, ok := points[0].Tags()["host"]
    if !ok {
        hostname = ""
    }

    for _, p := range points {
        var t []string
        if m.TopicPrefix != "" {
            t = append(t, m.TopicPrefix)
        }
        tm := strings.Split(p.Name(), "_")
        if len(tm) < 2 {
            tm = []string{p.Name(), "stat"}
        }

        t = append(t, "host", hostname, tm[0], tm[1])
        topic := strings.Join(t, "/")

        value := p.String()
        err := m.publish(topic, value)
        if err != nil {
            return fmt.Errorf("Could not write to MQTT server, %s", err)
        }
    }

    return nil
}

func (m *MQTT) publish(topic, body string) error {
    token := m.Client.Publish(topic, 0, false, body)
    token.Wait()
    if token.Error() != nil {
        return token.Error()
    }
    return nil
}

func (m *MQTT) CreateOpts() (*paho.ClientOptions, error) {
    opts := paho.NewClientOptions()

    clientId := getRandomClientId()
    opts.SetClientID(clientId)

    TLSConfig := &tls.Config{InsecureSkipVerify: false}
    ca := "" // TODO
    scheme := "tcp"
    if ca != "" {
        scheme = "ssl"
        certPool, err := getCertPool(ca)
        if err != nil {
            return nil, err
        }
        TLSConfig.RootCAs = certPool
    }
    TLSConfig.InsecureSkipVerify = true // TODO
    opts.SetTLSConfig(TLSConfig)

    user := m.Username
    if user != "" {
        opts.SetUsername(user)
    }
    password := m.Password
    if password != "" {
        opts.SetPassword(password)
    }

    if len(m.Servers) == 0 {
        return opts, fmt.Errorf("could not get host information")
    }
    for _, host := range m.Servers {
        server := fmt.Sprintf("%s://%s", scheme, host)

        opts.AddBroker(server)
    }
    opts.SetAutoReconnect(true)
    return opts, nil
}

func getRandomClientId() string {
    const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    var bytes = make([]byte, MaxClientIdLen)
    rand.Read(bytes)
    for i, b := range bytes {
        bytes[i] = alphanum[b%byte(len(alphanum))]
    }
    return ClientIdPrefix + "-" + string(bytes)
}

func getCertPool(pemPath string) (*x509.CertPool, error) {
    certs := x509.NewCertPool()

    pemData, err := ioutil.ReadFile(pemPath)
    if err != nil {
        return nil, err
    }
    certs.AppendCertsFromPEM(pemData)
    return certs, nil
}

func init() {
    outputs.Add("mqtt", func() outputs.Output {
        return &MQTT{}
    })
}
@@ -1,71 +0,0 @@
package nsq

import (
    "fmt"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
    "github.com/nsqio/go-nsq"
)

type NSQ struct {
    Server   string
    Topic    string
    producer *nsq.Producer
}

var sampleConfig = `
  # Location of nsqd instance listening on TCP
  server = "localhost:4150"
  # NSQ topic for producer messages
  topic = "telegraf"
`

func (n *NSQ) Connect() error {
    config := nsq.NewConfig()
    producer, err := nsq.NewProducer(n.Server, config)

    if err != nil {
        return err
    }

    n.producer = producer
    return nil
}

func (n *NSQ) Close() error {
    n.producer.Stop()
    return nil
}

func (n *NSQ) SampleConfig() string {
    return sampleConfig
}

func (n *NSQ) Description() string {
    return "Send telegraf measurements to NSQD"
}

func (n *NSQ) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    for _, p := range points {
        // Combine tags from Point and BatchPoints and grab the resulting
        // line-protocol output string to write to NSQ
        value := p.String()

        err := n.producer.Publish(n.Topic, []byte(value))

        if err != nil {
            return fmt.Errorf("FAILED to send NSQD message: %s", err)
        }
    }
    return nil
}

func init() {
    outputs.Add("nsq", func() outputs.Output {
        return &NSQ{}
    })
}
@@ -1,158 +0,0 @@
package opentsdb

import (
    "fmt"
    "net"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
)

type OpenTSDB struct {
    Prefix string

    Host string
    Port int

    Debug bool
}

var sampleConfig = `
  # prefix for metrics keys
  prefix = "my.specific.prefix."

  ## Telnet Mode ##
  # DNS name of the OpenTSDB server in telnet mode
  host = "opentsdb.example.com"

  # Port of the OpenTSDB server in telnet mode
  port = 4242

  # Debug true - Prints OpenTSDB communication
  debug = false
`

type MetricLine struct {
    Metric    string
    Timestamp int64
    Value     string
    Tags      string
}

func (o *OpenTSDB) Connect() error {
    // Test the connection to the OpenTSDB server
    uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
    tcpAddr, err := net.ResolveTCPAddr("tcp", uri)
    if err != nil {
        return fmt.Errorf("OpenTSDB: TCP address cannot be resolved")
    }
    connection, err := net.DialTCP("tcp", nil, tcpAddr)
    if err != nil {
        return fmt.Errorf("OpenTSDB: Telnet connect fail")
    }
    defer connection.Close()
    return nil
}

func (o *OpenTSDB) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }
    var timeNow = time.Now()
    // Send the data with telnet / socket communication
    uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
    tcpAddr, _ := net.ResolveTCPAddr("tcp", uri)
    connection, err := net.DialTCP("tcp", nil, tcpAddr)
    if err != nil {
        return fmt.Errorf("OpenTSDB: Telnet connect fail")
    }
    // Register the close immediately so early returns below don't leak the connection
    defer connection.Close()
    for _, pt := range points {
        metric := &MetricLine{
            Metric:    fmt.Sprintf("%s%s", o.Prefix, pt.Name()),
            Timestamp: timeNow.Unix(),
        }

        metricValue, buildError := buildValue(pt)
        if buildError != nil {
            fmt.Printf("OpenTSDB: %s\n", buildError.Error())
            continue
        }
        metric.Value = metricValue

        tagsSlice := buildTags(pt.Tags())
        metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))

        messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
        if o.Debug {
            fmt.Print(messageLine)
        }
        _, err := connection.Write([]byte(messageLine))
        if err != nil {
            return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
        }
    }

    return nil
}

func buildTags(ptTags map[string]string) []string {
    tags := make([]string, len(ptTags))
    index := 0
    for k, v := range ptTags {
        tags[index] = fmt.Sprintf("%s=%s", k, v)
        index += 1
    }
    sort.Strings(tags)
    return tags
}

func buildValue(pt *client.Point) (string, error) {
    var retv string
    var v = pt.Fields()["value"]
    switch p := v.(type) {
    case int64:
        retv = IntToString(int64(p))
    case uint64:
        retv = UIntToString(uint64(p))
    case float64:
        retv = FloatToString(float64(p))
    default:
        return retv, fmt.Errorf("unexpected type %T with value %v for OpenTSDB", v, v)
    }
    return retv, nil
}

func IntToString(input_num int64) string {
    return strconv.FormatInt(input_num, 10)
}

func UIntToString(input_num uint64) string {
    return strconv.FormatUint(input_num, 10)
}

func FloatToString(input_num float64) string {
    return strconv.FormatFloat(input_num, 'f', 6, 64)
}

func (o *OpenTSDB) SampleConfig() string {
    return sampleConfig
}

func (o *OpenTSDB) Description() string {
    return "Configuration for OpenTSDB server to send metrics to"
}

func (o *OpenTSDB) Close() error {
    return nil
}

func init() {
    outputs.Add("opentsdb", func() outputs.Output {
        return &OpenTSDB{}
    })
}
@@ -1,71 +0,0 @@
package opentsdb

import (
    "reflect"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

func TestBuildTagsTelnet(t *testing.T) {
    var tagtests = []struct {
        ptIn    map[string]string
        outTags []string
    }{
        {
            map[string]string{"one": "two", "three": "four"},
            []string{"one=two", "three=four"},
        },
        {
            map[string]string{"aaa": "bbb"},
            []string{"aaa=bbb"},
        },
        {
            map[string]string{"one": "two", "aaa": "bbb"},
            []string{"aaa=bbb", "one=two"},
        },
        {
            map[string]string{},
            []string{},
        },
    }
    for _, tt := range tagtests {
        tags := buildTags(tt.ptIn)
        if !reflect.DeepEqual(tags, tt.outTags) {
            t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
        }
    }
}

func TestWrite(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    o := &OpenTSDB{
        Host:   testutil.GetLocalHost(),
        Port:   4242,
        Prefix: "prefix.test.",
    }

    // Verify that we can connect to the OpenTSDB instance
    err := o.Connect()
    require.NoError(t, err)

    // Verify that we can successfully write data to OpenTSDB
    err = o.Write(testutil.MockBatchPoints().Points())
    require.NoError(t, err)

    // Verify positive and negative test cases of writing data
    bp := testutil.MockBatchPoints()
    bp.AddPoint(testutil.TestPoint(float64(1.0), "justametric.float"))
    bp.AddPoint(testutil.TestPoint(int64(123456789), "justametric.int"))
    bp.AddPoint(testutil.TestPoint(uint64(123456789012345), "justametric.uint"))
    bp.AddPoint(testutil.TestPoint("Lorem Ipsum", "justametric.string"))
    bp.AddPoint(testutil.TestPoint(float64(42.0), "justametric.anotherfloat"))

    err = o.Write(bp.Points())
    require.NoError(t, err)
}
@@ -1,6 +0,0 @@
# Prometheus Client Service Output Plugin

This plugin starts a Prometheus Client, listening on a port defined in the
configuration file.

It exposes all metrics on `/metrics` to be polled by a Prometheus server.
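For a quick check that the endpoint is serving, any HTTP client can poll it directly; a minimal sketch, assuming the default listen address of localhost:9126 used by the plugin below:

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
)

func main() {
    // The address assumes the plugin's default listen value of localhost:9126.
    resp, err := http.Get("http://localhost:9126/metrics")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        log.Fatal(err)
    }
    // Each collected point appears as an untyped metric with its tags as labels.
    fmt.Printf("%s", body)
}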
@@ -1,125 +0,0 @@
package prometheus_client

import (
    "fmt"
    "log"
    "net/http"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
    "github.com/prometheus/client_golang/prometheus"
)

type PrometheusClient struct {
    Listen  string
    metrics map[string]*prometheus.UntypedVec
}

var sampleConfig = `
  # Address to listen on
  # listen = ":9126"
`

func (p *PrometheusClient) Start() error {
    if p.Listen == "" {
        p.Listen = "localhost:9126"
    }

    http.Handle("/metrics", prometheus.Handler())
    server := &http.Server{
        Addr: p.Listen,
    }

    p.metrics = make(map[string]*prometheus.UntypedVec)
    go server.ListenAndServe()
    return nil
}

func (p *PrometheusClient) Stop() {
    // TODO: Use a listener for http.Server that counts active connections
    // that can be stopped and closed gracefully
}

func (p *PrometheusClient) Connect() error {
    // This service output does not need to make any further connections
    return nil
}

func (p *PrometheusClient) Close() error {
    // This service output does not need to close any of its connections
    return nil
}

func (p *PrometheusClient) SampleConfig() string {
    return sampleConfig
}

func (p *PrometheusClient) Description() string {
    return "Configuration for the Prometheus client to spawn"
}

func (p *PrometheusClient) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    for _, point := range points {
        var labels []string
        key := point.Name()

        for k := range point.Tags() {
            if len(k) > 0 {
                labels = append(labels, k)
            }
        }

        if _, ok := p.metrics[key]; !ok {
            p.metrics[key] = prometheus.NewUntypedVec(
                prometheus.UntypedOpts{
                    Name: key,
                    Help: fmt.Sprintf("Telegraf collected point '%s'", key),
                },
                labels,
            )
            prometheus.MustRegister(p.metrics[key])
        }

        l := prometheus.Labels{}
        for tk, tv := range point.Tags() {
            l[tk] = tv
        }

        for _, val := range point.Fields() {
            switch val := val.(type) {
            default:
                log.Printf("Prometheus output, unsupported type. key: %s, type: %T\n",
                    key, val)
            case int64:
                m, err := p.metrics[key].GetMetricWith(l)
                if err != nil {
                    log.Printf("ERROR Getting metric in Prometheus output, "+
                        "key: %s, labels: %v,\nerr: %s\n",
                        key, l, err.Error())
                    continue
                }
                m.Set(float64(val))
            case float64:
                m, err := p.metrics[key].GetMetricWith(l)
                if err != nil {
                    log.Printf("ERROR Getting metric in Prometheus output, "+
                        "key: %s, labels: %v,\nerr: %s\n",
                        key, l, err.Error())
                    continue
                }
                m.Set(val)
            }
        }
    }
    return nil
}

func init() {
    outputs.Add("prometheus_client", func() outputs.Output {
        return &PrometheusClient{}
    })
}
@@ -1,98 +0,0 @@
package prometheus_client

import (
    "testing"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/plugins/prometheus"
    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var pTesting *PrometheusClient

func TestPrometheusWritePointEmptyTag(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &prometheus.Prometheus{
        Urls: []string{"http://localhost:9126/metrics"},
    }
    tags := make(map[string]string)
    pt1, _ := client.NewPoint(
        "test_point_1",
        tags,
        map[string]interface{}{"value": 0.0})
    pt2, _ := client.NewPoint(
        "test_point_2",
        tags,
        map[string]interface{}{"value": 1.0})
    var points = []*client.Point{
        pt1,
        pt2,
    }
    require.NoError(t, pTesting.Write(points))

    expected := []struct {
        name  string
        value float64
        tags  map[string]string
    }{
        {"test_point_1", 0.0, tags},
        {"test_point_2", 1.0, tags},
    }

    var acc testutil.Accumulator

    require.NoError(t, p.Gather(&acc))
    for _, e := range expected {
        assert.NoError(t, acc.ValidateValue(e.name, e.value))
    }
}

func TestPrometheusWritePointTag(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &prometheus.Prometheus{
        Urls: []string{"http://localhost:9126/metrics"},
    }
    tags := make(map[string]string)
    tags["testtag"] = "testvalue"
    pt1, _ := client.NewPoint(
        "test_point_3",
        tags,
        map[string]interface{}{"value": 0.0})
    pt2, _ := client.NewPoint(
        "test_point_4",
        tags,
        map[string]interface{}{"value": 1.0})
    var points = []*client.Point{
        pt1,
        pt2,
    }
    require.NoError(t, pTesting.Write(points))

    expected := []struct {
        name  string
        value float64
    }{
        {"test_point_3", 0.0},
        {"test_point_4", 1.0},
    }

    var acc testutil.Accumulator

    require.NoError(t, p.Gather(&acc))
    for _, e := range expected {
        assert.True(t, acc.CheckTaggedValue(e.name, e.value, tags))
    }
}

func init() {
    pTesting = &PrometheusClient{Listen: "localhost:9126"}
    pTesting.Start()
}
@@ -1,95 +0,0 @@
package riemann

import (
    "fmt"
    "os"

    "github.com/amir/raidman"
    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
)

type Riemann struct {
    URL       string
    Transport string

    client *raidman.Client
}

var sampleConfig = `
  # URL of server
  url = "localhost:5555"
  # transport protocol to use either tcp or udp
  transport = "tcp"
`

func (r *Riemann) Connect() error {
    c, err := raidman.Dial(r.Transport, r.URL)

    if err != nil {
        return err
    }

    r.client = c
    return nil
}

func (r *Riemann) Close() error {
    r.client.Close()
    return nil
}

func (r *Riemann) SampleConfig() string {
    return sampleConfig
}

func (r *Riemann) Description() string {
    return "Configuration for the Riemann server to send metrics to"
}

func (r *Riemann) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    var events []*raidman.Event
    for _, p := range points {
        ev := buildEvent(p)
        events = append(events, ev)
    }

    var senderr = r.client.SendMulti(events)
    if senderr != nil {
        return fmt.Errorf("FAILED to send riemann message: %s\n", senderr)
    }

    return nil
}

func buildEvent(p *client.Point) *raidman.Event {
    host, ok := p.Tags()["host"]
    if !ok {
        hostname, err := os.Hostname()
        if err != nil {
            host = "unknown"
        } else {
            host = hostname
        }
    }

    var event = &raidman.Event{
        Host:    host,
        Service: p.Name(),
        Metric:  p.Fields()["value"],
    }

    return event
}

func init() {
    outputs.Add("riemann", func() outputs.Output {
        return &Riemann{}
    })
}