Compare commits: 1.5.3...bugfix/433 (542 commits)
[542 commits between 1.5.3 and bugfix/433; per-commit author, date, and message details omitted]
.circleci/config.yml — new file, 96 lines
@@ -0,0 +1,96 @@
---
defaults:
  defaults: &defaults
    working_directory: '/go/src/github.com/influxdata/telegraf'
  go-1_9: &go-1_9
    docker:
      - image: 'circleci/golang:1.9.7'
  go-1_10: &go-1_10
    docker:
      - image: 'circleci/golang:1.10.3'

version: 2
jobs:
  deps:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - checkout
      - restore_cache:
          key: vendor-{{ checksum "Gopkg.lock" }}
      - run: 'make deps'
      - save_cache:
          name: 'vendored deps'
          key: vendor-{{ checksum "Gopkg.lock" }}
          paths:
            - './vendor'
      - persist_to_workspace:
          root: '/go/src'
          paths:
            - '*'
  test-go-1.9:
    <<: [ *defaults, *go-1_9 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
  test-go-1.10:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
      - run: 'GOARCH=386 make test-ci'
  release:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: './scripts/release.sh'
      - store_artifacts:
          path: './artifacts'
          destination: '.'
  nightly:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: './scripts/release.sh'
      - store_artifacts:
          path: './artifacts'
          destination: '.'

workflows:
  version: 2
  build_and_release:
    jobs:
      - 'deps'
      - 'test-go-1.9':
          requires:
            - 'deps'
      - 'test-go-1.10':
          requires:
            - 'deps'
      - 'release':
          requires:
            - 'test-go-1.9'
            - 'test-go-1.10'
  nightly:
    jobs:
      - 'deps'
      - 'test-go-1.9':
          requires:
            - 'deps'
      - 'test-go-1.10':
          requires:
            - 'deps'
      - 'nightly':
          requires:
            - 'test-go-1.9'
            - 'test-go-1.10'
    triggers:
      - schedule:
          cron: "0 7 * * *"
          filters:
            branches:
              only:
                - master
.github/ISSUE_TEMPLATE.md — deleted file, 44 lines
@@ -1,44 +0,0 @@
## Directions

GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.

Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
Erase the other section and everything on and above this line.

*Please note, the quickest way to fix a bug is to open a Pull Request.*

## Bug report

### Relevant telegraf.conf:

### System info:

[Include Telegraf version, operating system name, and other relevant details]

### Steps to reproduce:

1. ...
2. ...

### Expected behavior:

### Actual behavior:

### Additional info:

[Include gist of relevant config, logs, etc.]

## Feature Request

Opening a feature request kicks off a discussion.

### Proposal:

### Current behavior:

### Desired behavior:

### Use case: [Why is this important (helps with prioritizing requests)]
.github/ISSUE_TEMPLATE/Bug_report.md — new file, 24 lines
@@ -0,0 +1,24 @@
---
name: Bug report
about: Create a report to help us improve

---

### Relevant telegraf.conf:

### System info:

[Include Telegraf version, operating system name, and other relevant details]

### Steps to reproduce:

1. ...
2. ...

### Expected behavior:

### Actual behavior:

### Additional info:

[Include gist of relevant config, logs, etc.]
.github/ISSUE_TEMPLATE/Feature_request.md — new file, 17 lines
@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project

---

## Feature Request

Opening a feature request kicks off a discussion.

### Proposal:

### Current behavior:

### Desired behavior:

### Use case: [Why is this important (helps with prioritizing requests)]
.gitignore — 10 lines changed
@@ -1,7 +1,5 @@
-build
-tivan
-.vagrant
+/build
+/telegraf
.idea
*~
*#
+/telegraf.exe
+/telegraf.gz
+/vendor
CHANGELOG.md — 327 lines changed
@@ -1,15 +1,327 @@
-## v1.5 [unreleased]
+## v1.8 [unreleased]

### Release Notes

### New Inputs

- [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu

### New Aggregators

- [valuecounter](./plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212 (see the sketch after the feature list below)

### Features

- [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input.
- [#4160](https://github.com/influxdata/telegraf/pull/4160): Add tengine input plugin.
- [#4262](https://github.com/influxdata/telegraf/pull/4262): Add power draw field to nvidia_smi plugin.
- [#4271](https://github.com/influxdata/telegraf/pull/4271): Add support for solr 7 to the solr input.
- [#4281](https://github.com/influxdata/telegraf/pull/4281): Add owner tag on partitions in burrow input.
- [#4259](https://github.com/influxdata/telegraf/pull/4259): Add container status tag to docker input.
- [#3523](https://github.com/influxdata/telegraf/pull/3523): Add valuecounter aggregator plugin.
- [#4307](https://github.com/influxdata/telegraf/pull/4307): Add new measurement with results of pgrep lookup to procstat input.
- [#4311](https://github.com/influxdata/telegraf/pull/4311): Add support for comma in logparser timestamp format.
- [#4292](https://github.com/influxdata/telegraf/pull/4292): Add path tag to tail input plugin.
- [#4322](https://github.com/influxdata/telegraf/pull/4322): Add log message when tail is added or removed from a file.
- [#4267](https://github.com/influxdata/telegraf/pull/4267): Add option to use counter time in win perf counters.
- [#4343](https://github.com/influxdata/telegraf/pull/4343): Add energy and power field and device id tag to fibaro input.
- [#4347](https://github.com/influxdata/telegraf/pull/4347): Add http path configuration for OpenTSDB output.
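For illustration only (not part of the changelog itself), a minimal sketch of enabling the new valuecounter aggregator; the counted field name is a placeholder, and the plugin README remains the authoritative reference:

```toml
[[aggregators.valuecounter]]
  ## Standard aggregator options.
  period = "30s"
  drop_original = false
  ## Count occurrences of each distinct value of these fields
  ## ("status" is an example field name, not from the release notes).
  fields = ["status"]
```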

## v1.7.1 [unreleased]

### Bugfixes

- [#4277](https://github.com/influxdata/telegraf/pull/4277): Treat sigterm as a clean shutdown signal.
- [#4284](https://github.com/influxdata/telegraf/pull/4284): Fix selection of tags under nested objects in the JSON parser.
- [#4135](https://github.com/influxdata/telegraf/issues/4135): Fix postfix input handling multi-level queues.
- [#4334](https://github.com/influxdata/telegraf/pull/4334): Fix syslog timestamp parsing with single digit day of month.
- [#2910](https://github.com/influxdata/telegraf/issues/2910): Handle mysql input variations in the user_statistics collecting.
- [#4293](https://github.com/influxdata/telegraf/issues/4293): Fix minmax and basicstats aggregators to use uint64.
- [#4290](https://github.com/influxdata/telegraf/issues/4290): Document swap input plugin.

## v1.7 [2018-06-12]

### Release Notes

- The `cassandra` input plugin has been deprecated in favor of the `jolokia2`
  input plugin, which is much more configurable and performant. There is
  an [example configuration](./plugins/inputs/jolokia2/examples) to help you
  get started.

- For plugins supporting TLS, you can now specify the certificate and keys
  using `tls_ca`, `tls_cert`, and `tls_key`. These options behave the same as
  the now-deprecated `ssl` forms (a configuration sketch follows these notes).
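For illustration only, a minimal sketch of the new-style TLS options in a telegraf.conf input section; the choice of the redis input and the file paths are placeholders:

```toml
[[inputs.redis]]
  servers = ["tcp://localhost:6379"]

  ## New-style TLS settings, replacing the deprecated ssl_* options
  ## (paths are illustrative).
  tls_ca = "/etc/telegraf/ca.pem"
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"
```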
### New Inputs

- [aurora](./plugins/inputs/aurora/README.md) - Contributed by @influxdata
- [burrow](./plugins/inputs/burrow/README.md) - Contributed by @arkady-emelyanov
- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
- [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry/README.md) - Contributed by @ajhai
- [mcrouter](./plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin
- [syslog](./plugins/inputs/syslog/README.md) - Contributed by @influxdata

### New Processors

- [converter](./plugins/processors/converter/README.md) - Contributed by @influxdata
- [regex](./plugins/processors/regex/README.md) - Contributed by @44px
- [topk](./plugins/processors/topk/README.md) - Contributed by @mirath

### New Outputs

- [http](./plugins/outputs/http/README.md) - Contributed by @Dark0096
- [application_insights](./plugins/outputs/application_insights/README.md) - Contributed by @karolz-ms

### Features

- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non-fixed-length index suffixes.
- [#4035](https://github.com/influxdata/telegraf/pull/4035): Add server version tag to docker input.
- [#4044](https://github.com/influxdata/telegraf/pull/4044): Add support for LeoFS 1.4 to leofs input.
- [#4068](https://github.com/influxdata/telegraf/pull/4068): Add parameter to force the interval of gather for sysstat.
- [#3877](https://github.com/influxdata/telegraf/pull/3877): Support busybox ping in the ping input.
- [#4077](https://github.com/influxdata/telegraf/pull/4077): Add input plugin for McRouter.
- [#4096](https://github.com/influxdata/telegraf/pull/4096): Add topk processor plugin.
- [#4114](https://github.com/influxdata/telegraf/pull/4114): Add cursor metrics to mongodb input.
- [#3455](https://github.com/influxdata/telegraf/pull/3455): Add tag/integer pair for result to net_response.
- [#4010](https://github.com/influxdata/telegraf/pull/3455): Add application_insights output plugin.
- [#4167](https://github.com/influxdata/telegraf/pull/4167): Added several important elasticsearch cluster health metrics.
- [#4094](https://github.com/influxdata/telegraf/pull/4094): Add batch mode to mqtt output.
- [#4158](https://github.com/influxdata/telegraf/pull/4158): Add aurora input plugin.
- [#3839](https://github.com/influxdata/telegraf/pull/3839): Add regex processor plugin.
- [#4165](https://github.com/influxdata/telegraf/pull/4165): Add support for Graphite 1.1 tags.
- [#4162](https://github.com/influxdata/telegraf/pull/4162): Add timeout option to sensors input.
- [#3489](https://github.com/influxdata/telegraf/pull/3489): Add burrow input plugin.
- [#3969](https://github.com/influxdata/telegraf/pull/3969): Add option to unbound module to use threads as tags.
- [#4183](https://github.com/influxdata/telegraf/pull/4183): Add support for TLS and username/password auth to aerospike input.
- [#4190](https://github.com/influxdata/telegraf/pull/4190): Add special syslog timestamp parser to grok parser that uses current year.
- [#4181](https://github.com/influxdata/telegraf/pull/4181): Add syslog input plugin.
- [#4212](https://github.com/influxdata/telegraf/pull/4212): Print the enabled aggregator and processor plugins on startup.
- [#3994](https://github.com/influxdata/telegraf/pull/3994): Add static routing_key option to amqp output.
- [#3995](https://github.com/influxdata/telegraf/pull/3995): Add passive mode exchange declaration option to amqp consumer input.
- [#4216](https://github.com/influxdata/telegraf/pull/4216): Add counter fields to pf input.

### Bugfixes

- [#4018](https://github.com/influxdata/telegraf/pull/4018): Write to working file outputs if any files are not writeable.
- [#4036](https://github.com/influxdata/telegraf/pull/4036): Add all win_perf_counters fields for a series in a single metric.
- [#4118](https://github.com/influxdata/telegraf/pull/4118): Report results of dns_query instead of 0ms on timeout.
- [#4155](https://github.com/influxdata/telegraf/pull/4155): Add consul service tags to metric.
- [#2879](https://github.com/influxdata/telegraf/issues/2879): Fix wildcards and multi-instance processes in win_perf_counters.
- [#2468](https://github.com/influxdata/telegraf/issues/2468): Fix crash on 32-bit Windows in win_perf_counters.
- [#4198](https://github.com/influxdata/telegraf/issues/4198): Fix win_perf_counters not collecting at every interval.
- [#4227](https://github.com/influxdata/telegraf/issues/4227): Use same flags for all BSD family ping variants.
- [#4266](https://github.com/influxdata/telegraf/issues/4266): Remove tags with empty values from Wavefront output.

## v1.6.4 [2018-06-05]

### Bugfixes

- [#4203](https://github.com/influxdata/telegraf/issues/4203): Fix snmp overriding of auto-configured table fields.
- [#4218](https://github.com/influxdata/telegraf/issues/4218): Fix uint support in cloudwatch output.
- [#4188](https://github.com/influxdata/telegraf/pull/4188): Fix documentation of instance_name option in varnish input.
- [#4195](https://github.com/influxdata/telegraf/pull/4195): Revert to previous aerospike library version due to memory leak.

## v1.6.3 [2018-05-21]

### Bugfixes

- [#4127](https://github.com/influxdata/telegraf/issues/4127): Fix intermittent panic in aerospike input.
- [#4130](https://github.com/influxdata/telegraf/issues/4130): Fix connection leak in jolokia2_agent.
- [#4136](https://github.com/influxdata/telegraf/pull/4130): Fix jolokia2 timeout parsing.
- [#4142](https://github.com/influxdata/telegraf/pull/4142): Fix error parsing dropwizard metrics.
- [#4149](https://github.com/influxdata/telegraf/issues/4149): Fix librato output support for uint and bool.
- [#4176](https://github.com/influxdata/telegraf/pull/4176): Fix waitgroup deadlock if url is incorrect in apache input.

## v1.6.2 [2018-05-08]

### Bugfixes

- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
- [#4091](https://github.com/influxdata/telegraf/pull/4091): Fix handling of uint64 in datadog output.
- [#4099](https://github.com/influxdata/telegraf/pull/4099): Ignore UTF8 BOM in JSON parser.
- [#4104](https://github.com/influxdata/telegraf/issues/4104): Fix case for slave metrics in mysql input.
- [#4110](https://github.com/influxdata/telegraf/issues/4110): Fix uint support in cratedb output.

## v1.6.1 [2018-04-23]

### Bugfixes

- [#3835](https://github.com/influxdata/telegraf/issues/3835): Report mem input fields as gauges instead of counters.
- [#4030](https://github.com/influxdata/telegraf/issues/4030): Fix graphite outputs unsigned integers in wrong format.
- [#4043](https://github.com/influxdata/telegraf/issues/4043): Report available fields if utmp is unreadable.
- [#4039](https://github.com/influxdata/telegraf/issues/4039): Fix potential "no fields" error writing to outputs.
- [#4037](https://github.com/influxdata/telegraf/issues/4037): Fix uptime reporting in system input when run inside docker.
- [#3750](https://github.com/influxdata/telegraf/issues/3750): Fix mem input "cannot allocate memory" error on FreeBSD-based systems.
- [#4056](https://github.com/influxdata/telegraf/pull/4056): Fix duplicate tags when overriding an existing tag.
- [#4062](https://github.com/influxdata/telegraf/pull/4062): Add server argument as first argument in unbound input.
- [#4063](https://github.com/influxdata/telegraf/issues/4063): Fix handling of floats with multiple leading zeroes.
- [#4064](https://github.com/influxdata/telegraf/issues/4064): Return errors in mongodb SSL/TLS configuration.

## v1.6 [2018-04-16]

### Release Notes

- The `mysql` input plugin has been updated to fix a number of type conversion
  issues. This may cause a `field type error` when inserting into InfluxDB due
  to the change of types.

  To address this we have introduced a new `metric_version` option to control
  enabling the new format. For in-depth recommendations on upgrading please
  reference the [mysql plugin documentation](./plugins/inputs/mysql/README.md#metric-version).

  It is encouraged to migrate to the new model when possible as the old version
  is deprecated and will be removed in a future version.

- The `postgresql` plugins now default to using a persistent connection to the database.
  In environments where TCP connections are terminated, the `max_lifetime`
  setting should be set less than the collection `interval` to prevent errors.

- The `sqlserver` input plugin has a new query and data model that can be enabled
  by setting `query_version = 2`. It is encouraged to migrate to the new
  model when possible as the old version is deprecated and will be removed in
  a future version.

- An option has been added to the `openldap` input plugin that reverses the metric
  name to improve grouping. This change is enabled when `reverse_metric_names = true`
  is set. It is encouraged to enable this option when possible as the old
  ordering is deprecated.

- The new `http` input configured with `data_format = "json"` can perform the
  same task as the now-deprecated `httpjson` input (a combined sketch of these
  options follows these notes).
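For illustration only, a hedged sketch of how the migration options above appear in telegraf.conf; the server addresses and URLs are placeholders, and each plugin's README remains the authoritative reference:

```toml
[[inputs.mysql]]
  servers = ["tcp(127.0.0.1:3306)/"]
  ## Opt in to the new, type-consistent data model.
  metric_version = 2

[[inputs.postgresql]]
  address = "host=localhost user=postgres sslmode=disable"
  ## Keep the persistent connection shorter-lived than the agent's
  ## collection interval where TCP connections are terminated.
  max_lifetime = "5s"

[[inputs.http]]
  ## Replaces the deprecated httpjson input (URL is illustrative).
  urls = ["http://localhost:8080/metrics"]
  data_format = "json"
```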
### New Inputs

- [http](./plugins/inputs/http/README.md) - Thanks to @grange74
- [ipset](./plugins/inputs/ipset/README.md) - Thanks to @sajoupa
- [nats](./plugins/inputs/nats/README.md) - Thanks to @mjs & @levex

### New Processors

- [override](./plugins/processors/override/README.md) - Thanks to @KarstenSchnitter

### New Parsers

- [dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard) - Thanks to @atzoum

### Features

- [#3551](https://github.com/influxdata/telegraf/pull/3551): Add health status mapping from string to int in elasticsearch input.
- [#3580](https://github.com/influxdata/telegraf/pull/3580): Add control over which stats to gather in basicstats aggregator.
- [#3596](https://github.com/influxdata/telegraf/pull/3596): Add messages_delivered_get to rabbitmq input.
- [#3632](https://github.com/influxdata/telegraf/pull/3632): Add wired field to mem input.
- [#3619](https://github.com/influxdata/telegraf/pull/3619): Add support for gathering exchange metrics to the rabbitmq input.
- [#3565](https://github.com/influxdata/telegraf/pull/3565): Add support for additional metrics on Linux in zfs input.
- [#3524](https://github.com/influxdata/telegraf/pull/3524): Add available_entropy field to kernel input plugin.
- [#3643](https://github.com/influxdata/telegraf/pull/3643): Add user privilege level setting to IPMI sensors.
- [#2701](https://github.com/influxdata/telegraf/pull/2701): Use persistent connection to postgresql database.
- [#2846](https://github.com/influxdata/telegraf/pull/2846): Add support for dropwizard input format.
- [#3666](https://github.com/influxdata/telegraf/pull/3666): Add container health metrics to docker input.
- [#3687](https://github.com/influxdata/telegraf/pull/3687): Add support for using globs in devices list of diskio input plugin.
- [#2754](https://github.com/influxdata/telegraf/pull/2754): Allow running as console application on Windows.
- [#3703](https://github.com/influxdata/telegraf/pull/3703): Add listener counts and node running status to rabbitmq input.
- [#3674](https://github.com/influxdata/telegraf/pull/3674): Add NATS Monitoring Input Plugin.
- [#3702](https://github.com/influxdata/telegraf/pull/3702): Add ability to select which queues will be gathered in rabbitmq input.
- [#3726](https://github.com/influxdata/telegraf/pull/3726): Add support for setting bsd source address to the ping input.
- [#3346](https://github.com/influxdata/telegraf/pull/3346): Add Ipset input plugin.
- [#3719](https://github.com/influxdata/telegraf/pull/3719): Add TLS and HTTP basic auth to prometheus_client output.
- [#3618](https://github.com/influxdata/telegraf/pull/3618): Add new sqlserver output data model.
- [#3559](https://github.com/influxdata/telegraf/pull/3559): Add native Go method for finding pids to procstat.
- [#3722](https://github.com/influxdata/telegraf/pull/3722): Add additional metrics and reverse metric names option to openldap.
- [#3769](https://github.com/influxdata/telegraf/pull/3769): Add TLS support to the mesos input plugin.
- [#3546](https://github.com/influxdata/telegraf/pull/3546): Add http input plugin.
- [#3781](https://github.com/influxdata/telegraf/pull/3781): Add keep alive support to the TCP mode of statsd.
- [#3783](https://github.com/influxdata/telegraf/pull/3783): Support deadline in ping plugin.
- [#3765](https://github.com/influxdata/telegraf/pull/3765): Add option to disable labels in prometheus output for string fields.
- [#3808](https://github.com/influxdata/telegraf/pull/3808): Add shard server stats to the mongodb input plugin.
- [#3713](https://github.com/influxdata/telegraf/pull/3713): Add server option to unbound plugin.
- [#3804](https://github.com/influxdata/telegraf/pull/3804): Convert boolean metric values to float in datadog output.
- [#3799](https://github.com/influxdata/telegraf/pull/3799): Add Solr 3 compatibility.
- [#3797](https://github.com/influxdata/telegraf/pull/3797): Add sum stat to basicstats aggregator.
- [#3626](https://github.com/influxdata/telegraf/pull/3626): Add ability to override proxy from environment in http response.
- [#3853](https://github.com/influxdata/telegraf/pull/3853): Add host to ping timeout log message.
- [#3773](https://github.com/influxdata/telegraf/pull/3773): Add override processor.
- [#3814](https://github.com/influxdata/telegraf/pull/3814): Add status_code and result tags and result_type field to http_response input.
- [#3880](https://github.com/influxdata/telegraf/pull/3880): Added config flag to skip collection of network protocol metrics.
- [#3927](https://github.com/influxdata/telegraf/pull/3927): Add TLS support to kapacitor input.
- [#3496](https://github.com/influxdata/telegraf/pull/3496): Add HTTP basic auth support to the http_listener input.
- [#3452](https://github.com/influxdata/telegraf/issues/3452): Tags in output InfluxDB Line Protocol are now sorted.
- [#3631](https://github.com/influxdata/telegraf/issues/3631): InfluxDB Line Protocol parser now accepts DOS line endings.
- [#2496](https://github.com/influxdata/telegraf/issues/2496): An option has been added to skip database creation in the InfluxDB output.
- [#3366](https://github.com/influxdata/telegraf/issues/3366): Add support for connecting to InfluxDB over a unix domain socket.
- [#3946](https://github.com/influxdata/telegraf/pull/3946): Add optional unsigned integer support to the influx data format.
- [#3811](https://github.com/influxdata/telegraf/pull/3811): Add TLS support to zookeeper input.
- [#2737](https://github.com/influxdata/telegraf/issues/2737): Add filters for container state to docker input.

### Bugfixes

- [#1896](https://github.com/influxdata/telegraf/issues/1896): Fix various mysql data type conversions.
- [#3810](https://github.com/influxdata/telegraf/issues/3810): Fix metric buffer limit in internal plugin after reload.
- [#3801](https://github.com/influxdata/telegraf/issues/3801): Fix panic in http_response on invalid regex.
- [#3973](https://github.com/influxdata/telegraf/issues/3873): Fix socket_listener setting ReadBufferSize on tcp sockets.
- [#1575](https://github.com/influxdata/telegraf/issues/1575): Add tag for target url to phpfpm input.
- [#3868](https://github.com/influxdata/telegraf/issues/3868): Fix cannot unmarshal object error in DC/OS input.
- [#3648](https://github.com/influxdata/telegraf/issues/3648): Fix InfluxDB output not able to reconnect when server address changes.
- [#3957](https://github.com/influxdata/telegraf/issues/3957): Fix parsing of dos line endings in the smart input.
- [#3754](https://github.com/influxdata/telegraf/issues/3754): Fix precision truncation when no timestamp included.
- [#3655](https://github.com/influxdata/telegraf/issues/3655): Fix SNMPv3 connection with Cisco ASA 5515 in snmp input.
- [#3981](https://github.com/influxdata/telegraf/pull/3981): Export all vars defined in /etc/default/telegraf.
- [#4004](https://github.com/influxdata/telegraf/issues/4004): Allow grok pattern to contain newlines.

## v1.5.3 [2018-03-14]

### Bugfixes

- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
- [#3778](https://github.com/influxdata/telegraf/issues/3778): Fix ping plugin not reporting zero durations.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Disable keepalive in mqtt output to prevent deadlock.
- [#3786](https://github.com/influxdata/telegraf/pull/3786): Fix collation difference in sqlserver input.
- [#3871](https://github.com/influxdata/telegraf/pull/3871): Fix uptime metric in passenger input plugin.
- [#3851](https://github.com/influxdata/telegraf/issues/3851): Add output of stderr in case of error to exec log message.

## v1.5.2 [2018-01-30]

### Bugfixes

- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
- [#3680](https://github.com/influxdata/telegraf/pull/3680): Reconnect before sending graphite metrics if disconnected.
- [#3693](https://github.com/influxdata/telegraf/pull/3693): Align aggregator period with internal ticker to avoid skipping metrics.
- [#3629](https://github.com/influxdata/telegraf/issues/3629): Fix a potential deadlock when using aggregators.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Limit wait time for writes in mqtt output.
- [#3698](https://github.com/influxdata/telegraf/issues/3698): Revert change in graphite output where dot in field key was replaced by underscore.
- [#3710](https://github.com/influxdata/telegraf/issues/3710): Add timeout to wavefront output write.
- [#3725](https://github.com/influxdata/telegraf/issues/3725): Exclude master_replid fields from redis input.

## v1.5.1 [2018-01-10]

### Bugfixes

- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
- [#3593](https://github.com/influxdata/telegraf/pull/3593): Set Content-Type charset in influxdb output and allow it to be overridden.
- [#3594](https://github.com/influxdata/telegraf/pull/3594): Document permissions setup for postfix input.
- [#3633](https://github.com/influxdata/telegraf/pull/3633): Fix deliver_get field in rabbitmq input.
- [#3607](https://github.com/influxdata/telegraf/issues/3607): Escape environment variables during config toml parsing.

## v1.5 [2017-12-14]

### New Plugins

- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
- [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv
-- [cratedb](./plugins/outputs/wavefront/README.md) - Thanks to @felixge
+- [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge
- [dcos](./plugins/inputs/dcos/README.md) - Thanks to @influxdata
- [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
- [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
- [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
-- [pf](./plugins/inputs/pf/README.md) Thanks to @nferch
+- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
- [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
- [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello

@@ -28,6 +340,11 @@
plugin is deprecated and will be removed in a future release. Users of this
plugin are encouraged to update to the new `jolokia2` plugin.

+- In the `postgresql` and `postgresql_extensible` plugins, the type of the oid
+  data type has changed from string to integer. It is recommended to drop
+  affected fields until a new shard is started. For details on how to
+  work around this issue please see [#3622](https://github.com/influxdata/telegraf/issues/3622).

### Features

- [#3170](https://github.com/influxdata/telegraf/pull/3170): Add support for sharding based on metric name.

@@ -78,6 +395,7 @@

- [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
- [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.

### Bugfixes

@@ -92,6 +410,9 @@

- [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
- [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
- [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.

## v1.4.5 [2017-12-01]
CONTRIBUTING.md

@@ -30,9 +30,9 @@ which can be found [on our website](http://influxdb.com/community/cla.html)

Assuming you can already build the project, run these in the telegraf directory:

-1. `go get github.com/sparrc/gdm`
-1. `gdm restore`
-1. `GOOS=linux gdm save`
+1. `go get -u github.com/golang/dep/cmd/dep`
+2. `dep ensure`
+3. `dep ensure -add github.com/[dependency]/[new-package]`

(A sketch of the `Gopkg.toml` constraint this records follows.)
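For illustration, the kind of constraint `dep ensure -add` records in `Gopkg.toml`; the dependency name and version here are hypothetical:

```toml
# Gopkg.toml fragment written by `dep ensure -add`; name and version are examples.
[[constraint]]
  name = "github.com/example/somepkg"
  version = "1.2.0"
```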
## Input Plugins

@@ -79,7 +79,10 @@ func (s *Simple) Description() string {
}

func (s *Simple) SampleConfig() string {
-	return "ok = true # indicate if everything is fine"
+	return `
+  ## Indicate if everything is fine
+  ok = true
+`
}

func (s *Simple) Gather(acc telegraf.Accumulator) error {

@@ -97,6 +100,13 @@ func init() {
}
```

+### Input Plugin Development
+
+* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker dev environment
+  using docker-compose.
+* ***[Optional]*** When developing a plugin, add a `dev` directory with a `docker-compose.yml` and `telegraf.conf`
+  as well as any other supporting files, where sensible.

## Adding Typed Metrics

In addition to the `AddFields` function, the accumulator also supports an

@@ -167,7 +177,7 @@ and `Stop()` methods.

### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
-  `inputs.ServiceInput` interface.
+  [`telegraf.ServiceInput`](https://godoc.org/github.com/influxdata/telegraf#ServiceInput) interface.

## Output Plugins

@@ -207,7 +217,9 @@ func (s *Simple) Description() string {
}

func (s *Simple) SampleConfig() string {
-	return "url = localhost"
+	return `
+  ok = true
+`
}

func (s *Simple) Connect() error {
Godeps — deleted file, 92 lines
@@ -1,92 +0,0 @@
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
Gopkg.lock — generated, new file, 973 lines
@@ -0,0 +1,973 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

[973 lines of generated [[projects]] dependency pins omitted]
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-redis/redis"
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
"internal/consistenthash",
|
||||
"internal/hashtag",
|
||||
"internal/pool",
|
||||
"internal/proto",
|
||||
"internal/singleflight",
|
||||
"internal/util"
|
||||
]
|
||||
revision = "83fb42932f6145ce52df09860384a4653d2d332a"
|
||||
version = "v6.12.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-sql-driver/mysql"
|
||||
packages = ["."]
|
||||
revision = "d523deb1b23d913de5bdada721a6071e71283618"
|
||||
version = "v1.4.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gobwas/glob"
|
||||
packages = [
|
||||
".",
|
||||
"compiler",
|
||||
"match",
|
||||
"syntax",
|
||||
"syntax/ast",
|
||||
"syntax/lexer",
|
||||
"util/runes",
|
||||
"util/strings"
|
||||
]
|
||||
revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
|
||||
version = "v0.2.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp"
|
||||
]
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/snappy"
|
||||
packages = ["."]
|
||||
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/google/go-cmp"
|
||||
packages = [
|
||||
"cmp",
|
||||
"cmp/internal/diff",
|
||||
"cmp/internal/function",
|
||||
"cmp/internal/value"
|
||||
]
|
||||
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/context"
|
||||
packages = ["."]
|
||||
revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/mux"
|
||||
packages = ["."]
|
||||
revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
|
||||
version = "v1.6.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hailocab/go-hostpool"
|
||||
packages = ["."]
|
||||
revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/hashicorp/consul"
|
||||
packages = ["api"]
|
||||
revision = "5174058f0d2bda63fa5198ab96c33d9a909c58ed"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-cleanhttp"
|
||||
packages = ["."]
|
||||
revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-rootcerts"
|
||||
packages = ["."]
|
||||
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/hashicorp/serf"
|
||||
packages = ["coordinate"]
|
||||
revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
|
||||
version = "v0.8.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/influxdata/go-syslog"
|
||||
packages = [
|
||||
"rfc5424",
|
||||
"rfc5425"
|
||||
]
|
||||
revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/influxdata/tail"
|
||||
packages = [
|
||||
".",
|
||||
"ratelimiter",
|
||||
"util",
|
||||
"watch",
|
||||
"winfile"
|
||||
]
|
||||
revision = "c43482518d410361b6c383d7aebce33d0471d7bc"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/influxdata/toml"
|
||||
packages = [
|
||||
".",
|
||||
"ast"
|
||||
]
|
||||
revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/influxdata/wlog"
|
||||
packages = ["."]
|
||||
revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jackc/pgx"
|
||||
packages = [
|
||||
".",
|
||||
"chunkreader",
|
||||
"internal/sanitize",
|
||||
"pgio",
|
||||
"pgproto3",
|
||||
"pgtype",
|
||||
"stdlib"
|
||||
]
|
||||
revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27"
|
||||
version = "v3.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kardianos/osext"
|
||||
packages = ["."]
|
||||
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kardianos/service"
|
||||
packages = ["."]
|
||||
revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kballard/go-shellquote"
|
||||
packages = ["."]
|
||||
revision = "95032a82bc518f77982ea72343cc1ade730072f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/logfmt"
|
||||
packages = ["."]
|
||||
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mailru/easyjson"
|
||||
packages = [
|
||||
".",
|
||||
"buffer",
|
||||
"jlexer",
|
||||
"jwriter"
|
||||
]
|
||||
revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/miekg/dns"
|
||||
packages = ["."]
|
||||
revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
|
||||
version = "v1.0.8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/go-homedir"
|
||||
packages = ["."]
|
||||
revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/multiplay/go-ts3"
|
||||
packages = ["."]
|
||||
revision = "d0d44555495c8776880a17e439399e715a4ef319"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/naoina/go-stringutil"
|
||||
packages = ["."]
|
||||
revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nats-io/gnatsd"
|
||||
packages = [
|
||||
"conf",
|
||||
"logger",
|
||||
"server",
|
||||
"server/pse",
|
||||
"util"
|
||||
]
|
||||
revision = "add6d7930ae6d4bff8823b28999ea87bf1bfd23d"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nats-io/go-nats"
|
||||
packages = [
|
||||
".",
|
||||
"encoders/builtin",
|
||||
"util"
|
||||
]
|
||||
revision = "062418ea1c2181f52dc0f954f6204370519a868b"
|
||||
version = "v1.5.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nats-io/nuid"
|
||||
packages = ["."]
|
||||
revision = "289cccf02c178dc782430d534e3c1f5b72af807f"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nsqio/go-nsq"
|
||||
packages = ["."]
|
||||
revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f"
|
||||
version = "v1.0.7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/opentracing-contrib/go-observer"
|
||||
packages = ["."]
|
||||
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
packages = [
|
||||
".",
|
||||
"ext",
|
||||
"log"
|
||||
]
|
||||
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/openzipkin/zipkin-go-opentracing"
|
||||
packages = [
|
||||
".",
|
||||
"flag",
|
||||
"thrift/gen-go/scribe",
|
||||
"thrift/gen-go/zipkincore",
|
||||
"types",
|
||||
"wire"
|
||||
]
|
||||
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
|
||||
version = "v0.3.4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pierrec/lz4"
|
||||
packages = [
|
||||
".",
|
||||
"internal/xxh32"
|
||||
]
|
||||
revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
|
||||
version = "v2.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp"
|
||||
]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"log",
|
||||
"model"
|
||||
]
|
||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
]
|
||||
revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/rcrowley/go-metrics"
|
||||
packages = ["."]
|
||||
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/samuel/go-zookeeper"
|
||||
packages = ["zk"]
|
||||
revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/shirou/gopsutil"
|
||||
packages = [
|
||||
"cpu",
|
||||
"disk",
|
||||
"host",
|
||||
"internal/common",
|
||||
"load",
|
||||
"mem",
|
||||
"net",
|
||||
"process"
|
||||
]
|
||||
revision = "eeb1d38d69593f121e060d24d17f7b1f0936b203"
|
||||
version = "v2.18.05"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/shirou/w32"
|
||||
packages = ["."]
|
||||
revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
|
||||
version = "v1.0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/soniah/gosnmp"
|
||||
packages = ["."]
|
||||
revision = "bcf840db66be7d64bf96c3c0e075c92e3d98f793"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/streadway/amqp"
|
||||
packages = ["."]
|
||||
revision = "e5adc2ada8b8efff032bf61173a233d143e9318e"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/objx"
|
||||
packages = ["."]
|
||||
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
|
||||
version = "v0.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"mock",
|
||||
"require"
|
||||
]
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tidwall/gjson"
|
||||
packages = ["."]
|
||||
revision = "afaeb9562041a8018c74e006551143666aed08bf"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/tidwall/match"
|
||||
packages = ["."]
|
||||
revision = "1731857f09b1f38450e2c12409748407822dc6be"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/vjeantet/grok"
|
||||
packages = ["."]
|
||||
revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/wvanbergen/kafka"
|
||||
packages = ["consumergroup"]
|
||||
revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/wvanbergen/kazoo-go"
|
||||
packages = ["."]
|
||||
revision = "f72d8611297a7cf105da904c04198ad701a60101"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/yuin/gopher-lua"
|
||||
packages = [
|
||||
".",
|
||||
"ast",
|
||||
"parse",
|
||||
"pm"
|
||||
]
|
||||
revision = "ca850f594eaafa5468da2bd53b865e4ee55be18b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/zensqlmonitor/go-mssqldb"
|
||||
packages = ["."]
|
||||
revision = "e8fbf836e44e86764eba398361d1825651709547"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"bcrypt",
|
||||
"blowfish",
|
||||
"ed25519",
|
||||
"ed25519/internal/edwards25519",
|
||||
"md4",
|
||||
"pbkdf2",
|
||||
"ssh/terminal"
|
||||
]
|
||||
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"bpf",
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"html",
|
||||
"html/atom",
|
||||
"html/charset",
|
||||
"http/httpguts",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/iana",
|
||||
"internal/socket",
|
||||
"internal/socks",
|
||||
"internal/timeseries",
|
||||
"ipv4",
|
||||
"ipv6",
|
||||
"proxy",
|
||||
"trace",
|
||||
"websocket"
|
||||
]
|
||||
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
"windows/registry",
|
||||
"windows/svc",
|
||||
"windows/svc/debug",
|
||||
"windows/svc/eventlog",
|
||||
"windows/svc/mgr"
|
||||
]
|
||||
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"encoding",
|
||||
"encoding/charmap",
|
||||
"encoding/htmlindex",
|
||||
"encoding/internal",
|
||||
"encoding/internal/identifier",
|
||||
"encoding/japanese",
|
||||
"encoding/korean",
|
||||
"encoding/simplifiedchinese",
|
||||
"encoding/traditionalchinese",
|
||||
"encoding/unicode",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"internal/utf8internal",
|
||||
"language",
|
||||
"runes",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = ["cloudsql"]
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "32ee49c4dd805befd833990acba36cb75042378c"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [
|
||||
".",
|
||||
"balancer",
|
||||
"balancer/base",
|
||||
"balancer/roundrobin",
|
||||
"channelz",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"encoding",
|
||||
"encoding/proto",
|
||||
"grpclb/grpc_lb_v1/messages",
|
||||
"grpclog",
|
||||
"internal",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
"naming",
|
||||
"peer",
|
||||
"resolver",
|
||||
"resolver/dns",
|
||||
"resolver/passthrough",
|
||||
"stats",
|
||||
"status",
|
||||
"tap",
|
||||
"transport"
|
||||
]
|
||||
revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b"
|
||||
version = "v1.12.2"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/alecthomas/kingpin.v2"
|
||||
packages = ["."]
|
||||
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
|
||||
version = "v2.2.6"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/asn1-ber.v1"
|
||||
packages = ["."]
|
||||
revision = "379148ca0225df7a432012b8df0355c2a2063ac0"
|
||||
version = "v1.2"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/fatih/pool.v2"
|
||||
packages = ["."]
|
||||
revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f"
|
||||
version = "v2.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
packages = ["."]
|
||||
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
|
||||
version = "v1.4.7"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/gorethink/gorethink.v3"
|
||||
packages = [
|
||||
".",
|
||||
"encoding",
|
||||
"ql2",
|
||||
"types"
|
||||
]
|
||||
revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b"
|
||||
version = "v3.0.5"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/ldap.v2"
|
||||
packages = ["."]
|
||||
revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9"
|
||||
version = "v2.5.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/mgo.v2"
|
||||
packages = [
|
||||
".",
|
||||
"bson",
|
||||
"internal/json",
|
||||
"internal/sasl",
|
||||
"internal/scram"
|
||||
]
|
||||
revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/olivere/elastic.v5"
|
||||
packages = [
|
||||
".",
|
||||
"config",
|
||||
"uritemplates"
|
||||
]
|
||||
revision = "b708306d715bea9b983685e94ab4602cdc9f988b"
|
||||
version = "v5.0.69"
|
||||
|
||||
[[projects]]
|
||||
branch = "v1"
|
||||
name = "gopkg.in/tomb.v1"
|
||||
packages = ["."]
|
||||
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
243 Gopkg.toml (new file)
@@ -0,0 +1,243 @@
[[constraint]]
  name = "collectd.org"
  version = "0.3.0"

[[constraint]]
  name = "github.com/aerospike/aerospike-client-go"
  version = "^1.33.0"

[[constraint]]
  name = "github.com/amir/raidman"
  branch = "master"

[[constraint]]
  name = "github.com/apache/thrift"
  branch = "master"

[[constraint]]
  name = "github.com/aws/aws-sdk-go"
  version = "1.14.8"
  # version = "1.8.39"

[[constraint]]
  name = "github.com/bsm/sarama-cluster"
  version = "2.1.13"
  # version = "2.1.10"

[[constraint]]
  name = "github.com/couchbase/go-couchbase"
  branch = "master"

[[constraint]]
  name = "github.com/dgrijalva/jwt-go"
  version = "3.2.0"
  # version = "3.1.0"

[[constraint]]
  name = "github.com/docker/docker"
  version = "~17.03.2-ce"

[[constraint]]
  name = "github.com/docker/go-connections"
  version = "0.3.0"
  # version = "0.2.1"

[[constraint]]
  name = "github.com/eclipse/paho.mqtt.golang"
  version = "~1.1.1"
  # version = "1.1.0"

[[constraint]]
  name = "github.com/go-sql-driver/mysql"
  version = "1.4.0"
  # version = "1.3.0"

[[constraint]]
  name = "github.com/gobwas/glob"
  version = "0.2.3"
  # version = "0.2.2"

[[constraint]]
  name = "github.com/golang/protobuf"
  version = "1.1.0"
  # version = "1.0.0"

[[constraint]]
  name = "github.com/google/go-cmp"
  version = "0.2.0"
  # version = "0.1.0"

[[constraint]]
  name = "github.com/gorilla/mux"
  version = "1.6.2"
  # version = "1.6.1"

[[constraint]]
  name = "github.com/go-redis/redis"
  version = "6.12.0"

[[constraint]]
  name = "github.com/hashicorp/consul"
  version = "1.1.0"

[[constraint]]
  name = "github.com/influxdata/go-syslog"
  version = "1.0.1"

[[constraint]]
  name = "github.com/influxdata/tail"
  branch = "master"

[[constraint]]
  name = "github.com/influxdata/toml"
  branch = "master"

[[constraint]]
  name = "github.com/influxdata/wlog"
  branch = "master"

[[constraint]]
  name = "github.com/jackc/pgx"
  version = "3.1.0"

[[constraint]]
  name = "github.com/kardianos/service"
  branch = "master"

[[constraint]]
  name = "github.com/kballard/go-shellquote"
  branch = "master"

[[constraint]]
  name = "github.com/matttproud/golang_protobuf_extensions"
  version = "1.0.1"

[[constraint]]
  name = "github.com/Microsoft/ApplicationInsights-Go"
  branch = "master"

[[constraint]]
  name = "github.com/miekg/dns"
  version = "1.0.8"
  # version = "1.0.0"

[[constraint]]
  name = "github.com/multiplay/go-ts3"
  version = "1.0.0"

[[constraint]]
  name = "github.com/nats-io/gnatsd"
  version = "1.1.0"
  # version = "1.0.4"

[[constraint]]
  name = "github.com/nats-io/go-nats"
  version = "1.5.0"
  # version = "1.3.0"

[[constraint]]
  name = "github.com/nsqio/go-nsq"
  version = "1.0.7"

[[constraint]]
  name = "github.com/openzipkin/zipkin-go-opentracing"
  version = "0.3.4"
  # version = "0.3.0"

[[constraint]]
  name = "github.com/prometheus/client_golang"
  version = "0.8.0"

[[constraint]]
  name = "github.com/prometheus/client_model"
  branch = "master"

[[constraint]]
  name = "github.com/prometheus/common"
  branch = "master"

[[constraint]]
  name = "github.com/satori/go.uuid"
  version = "1.2.0"

[[constraint]]
  name = "github.com/shirou/gopsutil"
  version = "2.18.05"
  # version = "2.18.04"

[[constraint]]
  name = "github.com/Shopify/sarama"
  version = "1.17.0"
  # version = "1.15.0"

[[constraint]]
  name = "github.com/soniah/gosnmp"
  branch = "master"

[[constraint]]
  name = "github.com/StackExchange/wmi"
  version = "1.0.0"

[[constraint]]
  name = "github.com/streadway/amqp"
  branch = "master"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "1.2.2"
  # version = "1.2.1"

[[constraint]]
  name = "github.com/tidwall/gjson"
  version = "1.1.1"
  # version = "1.0.0"

[[constraint]]
  name = "github.com/vjeantet/grok"
  version = "1.0.0"

[[constraint]]
  name = "github.com/wvanbergen/kafka"
  branch = "master"

[[constraint]]
  name = "github.com/zensqlmonitor/go-mssqldb"
  branch = "master"

[[constraint]]
  name = "golang.org/x/net"
  branch = "master"

[[constraint]]
  name = "golang.org/x/sys"
  branch = "master"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "1.12.2"
  # version = "1.8.0"

[[constraint]]
  name = "gopkg.in/gorethink/gorethink.v3"
  version = "3.0.5"

[[constraint]]
  name = "gopkg.in/ldap.v2"
  version = "2.5.1"

[[constraint]]
  name = "gopkg.in/mgo.v2"
  branch = "v2"

[[constraint]]
  name = "gopkg.in/olivere/elastic.v5"
  version = "^5.0.69"
  # version = "^6.1.23"

[[constraint]]
  name = "gopkg.in/yaml.v2"
  version = "^2.2.1"

[[override]]
  source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
  name = "gopkg.in/fsnotify.v1"
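A note on the constraint syntax above: dep treats a bare version such as `1.14.8` as a caret range (`^1.14.8`), `^1.33.0` allows any `1.x` at or above `1.33.0`, and `~17.03.2-ce` pins the major and minor version. The Go sketch below illustrates the two range semantics; the `version` type and matching functions are simplified stand-ins (pre-release tags and the `0.x` special case are ignored), not dep's actual solver.

```go
package main

import "fmt"

// version is a simplified semver triple; dep's real solver also handles
// pre-release tags, build metadata, and the 0.x caret special case,
// which this sketch ignores.
type version struct{ major, minor, patch int }

// caretMatch reports whether v satisfies ^base: same major version,
// and v is at least base. (dep reads a bare "1.14.8" as "^1.14.8".)
func caretMatch(v, base version) bool {
	return v.major == base.major && !less(v, base)
}

// tildeMatch reports whether v satisfies ~base: same major and minor,
// and v is at least base.
func tildeMatch(v, base version) bool {
	return v.major == base.major && v.minor == base.minor && !less(v, base)
}

func less(a, b version) bool {
	if a.major != b.major {
		return a.major < b.major
	}
	if a.minor != b.minor {
		return a.minor < b.minor
	}
	return a.patch < b.patch
}

func main() {
	base := version{1, 33, 0}
	fmt.Println(caretMatch(version{1, 34, 2}, base)) // true: ^1.33.0 allows 1.34.x
	fmt.Println(caretMatch(version{2, 0, 0}, base))  // false: major bump excluded
	fmt.Println(tildeMatch(version{1, 33, 5}, base)) // true: ~1.33.0 allows patch bumps
	fmt.Println(tildeMatch(version{1, 34, 0}, base)) // false: minor bump excluded
}
```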
139 Makefile
@@ -2,14 +2,16 @@ PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
BUILDFLAGS ?=

ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)

LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)

@@ -20,114 +22,85 @@ all:
    $(MAKE) telegraf

deps:
    go get github.com/sparrc/gdm
    gdm restore
    go get -u github.com/golang/lint/golint
    go get -u github.com/golang/dep/cmd/dep
    dep ensure

telegraf:
    go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
    go build -ldflags "$(LDFLAGS)" ./cmd/telegraf

go-install:
    go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf

install: telegraf
    mkdir -p $(DESTDIR)$(PREFIX)/bin/
    cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
    cp telegraf $(DESTDIR)$(PREFIX)/bin/

test:
    go test -short ./...

fmt:
    @gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))

fmtcheck:
    @echo '[INFO] running gofmt to identify incorrectly formatted code...'
    @if [ ! -z "$(GOFMT)" ]; then \
        echo "[ERROR] gofmt has found errors in the following files:" ; \
        echo "$(GOFMT)" ; \
        echo "" ;\
        echo "Run make fmt to fix them." ; \
        exit 1 ;\
    fi
    @echo '[INFO] done.'

test-windows:
    go test ./plugins/inputs/ping/...
    go test ./plugins/inputs/win_perf_counters/...
    go test ./plugins/inputs/win_services/...
    go test -short ./plugins/inputs/ping/...
    go test -short ./plugins/inputs/win_perf_counters/...
    go test -short ./plugins/inputs/win_services/...
    go test -short ./plugins/inputs/procstat/...
    go test -short ./plugins/inputs/ntpq/...

lint:
    go vet ./...
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
    @echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
    @go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
        echo ""; \
        echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
        echo "to fix them before submitting code for review."; \
        exit 1; \
    fi

test-all: lint
test-ci: fmtcheck vet
    go test -short ./...

test-all: fmtcheck vet
    go test ./...

package:
    ./scripts/build.py --package --platform=all --arch=all

clean:
    -rm -f telegraf
    -rm -f telegraf.exe
    rm -f telegraf
    rm -f telegraf.exe

docker-image:
    ./scripts/build.py --package --platform=linux --arch=amd64
    cp build/telegraf*$(COMMIT)*.deb .
    docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .

# Run all docker containers necessary for integration tests
docker-run:
    docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
    docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
    docker run --name kafka \
        --link zookeeper:zookeeper \
        -e KAFKA_ADVERTISED_HOST_NAME=localhost \
        -e KAFKA_ADVERTISED_PORT=9092 \
        -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
        -e KAFKA_CREATE_TOPICS="test:1:1" \
        -p "9092:9092" \
        -d wurstmeister/kafka
    docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
    docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
    docker run --name memcached -p "11211:11211" -d memcached
    docker run --name postgres -p "5432:5432" -d postgres
    docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
    docker run --name redis -p "6379:6379" -d redis
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
    docker run --name nats -p "4222:4222" -d nats
    docker run --name openldap \
        -e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
        -e SLAPD_CONFIG_ROOTPW="secret" \
        -p "389:389" -p "636:636" \
        -d cobaugh/openldap-alpine
    docker run --name cratedb \
        -p "6543:5432" \
        -d crate crate \
        -Cnetwork.host=0.0.0.0 \
        -Ctransport.host=localhost \
        -Clicense.enterprise=false
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
    ragel -Z -G2 $^ -o $@

# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
docker-run-circle:
    docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
    docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
    docker run --name kafka \
        --link zookeeper:zookeeper \
        -e KAFKA_ADVERTISED_HOST_NAME=localhost \
        -e KAFKA_ADVERTISED_PORT=9092 \
        -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
        -e KAFKA_CREATE_TOPICS="test:1:1" \
        -p "9092:9092" \
        -d wurstmeister/kafka
    docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
    docker run --name nats -p "4222:4222" -d nats
    docker run --name openldap \
        -e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
        -e SLAPD_CONFIG_ROOTPW="secret" \
        -p "389:389" -p "636:636" \
        -d cobaugh/openldap-alpine
    docker run --name cratedb \
        -p "6543:5432" \
        -d crate crate \
        -Cnetwork.host=0.0.0.0 \
        -Ctransport.host=localhost \
        -Clicense.enterprise=false
static:
    @echo "Building static linux binary..."
    @CGO_ENABLED=0 \
    GOOS=linux \
    GOARCH=amd64 \
    go build -ldflags "$(LDFLAGS)" ./cmd/telegraf

docker-kill:
    -docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
        openldap postgres rabbitmq redis riemann zookeeper cratedb
    -docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
        openldap postgres rabbitmq redis riemann zookeeper cratedb
plugin-%:
    @echo "Starting dev environment for $${$(@)} input plugin..."
    @docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up

.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
    package clean docker-run docker-run-circle docker-kill docker-image
.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64 static
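The `-X main.commit=$(COMMIT)` and related `LDFLAGS` entries above only take effect if the `main` package declares matching package-level string variables for the linker to overwrite. A minimal sketch of that convention follows; the variable names mirror the flags in the Makefile, while the surrounding program is illustrative rather than Telegraf's actual `cmd/telegraf`.

```go
// Build with, e.g.:
//   go build -ldflags "-X main.commit=abc123 -X main.branch=master -X main.version=1.7.0" .
package main

import "fmt"

// These are left empty in source and populated at link time by -X flags.
var (
	version string
	commit  string
	branch  string
)

func main() {
	if version == "" {
		version = "unknown" // no tag matched at build time, as in the Makefile's ifdef VERSION
	}
	fmt.Printf("Telegraf %s (git: %s %s)\n", version, branch, commit)
}
```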
31 README.md
@@ -5,7 +5,7 @@ and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from local or remote services.
from local or remote services. For an example configuration reference…

Telegraf is plugin-driven and has the concept of 4 distinct plugins:

@@ -40,9 +40,9 @@ Ansible role: https://github.com/rossmcdonald/telegraf

### From Source:

Telegraf requires golang version 1.8+, the Makefile requires GNU make.
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.

Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
Dependencies are managed with [dep](https://github.com/golang/dep),
which is installed by the Makefile if you don't have it already.

1. [Install Go](https://golang.org/doc/install)

@@ -127,10 +127,12 @@ configuration options.
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aurora](./plugins/inputs/aurora)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [burrow](./plugins/inputs/burrow)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)

@@ -147,11 +149,13 @@ configuration options.
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [fibaro](./plugins/inputs/fibaro)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)

@@ -159,24 +163,29 @@ configuration options.
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
- [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [mcrouter](./plugins/inputs/mcrouter)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [nats](./plugins/inputs/nats)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)

@@ -202,10 +211,12 @@ configuration options.
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [syslog](./plugins/inputs/syslog)
* [teamspeak](./plugins/inputs/teamspeak)
* [tengine](./plugins/inputs/tengine)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/input/unbound)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)

@@ -256,22 +267,29 @@ formats may be used with input plugins supporting the `data_format` option:
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
* [Dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard)

## Processor Plugins

* [converter](./plugins/processors/converter)
* [override](./plugins/processors/override)
* [printer](./plugins/processors/printer)
* [regex](./plugins/processors/regex)
* [topk](./plugins/processors/topk)

## Aggregator Plugins

* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
* [valuecounter](./plugins/aggregators/valuecounter)

## Output Plugins

* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [application_insights](./plugins/outputs/application_insights)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)

@@ -281,6 +299,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [http](./plugins/outputs/http)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)
@@ -26,7 +26,7 @@ type MetricMaker interface {
func NewAccumulator(
    maker MetricMaker,
    metrics chan telegraf.Metric,
) *accumulator {
) telegraf.Accumulator {
    acc := accumulator{
        maker:   maker,
        metrics: metrics,
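The hunk above changes `NewAccumulator` to return the `telegraf.Accumulator` interface rather than the unexported `*accumulator`, so code outside the package can hold and pass the result. A minimal sketch of the pattern follows; `Metric`, `Adder`, and `simpleMetric` are stand-ins invented for illustration, not Telegraf's actual types.

```go
package main

import "fmt"

// Metric stands in for telegraf.Metric; the real interface is richer.
type Metric interface{ String() string }

// Adder is the narrow interface a constructor can return so callers
// depend on behavior rather than on the unexported implementation type.
type Adder interface {
	AddFields(measurement string, value float64)
}

type accumulator struct {
	metrics chan Metric
}

type simpleMetric struct{ s string }

func (m simpleMetric) String() string { return m.s }

// NewAccumulator returns the interface, mirroring the change above:
// `) *accumulator {` became `) telegraf.Accumulator {`.
func NewAccumulator(metrics chan Metric) Adder {
	return &accumulator{metrics: metrics}
}

func (a *accumulator) AddFields(measurement string, value float64) {
	a.metrics <- simpleMetric{fmt.Sprintf("%s value=%v", measurement, value)}
}

func main() {
	metrics := make(chan Metric, 1)
	acc := NewAccumulator(metrics)
	acc.AddFields("acctest", 101)
	fmt.Println(<-metrics) // acctest value=101
}
```

One consequence, visible in the test diff below, is that tests can no longer reach into `a.metrics` directly and instead read from the channel they passed in.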
@@ -15,63 +15,36 @@ import (
    "github.com/stretchr/testify/require"
)

func TestAdd(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
        actual)
}

func TestAddFields(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    tags := map[string]string{"foo": "bar"}
    fields := map[string]interface{}{
        "usage": float64(99),
    }
    a.AddFields("acctest", fields, map[string]string{})
    a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
    a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
    now := time.Now()
    a.AddCounter("acctest", fields, tags, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest usage=99")

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test usage=99")
    require.Equal(t, "acctest", testm.Name())
    actual, ok := testm.GetField("usage")

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
        actual)
    require.True(t, ok)
    require.Equal(t, float64(99), actual)

    actual, ok = testm.GetTag("foo")
    require.True(t, ok)
    require.Equal(t, "bar", actual)

    tm := testm.Time()
    // okay if monotonic clock differs
    require.True(t, now.Equal(tm))

    tp := testm.Type()
    require.Equal(t, telegraf.Counter, tp)
}

func TestAccAddError(t *testing.T) {
@@ -98,215 +71,61 @@ func TestAccAddError(t *testing.T) {
    assert.Contains(t, string(errs[2]), "baz")
}

func TestAddNoIntervalWithPrecision(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)
    a.SetPrecision(0, time.Second)
func TestSetPrecision(t *testing.T) {
    tests := []struct {
        name      string
        unset     bool
        precision time.Duration
        interval  time.Duration
        timestamp time.Time
        expected  time.Time
    }{
        {
            name:      "default precision is nanosecond",
            unset:     true,
            timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
            expected:  time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
        },
        {
            name:      "second interval",
            interval:  time.Second,
            timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
            expected:  time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC),
        },
        {
            name:      "microsecond interval",
            interval:  time.Microsecond,
            timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
            expected:  time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC),
        },
        {
            name:      "2 second precision",
            precision: 2 * time.Second,
            timestamp: time.Date(2006, time.February, 10, 12, 0, 2, 4, time.UTC),
            expected:  time.Date(2006, time.February, 10, 12, 0, 2, 0, time.UTC),
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            metrics := make(chan telegraf.Metric, 10)

    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
            a := NewAccumulator(&TestMetricMaker{}, metrics)
            if !tt.unset {
                a.SetPrecision(tt.precision, tt.interval)
            }

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
            a.AddFields("acctest",
                map[string]interface{}{"value": float64(101)},
                map[string]string{},
                tt.timestamp,
            )

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
            testm := <-metrics
            require.Equal(t, tt.expected, testm.Time())

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
        actual)
}

func TestAddDisablePrecision(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.SetPrecision(time.Nanosecond, 0)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
        actual)
}

func TestAddNoPrecisionWithInterval(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.SetPrecision(0, time.Second)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
        actual)
}

func TestDifferentPrecisions(t *testing.T) {
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.SetPrecision(0, time.Second)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm := <-a.metrics
    actual := testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
        actual)

    a.SetPrecision(0, time.Millisecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
        actual)

    a.SetPrecision(0, time.Microsecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
        actual)

    a.SetPrecision(0, time.Nanosecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
        actual)
}

func TestAddGauge(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Gauge)
}

func TestAddCounter(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Counter)
            close(metrics)
        })
    }
}

type TestMetricMaker struct {
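The `TestSetPrecision` table above expects timestamps to be rounded to the configured precision or interval: 82912748 ns becomes 0 at a one-second interval and 82913000 ns at a microsecond interval. Go's `time.Time.Round` reproduces exactly these values; whether the accumulator uses it internally is an assumption of this sketch.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same timestamp the test table uses: 82912748 ns past noon.
	ts := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)

	// Round to the nearest multiple of the precision, as the table expects.
	fmt.Println(ts.Round(time.Second))      // 12:00:00 — 0.0829s rounds down to zero
	fmt.Println(ts.Round(time.Microsecond)) // 12:00:00.000082913 — 82912.748µs rounds up
	fmt.Println(ts.Round(time.Nanosecond))  // unchanged: nanosecond is full precision
}
```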
@@ -143,7 +143,7 @@ func (a *Agent) gatherer(
func gatherWithTimeout(
    shutdown chan struct{},
    input *models.RunningInput,
    acc *accumulator,
    acc telegraf.Accumulator,
    timeout time.Duration,
) {
    ticker := time.NewTicker(timeout)

@@ -203,11 +203,6 @@ func (a *Agent) Test() error {
    input.SetTrace(true)
    input.SetDefaultTags(a.Config.Tags)

    fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
    if input.Config.Interval != 0 {
        fmt.Printf("* Internal: %s\n", input.Config.Interval)
    }

    if err := input.Input.Gather(acc); err != nil {
        return err
    }

@@ -217,7 +212,6 @@ func (a *Agent) Test() error {
    switch input.Name() {
    case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
        time.Sleep(500 * time.Millisecond)
        fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
        if err := input.Input.Gather(acc); err != nil {
            return err
        }

@@ -271,11 +265,9 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
    // if dropOriginal is set to true, then we will only send this
    // metric to the aggregators, not the outputs.
    var dropOriginal bool
    if !m.IsAggregate() {
        for _, agg := range a.Config.Aggregators {
            if ok := agg.Add(m.Copy()); ok {
                dropOriginal = true
            }
    for _, agg := range a.Config.Aggregators {
        if ok := agg.Add(m.Copy()); ok {
            dropOriginal = true
        }
    }
    if !dropOriginal {

@@ -308,7 +300,13 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
        metrics = processor.Apply(metrics...)
    }
    for _, m := range metrics {
        outMetricC <- m
        for i, o := range a.Config.Outputs {
            if i == len(a.Config.Outputs)-1 {
                o.AddMetric(m)
            } else {
                o.AddMetric(m.Copy())
            }
        }
    }
}
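The `@@ -308,7 +300,13 @@` hunk replaces the shared `outMetricC` channel with a direct fan-out: every output but the last receives a copy of the metric, and the last receives the original, saving one copy per metric. A minimal sketch of that loop, with stand-in `metric` and `output` types in place of Telegraf's real ones:

```go
package main

import "fmt"

// metric is a stand-in for telegraf.Metric with a Copy method.
type metric struct{ name string }

func (m *metric) Copy() *metric { c := *m; return &c }

// output is a stand-in for a running output's AddMetric buffer.
type output struct{ buf []*metric }

func (o *output) AddMetric(m *metric) { o.buf = append(o.buf, m) }

// fanOut mirrors the loop in the hunk above: all outputs but the last
// receive a copy, and the last receives the original, so each output
// can mutate its metric freely while one allocation per metric is saved.
func fanOut(outputs []*output, m *metric) {
	for i, o := range outputs {
		if i == len(outputs)-1 {
			o.AddMetric(m)
		} else {
			o.AddMetric(m.Copy())
		}
	}
}

func main() {
	outputs := []*output{{}, {}, {}}
	m := &metric{name: "cpu"}
	fanOut(outputs, m)
	fmt.Println(outputs[2].buf[0] == m) // true: last output got the original
	fmt.Println(outputs[0].buf[0] == m) // false: earlier outputs got copies
}
```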
@@ -364,26 +362,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
    metricC := make(chan telegraf.Metric, 100)
    aggC := make(chan telegraf.Metric, 100)

    now := time.Now()

    // Start all ServicePlugins
    for _, input := range a.Config.Inputs {
        input.SetDefaultTags(a.Config.Tags)
        switch p := input.Input.(type) {
        case telegraf.ServiceInput:
            acc := NewAccumulator(input, metricC)
            // Service input plugins should set their own precision of their
            // metrics.
            acc.SetPrecision(time.Nanosecond, 0)
            if err := p.Start(acc); err != nil {
                log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
                    input.Name(), err.Error())
                return err
            }
            defer p.Stop()
        }
    }

    // Round collection to nearest interval by sleeping
    if a.Config.Agent.RoundInterval {
        i := int64(a.Config.Agent.Interval.Duration)

@@ -406,7 +384,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
            acc := NewAccumulator(agg, aggC)
            acc.SetPrecision(a.Config.Agent.Precision.Duration,
                a.Config.Agent.Interval.Duration)
            agg.Run(acc, now, shutdown)
            agg.Run(acc, shutdown)
        }(aggregator)
    }

@@ -423,6 +401,25 @@ func (a *Agent) Run(shutdown chan struct{}) error {
        }(input, interval)
    }

    // Start all ServicePlugins inputs after all other
    // plugins are loaded so that no metrics get dropped
    for _, input := range a.Config.Inputs {
        input.SetDefaultTags(a.Config.Tags)
        switch p := input.Input.(type) {
        case telegraf.ServiceInput:
            acc := NewAccumulator(input, metricC)
            // Service input plugins should set their own precision of their
            // metrics.
            acc.SetPrecision(time.Nanosecond, 0)
            if err := p.Start(acc); err != nil {
                log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
                    input.Name(), err.Error())
                return err
            }
            defer p.Stop()
        }
    }

    wg.Wait()
    a.Close()
    return nil
@@ -12,18 +12,20 @@ platform: x64

install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
- IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
- msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
- go env
- git config --system core.longpaths true

build_script:
- cmd: C:\GnuWin32\bin\make
- cmd: C:\GnuWin32\bin\make deps
- cmd: C:\GnuWin32\bin\make telegraf

test_script:
- cmd: C:\GnuWin32\bin\make test-windows
circle.yml (19 deletions)
@@ -1,19 +0,0 @@
machine:
  services:
    - docker
    - memcached
    - redis
    - rabbitmq-server
  post:
    - sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.9.2.linux-amd64.tar.gz
    - go version

dependencies:
  override:
    - docker info

test:
  override:
    - bash scripts/circle-test.sh
@@ -13,6 +13,7 @@ import (
	"syscall"

	"github.com/influxdata/telegraf/agent"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/logger"
	_ "github.com/influxdata/telegraf/plugins/aggregators/all"

@@ -54,9 +55,10 @@ var fUsage = flag.String("usage", "",
	"print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
	"operate on the service")
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")

var (
	nextVersion = "1.5.0"
	nextVersion = "1.8.0"
	version     string
	commit      string
	branch      string

@@ -72,48 +74,6 @@ func init() {
	}
}

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf [commands|flags]

The commands & flags are:

  config              print out full sample configuration to stdout
  version             print the version to stdout

  --config <file>     configuration file to load
  --test              gather metrics once, print them to stdout, and exit
  --config-directory  directory containing additional *.conf files
  --input-filter      filter the input plugins to enable, separator is :
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
  --quiet             run in quiet mode

Examples:

  # generate a telegraf config file:
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputing metrics to stdout
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}

func reloadLoop(

@@ -187,11 +147,11 @@ func reloadLoop(

		shutdown := make(chan struct{})
		signals := make(chan os.Signal)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM)
		go func() {
			select {
			case sig := <-signals:
				if sig == os.Interrupt {
				if sig == os.Interrupt || sig == syscall.SIGTERM {
					close(shutdown)
				}
				if sig == syscall.SIGHUP {

@@ -206,8 +166,10 @@ func reloadLoop(
		}()

		log.Printf("I! Starting Telegraf %s\n", displayVersion())
		log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
		log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
		log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " "))
		log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " "))
		log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
		log.Printf("I! Tags enabled: %s", c.ListTags())

		if *fPidfile != "" {

@@ -233,7 +195,7 @@ func reloadLoop(
}

func usageExit(rc int) {
	fmt.Println(usage)
	fmt.Println(internal.Usage)
	os.Exit(rc)
}

@@ -358,13 +320,13 @@ func main() {
		return
	}

	if runtime.GOOS == "windows" {
	if runtime.GOOS == "windows" && !(*fRunAsConsole) {
		svcConfig := &service.Config{
			Name:        "telegraf",
			DisplayName: "Telegraf Data Collector Service",
			Description: "Collects data using a series of plugins and publishes it to" +
				"another series of plugins.",
			Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
			Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
		}

		prg := &program{

@@ -377,14 +339,14 @@ func main() {
		if err != nil {
			log.Fatal("E! " + err.Error())
		}
		// Handle the -service flag here to prevent any issues with tooling that
		// Handle the --service flag here to prevent any issues with tooling that
		// may not have an interactive session, e.g. installing from Ansible.
		if *fService != "" {
			if *fConfig != "" {
				(*svcConfig).Arguments = []string{"-config", *fConfig}
				(*svcConfig).Arguments = []string{"--config", *fConfig}
			}
			if *fConfigDirectory != "" {
				(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
				(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
			}
			err := service.Control(s, *fService)
			if err != nil {
docker-compose.yml (new file, 93 lines)
@@ -0,0 +1,93 @@
version: '3'

services:
  aerospike:
    image: aerospike/aerospike-server:3.9.0
    ports:
      - "3000:3000"
  zookeeper:
    image: wurstmeister/zookeeper
    environment:
      - JAVA_OPTS="-Xms256m -Xmx256m"
    ports:
      - "2181:2181"
  kafka:
    image: wurstmeister/kafka
    environment:
      - KAFKA_ADVERTISED_HOST_NAME=localhost
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_CREATE_TOPICS="test:1:1"
      - JAVA_OPTS="-Xms256m -Xmx256m"
    ports:
      - "9092:9092"
    depends_on:
      - zookeeper
  elasticsearch:
    image: elasticsearch:5
    environment:
      - JAVA_OPTS="-Xms256m -Xmx256m"
    ports:
      - "9200:9200"
      - "9300:9300"
  mysql:
    image: mysql
    environment:
      - MYSQL_ALLOW_EMPTY_PASSWORD=yes
    ports:
      - "3306:3306"
  memcached:
    image: memcached
    ports:
      - "11211:11211"
  postgres:
    image: postgres:alpine
    ports:
      - "5432:5432"
  rabbitmq:
    image: rabbitmq:3-management
    ports:
      - "15672:15672"
      - "5672:5672"
  redis:
    image: redis:alpine
    ports:
      - "6379:6379"
  nsq:
    image: nsqio/nsq
    ports:
      - "4150:4150"
    command: "/nsqd"
  mqtt:
    image: ncarlier/mqtt
    ports:
      - "1883:1883"
  riemann:
    image: stealthly/docker-riemann
    ports:
      - "5555:5555"
  nats:
    image: nats
    ports:
      - "4222:4222"
  openldap:
    image: cobaugh/openldap-alpine
    environment:
      - SLAPD_CONFIG_ROOTDN="cn=manager,cn=config"
      - SLAPD_CONFIG_ROOTPW="secret"
    ports:
      - "389:389"
      - "636:636"
  crate:
    image: crate/crate
    ports:
      - "4200:4200"
      - "4230:4230"
      - "5432:5432"
    command:
      - crate
      - -Cnetwork.host=0.0.0.0
      - -Ctransport.host=localhost
      - -Clicense.enterprise=false
    environment:
      - CRATE_HEAP_SIZE=128m
@@ -125,7 +125,7 @@ aggregator and will not get sent to the output plugins.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.

The [measurement filtering](#measurement-filtering) parameters be used to
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.

@@ -136,7 +136,7 @@ The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
  is not specified then processor execution order will be random.

The [measurement filtering](#measurement-filtering) can parameters may be used
The [measurement filtering](#measurement-filtering) parameters can be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.

@@ -153,11 +153,11 @@ The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
  An array of glob pattern strings. Only fields whose field key matches a
  pattern in this list are emitted. Not available for outputs.
  pattern in this list are emitted.
* **fielddrop**:
  The inverse of `fieldpass`. Fields with a field key matching one of the
  patterns will be discarded from the point. This is tested on points after
  they have passed the `fieldpass` test. Not available for outputs.
  they have passed the `fieldpass` test.
* **tagpass**:
  A table mapping tag keys to arrays of glob pattern strings. Only points
  that contain a tag key in the table and a tag value matching one of its
@@ -8,6 +8,7 @@ Telegraf is able to parse the following input data formats into metrics:
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),

@@ -479,3 +480,176 @@ You can also change the path to the typesdb or add additional typesdb using
## Path of to TypesDB specifications
collectd_typesdb = ["/usr/share/collectd/types.db"]
```

# Dropwizard:

The dropwizard format can parse the JSON representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining custom [measurement & tag templates](./DATA_FORMATS_INPUT.md#measurement--tag-templates). All field value types are supported, `string`, `number` and `boolean`.

A typical JSON of a dropwizard metric registry:

```json
{
  "version": "3.0.0",
  "counters" : {
    "measurement,tag1=green" : {
      "count" : 1
    }
  },
  "meters" : {
    "measurement" : {
      "count" : 1,
      "m15_rate" : 1.0,
      "m1_rate" : 1.0,
      "m5_rate" : 1.0,
      "mean_rate" : 1.0,
      "units" : "events/second"
    }
  },
  "gauges" : {
    "measurement" : {
      "value" : 1
    }
  },
  "histograms" : {
    "measurement" : {
      "count" : 1,
      "max" : 1.0,
      "mean" : 1.0,
      "min" : 1.0,
      "p50" : 1.0,
      "p75" : 1.0,
      "p95" : 1.0,
      "p98" : 1.0,
      "p99" : 1.0,
      "p999" : 1.0,
      "stddev" : 1.0
    }
  },
  "timers" : {
    "measurement" : {
      "count" : 1,
      "max" : 1.0,
      "mean" : 1.0,
      "min" : 1.0,
      "p50" : 1.0,
      "p75" : 1.0,
      "p95" : 1.0,
      "p98" : 1.0,
      "p99" : 1.0,
      "p999" : 1.0,
      "stddev" : 1.0,
      "m15_rate" : 1.0,
      "m1_rate" : 1.0,
      "m5_rate" : 1.0,
      "mean_rate" : 1.0,
      "duration_units" : "seconds",
      "rate_units" : "calls/second"
    }
  }
}
```

Would get translated into 4 different measurements:

```
measurement,metric_type=counter,tag1=green count=1
measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
measurement,metric_type=gauge value=1
measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0
measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
```

You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field.
Eg. to parse the following JSON document:

```json
{
  "time" : "2017-02-22T14:33:03.662+02:00",
  "tags" : {
    "tag1" : "green",
    "tag2" : "yellow"
  },
  "metrics" : {
    "counters" : {
      "measurement" : {
        "count" : 1
      }
    },
    "meters" : {},
    "gauges" : {},
    "histograms" : {},
    "timers" : {}
  }
}
```
and translate it into:

```
measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000
```

you simply need to use the following additional configuration properties:

```toml
dropwizard_metric_registry_path = "metrics"
dropwizard_time_path = "time"
dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
dropwizard_tags_path = "tags"
## tag paths per tag are supported too, eg.
#[inputs.yourinput.dropwizard_tag_paths]
#  tag1 = "tags.tag1"
#  tag2 = "tags.tag2"
```
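
The three `*_path` options above are gjson paths. A small illustrative sketch of what such lookups resolve to, using the gjson library directly (this shows the path syntax only; it is not the parser's actual code):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	doc := `{"time":"2017-02-22T14:33:03.662+02:00",
	         "tags":{"tag1":"green","tag2":"yellow"},
	         "metrics":{"counters":{"measurement":{"count":1}}}}`

	// dropwizard_metric_registry_path = "metrics"
	registry := gjson.Get(doc, "metrics")
	// dropwizard_time_path = "time"
	ts := gjson.Get(doc, "time")
	// a per-tag path, e.g. tag1 = "tags.tag1"
	tag1 := gjson.Get(doc, "tags.tag1")

	fmt.Println(registry.Raw)             // the inner registry JSON
	fmt.Println(ts.String(), tag1.String()) // default time and one tag value
}
```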

For more information about the dropwizard json format see
[here](http://metrics.dropwizard.io/3.1.0/manual/json/).

#### Dropwizard Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["curl http://localhost:8080/sys/metrics"]
  timeout = "5s"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "dropwizard"

  ## Used by the templating engine to join matched values when cardinality is > 1
  separator = "_"

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template and separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support below format:
  ## 1. filter + template
  ## 2. filter + template + extra tag(s)
  ## 3. filter + template with field key
  ## 4. default template
  ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>)
  templates = []

  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
  ## to locate the metric registry within the JSON document
  # dropwizard_metric_registry_path = "metrics"

  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
  ## to locate the default time of the measurements within the JSON document
  # dropwizard_time_path = "time"
  # dropwizard_time_format = "2006-01-02T15:04:05Z07:00"

  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
  ## to locate the tags map within the JSON document
  # dropwizard_tags_path = "tags"

  ## You may even use tag paths per tag
  # [inputs.exec.dropwizard_tag_paths]
  #   tag1 = "tags.tag1"
  #   tag2 = "tags.tag2"

```
@@ -1,35 +1,15 @@
# Telegraf Output Data Formats
# Output Data Formats

Telegraf is able to serialize metrics into the following output data formats:
In addition to output specific data formats, Telegraf supports a set of
standard data formats that may be selected from when configuring many output
plugins.

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

In InfluxDB line protocol, these 4 parts are easily defined in textual form:

```
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
```

For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
InfluxDB line protocol was originally the only available output format. But now
we are normalizing telegraf metric "serializers" into a
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
across all output plugins that can support it.
You will be able to identify a plugin that supports different data formats
by the presence of a `data_format`
config option, for example, in the `file` output plugin:
1. [InfluxDB Line Protocol](#influx)
1. [JSON](#json)
1. [Graphite](#graphite)

You will be able to identify the plugins with support by the presence of a
`data_format` config option, for example, in the `file` output plugin:
```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.

@@ -40,20 +20,16 @@ config option, for example, in the `file` output plugin:
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.
## Influx

# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.

### Influx Configuration:
The `influx` data format outputs metrics using
[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
This is the recommended format unless another format is required for
interoperability.

### Influx Configuration
```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.

@@ -64,13 +40,33 @@ metrics are serialized directly into InfluxDB line-protocol.
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Maximum line length in bytes. Useful only for debugging.
  # influx_max_line_bytes = 0

  ## When true, fields will be output in ascending lexical order. Enabling
  ## this option will result in decreased performance and is only recommended
  ## when you need predictable ordering while debugging.
  # influx_sort_fields = false

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
  ## integer values. Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false
```

# Graphite:
## Graphite

The Graphite data format translates Telegraf metrics into _dot_ buckets. A
template can be specified for the output of Telegraf metrics into Graphite
buckets. The default template is:
The Graphite data format is translated from Telegraf Metrics using either the
template pattern or tag support method. You can select between the two
methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support
method is used, otherwise the [`template` pattern](#template-pattern) is used.

#### Template Pattern

The `template` option describes how Telegraf translates metrics into _dot_
buckets. The default template is:

```
template = "host.tags.measurement.field"

@@ -87,7 +83,7 @@ tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.

Which means the following influx metric -> graphite conversion would happen:
**Example Conversion**:

```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758

@@ -99,7 +95,25 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
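
A rough sketch of how such a template expands into a bucket, reproducing the conversion above (illustrative only; the real serializer also handles filters, default templates, and character sanitization):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// bucket expands a "host.tags.measurement.field" style template.
// Tag keys named in the template are consumed; the special "tags"
// keyword inserts the remaining tag values sorted by key.
func bucket(template, measurement, field string, tags map[string]string) string {
	used := map[string]bool{}
	var out []string
	for _, part := range strings.Split(template, ".") {
		switch part {
		case "measurement":
			out = append(out, measurement)
		case "field":
			out = append(out, field)
		case "tags":
			var keys []string
			for k := range tags {
				if !used[k] {
					keys = append(keys, k)
				}
			}
			sort.Strings(keys)
			for _, k := range keys {
				out = append(out, tags[k])
			}
		default: // a tag key, e.g. "host"
			out = append(out, tags[part])
			used[part] = true
		}
	}
	return strings.Join(out, ".")
}

func main() {
	tags := map[string]string{"host": "tars", "cpu": "cpu-total", "dc": "us-east-1"}
	fmt.Println(bucket("host.tags.measurement.field", "cpu", "usage_idle", tags))
	// tars.cpu-total.us-east-1.cpu.usage_idle
}
```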

### Graphite Configuration:
#### Graphite Tag Support

When the `graphite_tag_support` option is enabled, the template pattern is not
used. Instead, tags are encoded using
[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html)
added in Graphite 1.1. The `metric_path` is a combination of the optional
`prefix` option, measurement name, and field name.

The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.

**Example Conversion**:
```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
```
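
A comparable sketch of the tag-support encoding: the `metric_path` is built from prefix, measurement, and field, and tags are appended as sorted `;key=value` pairs (again illustrative, not the serializer's actual code):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// graphiteTagLine encodes one field in Graphite 1.1 tag format:
// prefix.measurement.field;tag1=val1;tag2=val2 value timestamp
func graphiteTagLine(prefix, measurement, field string, value float64, ts int64, tags map[string]string) string {
	var pathParts []string
	if prefix != "" {
		pathParts = append(pathParts, prefix)
	}
	pathParts = append(pathParts, measurement, field)

	// "name" is reserved by Graphite; encode a conflicting tag as "_name".
	clean := make(map[string]string, len(tags))
	for k, v := range tags {
		if k == "name" {
			k = "_name"
		}
		clean[k] = v
	}
	keys := make([]string, 0, len(clean))
	for k := range clean {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	line := strings.Join(pathParts, ".")
	for _, k := range keys {
		line += ";" + k + "=" + clean[k]
	}
	return fmt.Sprintf("%s %v %d", line, value, ts)
}

func main() {
	tags := map[string]string{"cpu": "cpu-total", "dc": "us-east-1", "host": "tars"}
	fmt.Println(graphiteTagLine("", "cpu", "usage_idle", 98.09, 1455320690, tags))
	// cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
}
```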

### Graphite Configuration

```toml
[[outputs.file]]

@@ -112,33 +126,72 @@ to 1 (true) or 0 (false).
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"

  # prefix each graphite bucket
  ## Prefix added to each graphite bucket
  prefix = "telegraf"
  # graphite template
  ## Graphite template pattern
  template = "host.tags.measurement.field"

  ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
  # graphite_tag_support = false
```

# JSON:

The JSON data format serialized Telegraf metrics in json format. The format is:
## JSON

The JSON output data format output for a single metric is in the
form:
```json
{
    "fields":{
        "field_1":30,
        "field_2":4,
        "field_N":59,
        "n_images":660
    },
    "name":"docker",
    "tags":{
        "host":"raynor"
    },
    "timestamp":1458229140
    "fields": {
        "field_1": 30,
        "field_2": 4,
        "field_N": 59,
        "n_images": 660
    },
    "name": "docker",
    "tags": {
        "host": "raynor"
    },
    "timestamp": 1458229140
}
```

### JSON Configuration:
When an output plugin needs to emit multiple metrics at one time, it may use
the batch format. The use of batch format is determined by the plugin,
reference the documentation for the specific plugin.
```json
{
    "metrics": [
        {
            "fields": {
                "field_1": 30,
                "field_2": 4,
                "field_N": 59,
                "n_images": 660
            },
            "name": "docker",
            "tags": {
                "host": "raynor"
            },
            "timestamp": 1458229140
        },
        {
            "fields": {
                "field_1": 30,
                "field_2": 4,
                "field_N": 59,
                "n_images": 660
            },
            "name": "docker",
            "tags": {
                "host": "raynor"
            },
            "timestamp": 1458229140
        }
    ]
}
```

### JSON Configuration

```toml
[[outputs.file]]

@@ -150,14 +203,9 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
  json_timestamp_units = "1ns"
```

By default, the timestamp that is output in JSON data format serialized Telegraf
metrics is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter will be truncated to the nearest power of 10, so if the `json_timestamp_units`
are set to `15ms` the timestamps for the JSON format serialized Telegraf metrics will be
output in hundredths of a second (`10ms`).
  ## The resolution to use for the metric timestamp. Must be a duration string
  ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to
  ## the power of 10 less than the specified units.
  json_timestamp_units = "1s"
```
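
The truncation rule means, for example, that `15ms` collapses to `10ms`. A small sketch of an equivalent rule (an assumption about the behavior described above, not the serializer's code):

```go
package main

import (
	"fmt"
	"time"
)

// truncateToPowerOf10 returns the largest power-of-10 duration that does
// not exceed d, so "15ms" collapses to 10ms and "1s" stays 1s.
func truncateToPowerOf10(d time.Duration) time.Duration {
	u := time.Nanosecond
	for u*10 <= d {
		u *= 10
	}
	return u
}

func main() {
	fmt.Println(truncateToPowerOf10(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOf10(time.Second))           // 1s
}
```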

@@ -3,6 +3,7 @@
When distributed in a binary form, Telegraf may contain portions of the
following works:

- code.cloudfoundry.org/clock [APACHE](https://github.com/cloudfoundry/clock/blob/master/LICENSE)
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)

@@ -24,6 +25,7 @@ following works:
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsnotify/fsnotify [BSD](https://github.com/fsnotify/fsnotify/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)

@@ -42,6 +44,7 @@ following works:
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/go-syslog [MIT](https://github.com/influxdata/go-syslog/blob/develop/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)

@@ -50,10 +53,12 @@ following works:
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/ApplicationInsights-Go [APACHE](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/gnatsd [MIT](https://github.com/nats-io/gnatsd/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)

@@ -82,6 +87,8 @@ following works:
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/tidwall/gjson [MIT](https://github.com/tidwall/gjson/blob/master/LICENSE)
- github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)

@@ -93,10 +100,11 @@ following works:
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- google.golang.org/grpc [APACHE](https://github.com/google/grpc-go/blob/master/LICENSE)
- google.golang.org/genproto [APACHE](https://github.com/google/go-genproto/blob/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
@@ -5,7 +5,7 @@ the general steps to set it up.

1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
   location simply specify the `-config` parameter with the desired location)
   location simply specify the `--config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):

@@ -26,6 +26,15 @@ the general steps to set it up.
   > net start telegraf
   ```

## Config Directory

You can also specify a `--config-directory` for the service to use:
1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d`
2. Include the `--config-directory` option when registering the service:
   ```
   > C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d
   ```

## Other supported operations

Telegraf can manage its own service through the --service flag:

@@ -37,7 +46,6 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |


Troubleshooting common error #1067

When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start
etc/telegraf.conf (1393 changes): file diff suppressed because it is too large.
@@ -242,7 +242,7 @@
#
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs"]
# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]


# # Read metrics about disk IO by device
@@ -9,6 +9,7 @@ import (
	"math"
	"os"
	"path/filepath"

	"regexp"
	"runtime"
	"sort"

@@ -40,6 +41,11 @@ var (

	// envVarRe is a regex to find environment variables in the config file
	envVarRe = regexp.MustCompile(`\$\w+`)

	envVarEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)
)

// Config specifies the URL/user/password for the database that telegraf

@@ -150,6 +156,24 @@ func (c *Config) InputNames() []string {
	return name
}

// Outputs returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
	var name []string
	for _, aggregator := range c.Aggregators {
		name = append(name, aggregator.Name())
	}
	return name
}

// Outputs returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
	var name []string
	for _, processor := range c.Processors {
		name = append(name, processor.Name)
	}
	return name
}

// Outputs returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
	var name []string

@@ -513,7 +537,13 @@ func (c *Config) LoadDirectory(path string) error {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}

		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				// skip Kubernetes mounts, preventing loading the same config twice
				return filepath.SkipDir
			}

			return nil
		}
		name := info.Name()

@@ -689,6 +719,11 @@ func trimBOM(f []byte) []byte {
	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
	return envVarEscaper.Replace(value)
}

// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.

@@ -702,8 +737,9 @@ func parseFile(fpath string) (*ast.Table, error) {

	env_vars := envVarRe.FindAll(contents, -1)
	for _, env_var := range env_vars {
		env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
		if env_val != "" {
		env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
		if ok {
			env_val = escapeEnv(env_val)
			contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
		}
	}
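
The switch from `os.Getenv` plus an empty-string check to `os.LookupEnv` matters when a variable is set but empty: `Getenv` cannot tell that case apart from an unset variable. A short standalone demonstration of the difference:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("EMPTY", "")

	// Getenv cannot distinguish "set to empty" from "unset".
	fmt.Printf("Getenv: %q\n", os.Getenv("EMPTY")) // ""

	// LookupEnv reports presence separately, which is what the config
	// loader needs before substituting $EMPTY in the TOML contents.
	if v, ok := os.LookupEnv("EMPTY"); ok {
		fmt.Printf("LookupEnv: %q (set)\n", v)
	}
	if _, ok := os.LookupEnv("MISSING"); !ok {
		fmt.Println("MISSING is unset, leave $MISSING in place")
	}
}
```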

@@ -1261,6 +1297,47 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
		}
	}

	if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.DropwizardMetricRegistryPath = str.Value
			}
		}
	}
	if node, ok := tbl.Fields["dropwizard_time_path"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.DropwizardTimePath = str.Value
			}
		}
	}
	if node, ok := tbl.Fields["dropwizard_time_format"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.DropwizardTimeFormat = str.Value
			}
		}
	}
	if node, ok := tbl.Fields["dropwizard_tags_path"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.DropwizardTagsPath = str.Value
			}
		}
	}
	c.DropwizardTagPathsMap = make(map[string]string)
	if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			for name, val := range subtbl.Fields {
				if kv, ok := val.(*ast.KeyValue); ok {
					if str, ok := kv.Value.(*ast.String); ok {
						c.DropwizardTagPathsMap[name] = str.Value
					}
				}
			}
		}
	}

	c.MetricName = name

	delete(tbl.Fields, "data_format")

@@ -1271,6 +1348,11 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
	delete(tbl.Fields, "collectd_auth_file")
	delete(tbl.Fields, "collectd_security_level")
	delete(tbl.Fields, "collectd_typesdb")
	delete(tbl.Fields, "dropwizard_metric_registry_path")
	delete(tbl.Fields, "dropwizard_time_path")
	delete(tbl.Fields, "dropwizard_time_format")
	delete(tbl.Fields, "dropwizard_tags_path")
	delete(tbl.Fields, "dropwizard_tag_paths")

	return parsers.NewParser(c)
}

@@ -1309,6 +1391,54 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
		}
	}

	if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if integer, ok := kv.Value.(*ast.Integer); ok {
				v, err := integer.Int()
				if err != nil {
					return nil, err
				}
				c.InfluxMaxLineBytes = int(v)
			}
		}
	}

	if node, ok := tbl.Fields["influx_sort_fields"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Boolean); ok {
				var err error
				c.InfluxSortFields, err = b.Boolean()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if node, ok := tbl.Fields["influx_uint_support"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Boolean); ok {
				var err error
				c.InfluxUintSupport, err = b.Boolean()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if node, ok := tbl.Fields["graphite_tag_support"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Boolean); ok {
				var err error
				c.GraphiteTagSupport, err = b.Boolean()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if node, ok := tbl.Fields["json_timestamp_units"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {

@@ -1325,6 +1455,10 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
		}
	}

	delete(tbl.Fields, "influx_max_line_bytes")
	delete(tbl.Fields, "influx_sort_fields")
	delete(tbl.Fields, "influx_uint_support")
	delete(tbl.Fields, "graphite_tag_support")
	delete(tbl.Fields, "data_format")
	delete(tbl.Fields, "prefix")
	delete(tbl.Fields, "template")
internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
# This invalid config file should be skipped during testing
# as it is an ..data folder

[[outputs.influxdb
@@ -4,17 +4,14 @@ import (
	"bufio"
	"bytes"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/big"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"syscall"
	"time"
	"unicode"
)

@@ -112,49 +109,6 @@ func RandomString(n int) string {
	return string(bytes)
}

// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// you must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
func GetTLSConfig(
	SSLCert, SSLKey, SSLCA string,
	InsecureSkipVerify bool,
) (*tls.Config, error) {
	if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
		return nil, nil
	}

	t := &tls.Config{
		InsecureSkipVerify: InsecureSkipVerify,
	}

	if SSLCA != "" {
		caCert, err := ioutil.ReadFile(SSLCA)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
				err))
		}

		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		t.RootCAs = caCertPool
	}

	if SSLCert != "" && SSLKey != "" {
		cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
		if err != nil {
			return nil, errors.New(fmt.Sprintf(
				"Could not load TLS client key/certificate from %s:%s: %s",
				SSLKey, SSLCert, err))
		}

		t.Certificates = []tls.Certificate{cert}
		t.BuildNameToCertificate()
	}

	// will be nil by default if nothing is provided
	return t, nil
}

// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {

@@ -240,3 +194,15 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
		return
	}
}

// Exit status takes the error from exec.Command
// and returns the exit status and true
// if error is not exit status, will return 0 and false
func ExitStatus(err error) (int, bool) {
	if exiterr, ok := err.(*exec.ExitError); ok {
		if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
			return status.ExitStatus(), true
		}
	}
	return 0, false
}
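
A minimal usage sketch of the new helper; it duplicates the function locally so it runs standalone, and it assumes a Unix `/bin/sh` is available:

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// exitStatus mirrors the ExitStatus helper above: it unwraps an
// *exec.ExitError into the process exit code, returning ok=false
// for nil or non-exit errors.
func exitStatus(err error) (int, bool) {
	if exiterr, ok := err.(*exec.ExitError); ok {
		if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
			return status.ExitStatus(), true
		}
	}
	return 0, false
}

func main() {
	err := exec.Command("/bin/sh", "-c", "exit 3").Run()
	if code, ok := exitStatus(err); ok {
		fmt.Println("exit status:", code) // 3
	}
}
```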

@@ -44,6 +44,9 @@ var (
)

func TestRunTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test due to random failures.")
	}
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}

@@ -58,6 +61,8 @@ func TestRunTimeout(t *testing.T) {
}

func TestCombinedOutputTimeout(t *testing.T) {
	// TODO: Fix this test
	t.Skip("Test failing too often, skip for now and revisit later.")
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}

@@ -109,6 +114,8 @@ func TestRunError(t *testing.T) {
}

func TestRandomSleep(t *testing.T) {
	// TODO: Fix this test
	t.Skip("Test failing too often, skip for now and revisit later.")
	// test that zero max returns immediately
	s := time.Now()
	RandomSleep(time.Duration(0), make(chan struct{}))
@@ -1,54 +0,0 @@
package limiter

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestRateLimiter(t *testing.T) {
	r := NewRateLimiter(5, time.Second)
	ticker := time.NewTicker(time.Millisecond * 75)

	// test that we can only get 5 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-r.C:
			counter++
		case <-ticker.C:
			break outer
		}
	}

	assert.Equal(t, 5, counter)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

func TestRateLimiterMultipleIterations(t *testing.T) {
	r := NewRateLimiter(5, time.Millisecond*50)
	ticker := time.NewTicker(time.Millisecond * 250)

	// test that we can get 15 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-ticker.C:
			break outer
		case <-r.C:
			counter++
		}
	}

	assert.True(t, counter > 10)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}
@@ -2,8 +2,6 @@ package models

import (
	"log"
	"math"
	"strings"
	"time"

	"github.com/influxdata/telegraf"

@@ -78,84 +76,6 @@ func makemetric(
		}
	}

	for k, v := range tags {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		} else if strings.HasSuffix(v, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] has a value "+
				"ending with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		}
	}

	for k, v := range fields {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] field [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(fields, k)
			continue
		}
		// Validate uint64 and float64 fields
		// convert all int & uint types to int64
		switch val := v.(type) {
		case nil:
			// delete nil fields
			delete(fields, k)
		case uint:
			fields[k] = int64(val)
			continue
		case uint8:
			fields[k] = int64(val)
			continue
		case uint16:
			fields[k] = int64(val)
			continue
		case uint32:
			fields[k] = int64(val)
			continue
		case int:
			fields[k] = int64(val)
			continue
		case int8:
			fields[k] = int64(val)
			continue
		case int16:
			fields[k] = int64(val)
			continue
		case int32:
			fields[k] = int64(val)
			continue
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				fields[k] = int64(val)
			} else {
				fields[k] = int64(9223372036854775807)
			}
			continue
		case float32:
			fields[k] = float64(val)
			continue
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
					"field, skipping",
					measurement, k)
				delete(fields, k)
				continue
			}
		case string:
			fields[k] = v
		default:
			fields[k] = v
		}
	}

	m, err := metric.New(measurement, tags, fields, t, mType)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
@@ -1,6 +1,7 @@
package models

import (
	"log"
	"time"

	"github.com/influxdata/telegraf"

@@ -114,7 +115,6 @@ func (r *RunningAggregator) reset() {
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
	acc telegraf.Accumulator,
	now time.Time,
	shutdown chan struct{},
) {
	// The start of the period is truncated to the nearest second.

@@ -133,6 +133,7 @@ func (r *RunningAggregator) Run(
	// 2nd interval: 00:10 - 00:20.5
	// etc.
	//
	now := time.Now()
	r.periodStart = now.Truncate(time.Second)
	truncation := now.Sub(r.periodStart)
	r.periodEnd = r.periodStart.Add(r.Config.Period)

@@ -153,6 +154,7 @@ func (r *RunningAggregator) Run(
				m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
				// the metric is outside the current aggregation period, so
				// skip it.
				log.Printf("D! aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name())
				continue
			}
			r.add(m)
@@ -1,7 +1,6 @@
package models

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"

@@ -24,7 +23,7 @@ func TestAdd(t *testing.T) {
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	go ra.Run(&acc, time.Now(), make(chan struct{}))
	go ra.Run(&acc, make(chan struct{}))

	m := ra.MakeMetric(
		"RITest",

@@ -55,7 +54,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	go ra.Run(&acc, time.Now(), make(chan struct{}))
	go ra.Run(&acc, make(chan struct{}))

	// metric before current period
	m := ra.MakeMetric(

@@ -113,7 +112,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		ra.Run(&acc, time.Now(), shutdown)
		ra.Run(&acc, shutdown)
	}()

	m := ra.MakeMetric(

@@ -167,69 +166,6 @@ func TestAddDropOriginal(t *testing.T) {
	assert.False(t, ra.Add(m2))
}

// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
	now := time.Now()
	ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
		Name: "TestRunningAggregator",
	})
	assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())

	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Untyped,
	)

	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Counter,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Counter,
	)

	m = ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Gauge,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
		m.String(),
	)
	assert.Equal(
		t,
		m.Type(),
		telegraf.Gauge,
	)
}

type TestAggregator struct {
	sum int64
}
@@ -5,6 +5,7 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers/influx"
	"github.com/influxdata/telegraf/selfstat"
)

@@ -75,7 +76,12 @@ func (r *RunningInput) MakeMetric(
	)

	if r.trace && m != nil {
		fmt.Print("> " + m.String())
		s := influx.NewSerializer()
		s.SetFieldSortOrder(influx.SortFields)
		octets, err := s.Serialize(m)
		if err == nil {
			fmt.Print("> " + string(octets))
		}
	}

	r.MetricsGathered.Incr(1)
@@ -1,12 +1,11 @@
package models

import (
    "fmt"
    "math"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/metric"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
@@ -45,77 +44,17 @@ func TestMakeMetricNilFields(t *testing.T) {
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())
    assert.Equal(t, "inputs.TestRunningInput", ri.Name())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
    expected, err := metric.New("RITest",
        map[string]string{},
        telegraf.Untyped,
        map[string]interface{}{
            "value": int(101),
        },
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Untyped,
    )
    require.NoError(t, err)

    m = ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Counter,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Counter,
    )

    m = ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Gauge,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Gauge,
    )
    require.Equal(t, expected, m)
}

func TestMakeMetricWithPluginTags(t *testing.T) {
@@ -137,11 +76,18 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
        m.String(),

    expected, err := metric.New("RITest",
        map[string]string{
            "foo": "bar",
        },
        map[string]interface{}{
            "value": 101,
        },
        now,
    )
    require.NoError(t, err)
    require.Equal(t, expected, m)
}

func TestMakeMetricFilteredOut(t *testing.T) {
@@ -187,87 +133,17 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

// make an untyped, counter, & gauge metric
func TestMakeMetricInfFields(t *testing.T) {
    inf := math.Inf(1)
    ninf := math.Inf(-1)
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "value": int(101),
            "inf":   inf,
            "ninf":  ninf,
    expected, err := metric.New("RITest",
        map[string]string{
            "foo": "bar",
        },
        map[string]interface{}{
            "value": 101,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}

func TestMakeMetricAllFieldTypes(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "a": int(10),
            "b": int8(10),
            "c": int16(10),
            "d": int32(10),
            "e": uint(10),
            "f": uint8(10),
            "g": uint16(10),
            "h": uint32(10),
            "i": uint64(10),
            "j": float32(10),
            "k": uint64(9223372036854775810),
            "l": "foobar",
            "m": true,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Contains(t, m.String(), "a=10i")
    assert.Contains(t, m.String(), "b=10i")
    assert.Contains(t, m.String(), "c=10i")
    assert.Contains(t, m.String(), "d=10i")
    assert.Contains(t, m.String(), "e=10i")
    assert.Contains(t, m.String(), "f=10i")
    assert.Contains(t, m.String(), "g=10i")
    assert.Contains(t, m.String(), "h=10i")
    assert.Contains(t, m.String(), "i=10i")
    assert.Contains(t, m.String(), "j=10")
    assert.NotContains(t, m.String(), "j=10i")
    assert.Contains(t, m.String(), "k=9223372036854775807i")
    assert.Contains(t, m.String(), "l=\"foobar\"")
    assert.Contains(t, m.String(), "m=true")
    require.NoError(t, err)
    require.Equal(t, expected, m)
}

func TestMakeMetricNameOverride(t *testing.T) {
@@ -284,11 +160,15 @@ func TestMakeMetricNameOverride(t *testing.T) {
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
        m.String(),
    expected, err := metric.New("foobar",
        nil,
        map[string]interface{}{
            "value": 101,
        },
        now,
    )
    require.NoError(t, err)
    require.Equal(t, expected, m)
}

func TestMakeMetricNamePrefix(t *testing.T) {
@@ -305,11 +185,15 @@ func TestMakeMetricNamePrefix(t *testing.T) {
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    expected, err := metric.New("foobar_RITest",
        nil,
        map[string]interface{}{
            "value": 101,
        },
        now,
    )
    require.NoError(t, err)
    require.Equal(t, expected, m)
}

func TestMakeMetricNameSuffix(t *testing.T) {
@@ -326,134 +210,15 @@ func TestMakeMetricNameSuffix(t *testing.T) {
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
        m.String(),
    expected, err := metric.New("RITest_foobar",
        nil,
        map[string]interface{}{
            "value": 101,
        },
        now,
    )
}

func TestMakeMetric_TrailingSlash(t *testing.T) {
    now := time.Now()

    tests := []struct {
        name                string
        measurement         string
        fields              map[string]interface{}
        tags                map[string]string
        expectedNil         bool
        expectedMeasurement string
        expectedFields      map[string]interface{}
        expectedTags        map[string]string
    }{
        {
            name:        "Measurement cannot have trailing slash",
            measurement: `cpu\`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
            tags:        map[string]string{},
            expectedNil: true,
        },
        {
            name:        "Field key with trailing slash dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
                `bad\`:  `xyzzy`,
            },
            tags:                map[string]string{},
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
            },
            expectedTags: map[string]string{},
        },
        {
            name:        "Field value with trailing slash okay",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
                "ok":    `xyzzy\`,
            },
            tags:                map[string]string{},
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
                "ok":    `xyzzy\`,
            },
            expectedTags: map[string]string{},
        },
        {
            name:        "Must have one field after dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "bad": math.NaN(),
            },
            tags:        map[string]string{},
            expectedNil: true,
        },
        {
            name:        "Tag key with trailing slash dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
            tags: map[string]string{
                `host\`: "localhost",
                "a":     "x",
            },
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
            },
            expectedTags: map[string]string{
                "a": "x",
            },
        },
        {
            name:        "Tag value with trailing slash dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
            tags: map[string]string{
                `host`: `localhost\`,
                "a":    "x",
            },
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
            },
            expectedTags: map[string]string{
                "a": "x",
            },
        },
    }

    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            m := ri.MakeMetric(
                tc.measurement,
                tc.fields,
                tc.tags,
                telegraf.Untyped,
                now)

            if tc.expectedNil {
                require.Nil(t, m)
            } else {
                require.NotNil(t, m)
                require.Equal(t, tc.expectedMeasurement, m.Name())
                require.Equal(t, tc.expectedFields, m.Fields())
                require.Equal(t, tc.expectedTags, m.Tags())
            }
        })
    }
    require.NoError(t, err)
    require.Equal(t, expected, m)
}

type testInput struct{}

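The refactor above converts string-based assertions into comparisons against an expected metric built with metric.New. A condensed sketch of that test pattern, reusing only the package-local helpers shown in this diff (testInput, NewRunningInput, InputConfig, MakeMetric):

func TestMakeMetricPattern(t *testing.T) {
    now := time.Now()
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": 101},
        map[string]string{},
        telegraf.Untyped,
        now,
    )

    // Build the expected metric directly, then compare whole values.
    expected, err := metric.New("RITest",
        map[string]string{},
        map[string]interface{}{"value": 101},
        now,
    )
    require.NoError(t, err)
    require.Equal(t, expected, m)
}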
@@ -87,7 +87,7 @@ func NewRunningOutput(
            map[string]string{"output": name},
        ),
    }
    ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
    ro.BufferLimit.Set(int64(ro.MetricBufferLimit))
    return ro
}

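The one-line change above switches BufferLimit from Incr to Set: the buffer limit is a fixed gauge, not a running counter, so it should be assigned rather than accumulated. A hedged sketch of the difference, assuming selfstat.Register and the Stat methods behave as they are used elsewhere in this diff (the "example" tag value is illustrative):

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/selfstat"
)

func main() {
    limit := selfstat.Register("write", "buffer_limit",
        map[string]string{"output": "example"}) // illustrative tag value
    limit.Set(10000) // gauge semantics: always reflects the configured limit
    fmt.Println(limit.Get())
}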
86  internal/templating/engine.go  Normal file
@@ -0,0 +1,86 @@
package templating

import (
    "sort"
    "strings"
)

const (
    // DefaultSeparator is the default separation character to use when separating template parts.
    DefaultSeparator = "."
)

// Engine uses a Matcher to retrieve the appropriate template and applies the template
// to the input string
type Engine struct {
    joiner  string
    matcher *matcher
}

// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
func (e *Engine) Apply(line string) (string, map[string]string, string, error) {
    return e.matcher.match(line).Apply(line, e.joiner)
}

// NewEngine creates a new templating engine
func NewEngine(joiner string, defaultTemplate *Template, templates []string) (*Engine, error) {
    engine := Engine{
        joiner:  joiner,
        matcher: newMatcher(defaultTemplate),
    }
    templateSpecs := parseTemplateSpecs(templates)

    for _, templateSpec := range templateSpecs {
        if err := engine.matcher.addSpec(templateSpec); err != nil {
            return nil, err
        }
    }

    return &engine, nil
}

func parseTemplateSpecs(templates []string) templateSpecs {
    tmplts := templateSpecs{}
    for _, pattern := range templates {
        tmplt := templateSpec{
            separator: DefaultSeparator,
        }

        // Format is [separator] [filter] <template> [tag1=value1,tag2=value2]
        parts := strings.Fields(pattern)
        partsLength := len(parts)
        if partsLength < 1 {
            // ignore
            continue
        }
        if partsLength == 1 {
            tmplt.template = pattern
        } else if partsLength == 4 {
            tmplt.separator = parts[0]
            tmplt.filter = parts[1]
            tmplt.template = parts[2]
            tmplt.tagstring = parts[3]
        } else {
            hasTagstring := strings.Contains(parts[partsLength-1], "=")
            if hasTagstring {
                tmplt.tagstring = parts[partsLength-1]
                tmplt.template = parts[partsLength-2]
                if partsLength == 3 {
                    tmplt.filter = parts[0]
                }
            } else {
                tmplt.template = parts[partsLength-1]
                if partsLength == 2 {
                    tmplt.filter = parts[0]
                } else { // length == 3
                    tmplt.separator = parts[0]
                    tmplt.filter = parts[1]
                }
            }
        }
        tmplts = append(tmplts, tmplt)
    }
    sort.Sort(tmplts)
    return tmplts
}
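A usage sketch of the engine defined above; the spec string follows the format documented in parseTemplateSpecs and is illustrative. The import is a Go "internal" package, so this only compiles inside the telegraf module at this revision:

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal/templating"
)

func main() {
    defaultTmpl, err := templating.NewDefaultTemplateWithPattern("measurement*")
    if err != nil {
        panic(err)
    }

    // One spec of the form "[filter] <template>"; the empty leading part of
    // the template skips the line element consumed by the filter.
    engine, err := templating.NewEngine("_", defaultTmpl,
        []string{"servers.* .host.measurement.field*"})
    if err != nil {
        panic(err)
    }

    name, tags, field, err := engine.Apply("servers.web01.cpu.user.idle")
    fmt.Println(name, tags, field, err) // cpu map[host:web01] user_idle <nil>
}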
58  internal/templating/matcher.go  Normal file
@@ -0,0 +1,58 @@
package templating

import (
    "strings"
)

// matcher determines which template should be applied to a given metric
// based on a filter tree.
type matcher struct {
    root            *node
    defaultTemplate *Template
}

// newMatcher creates a new matcher.
func newMatcher(defaultTemplate *Template) *matcher {
    return &matcher{
        root:            &node{},
        defaultTemplate: defaultTemplate,
    }
}

func (m *matcher) addSpec(tmplt templateSpec) error {
    // Parse out the default tags specific to this template
    tags := map[string]string{}
    if tmplt.tagstring != "" {
        for _, kv := range strings.Split(tmplt.tagstring, ",") {
            parts := strings.Split(kv, "=")
            tags[parts[0]] = parts[1]
        }
    }

    tmpl, err := NewTemplate(tmplt.separator, tmplt.template, tags)
    if err != nil {
        return err
    }
    m.add(tmplt.filter, tmpl)
    return nil
}

// add inserts the template into the filter tree based on the given filter
func (m *matcher) add(filter string, template *Template) {
    if filter == "" {
        m.defaultTemplate = template
        m.root.separator = template.separator
        return
    }
    m.root.insert(filter, template)
}

// match returns the template that matches the given measurement line.
// If no template matches, the default template is returned.
func (m *matcher) match(line string) *Template {
    tmpl := m.root.search(line)
    if tmpl != nil {
        return tmpl
    }
    return m.defaultTemplate
}
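An in-package sketch (matcher and newMatcher are unexported) of the fallback behavior documented above; NewTemplate and DefaultSeparator come from template.go later in this diff, and require is testify's:

func TestMatcherFallback(t *testing.T) {
    def, err := NewTemplate(DefaultSeparator, "measurement*", nil)
    require.NoError(t, err)
    m := newMatcher(def)

    tmpl, err := NewTemplate(DefaultSeparator, ".host.measurement", nil)
    require.NoError(t, err)
    m.add("servers.*", tmpl)

    require.Equal(t, tmpl, m.match("servers.web01.cpu")) // filter hit
    require.Equal(t, def, m.match("disk.sda.used"))      // default fallback
}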
122  internal/templating/node.go  Normal file
@@ -0,0 +1,122 @@
package templating

import (
    "sort"
    "strings"
)

// node is an item in a sorted k-ary tree of filter parts. Each child is sorted by its part value.
// The special value "*" is always sorted last.
type node struct {
    separator string
    value     string
    children  nodes
    template  *Template
}

// insert inserts the given string template into the tree. The filter string is separated
// on the template separator and each part is used as the path in the tree.
func (n *node) insert(filter string, template *Template) {
    n.separator = template.separator
    n.recursiveInsert(strings.Split(filter, n.separator), template)
}

// recursiveInsert does the actual recursive insertion
func (n *node) recursiveInsert(values []string, template *Template) {
    // At the end of the filter, set the template
    if len(values) == 0 {
        n.template = template
        return
    }

    // See if the current element already exists in the tree. If so, insert
    // into that sub-tree
    for _, v := range n.children {
        if v.value == values[0] {
            v.recursiveInsert(values[1:], template)
            return
        }
    }

    // New element, add it to the tree and sort the children
    newNode := &node{value: values[0]}
    n.children = append(n.children, newNode)
    sort.Sort(&n.children)

    // Now insert the rest of the tree into the new element
    newNode.recursiveInsert(values[1:], template)
}

// search searches for a template matching the input string
func (n *node) search(line string) *Template {
    separator := n.separator
    return n.recursiveSearch(strings.Split(line, separator))
}

// recursiveSearch performs the actual recursive search
func (n *node) recursiveSearch(lineParts []string) *Template {
    // Nothing to search
    if len(lineParts) == 0 || len(n.children) == 0 {
        return n.template
    }

    // If the last element is a wildcard, don't include it in this search since it's sorted
    // to the end but lexicographically it would not always be and sort.Search assumes
    // the slice is sorted.
    length := len(n.children)
    if n.children[length-1].value == "*" {
        length--
    }

    // Find the index of the child with an exact match
    i := sort.Search(length, func(i int) bool {
        return n.children[i].value >= lineParts[0]
    })

    // Found an exact match, so search that child sub-tree
    if i < len(n.children) && n.children[i].value == lineParts[0] {
        return n.children[i].recursiveSearch(lineParts[1:])
    }
    // Not an exact match, see if we have a wildcard child to search
    if n.children[len(n.children)-1].value == "*" {
        return n.children[len(n.children)-1].recursiveSearch(lineParts[1:])
    }
    return n.template
}

// nodes is simply an array of nodes implementing the sorting interface.
type nodes []*node

// Less returns a boolean indicating whether the filter at position j
// is less than the filter at position k. Filters are ordered by string
// comparison of each component part. A wildcard value "*" is never
// less than a non-wildcard value.
//
// For example, the filters:
//     "*.*"
//     "servers.*"
//     "servers.localhost"
//     "*.localhost"
//
// Would be sorted as:
//     "servers.localhost"
//     "servers.*"
//     "*.localhost"
//     "*.*"
func (n *nodes) Less(j, k int) bool {
    if (*n)[j].value == "*" && (*n)[k].value != "*" {
        return false
    }

    if (*n)[j].value != "*" && (*n)[k].value == "*" {
        return true
    }

    return (*n)[j].value < (*n)[k].value
}

// Swap swaps two elements of the array
func (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }

// Len returns the length of the array
func (n *nodes) Len() int { return len(*n) }
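The wildcard exclusion in recursiveSearch above exists because nodes.Less forces "*" to sort last even though the byte value of '*' (0x2a) is smaller than any letter; a plain string sort would therefore disagree with the tree's child order, and sort.Search requires sorted input. A tiny demonstration of that mismatch:

package main

import (
    "fmt"
    "sort"
)

func main() {
    // Child order produced by nodes.Less: the wildcard is forced to the end.
    vals := []string{"disk", "servers", "*"}
    fmt.Println(sort.StringsAreSorted(vals)) // false: "*" < "disk" byte-wise
}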
148  internal/templating/template.go  Normal file
@@ -0,0 +1,148 @@
package templating

import (
    "fmt"
    "strings"
)

// Template represents a pattern and tags to map a metric string to an influxdb Point
type Template struct {
    separator         string
    parts             []string
    defaultTags       map[string]string
    greedyField       bool
    greedyMeasurement bool
}

// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
func (t *Template) Apply(line string, joiner string) (string, map[string]string, string, error) {
    fields := strings.Split(line, t.separator)
    var (
        measurement []string
        tags        = make(map[string][]string)
        field       []string
    )

    // Set any default tags
    for k, v := range t.defaultTags {
        tags[k] = append(tags[k], v)
    }

    // See if an invalid combination has been specified in the template:
    for _, tag := range t.parts {
        if tag == "measurement*" {
            t.greedyMeasurement = true
        } else if tag == "field*" {
            t.greedyField = true
        }
    }
    if t.greedyField && t.greedyMeasurement {
        return "", nil, "",
            fmt.Errorf("either 'field*' or 'measurement*' can be used in each "+
                "template (but not both together): %q",
                strings.Join(t.parts, joiner))
    }

    for i, tag := range t.parts {
        if i >= len(fields) {
            continue
        }
        if tag == "" {
            continue
        }

        switch tag {
        case "measurement":
            measurement = append(measurement, fields[i])
        case "field":
            field = append(field, fields[i])
        case "field*":
            field = append(field, fields[i:]...)
            break
        case "measurement*":
            measurement = append(measurement, fields[i:]...)
            break
        default:
            tags[tag] = append(tags[tag], fields[i])
        }
    }

    // Convert to map of strings.
    outtags := make(map[string]string)
    for k, values := range tags {
        outtags[k] = strings.Join(values, joiner)
    }

    return strings.Join(measurement, joiner), outtags, strings.Join(field, joiner), nil
}

func NewDefaultTemplateWithPattern(pattern string) (*Template, error) {
    return NewTemplate(DefaultSeparator, pattern, nil)
}

// NewTemplate returns a new template ensuring it has a measurement
// specified.
func NewTemplate(separator string, pattern string, defaultTags map[string]string) (*Template, error) {
    parts := strings.Split(pattern, separator)
    hasMeasurement := false
    template := &Template{
        separator:   separator,
        parts:       parts,
        defaultTags: defaultTags,
    }

    for _, part := range parts {
        if strings.HasPrefix(part, "measurement") {
            hasMeasurement = true
        }
        if part == "measurement*" {
            template.greedyMeasurement = true
        } else if part == "field*" {
            template.greedyField = true
        }
    }

    if !hasMeasurement {
        return nil, fmt.Errorf("no measurement specified for template. %q", pattern)
    }

    return template, nil
}

// templateSpec is a template string split in its constituent parts
type templateSpec struct {
    separator string
    filter    string
    template  string
    tagstring string
}

// templateSpecs is simply an array of template specs implementing the sorting interface
type templateSpecs []templateSpec

// Less reports whether the element with
// index j should sort before the element with index k.
func (e templateSpecs) Less(j, k int) bool {
    if len(e[j].filter) == 0 && len(e[k].filter) == 0 {
        jlength := len(strings.Split(e[j].template, e[j].separator))
        klength := len(strings.Split(e[k].template, e[k].separator))
        return jlength < klength
    }
    if len(e[j].filter) == 0 {
        return true
    }
    if len(e[k].filter) == 0 {
        return false
    }

    jlength := len(strings.Split(e[j].template, e[j].separator))
    klength := len(strings.Split(e[k].template, e[k].separator))
    return jlength < klength
}

// Swap swaps the elements with indexes i and j.
func (e templateSpecs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Len is the number of elements in the collection.
func (e templateSpecs) Len() int { return len(e) }
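An in-package sketch of Template.Apply on a graphite-style line, using only NewTemplate and Apply as defined above; the dc default tag and the input line are illustrative:

func TestTemplateApplySketch(t *testing.T) {
    tmpl, err := NewTemplate(".", "region.host.measurement.field*",
        map[string]string{"dc": "east"})
    require.NoError(t, err)

    name, tags, field, err := tmpl.Apply("us-west.web01.cpu.user.idle", "_")
    require.NoError(t, err)
    require.Equal(t, "cpu", name)
    // Positional parts become tags; the default tag is merged in.
    require.Equal(t, map[string]string{
        "dc":     "east",
        "host":   "web01",
        "region": "us-west",
    }, tags)
    require.Equal(t, "user_idle", field) // greedy field* joined by the joiner
}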
130  internal/tls/config.go  Normal file
@@ -0,0 +1,130 @@
package tls

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
)

// ClientConfig represents the standard client TLS config.
type ClientConfig struct {
    TLSCA              string `toml:"tls_ca"`
    TLSCert            string `toml:"tls_cert"`
    TLSKey             string `toml:"tls_key"`
    InsecureSkipVerify bool   `toml:"insecure_skip_verify"`

    // Deprecated in 1.7; use TLS variables above
    SSLCA   string `toml:"ssl_ca"`
    SSLCert string `toml:"ssl_cert"`
    SSLKey  string `toml:"ssl_key"`
}

// ServerConfig represents the standard server TLS config.
type ServerConfig struct {
    TLSCert           string   `toml:"tls_cert"`
    TLSKey            string   `toml:"tls_key"`
    TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"`
}

// TLSConfig returns a tls.Config, may be nil without error if TLS is not
// configured.
func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
    // Support deprecated variable names
    if c.TLSCA == "" && c.SSLCA != "" {
        c.TLSCA = c.SSLCA
    }
    if c.TLSCert == "" && c.SSLCert != "" {
        c.TLSCert = c.SSLCert
    }
    if c.TLSKey == "" && c.SSLKey != "" {
        c.TLSKey = c.SSLKey
    }

    // TODO: return default tls.Config; plugins should not call if they don't
    // want TLS, this will require using another option to determine. In the
    // case of an HTTP plugin, you could use `https`. Other plugins may need
    // the dedicated option `TLSEnable`.
    if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify {
        return nil, nil
    }

    tlsConfig := &tls.Config{
        InsecureSkipVerify: c.InsecureSkipVerify,
        Renegotiation:      tls.RenegotiateNever,
    }

    if c.TLSCA != "" {
        pool, err := makeCertPool([]string{c.TLSCA})
        if err != nil {
            return nil, err
        }
        tlsConfig.RootCAs = pool
    }

    if c.TLSCert != "" && c.TLSKey != "" {
        err := loadCertificate(tlsConfig, c.TLSCert, c.TLSKey)
        if err != nil {
            return nil, err
        }
    }

    return tlsConfig, nil
}

// TLSConfig returns a tls.Config, may be nil without error if TLS is not
// configured.
func (c *ServerConfig) TLSConfig() (*tls.Config, error) {
    if c.TLSCert == "" && c.TLSKey == "" && len(c.TLSAllowedCACerts) == 0 {
        return nil, nil
    }

    tlsConfig := &tls.Config{}

    if len(c.TLSAllowedCACerts) != 0 {
        pool, err := makeCertPool(c.TLSAllowedCACerts)
        if err != nil {
            return nil, err
        }
        tlsConfig.ClientCAs = pool
        tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
    }

    if c.TLSCert != "" && c.TLSKey != "" {
        err := loadCertificate(tlsConfig, c.TLSCert, c.TLSKey)
        if err != nil {
            return nil, err
        }
    }

    return tlsConfig, nil
}

func makeCertPool(certFiles []string) (*x509.CertPool, error) {
    pool := x509.NewCertPool()
    for _, certFile := range certFiles {
        pem, err := ioutil.ReadFile(certFile)
        if err != nil {
            return nil, fmt.Errorf(
                "could not read certificate %q: %v", certFile, err)
        }
        ok := pool.AppendCertsFromPEM(pem)
        if !ok {
            return nil, fmt.Errorf(
                "could not parse any PEM certificates %q: %v", certFile, err)
        }
    }
    return pool, nil
}

func loadCertificate(config *tls.Config, certFile, keyFile string) error {
    cert, err := tls.LoadX509KeyPair(certFile, keyFile)
    if err != nil {
        return fmt.Errorf(
            "could not load keypair %s:%s: %v", certFile, keyFile, err)
    }

    config.Certificates = []tls.Certificate{cert}
    config.BuildNameToCertificate()
    return nil
}
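A hedged sketch of typical consumption: a component holds a ClientConfig, builds a *tls.Config (which may be nil with nil error when nothing is configured), and hands it to an http.Transport, mirroring TestConnect below. The file paths are placeholders, and the "internal" import only resolves inside the telegraf module:

package main

import (
    "net/http"
    "time"

    "github.com/influxdata/telegraf/internal/tls"
)

func main() {
    cc := tls.ClientConfig{
        TLSCA:   "/etc/telegraf/ca.pem",   // placeholder path
        TLSCert: "/etc/telegraf/cert.pem", // placeholder path
        TLSKey:  "/etc/telegraf/key.pem",  // placeholder path
    }

    tlsCfg, err := cc.TLSConfig() // nil config and nil error when unset
    if err != nil {
        panic(err)
    }

    client := &http.Client{
        Transport: &http.Transport{TLSClientConfig: tlsCfg},
        Timeout:   5 * time.Second,
    }
    _ = client // use the client as usual; a nil TLSClientConfig means plain defaults
}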
226  internal/tls/config_test.go  Normal file
@@ -0,0 +1,226 @@
package tls_test

import (
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "github.com/influxdata/telegraf/internal/tls"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

var pki = testutil.NewPKI("../../testutil/pki")

func TestClientConfig(t *testing.T) {
    tests := []struct {
        name   string
        client tls.ClientConfig
        expNil bool
        expErr bool
    }{
        {
            name:   "unset",
            client: tls.ClientConfig{},
            expNil: true,
        },
        {
            name: "success",
            client: tls.ClientConfig{
                TLSCA:   pki.CACertPath(),
                TLSCert: pki.ClientCertPath(),
                TLSKey:  pki.ClientKeyPath(),
            },
        },
        {
            name: "invalid ca",
            client: tls.ClientConfig{
                TLSCA:   pki.ClientKeyPath(),
                TLSCert: pki.ClientCertPath(),
                TLSKey:  pki.ClientKeyPath(),
            },
            expNil: true,
            expErr: true,
        },
        {
            name: "missing ca is okay",
            client: tls.ClientConfig{
                TLSCert: pki.ClientCertPath(),
                TLSKey:  pki.ClientKeyPath(),
            },
        },
        {
            name: "invalid cert",
            client: tls.ClientConfig{
                TLSCA:   pki.CACertPath(),
                TLSCert: pki.ClientKeyPath(),
                TLSKey:  pki.ClientKeyPath(),
            },
            expNil: true,
            expErr: true,
        },
        {
            name: "missing cert skips client keypair",
            client: tls.ClientConfig{
                TLSCA:  pki.CACertPath(),
                TLSKey: pki.ClientKeyPath(),
            },
            expNil: false,
            expErr: false,
        },
        {
            name: "missing key skips client keypair",
            client: tls.ClientConfig{
                TLSCA:   pki.CACertPath(),
                TLSCert: pki.ClientCertPath(),
            },
            expNil: false,
            expErr: false,
        },
        {
            name: "support deprecated ssl field names",
            client: tls.ClientConfig{
                SSLCA:   pki.CACertPath(),
                SSLCert: pki.ClientCertPath(),
                SSLKey:  pki.ClientKeyPath(),
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            tlsConfig, err := tt.client.TLSConfig()
            if !tt.expNil {
                require.NotNil(t, tlsConfig)
            } else {
                require.Nil(t, tlsConfig)
            }

            if !tt.expErr {
                require.NoError(t, err)
            } else {
                require.Error(t, err)
            }
        })
    }
}

func TestServerConfig(t *testing.T) {
    tests := []struct {
        name   string
        server tls.ServerConfig
        expNil bool
        expErr bool
    }{
        {
            name:   "unset",
            server: tls.ServerConfig{},
            expNil: true,
        },
        {
            name: "success",
            server: tls.ServerConfig{
                TLSCert:           pki.ServerCertPath(),
                TLSKey:            pki.ServerKeyPath(),
                TLSAllowedCACerts: []string{pki.CACertPath()},
            },
        },
        {
            name: "invalid ca",
            server: tls.ServerConfig{
                TLSCert:           pki.ServerCertPath(),
                TLSKey:            pki.ServerKeyPath(),
                TLSAllowedCACerts: []string{pki.ServerKeyPath()},
            },
            expNil: true,
            expErr: true,
        },
        {
            name: "missing allowed ca is okay",
            server: tls.ServerConfig{
                TLSCert: pki.ServerCertPath(),
                TLSKey:  pki.ServerKeyPath(),
            },
            expNil: true,
            expErr: true,
        },
        {
            name: "invalid cert",
            server: tls.ServerConfig{
                TLSCert:           pki.ServerKeyPath(),
                TLSKey:            pki.ServerKeyPath(),
                TLSAllowedCACerts: []string{pki.CACertPath()},
            },
            expNil: true,
            expErr: true,
        },
        {
            name: "missing cert",
            server: tls.ServerConfig{
                TLSKey:            pki.ServerKeyPath(),
                TLSAllowedCACerts: []string{pki.CACertPath()},
            },
            expNil: true,
            expErr: true,
        },
        {
            name: "missing key",
            server: tls.ServerConfig{
                TLSCert:           pki.ServerCertPath(),
                TLSAllowedCACerts: []string{pki.CACertPath()},
            },
            expNil: true,
            expErr: true,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            tlsConfig, err := tt.server.TLSConfig()
            if !tt.expNil {
                require.NotNil(t, tlsConfig)
            }
            if !tt.expErr {
                require.NoError(t, err)
            }
        })
    }
}

func TestConnect(t *testing.T) {
    clientConfig := tls.ClientConfig{
        TLSCA:   pki.CACertPath(),
        TLSCert: pki.ClientCertPath(),
        TLSKey:  pki.ClientKeyPath(),
    }

    serverConfig := tls.ServerConfig{
        TLSCert:           pki.ServerCertPath(),
        TLSKey:            pki.ServerKeyPath(),
        TLSAllowedCACerts: []string{pki.CACertPath()},
    }

    serverTLSConfig, err := serverConfig.TLSConfig()
    require.NoError(t, err)

    ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
    }))
    ts.TLS = serverTLSConfig

    ts.StartTLS()
    defer ts.Close()

    clientTLSConfig, err := clientConfig.TLSConfig()
    require.NoError(t, err)

    client := http.Client{
        Transport: &http.Transport{
            TLSClientConfig: clientTLSConfig,
        },
        Timeout: 10 * time.Second,
    }

    resp, err := client.Get(ts.URL)
    require.NoError(t, err)
    require.Equal(t, 200, resp.StatusCode)
}
45  internal/usage.go  Normal file
@@ -0,0 +1,45 @@
// +build !windows

package internal

const Usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf [commands|flags]

The commands & flags are:

  config              print out full sample configuration to stdout
  version             print the version to stdout

  --config <file>     configuration file to load
  --test              gather metrics once, print them to stdout, and exit
  --config-directory  directory containing additional *.conf files
  --input-filter      filter the input plugins to enable, separator is :
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
  --quiet             run in quiet mode

Examples:

  # generate a telegraf config file:
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputing metrics to stdout
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`
54  internal/usage_windows.go  Normal file
@@ -0,0 +1,54 @@
// +build windows

package internal

const Usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf [commands|flags]

The commands & flags are:

  config              print out full sample configuration to stdout
  version             print the version to stdout

  --config <file>     configuration file to load
  --test              gather metrics once, print them to stdout, and exit
  --config-directory  directory containing additional *.conf files
  --input-filter      filter the input plugins to enable, separator is :
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
  --quiet             run in quiet mode

  --console           run as console application
  --service           operate on service, one of: install, uninstall, start, stop

Examples:

  # generate a telegraf config file:
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputing metrics to stdout
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060

  # run telegraf without service controller
  telegraf --console install --config "C:\Program Files\Telegraf\telegraf.conf"

  # install telegraf service
  telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf"
`
60  metric.go
@@ -17,48 +17,52 @@ const (
    Histogram
)

type Tag struct {
    Key   string
    Value string
}

type Field struct {
    Key   string
    Value interface{}
}

type Metric interface {
    // Serialize serializes the metric into a line-protocol byte buffer,
    // including a newline at the end.
    Serialize() []byte
    // same as Serialize, but avoids an allocation.
    // returns number of bytes copied into dst.
    SerializeTo(dst []byte) int
    // String is the same as Serialize, but returns a string.
    String() string
    // Copy deep-copies the metric.
    Copy() Metric
    // Split will attempt to return multiple metrics with the same timestamp
    // whose string representations are no longer than maxSize.
    // Metrics with a single field may exceed the requested size.
    Split(maxSize int) []Metric
    // Getting data structure functions
    Name() string
    Tags() map[string]string
    TagList() []*Tag
    Fields() map[string]interface{}
    FieldList() []*Field
    Time() time.Time
    Type() ValueType

    // Name functions
    SetName(name string)
    AddPrefix(prefix string)
    AddSuffix(suffix string)

    // Tag functions
    GetTag(key string) (string, bool)
    HasTag(key string) bool
    AddTag(key, value string)
    RemoveTag(key string)

    // Field functions
    GetField(key string) (interface{}, bool)
    HasField(key string) bool
    AddField(key string, value interface{})
    RemoveField(key string) error
    RemoveField(key string)

    // Name functions
    SetName(name string)
    SetPrefix(prefix string)
    SetSuffix(suffix string)
    SetTime(t time.Time)

    // Getting data structure functions
    Name() string
    Tags() map[string]string
    Fields() map[string]interface{}
    Time() time.Time
    UnixNano() int64
    Type() ValueType
    Len() int // returns the length of the serialized metric, including newline
    // HashID returns a unique identifier for the series.
    HashID() uint64

    // aggregator things:
    // Copy returns a deep copy of the Metric.
    Copy() Metric

    // Mark Metric as an aggregate
    SetAggregate(bool)
    IsAggregate() bool
}

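The interface above adds ordered accessors (TagList, FieldList) beside the existing map accessors. A sketch of consuming both, runnable inside the telegraf module at this revision:

package main

import (
    "fmt"
    "time"

    "github.com/influxdata/telegraf/metric"
)

func main() {
    m, err := metric.New("cpu",
        map[string]string{"host": "web01"},
        map[string]interface{}{"user": 10.5, "idle": 80.1},
        time.Now(),
    )
    if err != nil {
        panic(err)
    }

    for _, tag := range m.TagList() { // tags are kept sorted by key
        fmt.Println(tag.Key, "=", tag.Value)
    }
    for _, field := range m.FieldList() {
        fmt.Println(field.Key, "=", field.Value)
    }
}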
53  metric/builder.go  Normal file
@@ -0,0 +1,53 @@
package metric

import (
    "time"

    "github.com/influxdata/telegraf"
)

type TimeFunc func() time.Time

type Builder struct {
    TimeFunc
    TimePrecision time.Duration

    *metric
}

func NewBuilder() *Builder {
    b := &Builder{
        TimeFunc:      time.Now,
        TimePrecision: 1 * time.Nanosecond,
    }
    b.Reset()
    return b
}

func (b *Builder) SetName(name string) {
    b.name = name
}

func (b *Builder) AddTag(key string, value string) {
    b.metric.AddTag(key, value)
}

func (b *Builder) AddField(key string, value interface{}) {
    b.metric.AddField(key, value)
}

func (b *Builder) SetTime(tm time.Time) {
    b.tm = tm
}

func (b *Builder) Reset() {
    b.metric = &metric{}
}

func (b *Builder) Metric() (telegraf.Metric, error) {
    if b.tm.IsZero() {
        b.tm = b.TimeFunc().Truncate(b.TimePrecision)
    }

    return b.metric, nil
}
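A usage sketch of the Builder above, using only the methods it defines; when SetTime is not called, Metric stamps the value with TimeFunc truncated to TimePrecision:

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/metric"
)

func main() {
    b := metric.NewBuilder()
    b.SetName("cpu")
    b.AddTag("host", "web01")
    b.AddField("usage", 99.0)

    m, err := b.Metric() // time defaults to time.Now() truncated to 1ns
    if err != nil {
        panic(err)
    }
    fmt.Println(m.Name(), m.Tags(), m.Fields())
}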
@@ -1,55 +0,0 @@
package metric

import (
    "strings"
)

var (
    // escaper is for escaping:
    //   - tag keys
    //   - tag values
    //   - field keys
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
    unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

    // nameEscaper is for escaping measurement names only.
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    nameEscaper   = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
    nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)

    // stringFieldEscaper is for escaping string field values only.
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    stringFieldEscaper = strings.NewReplacer(
        `"`, `\"`,
        `\`, `\\`,
    )
    stringFieldUnEscaper = strings.NewReplacer(
        `\"`, `"`,
        `\\`, `\`,
    )
)

func escape(s string, t string) string {
    switch t {
    case "fieldkey", "tagkey", "tagval":
        return escaper.Replace(s)
    case "name":
        return nameEscaper.Replace(s)
    case "fieldval":
        return stringFieldEscaper.Replace(s)
    }
    return s
}

func unescape(s string, t string) string {
    switch t {
    case "fieldkey", "tagkey", "tagval":
        return unEscaper.Replace(s)
    case "name":
        return nameUnEscaper.Replace(s)
    case "fieldval":
        return stringFieldUnEscaper.Replace(s)
    }
    return s
}
@@ -1,38 +0,0 @@
package metric

import (
    "reflect"
    "strconv"
    "unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
    s := unsafeBytesToString(b)
    return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
    s := unsafeBytesToString(b)
    return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
    return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
    src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
    dst := reflect.StringHeader{
        Data: src.Data,
        Len:  src.Len,
    }
    s := *(*string)(unsafe.Pointer(&dst))
    return s
}
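The removed helper above is unsafe because the returned string aliases the byte slice's backing array; mutating the slice afterwards silently changes the string. A small demonstration of that hazard, using the same technique:

package main

import (
    "fmt"
    "reflect"
    "unsafe"
)

// Same technique as the removed unsafeBytesToString helper.
func bytesToString(in []byte) string {
    src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
    dst := reflect.StringHeader{Data: src.Data, Len: src.Len}
    return *(*string)(unsafe.Pointer(&dst))
}

func main() {
    b := []byte("123")
    s := bytesToString(b)
    b[0] = '9'
    fmt.Println(s) // "923": the "immutable" string changed underneath us
}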
@@ -1,103 +0,0 @@
package metric

import (
    "strconv"
    "testing"
    "testing/quick"
)

func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
    f := func(b []byte, base int, bitSize int) bool {
        exp, expErr := strconv.ParseInt(string(b), base, bitSize)
        got, gotErr := parseIntBytes(b, base, bitSize)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
    buf := []byte{}
    f := func(n int64) bool {
        buf = strconv.AppendInt(buf[:0], n, 10)

        exp, expErr := strconv.ParseInt(string(buf), 10, 64)
        got, gotErr := parseIntBytes(buf, 10, 64)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
    f := func(b []byte, bitSize int) bool {
        exp, expErr := strconv.ParseFloat(string(b), bitSize)
        got, gotErr := parseFloatBytes(b, bitSize)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
    buf := []byte{}
    f := func(n float64) bool {
        buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)

        exp, expErr := strconv.ParseFloat(string(buf), 64)
        got, gotErr := parseFloatBytes(buf, 64)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseBoolBytesEquivalence(t *testing.T) {
    var buf []byte
    for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
        buf = append(buf[:0], s...)

        exp, expErr := strconv.ParseBool(s)
        got, gotErr := parseBoolBytes(buf)

        if got != exp || !checkErrs(expErr, gotErr) {
            t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
        }
    }
}

func checkErrs(a, b error) bool {
    if (a == nil) != (b == nil) {
        return false
    }

    return a == nil || a.Error() == b.Error()
}
793  metric/metric.go
@@ -1,623 +1,286 @@
package metric
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
const MaxInt = int(^uint(0) >> 1)
|
||||
type metric struct {
|
||||
name string
|
||||
tags []*telegraf.Tag
|
||||
fields []*telegraf.Field
|
||||
tm time.Time
|
||||
|
||||
tp telegraf.ValueType
|
||||
aggregate bool
|
||||
}
|
||||
|
||||
func New(
|
||||
name string,
|
||||
tags map[string]string,
|
||||
fields map[string]interface{},
|
||||
t time.Time,
|
||||
mType ...telegraf.ValueType,
|
||||
tm time.Time,
|
||||
tp ...telegraf.ValueType,
|
||||
) (telegraf.Metric, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, fmt.Errorf("missing measurement name")
|
||||
}
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("%s: must have one or more fields", name)
|
||||
}
|
||||
if strings.HasSuffix(name, `\`) {
|
||||
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
|
||||
}
|
||||
|
||||
var thisType telegraf.ValueType
|
||||
if len(mType) > 0 {
|
||||
thisType = mType[0]
|
||||
var vtype telegraf.ValueType
|
||||
if len(tp) > 0 {
|
||||
vtype = tp[0]
|
||||
} else {
|
||||
thisType = telegraf.Untyped
|
||||
vtype = telegraf.Untyped
|
||||
}
|
||||
|
||||
m := &metric{
|
||||
name: []byte(escape(name, "name")),
|
||||
t: []byte(fmt.Sprint(t.UnixNano())),
|
||||
nsec: t.UnixNano(),
|
||||
mType: thisType,
|
||||
name: name,
|
||||
tags: nil,
|
||||
fields: nil,
|
||||
tm: tm,
|
||||
tp: vtype,
|
||||
}
|
||||
|
||||
// pre-allocate exact size of the tags slice
|
||||
taglen := 0
|
||||
for k, v := range tags {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
|
||||
if len(tags) > 0 {
|
||||
m.tags = make([]*telegraf.Tag, 0, len(tags))
|
||||
for k, v := range tags {
|
||||
m.tags = append(m.tags,
|
||||
&telegraf.Tag{Key: k, Value: v})
|
||||
}
|
||||
if strings.HasSuffix(v, `\`) {
|
||||
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
|
||||
}
|
||||
|
||||
if len(k) == 0 || len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
|
||||
}
|
||||
m.tags = make([]byte, taglen)
|
||||
|
||||
i := 0
|
||||
for k, v := range tags {
|
||||
if len(k) == 0 || len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
m.tags[i] = ','
|
||||
i++
|
||||
i += copy(m.tags[i:], escape(k, "tagkey"))
|
||||
m.tags[i] = '='
|
||||
i++
|
||||
i += copy(m.tags[i:], escape(v, "tagval"))
|
||||
sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
|
||||
}
|
||||
|
||||
// pre-allocate capacity of the fields slice
|
||||
fieldlen := 0
|
||||
for k, _ := range fields {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
|
||||
}
|
||||
|
||||
// 10 bytes is completely arbitrary, but will at least prevent some
|
||||
// amount of allocations. There's a small possibility this will create
|
||||
// slightly more allocations for a metric that has many short fields.
|
||||
fieldlen += len(k) + 10
|
||||
}
|
||||
m.fields = make([]byte, 0, fieldlen)
|
||||
|
||||
i = 0
|
||||
m.fields = make([]*telegraf.Field, 0, len(fields))
|
||||
for k, v := range fields {
|
||||
if i != 0 {
|
||||
m.fields = append(m.fields, ',')
|
||||
v := convertField(v)
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
m.fields = appendField(m.fields, k, v)
|
||||
i++
|
||||
m.AddField(k, v)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// indexUnescapedByte finds the index of the first byte equal to b in buf that
|
||||
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
|
||||
// not found.
|
||||
func indexUnescapedByte(buf []byte, b byte) int {
|
||||
var keyi int
|
||||
for {
|
||||
i := bytes.IndexByte(buf[keyi:], b)
|
||||
if i == -1 {
|
||||
return -1
|
||||
} else if i == 0 {
|
||||
break
|
||||
}
|
||||
keyi += i
|
||||
if buf[keyi-1] != '\\' {
|
||||
break
|
||||
} else {
|
||||
keyi++
|
||||
}
|
||||
}
|
||||
return keyi
|
||||
}
|
||||
|
||||
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
|
||||
// to b in buf that is not escaped. Allows for the escape char `\` to be
|
||||
// escaped. Returns -1 if not found.
|
||||
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
|
||||
var keyi int
|
||||
for {
|
||||
i := bytes.IndexByte(buf[keyi:], b)
|
||||
if i == -1 {
|
||||
return -1
|
||||
} else if i == 0 {
|
||||
break
|
||||
}
|
||||
keyi += i
|
||||
if countBackslashes(buf, keyi-1)%2 == 0 {
|
||||
break
|
||||
} else {
|
||||
keyi++
|
||||
}
|
||||
}
|
||||
return keyi
|
||||
}
|
||||
|
||||
// countBackslashes counts the number of preceding backslashes starting at
|
||||
// the 'start' index.
|
||||
func countBackslashes(buf []byte, index int) int {
|
||||
var count int
|
||||
for {
|
||||
if index < 0 {
|
||||
return count
|
||||
}
|
||||
if buf[index] == '\\' {
|
||||
count++
|
||||
index--
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
type metric struct {
|
||||
name []byte
|
||||
tags []byte
|
||||
fields []byte
|
||||
t []byte
|
||||
|
||||
mType telegraf.ValueType
|
||||
aggregate bool
|
||||
|
||||
// cached values for reuse in "get" functions
|
||||
hashID uint64
|
||||
nsec int64
|
||||
}
|
||||
|
||||
func (m *metric) String() string {
|
||||
return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
|
||||
return fmt.Sprintf("%s %v %v %d", m.name, m.Tags(), m.Fields(), m.tm.UnixNano())
|
||||
}
|
||||
|
||||
func (m *metric) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
func (m *metric) Tags() map[string]string {
|
||||
tags := make(map[string]string, len(m.tags))
|
||||
for _, tag := range m.tags {
|
||||
tags[tag.Key] = tag.Value
|
||||
}
|
||||
return tags
|
||||
}
|
||||
|
||||
func (m *metric) TagList() []*telegraf.Tag {
|
||||
return m.tags
|
||||
}
|
||||
|
||||
func (m *metric) Fields() map[string]interface{} {
|
||||
fields := make(map[string]interface{}, len(m.fields))
|
||||
for _, field := range m.fields {
|
||||
fields[field.Key] = field.Value
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
func (m *metric) FieldList() []*telegraf.Field {
|
||||
return m.fields
|
||||
}
|
||||
|
||||
func (m *metric) Time() time.Time {
|
||||
return m.tm
|
||||
}
|
||||
|
||||
func (m *metric) Type() telegraf.ValueType {
|
||||
return m.tp
|
||||
}
|
||||
|
||||
func (m *metric) SetName(name string) {
|
||||
m.name = name
|
||||
}
|
||||
|
||||
func (m *metric) AddPrefix(prefix string) {
|
||||
m.name = prefix + m.name
|
||||
}
|
||||
|
||||
func (m *metric) AddSuffix(suffix string) {
|
||||
m.name = m.name + suffix
|
||||
}
|
||||
|
||||
func (m *metric) AddTag(key, value string) {
|
||||
for i, tag := range m.tags {
|
||||
if key > tag.Key {
|
||||
continue
|
||||
}
|
||||
|
||||
if key == tag.Key {
|
||||
tag.Value = value
|
||||
return
|
||||
}
|
||||
|
||||
m.tags = append(m.tags, nil)
|
||||
copy(m.tags[i+1:], m.tags[i:])
|
||||
m.tags[i] = &telegraf.Tag{Key: key, Value: value}
|
||||
return
|
||||
}
|
||||
|
||||
m.tags = append(m.tags, &telegraf.Tag{Key: key, Value: value})
|
||||
}
|
||||
|
||||
func (m *metric) HasTag(key string) bool {
|
||||
for _, tag := range m.tags {
|
||||
if tag.Key == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *metric) GetTag(key string) (string, bool) {
|
||||
for _, tag := range m.tags {
|
||||
if tag.Key == key {
|
||||
return tag.Value, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (m *metric) RemoveTag(key string) {
|
||||
for i, tag := range m.tags {
|
||||
if tag.Key == key {
|
||||
copy(m.tags[i:], m.tags[i+1:])
|
||||
m.tags[len(m.tags)-1] = nil
|
||||
m.tags = m.tags[:len(m.tags)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metric) AddField(key string, value interface{}) {
|
||||
for i, field := range m.fields {
|
||||
if key == field.Key {
|
||||
m.fields[i] = &telegraf.Field{Key: key, Value: convertField(value)}
|
||||
}
|
||||
}
|
||||
m.fields = append(m.fields, &telegraf.Field{Key: key, Value: convertField(value)})
|
||||
}
|
||||
|
||||
func (m *metric) HasField(key string) bool {
    for _, field := range m.fields {
        if field.Key == key {
            return true
        }
    }
    return false
}

func (m *metric) GetField(key string) (interface{}, bool) {
    for _, field := range m.fields {
        if field.Key == key {
            return field.Value, true
        }
    }
    return nil, false
}

func (m *metric) RemoveField(key string) {
    for i, field := range m.fields {
        if field.Key == key {
            copy(m.fields[i:], m.fields[i+1:])
            m.fields[len(m.fields)-1] = nil
            m.fields = m.fields[:len(m.fields)-1]
            return
        }
    }
}

func (m *metric) SetTime(t time.Time) {
    m.tm = t
}

func (m *metric) Copy() telegraf.Metric {
    m2 := &metric{
        name:      m.name,
        tags:      make([]*telegraf.Tag, len(m.tags)),
        fields:    make([]*telegraf.Field, len(m.fields)),
        tm:        m.tm,
        tp:        m.tp,
        aggregate: m.aggregate,
    }

    for i, tag := range m.tags {
        m2.tags[i] = tag
    }

    for i, field := range m.fields {
        m2.fields[i] = field
    }
    return m2
}

func (m *metric) SetAggregate(b bool) {
    m.aggregate = b
}

func (m *metric) IsAggregate() bool {
    return m.aggregate
}

func (m *metric) Type() telegraf.ValueType {
    return m.mType
}

func (m *metric) Len() int {
    // 3 is for 2 spaces surrounding the fields array + newline at the end.
    return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}

func (m *metric) Serialize() []byte {
    tmp := make([]byte, m.Len())
    i := 0
    i += copy(tmp[i:], m.name)
    i += copy(tmp[i:], m.tags)
    tmp[i] = ' '
    i++
    i += copy(tmp[i:], m.fields)
    tmp[i] = ' '
    i++
    i += copy(tmp[i:], m.t)
    tmp[i] = '\n'
    return tmp
}

func (m *metric) SerializeTo(dst []byte) int {
    i := 0
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.name)
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.tags)
    if i >= len(dst) {
        return i
    }

    dst[i] = ' '
    i++
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.fields)
    if i >= len(dst) {
        return i
    }

    dst[i] = ' '
    i++
    if i >= len(dst) {
        return i
    }

    i += copy(dst[i:], m.t)
    if i >= len(dst) {
        return i
    }
    dst[i] = '\n'

    return i + 1
}

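// Usage sketch: SerializeTo writes as much of the metric as fits into dst and
// returns the byte count, so callers detect truncation by comparing the result
// against Len().
//
//	buf := make([]byte, 64)
//	n := m.SerializeTo(buf)
//	if n < m.Len() {
//		// buf was too small; the serialized metric was cut off
//	}
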
func (m *metric) Split(maxSize int) []telegraf.Metric {
    if m.Len() <= maxSize {
        return []telegraf.Metric{m}
    }
    var out []telegraf.Metric

    // constant number of bytes for each metric (in addition to field bytes)
    constant := len(m.name) + len(m.tags) + len(m.t) + 3
    // currently selected fields
    fields := make([]byte, 0, maxSize)

    i := 0
    for {
        if i >= len(m.fields) {
            // hit the end of the field byte slice
            if len(fields) > 0 {
                out = append(out, copyWith(m.name, m.tags, fields, m.t))
            }
            break
        }

        // find the end of the next field
        j := indexUnescapedByte(m.fields[i:], ',')
        if j == -1 {
            j = len(m.fields)
        } else {
            j += i
        }

        // if true, then we need to create a metric _not_ including the currently
        // selected field
        if len(m.fields[i:j])+len(fields)+constant >= maxSize {
            // if false, then we'll create a metric including the currently
            // selected field anyways. This means that the given maxSize is too
            // small for a single field to fit.
            if len(fields) > 0 {
                out = append(out, copyWith(m.name, m.tags, fields, m.t))
            }

            fields = make([]byte, 0, maxSize)
        }
        if len(fields) > 0 {
            fields = append(fields, ',')
        }
        fields = append(fields, m.fields[i:j]...)

        i = j + 1
    }
    return out
}

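// Usage sketch, assuming a metric built with this package's New constructor:
// Split packs whole fields greedily into chunks of at most maxSize bytes, and
// a field that cannot fit on its own is still emitted rather than dropped.
//
//	m, _ := New("cpu", map[string]string{"host": "localhost"},
//		map[string]interface{}{"a": 1.0, "b": 2.0, "c": 3.0}, time.Now())
//	for _, part := range m.Split(60) {
//		fmt.Print(part.String()) // each part stays within 60 bytes where possible
//	}
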
func (m *metric) Fields() map[string]interface{} {
    fieldMap := map[string]interface{}{}
    i := 0
    for {
        if i >= len(m.fields) {
            break
        }
        // end index of field key
        i1 := indexUnescapedByte(m.fields[i:], '=')
        if i1 == -1 {
            break
        }
        // start index of field value
        i2 := i1 + 1

        // end index of field value
        var i3 int
        if m.fields[i:][i2] == '"' {
            i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
            if i3 == -1 {
                i3 = len(m.fields[i:])
            }
            i3 += i2 + 2 // increment index to the comma
        } else {
            i3 = indexUnescapedByte(m.fields[i:], ',')
            if i3 == -1 {
                i3 = len(m.fields[i:])
            }
        }

        switch m.fields[i:][i2] {
        case '"':
            // string field
            fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
        case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
            // number field
            switch m.fields[i:][i3-1] {
            case 'i':
                // integer field
                n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
                if err == nil {
                    fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
                } else {
                    // TODO handle error or just ignore field silently?
                }
            default:
                // float field
                n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
                if err == nil {
                    fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
                } else {
                    // TODO handle error or just ignore field silently?
                }
            }
        case 'T', 't':
            fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
        case 'F', 'f':
            fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
        default:
            // TODO handle unsupported field type
        }

        i += i3 + 1
    }

    return fieldMap
}

func (m *metric) Tags() map[string]string {
    tagMap := map[string]string{}
    if len(m.tags) == 0 {
        return tagMap
    }

    i := 0
    for {
        // start index of tag key
        i0 := indexUnescapedByte(m.tags[i:], ',') + 1
        if i0 == 0 {
            // didn't find a tag start
            break
        }
        // end index of tag key
        i1 := indexUnescapedByte(m.tags[i:], '=')
        // start index of tag value
        i2 := i1 + 1
        // end index of tag value (starting from i2)
        i3 := indexUnescapedByte(m.tags[i+i2:], ',')
        if i3 == -1 {
            tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
            break
        }
        tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
        // increment start index for the next tag
        i += i2 + i3
    }

    return tagMap
}

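// Note on the layout assumed above: the raw tag bytes always begin with a
// leading comma, exactly as they appear after the measurement name in line
// protocol. Sketch:
//
//	m := &metric{tags: []byte(",host=localhost,dc=east")}
//	fmt.Println(m.Tags()) // map[dc:east host:localhost]
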
func (m *metric) Name() string {
    return unescape(string(m.name), "name")
}

func (m *metric) Time() time.Time {
    // assume metric has been verified already and ignore error:
    if m.nsec == 0 {
        m.nsec, _ = parseIntBytes(m.t, 10, 64)
    }
    return time.Unix(0, m.nsec)
}

func (m *metric) UnixNano() int64 {
    // assume metric has been verified already and ignore error:
    if m.nsec == 0 {
        m.nsec, _ = parseIntBytes(m.t, 10, 64)
    }
    return m.nsec
}

func (m *metric) SetName(name string) {
    m.hashID = 0
    m.name = []byte(nameEscaper.Replace(name))
}

func (m *metric) SetPrefix(prefix string) {
    m.hashID = 0
    m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}

func (m *metric) SetSuffix(suffix string) {
    m.hashID = 0
    m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}

func (m *metric) AddTag(key, value string) {
    m.RemoveTag(key)
    m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}

func (m *metric) HasTag(key string) bool {
    i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return false
    }
    return true
}

func (m *metric) RemoveTag(key string) {
    m.hashID = 0

    i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return
    }

    tmp := m.tags[0 : i-1]
    j := indexUnescapedByte(m.tags[i:], ',')
    if j != -1 {
        tmp = append(tmp, m.tags[i+j:]...)
    }
    m.tags = tmp
    return
}

func (m *metric) AddField(key string, value interface{}) {
    m.fields = append(m.fields, ',')
    m.fields = appendField(m.fields, key, value)
}

func (m *metric) HasField(key string) bool {
    i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return false
    }
    return true
}

func (m *metric) RemoveField(key string) error {
    i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
    if i == -1 {
        return nil
    }

    var tmp []byte
    if i != 0 {
        tmp = m.fields[0 : i-1]
    }
    j := indexUnescapedByte(m.fields[i:], ',')
    if j != -1 {
        tmp = append(tmp, m.fields[i+j:]...)
    }

    if len(tmp) == 0 {
        return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
    }

    m.fields = tmp
    return nil
}

func (m *metric) Copy() telegraf.Metric {
    return copyWith(m.name, m.tags, m.fields, m.t)
}

func copyWith(name, tags, fields, t []byte) telegraf.Metric {
    out := metric{
        name:   make([]byte, len(name)),
        tags:   make([]byte, len(tags)),
        fields: make([]byte, len(fields)),
        t:      make([]byte, len(t)),
    }
    copy(out.name, name)
    copy(out.tags, tags)
    copy(out.fields, fields)
    copy(out.t, t)
    return &out
}

func (m *metric) HashID() uint64 {
    if m.hashID == 0 {
        h := fnv.New64a()
        h.Write(m.name)

        tags := m.Tags()
        tmp := make([]string, len(tags))
        i := 0
        for k, v := range tags {
            tmp[i] = k + v
            i++
        }
        sort.Strings(tmp)

        for _, s := range tmp {
            h.Write([]byte(s))
        }

        m.hashID = h.Sum64()
    }
    return m.hashID
}

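// HashID identifies a series: the measurement name plus the sorted tag set.
// Two metrics that differ only in field values or timestamp hash identically,
// which is what aggregators key on. A sketch, assuming New:
//
//	m1, _ := New("cpu", map[string]string{"host": "a"}, map[string]interface{}{"v": 1.0}, time.Now())
//	m2, _ := New("cpu", map[string]string{"host": "a"}, map[string]interface{}{"v": 9.0}, time.Now())
//	fmt.Println(m1.HashID() == m2.HashID()) // true: same name and tags
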
// appendField appends the line-protocol representation of a single field to b.
func appendField(b []byte, k string, v interface{}) []byte {
    if v == nil {
        return b
    }
    b = append(b, []byte(escape(k, "tagkey")+"=")...)

    // check popular types first
    switch v := v.(type) {
    case float64:
        b = strconv.AppendFloat(b, v, 'f', -1, 64)
    case int64:
        b = strconv.AppendInt(b, v, 10)
        b = append(b, 'i')
    case string:
        b = append(b, '"')
        b = append(b, []byte(escape(v, "fieldval"))...)
        b = append(b, '"')
    case bool:
        b = strconv.AppendBool(b, v)
    case int32:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case int16:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case int8:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case int:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint64:
        // Cap uints above the maximum int value
        var intv int64
        if v <= uint64(MaxInt) {
            intv = int64(v)
        } else {
            intv = int64(MaxInt)
        }
        b = strconv.AppendInt(b, intv, 10)
        b = append(b, 'i')
    case uint32:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint16:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint8:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint:
        // Cap uints above the maximum int value
        var intv int64
        if v <= uint(MaxInt) {
            intv = int64(v)
        } else {
            intv = int64(MaxInt)
        }
        b = strconv.AppendInt(b, intv, 10)
        b = append(b, 'i')
    case float32:
        b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
    case []byte:
        b = append(b, v...)
    default:
        // Can't determine the type, so convert to string
        b = append(b, '"')
        b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
        b = append(b, '"')
    }

    return b
}

// Convert field to a supported type or nil if unconvertible
func convertField(v interface{}) interface{} {
    switch v := v.(type) {
    case float64:
        return v
    case int64:
        return v
    case string:
        return v
    case bool:
        return v
    case int:
        return int64(v)
    case uint:
        return uint64(v)
    case uint64:
        return uint64(v)
    case []byte:
        return string(v)
    case int32:
        return int64(v)
    case int16:
        return int64(v)
    case int8:
        return int64(v)
    case uint32:
        return uint64(v)
    case uint16:
        return uint64(v)
    case uint8:
        return uint64(v)
    case float32:
        return float64(v)
    default:
        return nil
    }
}

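// Sketch of the normalization convertField performs for the list-based
// AddField above: every stored field value collapses to float64, int64,
// uint64, string, or bool, and unsupported types become nil.
//
//	convertField(int32(7))    // int64(7)
//	convertField(float32(1))  // float64(1)
//	convertField([]byte("x")) // "x"
//	convertField([]int{1, 2}) // nil
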
@@ -1,148 +0,0 @@
package metric

import (
    "fmt"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
)

// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
    s      string
    I      interface{}
    tags   map[string]string
    fields map[string]interface{}
)

func BenchmarkNewMetric(b *testing.B) {
    var mt telegraf.Metric
    for n := 0; n < b.N; n++ {
        mt, _ = New("test_metric",
            map[string]string{
                "test_tag_1": "tag_value_1",
                "test_tag_2": "tag_value_2",
                "test_tag_3": "tag_value_3",
            },
            map[string]interface{}{
                "string_field": "string",
                "int_field":    int64(1000),
                "float_field":  float64(2.1),
            },
            time.Now(),
        )
    }
    s = string(mt.String())
}

func BenchmarkAddTag(b *testing.B) {
    var mt telegraf.Metric
    mt = &metric{
        name:   []byte("cpu"),
        tags:   []byte(",host=localhost"),
        fields: []byte("a=101"),
        t:      []byte("1480614053000000000"),
    }
    for n := 0; n < b.N; n++ {
        mt.AddTag("foo", "bar")
    }
    s = string(mt.String())
}

func BenchmarkSplit(b *testing.B) {
    var mt telegraf.Metric
    mt = &metric{
        name:   []byte("cpu"),
        tags:   []byte(",host=localhost"),
        fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
        t:      []byte("1480614053000000000"),
    }
    var metrics []telegraf.Metric
    for n := 0; n < b.N; n++ {
        metrics = mt.Split(60)
    }
    s = string(metrics[0].String())
}

func BenchmarkTags(b *testing.B) {
    for n := 0; n < b.N; n++ {
        var mt, _ = New("test_metric",
            map[string]string{
                "test_tag_1": "tag_value_1",
                "test_tag_2": "tag_value_2",
                "test_tag_3": "tag_value_3",
            },
            map[string]interface{}{
                "string_field": "string",
                "int_field":    int64(1000),
                "float_field":  float64(2.1),
            },
            time.Now(),
        )
        tags = mt.Tags()
    }
    s = fmt.Sprint(tags)
}

func BenchmarkFields(b *testing.B) {
    for n := 0; n < b.N; n++ {
        var mt, _ = New("test_metric",
            map[string]string{
                "test_tag_1": "tag_value_1",
                "test_tag_2": "tag_value_2",
                "test_tag_3": "tag_value_3",
            },
            map[string]interface{}{
                "string_field": "string",
                "int_field":    int64(1000),
                "float_field":  float64(2.1),
            },
            time.Now(),
        )
        fields = mt.Fields()
    }
    s = fmt.Sprint(fields)
}

func BenchmarkString(b *testing.B) {
    mt, _ := New("test_metric",
        map[string]string{
            "test_tag_1": "tag_value_1",
            "test_tag_2": "tag_value_2",
            "test_tag_3": "tag_value_3",
        },
        map[string]interface{}{
            "string_field": "string",
            "int_field":    int64(1000),
            "float_field":  float64(2.1),
        },
        time.Now(),
    )
    var S string
    for n := 0; n < b.N; n++ {
        S = mt.String()
    }
    s = S
}

func BenchmarkSerialize(b *testing.B) {
    mt, _ := New("test_metric",
        map[string]string{
            "test_tag_1": "tag_value_1",
            "test_tag_2": "tag_value_2",
            "test_tag_3": "tag_value_3",
        },
        map[string]interface{}{
            "string_field": "string",
            "int_field":    int64(1000),
            "float_field":  float64(2.1),
        },
        time.Now(),
    )
    var B []byte
    for n := 0; n < b.N; n++ {
        B = mt.Serialize()
    }
    s = string(B)
}

@@ -1,14 +1,10 @@
package metric

import (
    "fmt"
    "math"
    "regexp"
    "testing"
    "time"

    "github.com/influxdata/telegraf"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -25,102 +21,185 @@ func TestNewMetric(t *testing.T) {
        "usage_busy": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    require.NoError(t, err)

    require.Equal(t, telegraf.Untyped, m.Type())
    require.Equal(t, "cpu", m.Name())
    require.Equal(t, tags, m.Tags())
    require.Equal(t, fields, m.Fields())
    require.Equal(t, 2, len(m.FieldList()))
    require.Equal(t, now, m.Time())
}

func TestNewErrors(t *testing.T) {
    // creating a metric with an empty name produces an error:
    m, err := New(
        "",
        map[string]string{
            "datacenter": "us-east-1",
            "mytag":      "foo",
            "another":    "tag",
        },
        map[string]interface{}{
            "value": float64(1),
        },
        time.Now(),
    )
    assert.Error(t, err)
    assert.Nil(t, m)

    // creating a metric with empty fields produces an error:
    m, err = New(
        "foobar",
        map[string]string{
            "datacenter": "us-east-1",
            "mytag":      "foo",
            "another":    "tag",
        },
        map[string]interface{}{},
        time.Now(),
    )
    assert.Error(t, err)
    assert.Nil(t, m)
}

func baseMetric() telegraf.Metric {
    tags := map[string]string{}
    fields := map[string]interface{}{
        "value": float64(1),
    }
    now := time.Now()

    m, err := New("cpu", tags, fields, now)
    if err != nil {
        panic(err)
    }
    return m
}

func TestHasTag(t *testing.T) {
    m := baseMetric()

    require.False(t, m.HasTag("host"))
    m.AddTag("host", "localhost")
    require.True(t, m.HasTag("host"))
    m.RemoveTag("host")
    require.False(t, m.HasTag("host"))
}

func TestAddTagOverwrites(t *testing.T) {
    m := baseMetric()

    m.AddTag("host", "localhost")
    m.AddTag("host", "example.org")

    value, ok := m.GetTag("host")
    require.True(t, ok)
    require.Equal(t, "example.org", value)
    require.Equal(t, 1, len(m.TagList()))
}

func TestRemoveTagNoEffectOnMissingTags(t *testing.T) {
    m := baseMetric()

    m.RemoveTag("foo")
    m.AddTag("a", "x")
    m.RemoveTag("foo")
    m.RemoveTag("bar")
    value, ok := m.GetTag("a")
    require.True(t, ok)
    require.Equal(t, "x", value)
}

func TestGetTag(t *testing.T) {
    m := baseMetric()

    value, ok := m.GetTag("host")
    require.False(t, ok)

    m.AddTag("host", "localhost")

    value, ok = m.GetTag("host")
    require.True(t, ok)
    require.Equal(t, "localhost", value)

    m.RemoveTag("host")
    value, ok = m.GetTag("host")
    require.False(t, ok)
}

func TestHasField(t *testing.T) {
    m := baseMetric()

    require.False(t, m.HasField("x"))
    m.AddField("x", 42.0)
    require.True(t, m.HasField("x"))
    m.RemoveField("x")
    require.False(t, m.HasField("x"))
}

func TestAddFieldOverwrites(t *testing.T) {
    m := baseMetric()

    m.AddField("value", 1.0)
    m.AddField("value", 42.0)

    value, ok := m.GetField("value")
    require.True(t, ok)
    require.Equal(t, 42.0, value)
}

func TestAddFieldChangesType(t *testing.T) {
    m := baseMetric()

    m.AddField("value", 1.0)
    m.AddField("value", "xyzzy")

    value, ok := m.GetField("value")
    require.True(t, ok)
    require.Equal(t, "xyzzy", value)
}

func TestRemoveFieldNoEffectOnMissingFields(t *testing.T) {
    m := baseMetric()

    m.RemoveField("foo")
    m.AddField("a", "x")
    m.RemoveField("foo")
    m.RemoveField("bar")
    value, ok := m.GetField("a")
    require.True(t, ok)
    require.Equal(t, "x", value)
}

func TestGetField(t *testing.T) {
    m := baseMetric()

    value, ok := m.GetField("foo")
    require.False(t, ok)

    m.AddField("foo", "bar")

    value, ok = m.GetField("foo")
    require.True(t, ok)
    require.Equal(t, "bar", value)

    m.RemoveField("foo")
    value, ok = m.GetField("foo")
    require.False(t, ok)
}

func TestTagList_Sorted(t *testing.T) {
    m := baseMetric()

    m.AddTag("b", "y")
    m.AddTag("c", "z")
    m.AddTag("a", "x")

    taglist := m.TagList()
    require.Equal(t, "a", taglist[0].Key)
    require.Equal(t, "b", taglist[1].Key)
    require.Equal(t, "c", taglist[2].Key)
}

func TestEquals(t *testing.T) {
    now := time.Now()

    m1, err := New("cpu",
        map[string]string{
            "host": "localhost",
        },
        map[string]interface{}{
            "value": 42.0,
        },
        now,
    )
    require.NoError(t, err)

    m2, err := New("cpu",
        map[string]string{
            "host": "localhost",
        },
        map[string]interface{}{
            "value": 42.0,
        },
        now,
    )
    require.NoError(t, err)

    lhs := m1.(*metric)
    require.Equal(t, lhs, m2)

    m3 := m2.Copy()
    require.Equal(t, lhs, m3)
    m3.AddTag("a", "x")
    require.NotEqual(t, lhs, m3)
}

func TestHashID(t *testing.T) {
|
||||
@@ -171,567 +250,88 @@ func TestHashID_Consistency(t *testing.T) {
|
||||
)
|
||||
hash := m.HashID()
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
m2, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"datacenter": "us-east-1",
|
||||
"mytag": "foo",
|
||||
"another": "tag",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
assert.Equal(t, hash, m2.HashID())
|
||||
}
|
||||
m2, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"datacenter": "us-east-1",
|
||||
"mytag": "foo",
|
||||
"another": "tag",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
assert.Equal(t, hash, m2.HashID())
|
||||
|
||||
m3 := m.Copy()
|
||||
assert.Equal(t, m2.HashID(), m3.HashID())
|
||||
}
|
||||
|
||||
func TestHashID_Delimiting(t *testing.T) {
    m1, _ := New(
        "cpu",
        map[string]string{
            "a": "x",
            "b": "y",
            "c": "z",
        },
        map[string]interface{}{
            "value": float64(1),
        },
        time.Now(),
    )
    m2, _ := New(
        "cpu",
        map[string]string{
            "a": "xbycz",
        },
        map[string]interface{}{
            "value": float64(1),
        },
        time.Now(),
    )
    assert.NotEqual(t, m1.HashID(), m2.HashID())
}

func TestSetName(t *testing.T) {
    m := baseMetric()
    m.SetName("foo")
    require.Equal(t, "foo", m.Name())
}

func TestAddPrefix(t *testing.T) {
    m := baseMetric()
    m.AddPrefix("foo_")
    require.Equal(t, "foo_cpu", m.Name())
    m.AddPrefix("foo_")
    require.Equal(t, "foo_foo_cpu", m.Name())
}

func TestAddSuffix(t *testing.T) {
    m := baseMetric()
    m.AddSuffix("_foo")
    require.Equal(t, "cpu_foo", m.Name())
    m.AddSuffix("_foo")
    require.Equal(t, "cpu_foo_foo", m.Name())
}

func TestNewMetric_NameModifiers(t *testing.T) {
    now := time.Now()

    tags := map[string]string{}
    fields := map[string]interface{}{
        "value": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    hash := m.HashID()
    suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
    assert.Equal(t, "cpu"+suffix, m.String())

    m.SetPrefix("pre_")
    assert.NotEqual(t, hash, m.HashID())
    hash = m.HashID()
    assert.Equal(t, "pre_cpu"+suffix, m.String())

    m.SetSuffix("_post")
    assert.NotEqual(t, hash, m.HashID())
    hash = m.HashID()
    assert.Equal(t, "pre_cpu_post"+suffix, m.String())

    m.SetName("mem")
    assert.NotEqual(t, hash, m.HashID())
    assert.Equal(t, "mem"+suffix, m.String())
}

func TestNewMetric_FieldModifiers(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "value": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.True(t, m.HasField("value"))
    assert.False(t, m.HasField("foo"))

    m.AddField("newfield", "foo")
    assert.True(t, m.HasField("newfield"))

    assert.NoError(t, m.RemoveField("newfield"))
    assert.False(t, m.HasField("newfield"))

    // don't allow user to remove all fields:
    assert.Error(t, m.RemoveField("value"))

    m.AddField("value2", int64(101))
    assert.NoError(t, m.RemoveField("value"))
    assert.False(t, m.HasField("value"))
}

func TestNewMetric_Fields(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":                  float64(1),
        "int":                    int64(1),
        "bool":                   true,
        "false":                  false,
        "string":                 "test",
        "quote_string":           `x"y`,
        "backslash_quote_string": `x\"y`,
        "backslash":              `x\y`,
        "ends_with_backslash":    `x\`,
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Equal(t, fields, m.Fields())
}

func TestNewMetric_Time(t *testing.T) {
    now := time.Now()
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(1),
        "int":    int64(1),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    m = m.Copy()
    m2 := m.Copy()

    assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
    assert.Equal(t, now.UnixNano(), m2.UnixNano())
}

func TestNewMetric_Copy(t *testing.T) {
    now := time.Now()
    tags := map[string]string{}
    fields := map[string]interface{}{
        "float": float64(1),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    m2 := m.Copy()

    assert.Equal(t,
        fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
        m.String())
    m.AddTag("host", "localhost")
    assert.Equal(t,
        fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
        m.String())

    assert.Equal(t,
        fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
        m2.String())
}

func TestNewMetric_AllTypes(t *testing.T) {
    now := time.Now()
    tags := map[string]string{}
    fields := map[string]interface{}{
        "float64":     float64(1),
        "float32":     float32(1),
        "int64":       int64(1),
        "int32":       int32(1),
        "int16":       int16(1),
        "int8":        int8(1),
        "int":         int(1),
        "uint64":      uint64(1),
        "uint32":      uint32(1),
        "uint16":      uint16(1),
        "uint8":       uint8(1),
        "uint":        uint(1),
        "bytes":       []byte("foo"),
        "nil":         nil,
        "maxuint64":   uint64(MaxInt) + 10,
        "maxuint":     uint(MaxInt) + 10,
        "unsupported": []int{1, 2},
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Contains(t, m.String(), "float64=1")
    assert.Contains(t, m.String(), "float32=1")
    assert.Contains(t, m.String(), "int64=1i")
    assert.Contains(t, m.String(), "int32=1i")
    assert.Contains(t, m.String(), "int16=1i")
    assert.Contains(t, m.String(), "int8=1i")
    assert.Contains(t, m.String(), "int=1i")
    assert.Contains(t, m.String(), "uint64=1i")
    assert.Contains(t, m.String(), "uint32=1i")
    assert.Contains(t, m.String(), "uint16=1i")
    assert.Contains(t, m.String(), "uint8=1i")
    assert.Contains(t, m.String(), "uint=1i")
    assert.NotContains(t, m.String(), "nil")
    assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
    assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}

func TestIndexUnescapedByte(t *testing.T) {
    tests := []struct {
        in       []byte
        b        byte
        expected int
    }{
        {
            in:       []byte(`foobar`),
            b:        'b',
            expected: 3,
        },
        {
            in:       []byte(`foo\bar`),
            b:        'b',
            expected: -1,
        },
        {
            in:       []byte(`foo\\bar`),
            b:        'b',
            expected: -1,
        },
        {
            in:       []byte(`foobar`),
            b:        'f',
            expected: 0,
        },
        {
            in:       []byte(`foobar`),
            b:        'r',
            expected: 5,
        },
        {
            in:       []byte(`\foobar`),
            b:        'f',
            expected: -1,
        },
    }

    for _, test := range tests {
        got := indexUnescapedByte(test.in, test.b)
        assert.Equal(t, test.expected, got)
    }
}

func TestNewGaugeMetric(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":       "localhost",
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
        "value":      float64(42),
    }
    m, err := New("cpu", tags, fields, now, telegraf.Gauge)
    assert.NoError(t, err)

    assert.Equal(t, telegraf.Gauge, m.Type())
    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
    assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":       "localhost",
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    m, err := New("cpu", tags, fields, now, telegraf.Counter)
    assert.NoError(t, err)

    assert.Equal(t, telegraf.Counter, m.Type())
    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
    assert.Equal(t, now.UnixNano(), m.UnixNano())
}

// test splitting metric into various max lengths
func TestSplitMetric(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    split80 := m.Split(80)
    assert.Len(t, split80, 2)

    split70 := m.Split(70)
    assert.Len(t, split70, 3)

    split60 := m.Split(60)
    assert.Len(t, split60, 5)
}

// test splitting metric into various max lengths
// use a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "foo":     float64(98934259085),
        "bar":     float64(19385292),
        "number":  float64(19385292),
        "another": float64(19385292),
        "n":       float64(19385292),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    // verification regex
    re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)

    split90 := m.Split(90)
    assert.Len(t, split90, 2)
    for _, splitM := range split90 {
        assert.True(t, re.Match(splitM.Serialize()), splitM.String())
    }

    split70 := m.Split(70)
    assert.Len(t, split70, 3)
    for _, splitM := range split70 {
        assert.True(t, re.Match(splitM.Serialize()), splitM.String())
    }

    split20 := m.Split(20)
    assert.Len(t, split20, 5)
    for _, splitM := range split20 {
        assert.True(t, re.Match(splitM.Serialize()), splitM.String())
    }
}

// test splitting metric even when given length is shorter than
// shortest possible length
// Split should split metric as short as possible, ie, 1 field per metric
func TestSplitMetric_TooShort(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    split := m.Split(10)
    assert.Len(t, split, 5)
    strings := make([]string, 5)
    for i, splitM := range split {
        strings[i] = splitM.String()
    }

    assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
    assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}

func TestSplitMetric_NoOp(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    split := m.Split(1000)
    assert.Len(t, split, 1)
    assert.Equal(t, m, split[0])
}

func TestSplitMetric_OneField(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float": float64(100001),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())

    split := m.Split(1000)
    assert.Len(t, split, 1)
    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

    split = m.Split(1)
    assert.Len(t, split, 1)
    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

    split = m.Split(40)
    assert.Len(t, split, 1)
    assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}

func TestSplitMetric_ExactSize(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float":  float64(100001),
        "int":    int64(100001),
        "bool":   true,
        "false":  false,
        "string": "test",
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    actual := m.Split(m.Len())
    // check that no copy was made
    require.Equal(t, &m, &actual[0])
}

func TestSplitMetric_NoRoomForNewline(t *testing.T) {
    now := time.Unix(0, 1480940990034083306)
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "float": float64(100001),
        "int":   int64(100001),
        "bool":  true,
        "false": false,
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
    actual := m.Split(m.Len() - 1)
    require.Equal(t, 2, len(actual))
}

func TestNewMetricAggregate(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.False(t, m.IsAggregate())
    m.SetAggregate(true)
    assert.True(t, m.IsAggregate())
}

func TestNewMetricString(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)

    lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
        now.UnixNano())
    assert.Equal(t, lineProto, m.String())
}

func TestNewMetricFailNaN(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": math.NaN(),
    }

    _, err := New("cpu", tags, fields, now)
    assert.NoError(t, err)
}

func TestEmptyTagValueOrKey(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":     "localhost",
        "emptytag": "",
        "":         "valuewithoutkey",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := New("cpu", tags, fields, now)

    assert.True(t, m.HasTag("host"))
    assert.False(t, m.HasTag("emptytag"))
    assert.Equal(t,
        fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
        m.String())

    assert.NoError(t, err)
}

func TestNewMetric_TrailingSlash(t *testing.T) {
    now := time.Now()

    tests := []struct {
        name   string
        tags   map[string]string
        fields map[string]interface{}
    }{
        {
            name: `cpu\`,
            fields: map[string]interface{}{
                "value": int64(42),
            },
        },
        {
            name: "cpu",
            fields: map[string]interface{}{
                `value\`: "x",
            },
        },
        {
            name: "cpu",
            tags: map[string]string{
                `host\`: "localhost",
            },
            fields: map[string]interface{}{
                "value": int64(42),
            },
        },
        {
            name: "cpu",
            tags: map[string]string{
                "host": `localhost\`,
            },
            fields: map[string]interface{}{
                "value": int64(42),
            },
        },
    }

    for _, tc := range tests {
        _, err := New(tc.name, tc.tags, tc.fields, now)
        assert.Error(t, err)
    }
}

func TestCopyAggregate(t *testing.T) {
    m1 := baseMetric()
    m1.SetAggregate(true)
    m2 := m1.Copy()
    assert.True(t, m2.IsAggregate())
}

metric/parse.go
@@ -1,680 +0,0 @@
package metric

import (
    "bytes"
    "errors"
    "fmt"
    "strconv"
    "time"

    "github.com/influxdata/telegraf"
)

var (
    ErrInvalidNumber = errors.New("invalid number")
)

const (
    // the number of characters for the largest possible int64 (9223372036854775807)
    maxInt64Digits = 19

    // the number of characters for the smallest possible int64 (-9223372036854775808)
    minInt64Digits = 20

    // the number of characters required for the largest float64 before a range check
    // would occur during parsing
    maxFloat64Digits = 25

    // the number of characters required for the smallest float64 before a range check
    // would occur during parsing
    minFloat64Digits = 27

    MaxKeyLength = 65535
)

// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
    tagKeyState = iota
    tagValueState
    fieldsState
)

func Parse(buf []byte) ([]telegraf.Metric, error) {
    return ParseWithDefaultTimePrecision(buf, time.Now(), "")
}

func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
    return ParseWithDefaultTimePrecision(buf, t, "")
}

func ParseWithDefaultTimePrecision(
    buf []byte,
    t time.Time,
    precision string,
) ([]telegraf.Metric, error) {
    if len(buf) == 0 {
        return []telegraf.Metric{}, nil
    }
    if len(buf) <= 6 {
        return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
    }
    metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
    var errStr string
    i := 0
    for {
        j := bytes.IndexByte(buf[i:], '\n')
        if j == -1 {
            break
        }
        if len(buf[i:i+j]) < 2 {
            i += j + 1 // increment i past the previous newline
            continue
        }

        m, err := parseMetric(buf[i:i+j], t, precision)
        if err != nil {
            i += j + 1 // increment i past the previous newline
            errStr += " " + err.Error()
            continue
        }
        i += j + 1 // increment i past the previous newline

        metrics = append(metrics, m)
    }

    if len(errStr) > 0 {
        return metrics, fmt.Errorf(errStr)
    }
    return metrics, nil
}

func parseMetric(buf []byte,
    defaultTime time.Time,
    precision string,
) (telegraf.Metric, error) {
    var dTime string
    // scan the first block which is measurement[,tag1=value1,tag2=value2...]
    pos, key, err := scanKey(buf, 0)
    if err != nil {
        return nil, err
    }

    // measurement name is required
    if len(key) == 0 {
        return nil, fmt.Errorf("missing measurement")
    }

    if len(key) > MaxKeyLength {
        return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
    }

    // scan the second block which is field1=value1[,field2=value2,...]
    pos, fields, err := scanFields(buf, pos)
    if err != nil {
        return nil, err
    }

    // at least one field is required
    if len(fields) == 0 {
        return nil, fmt.Errorf("missing fields")
    }

    // scan the last block which is an optional integer timestamp
    pos, ts, err := scanTime(buf, pos)
    if err != nil {
        return nil, err
    }

    // apply precision multiplier
    var nsec int64
    multiplier := getPrecisionMultiplier(precision)
    if len(ts) > 0 && multiplier > 1 {
        tsint, err := parseIntBytes(ts, 10, 64)
        if err != nil {
            return nil, err
        }

        nsec = multiplier * tsint
        ts = []byte(strconv.FormatInt(nsec, 10))
    }

    m := &metric{
        fields: fields,
        t:      ts,
        nsec:   nsec,
    }

    // parse out the measurement name
    // namei is the index at which the "name" ends
    namei := indexUnescapedByte(key, ',')
    if namei < 1 {
        // no tags
        m.name = key
    } else {
        m.name = key[0:namei]
        m.tags = key[namei:]
    }

    if len(m.t) == 0 {
        if len(dTime) == 0 {
            dTime = fmt.Sprint(defaultTime.UnixNano())
        }
        // use default time
        m.t = []byte(dTime)
    }

    // here we copy on return because this allows us to later call
    // AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
    // modifying 'tag' bytes having an effect on 'field' bytes, for example.
    return m.Copy(), nil
}

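// Usage sketch for the entry points above: Parse takes a newline-separated
// line-protocol buffer, skips past lines that fail to parse, and returns the
// accumulated error text alongside the metrics that did parse.
//
//	buf := []byte("cpu,host=localhost value=1 1480940990034083306\n")
//	metrics, err := Parse(buf)
//	if err == nil && len(metrics) == 1 {
//		fmt.Println(metrics[0].Name()) // "cpu"
//	}
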
// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of key within buf. If there
// are tags, they will be sorted if they are not already.
func scanKey(buf []byte, i int) (int, []byte, error) {
    start := skipWhitespace(buf, i)
    i = start

    // First scan the Point's measurement.
    state, i, err := scanMeasurement(buf, i)
    if err != nil {
        return i, buf[start:i], err
    }

    // Optionally scan tags if needed.
    if state == tagKeyState {
        i, err = scanTags(buf, i)
        if err != nil {
            return i, buf[start:i], err
        }
    }

    return i, buf[start:i], nil
}

// scanMeasurement examines the measurement part of a Point, returning
// the next state to move to, and the current location in the buffer.
func scanMeasurement(buf []byte, i int) (int, int, error) {
    // Check first byte of measurement, anything except a comma is fine.
    // It can't be a space, since whitespace is stripped prior to this
    // function call.
    if i >= len(buf) || buf[i] == ',' {
        return -1, i, makeError("missing measurement", buf, i)
    }

    for {
        i++
        if i >= len(buf) {
            // cpu
            return -1, i, makeError("missing fields", buf, i)
        }

        if buf[i-1] == '\\' {
            // Skip character (it's escaped).
            continue
        }

        // Unescaped comma; move onto scanning the tags.
        if buf[i] == ',' {
            return tagKeyState, i + 1, nil
        }

        // Unescaped space; move onto scanning the fields.
        if buf[i] == ' ' {
            // cpu value=1.0
            return fieldsState, i, nil
        }
    }
}

// scanTags examines all the tags in a Point, keeping track of and
// returning the updated indices slice, number of commas and location
// in buf where to start examining the Point fields.
func scanTags(buf []byte, i int) (int, error) {
    var (
        err   error
        state = tagKeyState
    )

    for {
        switch state {
        case tagKeyState:
            i, err = scanTagsKey(buf, i)
            state = tagValueState // tag value always follows a tag key
        case tagValueState:
            state, i, err = scanTagsValue(buf, i)
        case fieldsState:
            return i, nil
        }

        if err != nil {
            return i, err
        }
    }
}

// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
    // First character of the key.
    if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
        // cpu,{'', ' ', ',', '='}
        return i, makeError("missing tag key", buf, i)
    }

    // Examine each character in the tag key until we hit an unescaped
    // equals (the tag value), or we hit an error (i.e., unescaped
    // space or comma).
    for {
        i++

        // Either we reached the end of the buffer or we hit an
        // unescaped comma or space.
        if i >= len(buf) ||
            ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
            // cpu,tag{'', ' ', ','}
            return i, makeError("missing tag value", buf, i)
        }

        if buf[i] == '=' && buf[i-1] != '\\' {
            // cpu,tag=
            return i + 1, nil
        }
    }
}

// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
    // Tag value cannot be empty.
    if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
        // cpu,tag={',', ' '}
        return -1, i, makeError("missing tag value", buf, i)
    }

    // Examine each character in the tag value until we hit an unescaped
    // comma (move onto next tag key), an unescaped space (move onto
    // fields), or we error out.
    for {
        i++
        if i >= len(buf) {
            // cpu,tag=value
            return -1, i, makeError("missing fields", buf, i)
        }

        // An unescaped equals sign is an invalid tag value.
        if buf[i] == '=' && buf[i-1] != '\\' {
            // cpu,tag={'=', 'fo=o'}
            return -1, i, makeError("invalid tag format", buf, i)
        }

        if buf[i] == ',' && buf[i-1] != '\\' {
            // cpu,tag=foo,
            return tagKeyState, i + 1, nil
        }

        // cpu,tag=foo value=1.0
        // cpu, tag=foo\= value=1.0
        if buf[i] == ' ' && buf[i-1] != '\\' {
            return fieldsState, i, nil
        }
    }
}

// scanFields scans buf, starting at i for the fields section of a point. It returns
// the ending position and the byte slice of the fields within buf
func scanFields(buf []byte, i int) (int, []byte, error) {
    start := skipWhitespace(buf, i)
    i = start

    // track how many '"' we've seen since the last '='
    quotes := 0

    // tracks how many '=' we've seen
    equals := 0

    // tracks how many commas we've seen
    commas := 0

    for {
        // reached the end of buf?
        if i >= len(buf) {
            break
        }

        // escaped characters?
        if buf[i] == '\\' && i+1 < len(buf) {
            i += 2
            continue
        }

        // If the value is quoted, scan until we get to the end quote
        // Only quote values in the field value since quotes are not significant
        // in the field key
        if buf[i] == '"' && equals > commas {
            i++
            quotes++
            if quotes > 2 {
                break
            }
            continue
        }

        // If we see an =, ensure that there is at least one char before and after it
        if buf[i] == '=' && quotes != 1 {
            quotes = 0
            equals++

            // check for "... =123" but allow "a\ =123"
            if buf[i-1] == ' ' && buf[i-2] != '\\' {
                return i, buf[start:i], makeError("missing field key", buf, i)
            }

            // check for "...a=123,=456" but allow "a=123,a\,=456"
            if buf[i-1] == ',' && buf[i-2] != '\\' {
                return i, buf[start:i], makeError("missing field key", buf, i)
            }

            // check for "... value="
            if i+1 >= len(buf) {
                return i, buf[start:i], makeError("missing field value", buf, i)
            }

            // check for "... value=,value2=..."
            if buf[i+1] == ',' || buf[i+1] == ' ' {
                return i, buf[start:i], makeError("missing field value", buf, i)
            }

            if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
                var err error
                i, err = scanNumber(buf, i+1)
                if err != nil {
                    return i, buf[start:i], err
                }
                continue
            }
            // If next byte is not a double-quote, the value must be a boolean
            if buf[i+1] != '"' {
                var err error
                i, _, err = scanBoolean(buf, i+1)
                if err != nil {
                    return i, buf[start:i], err
                }
                continue
            }
        }

        if buf[i] == ',' && quotes != 1 {
            commas++
        }

        // reached end of block?
        if buf[i] == ' ' && quotes != 1 {
            break
        }
        i++
    }

    if quotes != 0 && quotes != 2 {
        return i, buf[start:i], makeError("unbalanced quotes", buf, i)
    }

    // check that all field sections had key and values (e.g. prevent "a=1,b")
    if equals == 0 || commas != equals-1 {
        return i, buf[start:i], makeError("invalid field format", buf, i)
    }

    return i, buf[start:i], nil
}

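// Sketch of what the quotes/equals/commas counters above accept and reject:
//
//	_, fields, err := scanFields([]byte(`a=1,b="x y" 123`), 0) // fields == `a=1,b="x y"`, err == nil
//	_, _, err = scanFields([]byte(`a=1,b`), 0)                 // err: invalid field format
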
// scanTime scans buf, starting at i for the time section of a point. It
// returns the ending position and the byte slice of the timestamp within buf
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
    start := skipWhitespace(buf, i)
    i = start

    for {
        // reached the end of buf?
        if i >= len(buf) {
            break
        }

        // Reached end of block or trailing whitespace?
        if buf[i] == '\n' || buf[i] == ' ' {
            break
        }

        // Handle negative timestamps
        if i == start && buf[i] == '-' {
            i++
            continue
        }

        // Timestamps should be integers, make sure they are so we don't need
        // to actually parse the timestamp until needed.
        if buf[i] < '0' || buf[i] > '9' {
            return i, buf[start:i], makeError("invalid timestamp", buf, i)
        }
        i++
    }
    return i, buf[start:i], nil
}

func isNumeric(b byte) bool {
    return (b >= '0' && b <= '9') || b == '.'
}

// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
    start := i
    var isInt bool

    // Is negative number?
    if i < len(buf) && buf[i] == '-' {
        i++
        // There must be more characters now, as just '-' is illegal.
        if i == len(buf) {
            return i, ErrInvalidNumber
        }
    }

    // how many decimal points we've seen
    decimal := false

    // indicates the number is float in scientific notation
    scientific := false

    for {
        if i >= len(buf) {
            break
        }

        if buf[i] == ',' || buf[i] == ' ' {
            break
        }

        if buf[i] == 'i' && i > start && !isInt {
            isInt = true
            i++
            continue
        }

        if buf[i] == '.' {
            // Can't have more than 1 decimal (e.g. 1.1.1 should fail)
            if decimal {
                return i, ErrInvalidNumber
            }
            decimal = true
        }

        // `e` is valid for floats but not as the first char
        if i > start && (buf[i] == 'e' || buf[i] == 'E') {
            scientific = true
            i++
            continue
        }

        // + and - are only valid at this point if they follow an e (scientific notation)
        if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
            i++
            continue
        }

        // NaN is an unsupported value
        if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
            return i, ErrInvalidNumber
        }

        if !isNumeric(buf[i]) {
            return i, ErrInvalidNumber
        }
        i++
    }

    if isInt && (decimal || scientific) {
        return i, ErrInvalidNumber
    }

    numericDigits := i - start
    if isInt {
        numericDigits--
    }
    if decimal {
        numericDigits--
    }
    if buf[start] == '-' {
        numericDigits--
    }

    if numericDigits == 0 {
        return i, ErrInvalidNumber
    }

    // It's more common that numbers will be within min/max range for their type but we need to prevent
    // out of range numbers from being parsed successfully. This uses some simple heuristics to decide
    // if we should parse the number to the actual type. It does not do it all the time because it incurs
    // extra allocations and we end up converting the type again when writing points to disk.
    if isInt {
        // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
        if buf[i-1] != 'i' {
            return i, ErrInvalidNumber
        }
        // Parse the int to check bounds; the number of digits could be larger than the max range
        // We subtract 1 from the index to remove the `i` from our tests
        if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
            if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
                return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
            }
        }
    } else {
        // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
        if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
            if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
                return i, makeError("invalid float", buf, i)
            }
        }
    }

    return i, nil
|
||||
}
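
// Illustrative examples (editorial, consistent with the tests below): valid
// numbers include "1", "1i", "-1.5", and "6.0e3"; rejected forms include
// "1.1.1" (two decimals), "9i10" ('i' must be last), "nan" (unsupported), and
// "9999999999999999999i" (out of int64 range).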

// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error if
// an invalid boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
		return i, buf[start:i], makeError("invalid value", buf, i)
	}

	i++
	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}
		i++
	}

	// Single char bool (t, T, f, F) is ok
	if i-start == 1 {
		return i, buf[start:i], nil
	}

	// length must be 4 for true, True or TRUE
	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// length must be 5 for false, False or FALSE
	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// Otherwise
	valid := false
	switch buf[start] {
	case 't':
		valid = bytes.Equal(buf[start:i], []byte("true"))
	case 'f':
		valid = bytes.Equal(buf[start:i], []byte("false"))
	case 'T':
		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
	case 'F':
		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
	}

	if !valid {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	return i, buf[start:i], nil
}
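
// Illustrative examples (editorial): "t", "T", "true", "True", "TRUE", "f",
// "F", "false", "False", and "FALSE" all parse; truncated or mixed-case forms
// such as "tru" and "faLse" are rejected as "invalid boolean".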

// skipWhitespace returns the end position within buf, starting at i, after
// scanning over spaces, tabs, and null bytes.
func skipWhitespace(buf []byte, i int) int {
	for i < len(buf) {
		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
			break
		}
		i++
	}
	return i
}

// makeError is a helper function for making a metric parsing error.
// reason is the reason why the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
	return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
		reason, buf, i)
}
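
// Example output (editorial; it follows directly from the format string above):
//
//	makeError("invalid timestamp", []byte("cpu v=1 bad"), 8)
//	// metric parsing error, reason: [invalid timestamp], buffer: [cpu v=1 bad], index: [8]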

// getPrecisionMultiplier will return a multiplier for the precision specified.
func getPrecisionMultiplier(precision string) int64 {
	d := time.Nanosecond
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	case "m":
		d = time.Minute
	case "h":
		d = time.Hour
	}
	return int64(d)
}
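
// A minimal usage sketch (editorial assumption: callers multiply a parsed
// epoch value by the returned multiplier to normalize it to nanoseconds,
// matching TestParsePrecision below):
//
//	ts := int64(1491847420)                // seconds-precision epoch
//	ns := ts * getPrecisionMultiplier("s") // 1491847420000000000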

@@ -1,413 +0,0 @@
package metric

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`

const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`

const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`

const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`

const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`

const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200
`

// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParse(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(sevenMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 7)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"idle": float64(99),
				"busy": int64(1),
				"b":    true,
				"s":    "string",
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseNegNumbers(t *testing.T) {
	metrics, err := Parse([]byte(negMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 1)

	assert.Equal(t,
		map[string]interface{}{
			"temp":       int64(-99),
			"temp_float": float64(-99.4),
		},
		metrics[0].Fields(),
	)
	assert.Equal(t,
		map[string]string{
			"host": "local",
		},
		metrics[0].Tags(),
	)
}

func TestParseErrors(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(someInvalid))
	assert.Error(t, err)
	assert.Len(t, metrics, 4)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage_idle": float64(99),
				"usage_busy": float64(1),
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseWithTimestamps(t *testing.T) {
	metrics, err := Parse([]byte(withTimestamps))
	assert.NoError(t, err)
	assert.Len(t, metrics, 4)

	expectedTimestamps := []time.Time{
		time.Unix(0, 1480595849000000000),
		time.Unix(0, 1480595850000000000),
		time.Unix(0, 1480595851700030000),
		time.Unix(0, 1480595852000000300),
	}

	// each metric should carry the timestamp from its own line.
	for i, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage": float64(99),
			},
			m.Fields(),
		)
		assert.True(t, m.Time().Equal(expectedTimestamps[i]))
	}
}

func TestParseEscapes(t *testing.T) {
	metrics, err := Parse([]byte(withEscapes))
	assert.NoError(t, err)
	assert.Len(t, metrics, 6)

	tests := []struct {
		name   string
		fields map[string]interface{}
		tags   map[string]string
	}{
		{
			name:   `w, eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `w,eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{"location": `us,midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temp=rature`: float64(82)},
			tags:   map[string]string{"location": `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{`location place`: `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temperature`: `too"hot"`},
			tags:   map[string]string{"location": `us-midwest`},
		},
	}

	for i, test := range tests {
		assert.Equal(t, test.name, metrics[i].Name())
		assert.Equal(t, test.fields, metrics[i].Fields())
		assert.Equal(t, test.tags, metrics[i].Tags())
	}
}

func TestParseTrueBooleans(t *testing.T) {
	metrics, err := Parse([]byte(trues))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, true, metric.Fields()["b"])
	}
}

func TestParseFalseBooleans(t *testing.T) {
	metrics, err := Parse([]byte(falses))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, false, metric.Fields()["b"])
	}
}

func TestParsePointBadNumber(t *testing.T) {
	for _, tt := range []string{
		"cpu v=- ",
		"cpu v=-i ",
		"cpu v=-. ",
		"cpu v=. ",
		"cpu v=1.0i ",
		"cpu v=1ii ",
		"cpu v=1a ",
		"cpu v=-e-e-e ",
		"cpu v=42+3 ",
		"cpu v= ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseTagsMissingParts(t *testing.T) {
	for _, tt := range []string{
		`cpu,host`,
		`cpu,host,`,
		`cpu,host=`,
		`cpu,f=oo=bar value=1`,
		`cpu,host value=1i`,
		`cpu,host=serverA,region value=1i`,
		`cpu,host=serverA,region= value=1i`,
		`cpu,host=serverA,region=,zone=us-west value=1i`,
		`cpu, value=1`,
		`cpu, ,,`,
		`cpu,,,`,
		`cpu,host=serverA,=us-east value=1i`,
		`cpu,host=serverAa\,,=us-east value=1i`,
		`cpu,host=serverA\,,=us-east value=1i`,
		`cpu, =serverA value=1i`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointWhitespace(t *testing.T) {
	for _, tt := range []string{
		`cpu value=1.0 1257894000000000000`,
		`cpu  value=1.0 1257894000000000000`,
		`cpu value=1.0  1257894000000000000`,
		`cpu value=1.0 1257894000000000000 `,
	} {
		m, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.Equal(t, "cpu", m[0].Name())
		assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
	}
}

func TestParsePointInvalidFields(t *testing.T) {
	for _, tt := range []string{
		"test,foo=bar a=101,=value",
		"test,foo=bar =value",
		"test,foo=bar a=101,key=",
		"test,foo=bar key=",
		`test,foo=bar a=101,b="foo`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointNoFields(t *testing.T) {
	for _, tt := range []string{
		"cpu_load_short,host=server01,region=us-west",
		"very_long_measurement_name",
		"cpu,host==",
		"============",
		"cpu",
		"cpu\n\n\n\n\n\n\n",
		" ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

// a b=1 << this is the shortest possible metric
// any shorter is just ignored
func TestParseBufTooShort(t *testing.T) {
	for _, tt := range []string{
		"",
		"a",
		"a ",
		"a b=",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidBooleans(t *testing.T) {
	for _, tt := range []string{
		"test b=tru",
		"test b=fals",
		"test b=faLse",
		"test q=foo",
		"test b=lambchops",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidNumbers(t *testing.T) {
	for _, tt := range []string{
		"test b=-",
		"test b=1.1.1",
		"test b=nan",
		"test b=9i10",
		"test b=9999999999999999999i",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseNegativeTimestamps(t *testing.T) {
	for _, tt := range []string{
		"test foo=101 -1257894000000000000",
	} {
		metrics, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
	}
}

func TestParsePrecision(t *testing.T) {
	for _, tt := range []struct {
		line      string
		precision string
		expected  int64
	}{
		{"test v=42 1491847420", "s", 1491847420000000000},
		{"test v=42 1491847420123", "ms", 1491847420123000000},
		{"test v=42 1491847420123456", "u", 1491847420123456000},
		{"test v=42 1491847420123456789", "ns", 1491847420123456789},

		{"test v=42 1491847420123456789", "1s", 1491847420123456789},
		{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
	} {
		metrics, err := ParseWithDefaultTimePrecision(
			[]byte(tt.line+"\n"), time.Now(), tt.precision)
		assert.NoError(t, err)
		assert.Equal(t, tt.expected, metrics[0].UnixNano())
	}
}

func TestParsePrecisionUnsetTime(t *testing.T) {
	for _, tt := range []struct {
		line      string
		precision string
	}{
		{"test v=42", "s"},
		{"test v=42", "ns"},
	} {
		_, err := ParseWithDefaultTimePrecision(
			[]byte(tt.line+"\n"), time.Now(), tt.precision)
		assert.NoError(t, err)
	}
}

func TestParseMaxKeyLength(t *testing.T) {
	key := ""
	for {
		if len(key) > MaxKeyLength {
			break
		}
		key += "test"
	}

	_, err := Parse([]byte(key + " value=1\n"))
	assert.Error(t, err)
}

159 metric/reader.go
@@ -1,159 +0,0 @@
package metric

import (
	"io"

	"github.com/influxdata/telegraf"
)

type state int

const (
	_ state = iota
	// normal state copies whole metrics into the given buffer until we can't
	// fit the next metric.
	normal
	// split state means that we have a metric that we were able to split, so
	// that we can fit it into multiple metrics (and calls to Read)
	split
	// overflow state means that we have a metric that didn't fit into a single
	// buffer, and needs to be split across multiple calls to Read.
	overflow
	// splitOverflow state means that a split metric didn't fit into a single
	// buffer, and needs to be split across multiple calls to Read.
	splitOverflow
	// done means we're done reading metrics, and now always return (0, io.EOF)
	done
)
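
// State transitions (editorial summary of the Read method below):
//
//	normal -> split          when a too-large metric can be split to fit
//	normal -> overflow       when it cannot be split and must be streamed raw
//	split  -> splitOverflow  when a split piece no longer fits the buffer
//	overflow/splitOverflow -> normal   once the buffered bytes are drained
//	any    -> done           after the last metric has been fully written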

type reader struct {
	metrics      []telegraf.Metric
	splitMetrics []telegraf.Metric
	buf          []byte
	state        state

	// metric index
	iM int
	// split metric index
	iSM int
	// buffer index
	iB int
}

func NewReader(metrics []telegraf.Metric) io.Reader {
	return &reader{
		metrics: metrics,
		state:   normal,
	}
}

func (r *reader) Read(p []byte) (n int, err error) {
	var i int
	switch r.state {
	case done:
		return 0, io.EOF
	case normal:
		for {
			// this for-loop is the sunny-day scenario, where we are given a
			// buffer that is large enough to hold at least a single metric.
			// all of the cases below it are edge-cases.
			if r.metrics[r.iM].Len() <= len(p[i:]) {
				i += r.metrics[r.iM].SerializeTo(p[i:])
			} else {
				break
			}
			r.iM++
			if r.iM == len(r.metrics) {
				r.state = done
				return i, io.EOF
			}
		}

		// if we haven't written any bytes, check if we can split the current
		// metric into multiple full metrics at a smaller size.
		if i == 0 {
			tmp := r.metrics[r.iM].Split(len(p))
			if len(tmp) > 1 {
				r.splitMetrics = tmp
				r.state = split
				if r.splitMetrics[0].Len() <= len(p) {
					i += r.splitMetrics[0].SerializeTo(p)
					r.iSM = 1
				} else {
					// splitting didn't quite work, so we'll drop down and
					// overflow the metric.
					r.state = normal
					r.iSM = 0
				}
			}
		}

		// if we haven't written any bytes and we're not at the end of the metrics
		// slice, then it means we have a single metric that is larger than the
		// provided buffer.
		if i == 0 {
			r.buf = r.metrics[r.iM].Serialize()
			i += copy(p, r.buf[r.iB:])
			r.iB += i
			r.state = overflow
		}

	case split:
		if r.splitMetrics[r.iSM].Len() <= len(p) {
			// write the current split metric
			i += r.splitMetrics[r.iSM].SerializeTo(p)
			r.iSM++
			if r.iSM >= len(r.splitMetrics) {
				// done writing the current split metrics
				r.iSM = 0
				r.iM++
				if r.iM == len(r.metrics) {
					r.state = done
					return i, io.EOF
				}
				r.state = normal
			}
		} else {
			// This would only happen if we split the metric, and then a
			// subsequent buffer was smaller than the initial one given,
			// so that our split metric no longer fits.
			r.buf = r.splitMetrics[r.iSM].Serialize()
			i += copy(p, r.buf[r.iB:])
			r.iB += i
			r.state = splitOverflow
		}

	case splitOverflow:
		i = copy(p, r.buf[r.iB:])
		r.iB += i
		if r.iB >= len(r.buf) {
			r.iB = 0
			r.iSM++
			if r.iSM == len(r.splitMetrics) {
				r.iM++
				if r.iM == len(r.metrics) {
					r.state = done
					return i, io.EOF
				}
				r.state = normal
			} else {
				r.state = split
			}
		}

	case overflow:
		i = copy(p, r.buf[r.iB:])
		r.iB += i
		if r.iB >= len(r.buf) {
			r.iB = 0
			r.iM++
			if r.iM == len(r.metrics) {
				r.state = done
				return i, io.EOF
			}
			r.state = normal
		}
	}

	return i, nil
}
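
// A minimal usage sketch (editorial; it mirrors BenchmarkMetricReader in the
// tests below). NewReader streams serialized metrics into any io.Writer
// without materializing one large buffer:
//
//	r := NewReader(metrics)
//	if _, err := io.Copy(dst, r); err != nil { // dst is any io.Writer
//		// handle the write error
//	}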

@@ -1,713 +0,0 @@
package metric

import (
	"io"
	"io/ioutil"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func BenchmarkMetricReader(b *testing.B) {
	metrics := make([]telegraf.Metric, 10)
	for i := 0; i < 10; i++ {
		metrics[i], _ = New("foo", map[string]string{},
			map[string]interface{}{"value": int64(1)}, time.Now())
	}
	for n := 0; n < b.N; n++ {
		r := NewReader(metrics)
		io.Copy(ioutil.Discard, r)
	}
}

func TestMetricReader(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	metrics := make([]telegraf.Metric, 10)
	for i := 0; i < 10; i++ {
		metrics[i], _ = New("foo", map[string]string{},
			map[string]interface{}{"value": int64(1)}, ts)
	}

	r := NewReader(metrics)

	buf := make([]byte, 35)
	for i := 0; i < 10; i++ {
		n, err := r.Read(buf)
		if err != nil {
			assert.True(t, err == io.EOF, err.Error())
		}
		assert.Equal(t, 33, n)
		assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n]))
	}

	// reader should now be done, and always return 0, io.EOF
	for i := 0; i < 10; i++ {
		n, err := r.Read(buf)
		assert.True(t, err == io.EOF, err.Error())
		assert.Equal(t, 0, n)
	}
}

func TestMetricReader_OverflowMetric(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m, _ := New("foo", map[string]string{},
		map[string]interface{}{"value": int64(10)}, ts)
	metrics := []telegraf.Metric{m}

	r := NewReader(metrics)
	buf := make([]byte, 5)

	tests := []struct {
		exp string
		err error
		n   int
	}{
		{"foo v", nil, 5},
		{"alue=", nil, 5},
		{"10i 1", nil, 5},
		{"48103", nil, 5},
		{"21900", nil, 5},
		{"00000", nil, 5},
		{"000\n", io.EOF, 4},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		assert.Equal(t, test.exp, string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// Regression test for when a metric is the same size as the buffer.
//
// Previously EOF would not be set until the next call to Read.
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{"a": int64(1)}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, m1.Len())

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer has 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

// Regression test for when a metric requires to be split and one of the
// split metrics is exactly the size of the buffer.
//
// Previously an empty string would be returned on the next Read without error,
// and then the next Read call would panic.
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 30)

	// foo a=1i,bb=2i 1481032190000000000\n // len 35
	//
	// Requires this specific split order:
	// foo a=1i 1481032190000000000\n  // len 29
	// foo bb=2i 1481032190000000000\n // len 30

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer has 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly, causing a panic.
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"a":   int64(1),
			"bbb": int64(2),
		}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 30)

	// foo a=1i,bbb=2i 1481032190000000000\n // len 36
	//
	// foo a=1i 1481032190000000000\n   // len 29
	// foo bbb=2i 1481032190000000000\n // len 31

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer has 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow-split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 29)

	// foo a=1i,b=2i 1481032190000000000\n // len 34
	//
	// foo a=1i 1481032190000000000\n // len 29
	// foo b=2i 1481032190000000000\n // len 29

	for {
		n, err := r.Read(buf)
		// Should never read 0 bytes unless at EOF or the input buffer has 0 length
		if n == 0 {
			require.Equal(t, io.EOF, err)
			break
		}
		// Lines should be terminated with a LF
		if err == io.EOF {
			require.Equal(t, uint8('\n'), buf[n-1])
			break
		}
		require.NoError(t, err)
	}
}

func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m, _ := New("foo", map[string]string{},
		map[string]interface{}{"value": int64(10)}, ts)
	metrics := []telegraf.Metric{m, m.Copy()}

	r := NewReader(metrics)
	buf := make([]byte, 10)

	tests := []struct {
		exp string
		err error
		n   int
	}{
		{"foo value=", nil, 10},
		{"10i 148103", nil, 10},
		{"2190000000", nil, 10},
		{"000\n", nil, 4},
		{"foo value=", nil, 10},
		{"10i 148103", nil, 10},
		{"2190000000", nil, 10},
		{"000\n", io.EOF, 4},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		assert.Equal(t, test.exp, string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test splitting a metric
func TestMetricReader_SplitMetric(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
			"value4": int64(10),
			"value5": int64(10),
			"value6": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 60)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, io.EOF, 57},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test an array with one split metric and one unsplit
func TestMetricReader_SplitMetric2(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
			"value4": int64(10),
			"value5": int64(10),
			"value6": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)
	buf := make([]byte, 60)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test split that results in metrics that are still too long, which results in
// the reader falling back to regular overflow.
func TestMetricReader_SplitMetricTooLong(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 30)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i 1481`, nil, 30},
		{`032190000000000\n`, io.EOF, 16},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value\d=10i 148103219000000`, nil, 30, make([]byte, 30)},
		{`0000\n`, nil, 5, make([]byte, 30)},
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35, make([]byte, 36)},
		{"", io.EOF, 0, make([]byte, 36)},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value\d=10i 148103219000000`, nil, 30, make([]byte, 30)},
		{`0000\n`, nil, 5, make([]byte, 30)},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35, make([]byte, 36)},
		{"", io.EOF, 0, make([]byte, 36)},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}

func TestReader_Read(t *testing.T) {
	epoch := time.Unix(0, 0)

	type args struct {
		name   string
		tags   map[string]string
		fields map[string]interface{}
		t      time.Time
		mType  []telegraf.ValueType
	}
	tests := []struct {
		name     string
		args     args
		expected []byte
	}{
		{
			name: "escape backslashes in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\" 0`),
		},
		{
			name: "escape quote in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test"`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\"" 0`),
		},
		{
			name: "escape quote and backslash in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\"`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\\"" 0`),
		},
		{
			name: "escape multiple backslash in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\\`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\\\" 0`),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf := make([]byte, 512)
			m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
			require.NoError(t, err)

			r := NewReader([]telegraf.Metric{m})
			num, err := r.Read(buf)
			if err != io.EOF {
				require.NoError(t, err)
			}
			line := string(buf[:num])
			// This is done so that we can use raw strings in the test spec
			noeol := strings.TrimRight(line, "\n")
			require.Equal(t, string(tt.expected), noeol)
			require.Equal(t, len(tt.expected)+1, num)
		})
	}
}

func TestMetricRoundtrip(t *testing.T) {
	const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
`
	metrics, err := Parse([]byte(lp))
	require.NoError(t, err)
	r := NewReader(metrics)
	buf := make([]byte, 128)
	_, err = r.Read(buf)
	require.NoError(t, err)
	metrics, err = Parse(buf)
	require.NoError(t, err)
}

7 metric/uint_support.go Normal file
@@ -0,0 +1,7 @@
// +build uint64

package metric

func init() {
	EnableUintSupport()
}

@@ -4,4 +4,5 @@ import (
	_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
	_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
	_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
	_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
)

@@ -1,6 +1,6 @@
# BasicStats Aggregator Plugin

The BasicStats aggregator plugin give us count,max,min,mean,s2(variance), stdev for a set of values,
The BasicStats aggregator plugin give us count,max,min,mean,sum,s2(variance), stdev for a set of values,
emitting the aggregate every `period` seconds.

### Configuration:
@@ -8,14 +8,26 @@ emitting the aggregate every `period` seconds.
```toml
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]

  ## General Aggregator Arguments:

  ## The period on which to flush & clear the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## BasicStats Arguments:

  ## Configures which basic stats to push as fields
  stats = ["count","min","max","mean","stdev","s2","sum"]
```

- stats
    - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum` is not aggregated by default to maintain backwards compatibility.
    - If empty array, no stats are aggregated

### Measurements & Fields:

- measurement1
@@ -23,6 +35,7 @@ emitting the aggregate every `period` seconds.
    - field1_max
    - field1_min
    - field1_mean
    - field1_sum
    - field1_s2 (variance)
    - field1_stdev (standard deviation)

@@ -36,8 +49,8 @@ No tags are applied by this aggregator.
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414162 1475584010000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000
```

@@ -1,6 +1,7 @@
package basicstats

import (
	"log"
	"math"

	"github.com/influxdata/telegraf"
@@ -8,10 +9,23 @@ import (
)

type BasicStats struct {
	cache map[uint64]aggregate
	Stats []string `toml:"stats"`

	cache       map[uint64]aggregate
	statsConfig *configuredStats
}

func NewBasicStats() telegraf.Aggregator {
type configuredStats struct {
	count    bool
	min      bool
	max      bool
	mean     bool
	variance bool
	stdev    bool
	sum      bool
}

func NewBasicStats() *BasicStats {
	mm := &BasicStats{}
	mm.Reset()
	return mm
@@ -27,6 +41,7 @@ type basicstats struct {
	count float64
	min   float64
	max   float64
	sum   float64
	mean  float64
	M2    float64 // intermediate value for variance/stdev
}
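
// Editorial note: M2 here appears to be the running sum of squared deviations
// from the mean (Welford-style online variance); Push below divides it by
// count-1 to produce the sample variance s2, and its square root as stdev.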

@@ -64,6 +79,7 @@ func (m *BasicStats) Add(in telegraf.Metric) {
				min:  fv,
				max:  fv,
				mean: fv,
				sum:  fv,
				M2:   0.0,
			}
		}
@@ -79,6 +95,7 @@ func (m *BasicStats) Add(in telegraf.Metric) {
					min:  fv,
					max:  fv,
					mean: fv,
					sum:  fv,
					M2:   0.0,
				}
				continue
@@ -106,6 +123,8 @@ func (m *BasicStats) Add(in telegraf.Metric) {
				} else if fv > tmp.max {
					tmp.max = fv
				}
				// compute the sum
				tmp.sum += fv
				// store final data
				m.cache[id].fields[k] = tmp
			}
@@ -114,25 +133,109 @@
}

func (m *BasicStats) Push(acc telegraf.Accumulator) {
	config := getConfiguredStats(m)

	for _, aggregate := range m.cache {
		fields := map[string]interface{}{}
		for k, v := range aggregate.fields {
			fields[k+"_count"] = v.count
			fields[k+"_min"] = v.min
			fields[k+"_max"] = v.max
			fields[k+"_mean"] = v.mean

			if config.count {
				fields[k+"_count"] = v.count
			}
			if config.min {
				fields[k+"_min"] = v.min
			}
			if config.max {
				fields[k+"_max"] = v.max
			}
			if config.mean {
				fields[k+"_mean"] = v.mean
			}
			if config.sum {
				fields[k+"_sum"] = v.sum
			}

			// v.count is always >= 1
			if v.count > 1 {
				variance := v.M2 / (v.count - 1)
				fields[k+"_s2"] = variance
				fields[k+"_stdev"] = math.Sqrt(variance)

				if config.variance {
					fields[k+"_s2"] = variance
				}
				if config.stdev {
					fields[k+"_stdev"] = math.Sqrt(variance)
				}
			}
			// if count == 1, stdev is not defined, so no data is sent for it
		}
		acc.AddFields(aggregate.name, fields, aggregate.tags)

		if len(fields) > 0 {
			acc.AddFields(aggregate.name, fields, aggregate.tags)
		}
	}
}

func parseStats(names []string) *configuredStats {
	parsed := &configuredStats{}

	for _, name := range names {
		switch name {
		case "count":
			parsed.count = true
		case "min":
			parsed.min = true
		case "max":
			parsed.max = true
		case "mean":
			parsed.mean = true
		case "s2":
			parsed.variance = true
		case "stdev":
			parsed.stdev = true
		case "sum":
			parsed.sum = true
		default:
			log.Printf("W! Unrecognized basic stat '%s', ignoring", name)
		}
	}

	return parsed
}

func defaultStats() *configuredStats {
	defaults := &configuredStats{}

	defaults.count = true
	defaults.min = true
	defaults.max = true
	defaults.mean = true
	defaults.variance = true
	defaults.stdev = true
	defaults.sum = false

	return defaults
}

func getConfiguredStats(m *BasicStats) *configuredStats {
	if m.statsConfig == nil {
		if m.Stats == nil {
			m.statsConfig = defaultStats()
		} else {
			m.statsConfig = parseStats(m.Stats)
		}
	}

	return m.statsConfig
}
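
// Illustrative behavior (editorial, derived from the code above):
//
//	Stats == nil (option unset)      -> defaultStats(): everything except sum
//	Stats == []string{}              -> no stats; Push emits no fields at all
//	Stats == []string{"count","sum"} -> only *_count and *_sum fields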
|
||||
|
||||
func (m *BasicStats) Reset() {
|
||||
m.cache = make(map[uint64]aggregate)
|
||||
}
|
||||
@@ -143,6 +246,8 @@ func convert(in interface{}) (float64, bool) {
|
||||
return v, true
|
||||
case int64:
|
||||
return float64(v), true
|
||||
case uint64:
|
||||
return float64(v), true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var m1, _ = metric.New("m1",
|
||||
@@ -27,6 +28,7 @@ var m2, _ = metric.New("m1",
|
||||
"c": float64(4),
|
||||
"d": float64(6),
|
||||
"e": float64(200),
|
||||
"f": uint64(200),
|
||||
"ignoreme": "string",
|
||||
"andme": true,
|
||||
},
|
||||
@@ -80,6 +82,10 @@ func TestBasicStatsWithPeriod(t *testing.T) {
|
||||
"e_max": float64(200),
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
"f_count": float64(1), //f
|
||||
"f_max": float64(200),
|
||||
"f_min": float64(200),
|
||||
"f_mean": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -143,9 +149,384 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
|
||||
"e_max": float64(200),
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
"f_count": float64(1), //f
|
||||
"f_max": float64(200),
|
||||
"f_min": float64(200),
|
||||
"f_mean": float64(200),
|
||||
}
|
||||
expectedTags = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating count
|
||||
func TestBasicStatsWithOnlyCount(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"count"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_count": float64(2),
|
||||
"b_count": float64(2),
|
||||
"c_count": float64(2),
|
||||
"d_count": float64(2),
|
||||
"e_count": float64(1),
|
||||
"f_count": float64(1),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating minimum
func TestBasicStatsWithOnlyMin(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"min"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_min": float64(1),
		"b_min": float64(1),
		"c_min": float64(2),
		"d_min": float64(2),
		"e_min": float64(200),
		"f_min": float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating maximum
func TestBasicStatsWithOnlyMax(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"max"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_max": float64(1),
		"b_max": float64(3),
		"c_max": float64(4),
		"d_max": float64(6),
		"e_max": float64(200),
		"f_max": float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating mean
func TestBasicStatsWithOnlyMean(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"mean"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_mean": float64(1),
		"b_mean": float64(2),
		"c_mean": float64(3),
		"d_mean": float64(4),
		"e_mean": float64(200),
		"f_mean": float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating sum
func TestBasicStatsWithOnlySum(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"sum"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_sum": float64(2),
		"b_sum": float64(4),
		"c_sum": float64(6),
		"d_sum": float64(8),
		"e_sum": float64(200),
		"f_sum": float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Verify that sum doesn't suffer from floating point errors. Early
// implementations of sum were calculated from mean and count, which
// e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8.
func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {

	var sum1, _ = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(1),
		},
		time.Now(),
	)
	var sum2, _ = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(1),
		},
		time.Now(),
	)
	var sum3, _ = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(5),
		},
		time.Now(),
	)
	var sum4, _ = metric.New("m1",
		map[string]string{},
		map[string]interface{}{
			"a": int64(1),
		},
		time.Now(),
	)

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"sum"}

	aggregator.Add(sum1)
	aggregator.Add(sum2)
	aggregator.Add(sum3)
	aggregator.Add(sum4)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_sum": float64(8),
	}
	expectedTags := map[string]string{}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
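The failure mode described in the comment above is easy to reproduce outside the plugin; a minimal sketch of the old mean-times-count reconstruction versus direct summation (illustrative only):

```go
package main

import "fmt"

func main() {
	values := []float64{1, 1, 5, 1}

	// Incremental running mean, as the early implementation maintained it.
	mean, count := 0.0, 0.0
	for _, v := range values {
		count++
		mean += (v - mean) / count
	}

	// Direct summation has no reconstruction error for these inputs.
	sum := 0.0
	for _, v := range values {
		sum += v
	}

	fmt.Println(mean * count) // typically prints 7.999999... rather than 8
	fmt.Println(sum)          // 8
}
```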
// Test only aggregating variance
func TestBasicStatsWithOnlyVariance(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"s2"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_s2": float64(0),
		"b_s2": float64(2),
		"c_s2": float64(2),
		"d_s2": float64(8),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
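The expected values are consistent with the sample variance (n-1 denominator): for `b`, the two observations are 1 and 3, so s2 = ((1-2)^2 + (3-2)^2)/(2-1) = 2. A standalone check (illustrative):

```go
package main

import "fmt"

// sampleVariance computes the Bessel-corrected (n-1) variance,
// which matches the s2 values the test above expects.
func sampleVariance(xs []float64) float64 {
	mean := 0.0
	for _, x := range xs {
		mean += x
	}
	mean /= float64(len(xs))

	ss := 0.0
	for _, x := range xs {
		ss += (x - mean) * (x - mean)
	}
	return ss / float64(len(xs)-1)
}

func main() {
	fmt.Println(sampleVariance([]float64{1, 3})) // 2
	fmt.Println(sampleVariance([]float64{2, 6})) // 8
}
```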
// Test only aggregating standard deviation
func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"stdev"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_stdev": float64(0),
		"b_stdev": math.Sqrt(2),
		"c_stdev": math.Sqrt(2),
		"d_stdev": math.Sqrt(8),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test only aggregating minimum and maximum
func TestBasicStatsWithMinAndMax(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"min", "max"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_max": float64(1), //a
		"a_min": float64(1),
		"b_max": float64(3), //b
		"b_min": float64(1),
		"c_max": float64(4), //c
		"c_min": float64(2),
		"d_max": float64(6), //d
		"d_min": float64(2),
		"e_max": float64(200), //e
		"e_min": float64(200),
		"f_max": float64(200), //f
		"f_min": float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test aggregating with all stats
func TestBasicStatsWithAllStats(t *testing.T) {
	acc := testutil.Accumulator{}
	minmax := NewBasicStats()
	minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"}

	minmax.Add(m1)
	minmax.Add(m2)
	minmax.Push(&acc)

	expectedFields := map[string]interface{}{
		"a_count": float64(2), //a
		"a_max":   float64(1),
		"a_min":   float64(1),
		"a_mean":  float64(1),
		"a_stdev": float64(0),
		"a_s2":    float64(0),
		"a_sum":   float64(2),
		"b_count": float64(2), //b
		"b_max":   float64(3),
		"b_min":   float64(1),
		"b_mean":  float64(2),
		"b_s2":    float64(2),
		"b_sum":   float64(4),
		"b_stdev": math.Sqrt(2),
		"c_count": float64(2), //c
		"c_max":   float64(4),
		"c_min":   float64(2),
		"c_mean":  float64(3),
		"c_s2":    float64(2),
		"c_stdev": math.Sqrt(2),
		"c_sum":   float64(6),
		"d_count": float64(2), //d
		"d_max":   float64(6),
		"d_min":   float64(2),
		"d_mean":  float64(4),
		"d_s2":    float64(8),
		"d_stdev": math.Sqrt(8),
		"d_sum":   float64(8),
		"e_count": float64(1), //e
		"e_max":   float64(200),
		"e_min":   float64(200),
		"e_mean":  float64(200),
		"e_sum":   float64(200),
		"f_count": float64(1), //f
		"f_max":   float64(200),
		"f_min":   float64(200),
		"f_mean":  float64(200),
		"f_sum":   float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test that if an empty array is passed, no points are pushed
func TestBasicStatsWithNoStats(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	acc.AssertDoesNotContainMeasurement(t, "m1")
}

// Test that if an unknown stat is configured, it doesn't explode
func TestBasicStatsWithUnknownStat(t *testing.T) {

	aggregator := NewBasicStats()
	aggregator.Stats = []string{"crazy"}

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	acc.AssertDoesNotContainMeasurement(t, "m1")
}

// Test that if Stats isn't supplied, then we only do count, min, max, mean,
// stdev, and s2. We purposely exclude sum for backwards compatibility;
// otherwise users' working systems would suddenly (and surprisingly) start
// capturing sum without their input.
func TestBasicStatsWithDefaultStats(t *testing.T) {

	aggregator := NewBasicStats()

	aggregator.Add(m1)
	aggregator.Add(m2)

	acc := testutil.Accumulator{}
	aggregator.Push(&acc)

	assert.True(t, acc.HasField("m1", "a_count"))
	assert.True(t, acc.HasField("m1", "a_min"))
	assert.True(t, acc.HasField("m1", "a_max"))
	assert.True(t, acc.HasField("m1", "a_mean"))
	assert.True(t, acc.HasField("m1", "a_stdev"))
	assert.True(t, acc.HasField("m1", "a_s2"))
	assert.False(t, acc.HasField("m1", "a_sum"))
}
@@ -107,6 +107,8 @@ func convert(in interface{}) (float64, bool) {
		return v, true
	case int64:
		return float64(v), true
	case uint64:
		return float64(v), true
	default:
		return 0, false
	}

@@ -38,6 +38,7 @@ var m2, _ = metric.New("m1",
		"i":        float64(1),
		"j":        float64(1),
		"k":        float64(200),
		"l":        uint64(200),
		"ignoreme": "string",
		"andme":    true,
	},
@@ -85,6 +86,8 @@ func TestMinMaxWithPeriod(t *testing.T) {
		"j_min": float64(1),
		"k_max": float64(200),
		"k_min": float64(200),
		"l_max": float64(200),
		"l_min": float64(200),
	}
	expectedTags := map[string]string{
		"foo": "bar",
@@ -154,6 +157,8 @@ func TestMinMaxDifferentPeriods(t *testing.T) {
		"j_min": float64(1),
		"k_max": float64(200),
		"k_min": float64(200),
		"l_max": float64(200),
		"l_min": float64(200),
	}
	expectedTags = map[string]string{
		"foo": "bar",
plugins/aggregators/valuecounter/README.md (new file, 73 lines)
@@ -0,0 +1,73 @@
# ValueCounter Aggregator Plugin

The valuecounter plugin counts the occurrence of values in fields and emits the
counter once every `period`.

A use case for the valuecounter plugin is processing an HTTP access log (with
the logparser input) and counting the HTTP status codes.

The fields to be counted must be configured with the `fields` configuration
directive. If no `fields` are provided, the plugin will not count anything.
The results are emitted as fields in the format:
`originalfieldname_fieldvalue = count`.
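For example, a `status` field observed with the value `200` five times and `404` once during a period would emit fields like these (hypothetical values):

```
status_200 = 5
status_404 = 1
```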
Valuecounter only works on fields of type int, bool, or string. Float fields
are dropped to prevent the creation of too many fields.
### Configuration:

```toml
[[aggregators.valuecounter]]
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
  ## The fields for which the values will be counted
  fields = ["status"]
```

### Measurements & Fields:

- measurement1
  - field_value1
  - field_value2

### Tags:

No tags are applied by this aggregator.

### Example Output:

Example for parsing an HTTP access log.

telegraf.conf:
```
[[inputs.logparser]]
  files = ["/tmp/tst.log"]
  [inputs.logparser.grok]
    patterns = ['%{DATA:url:tag} %{NUMBER:response:string}']
    measurement = "access"

[[aggregators.valuecounter]]
  namepass = ["access"]
  fields = ["response"]
```

/tmp/tst.log
```
/some/path 200
/some/path 401
/some/path 200
```

```
$ telegraf --config telegraf.conf --quiet

access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="401" 1511948755991522282
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991531697

access,path=/tmp/tst.log,host=localhost.localdomain,url=/some/path response_200=2i,response_401=1i 1511948761000000000
```
plugins/aggregators/valuecounter/valuecounter.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package valuecounter

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"
)

type aggregate struct {
	name       string
	tags       map[string]string
	fieldCount map[string]int
}

// ValueCounter is an aggregation plugin
type ValueCounter struct {
	cache  map[uint64]aggregate
	Fields []string
}

// NewValueCounter creates a new aggregation plugin which counts the occurrences
// of fields and emits the count.
func NewValueCounter() telegraf.Aggregator {
	vc := &ValueCounter{}
	vc.Reset()
	return vc
}

var sampleConfig = `
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
  ## The fields for which the values will be counted
  fields = []
`

// SampleConfig generates a sample config for the ValueCounter plugin
func (vc *ValueCounter) SampleConfig() string {
	return sampleConfig
}

// Description returns the description of the ValueCounter plugin
func (vc *ValueCounter) Description() string {
	return "Count the occurrence of values in fields."
}

// Add is run on every metric which passes the plugin
func (vc *ValueCounter) Add(in telegraf.Metric) {
	id := in.HashID()

	// Check if the cache already has an entry for this metric, if not create it
	if _, ok := vc.cache[id]; !ok {
		a := aggregate{
			name:       in.Name(),
			tags:       in.Tags(),
			fieldCount: make(map[string]int),
		}
		vc.cache[id] = a
	}

	// Check if this metric has fields which we need to count, if so increment
	// the count.
	for fk, fv := range in.Fields() {
		for _, cf := range vc.Fields {
			if fk == cf {
				// Do not process float types to prevent memory from blowing up
				switch fv.(type) {
				default:
					log.Printf("I! Valuecounter: Unsupported field type. " +
						"Must be an int, string or bool. Ignoring.")
					continue
				case uint64, int64, string, bool:
				}
				fn := fmt.Sprintf("%v_%v", fk, fv)
				vc.cache[id].fieldCount[fn]++
			}
		}
	}
}

// Push emits the counters
func (vc *ValueCounter) Push(acc telegraf.Accumulator) {
	for _, agg := range vc.cache {
		fields := map[string]interface{}{}

		for field, count := range agg.fieldCount {
			fields[field] = count
		}

		acc.AddFields(agg.name, fields, agg.tags)
	}
}

// Reset the cache, executed after each push
func (vc *ValueCounter) Reset() {
	vc.cache = make(map[uint64]aggregate)
}

func init() {
	aggregators.Add("valuecounter", func() telegraf.Aggregator {
		return NewValueCounter()
	})
}
plugins/aggregators/valuecounter/valuecounter_test.go (new file, 126 lines)
@@ -0,0 +1,126 @@
package valuecounter

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/testutil"
)

// Create a valuecounter with config
func NewTestValueCounter(fields []string) telegraf.Aggregator {
	vc := &ValueCounter{
		Fields: fields,
	}
	vc.Reset()

	return vc
}

var m1, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"status":    200,
		"somefield": 20.1,
		"foobar":    "bar",
	},
	time.Now(),
)

var m2, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"status":    "OK",
		"ignoreme":  "string",
		"andme":     true,
		"boolfield": false,
	},
	time.Now(),
)

func BenchmarkApply(b *testing.B) {
	vc := NewTestValueCounter([]string{"status"})

	for n := 0; n < b.N; n++ {
		vc.Add(m1)
		vc.Add(m2)
	}
}

// Test basic functionality
func TestBasic(t *testing.T) {
	vc := NewTestValueCounter([]string{"status"})
	acc := testutil.Accumulator{}

	vc.Add(m1)
	vc.Add(m2)
	vc.Add(m1)
	vc.Push(&acc)

	expectedFields := map[string]interface{}{
		"status_200": 2,
		"status_OK":  1,
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test with multiple fields to count
func TestMultipleFields(t *testing.T) {
	vc := NewTestValueCounter([]string{"status", "somefield", "boolfield"})
	acc := testutil.Accumulator{}

	vc.Add(m1)
	vc.Add(m2)
	vc.Add(m2)
	vc.Add(m1)
	vc.Push(&acc)

	expectedFields := map[string]interface{}{
		"status_200":      2,
		"status_OK":       2,
		"boolfield_false": 2,
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test with a reset between two runs
func TestWithReset(t *testing.T) {
	vc := NewTestValueCounter([]string{"status"})
	acc := testutil.Accumulator{}

	vc.Add(m1)
	vc.Add(m1)
	vc.Add(m2)
	vc.Push(&acc)

	expectedFields := map[string]interface{}{
		"status_200": 2,
		"status_OK":  1,
	}
	expectedTags := map[string]string{
		"foo": "bar",
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)

	acc.ClearMetrics()
	vc.Reset()

	vc.Add(m2)
	vc.Add(m2)
	vc.Add(m1)
	vc.Push(&acc)

	expectedFields = map[string]interface{}{
		"status_200": 1,
		"status_OK":  2,
	}
	acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
@@ -9,6 +9,27 @@ The metric names, to make it less complicated in querying, have replaced all `-`

Each metric value is cast, in order of preference, to an integer, then a boolean, then a string, using the first conversion that succeeds.
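A minimal sketch of that conversion order (illustrative; the plugin's actual helper may differ in detail):

```go
package main

import (
	"fmt"
	"strconv"
)

// castValue returns the first type that parses: integer, then boolean,
// falling back to the raw string.
func castValue(s string) interface{} {
	if i, err := strconv.ParseInt(s, 10, 64); err == nil {
		return i
	}
	if b, err := strconv.ParseBool(s); err == nil {
		return b
	}
	return s
}

func main() {
	fmt.Println(castValue("42"), castValue("true"), castValue("ns1")) // 42 true ns1
}
```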
### Configuration:
```toml
# Read stats from aerospike server(s)
[[inputs.aerospike]]
  ## Aerospike servers to connect to (with port)
  ## This plugin will query all namespaces the aerospike
  ## server has configured and get stats for them.
  servers = ["localhost:3000"]

  # username = "telegraf"
  # password = "pa$$word"

  ## Optional TLS Config
  # enable_tls = false
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## If false, skip chain & host verification
  # insecure_skip_verify = true
```

### Measurements:

The aerospike metrics are under two measurement names:

@@ -1,6 +1,7 @@
package aerospike

import (
	"crypto/tls"
	"errors"
	"log"
	"net"
@@ -10,13 +11,24 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	tlsint "github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"

	as "github.com/aerospike/aerospike-client-go"
)

type Aerospike struct {
	Servers []string
	Servers []string `toml:"servers"`

	Username string `toml:"username"`
	Password string `toml:"password"`

	EnableTLS bool `toml:"enable_tls"`
	EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls
	tlsint.ClientConfig

	initialized bool
	tlsConfig   *tls.Config
}

var sampleConfig = `
@@ -24,6 +36,17 @@ var sampleConfig = `
  ## This plugin will query all namespaces the aerospike
  ## server has configured and get stats for them.
  servers = ["localhost:3000"]

  # username = "telegraf"
  # password = "pa$$word"

  ## Optional TLS Config
  # enable_tls = false
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## If false, skip chain & host verification
  # insecure_skip_verify = true
`

func (a *Aerospike) SampleConfig() string {
@@ -35,6 +58,18 @@ func (a *Aerospike) Description() string {
}

func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
	if !a.initialized {
		tlsConfig, err := a.ClientConfig.TLSConfig()
		if err != nil {
			return err
		}
		if tlsConfig == nil && (a.EnableTLS || a.EnableSSL) {
			tlsConfig = &tls.Config{}
		}
		a.tlsConfig = tlsConfig
		a.initialized = true
	}

	if len(a.Servers) == 0 {
		return a.gatherServer("127.0.0.1:3000", acc)
	}
@@ -63,7 +98,11 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
		iport = 3000
	}

	c, err := as.NewClient(host, iport)
	policy := as.NewClientPolicy()
	policy.User = a.Username
	policy.Password = a.Password
	policy.TlsConfig = a.tlsConfig
	c, err := as.NewClientWithPolicy(policy, host, iport)
	if err != nil {
		return err
	}
@@ -4,8 +4,10 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
	_ "github.com/influxdata/telegraf/plugins/inputs/aurora"
	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
	_ "github.com/influxdata/telegraf/plugins/inputs/bond"
	_ "github.com/influxdata/telegraf/plugins/inputs/burrow"
	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
	_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
	_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
@@ -24,11 +26,13 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
	_ "github.com/influxdata/telegraf/plugins/inputs/exec"
	_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
	_ "github.com/influxdata/telegraf/plugins/inputs/fibaro"
	_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
	_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
	_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
	_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
	_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
	_ "github.com/influxdata/telegraf/plugins/inputs/http"
	_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
@@ -36,9 +40,11 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
	_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
	_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
	_ "github.com/influxdata/telegraf/plugins/inputs/ipset"
	_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
	_ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry"
	_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
	_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
@@ -47,12 +53,14 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
	_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
	_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
	_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
	_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
	_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
	_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
	_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
	_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
	_ "github.com/influxdata/telegraf/plugins/inputs/nats"
	_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
@@ -61,6 +69,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
	_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi"
	_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
	_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
@@ -88,11 +97,13 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/solr"
	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
	_ "github.com/influxdata/telegraf/plugins/inputs/syslog"
	_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/system"
	_ "github.com/influxdata/telegraf/plugins/inputs/tail"
	_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
	_ "github.com/influxdata/telegraf/plugins/inputs/tengine"
	_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
	_ "github.com/influxdata/telegraf/plugins/inputs/trig"
	_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
@@ -15,28 +15,53 @@ The following defaults are known to work with RabbitMQ:
```toml
# AMQP consumer plugin
[[inputs.amqp_consumer]]
  ## AMQP url
  url = "amqp://localhost:5672/influxdb"
  ## AMQP exchange
  ## Broker to consume from.
  ##   deprecated in 1.7; use the brokers option
  # url = "amqp://localhost:5672/influxdb"

  ## Brokers to consume from. If multiple brokers are specified a random broker
  ## will be selected anytime a connection is established. This can be
  ## helpful for load balancing when not using a dedicated load balancer.
  brokers = ["amqp://localhost:5672/influxdb"]

  ## Authentication credentials for the PLAIN auth_method.
  # username = ""
  # password = ""

  ## Exchange to declare and consume from.
  exchange = "telegraf"

  ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
  # exchange_type = "topic"

  ## If true, exchange will be passively declared.
  # exchange_passive = false

  ## Exchange durability can be either "transient" or "durable".
  # exchange_durability = "durable"

  ## Additional exchange arguments.
  # exchange_arguments = { }
  # exchange_arguments = {"hash_property" = "timestamp"}

  ## AMQP queue name
  queue = "telegraf"
  ## Binding Key
  binding_key = "#"

  ## Controls how many messages the server will try to keep on the network
  ## for consumers before receiving delivery acks.
  #prefetch_count = 50
  ## Maximum number of messages server should give to the worker.
  # prefetch_count = 50

  ## Auth method. PLAIN and EXTERNAL are supported.
  ## Auth method. PLAIN and EXTERNAL are supported
  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  ## described here: https://www.rabbitmq.com/plugins.html
  # auth_method = "PLAIN"
  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to consume.
@@ -1,8 +1,10 @@
package amqp_consumer

import (
	"errors"
	"fmt"
	"log"
	"math/rand"
	"strings"
	"sync"
	"time"
@@ -10,16 +12,23 @@ import (
	"github.com/streadway/amqp"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)

// AMQPConsumer is the top level struct for this plugin
type AMQPConsumer struct {
	URL string
	// AMQP exchange
	Exchange string
	URL      string   `toml:"url"` // deprecated in 1.7; use brokers
	Brokers  []string `toml:"brokers"`
	Username string   `toml:"username"`
	Password string   `toml:"password"`

	Exchange           string            `toml:"exchange"`
	ExchangeType       string            `toml:"exchange_type"`
	ExchangeDurability string            `toml:"exchange_durability"`
	ExchangePassive    bool              `toml:"exchange_passive"`
	ExchangeArguments  map[string]string `toml:"exchange_arguments"`

	// Queue Name
	Queue string
	// Binding Key
@@ -31,14 +40,7 @@ type AMQPConsumer struct {

	// AMQP Auth method
	AuthMethod string
	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool
	tls.ClientConfig

	parser parsers.Parser
	conn   *amqp.Connection
@@ -55,34 +57,66 @@ func (a *externalAuth) Response() string {
}

const (
	DefaultAuthMethod = "PLAIN"
	DefaultAuthMethod = "PLAIN"

	DefaultBroker = "amqp://localhost:5672/influxdb"

	DefaultExchangeType       = "topic"
	DefaultExchangeDurability = "durable"

	DefaultPrefetchCount = 50
)

func (a *AMQPConsumer) SampleConfig() string {
	return `
  ## AMQP url
  url = "amqp://localhost:5672/influxdb"
  ## AMQP exchange
  ## Broker to consume from.
  ##   deprecated in 1.7; use the brokers option
  # url = "amqp://localhost:5672/influxdb"

  ## Brokers to consume from. If multiple brokers are specified a random broker
  ## will be selected anytime a connection is established. This can be
  ## helpful for load balancing when not using a dedicated load balancer.
  brokers = ["amqp://localhost:5672/influxdb"]

  ## Authentication credentials for the PLAIN auth_method.
  # username = ""
  # password = ""

  ## Exchange to declare and consume from.
  exchange = "telegraf"

  ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
  # exchange_type = "topic"

  ## If true, exchange will be passively declared.
  # exchange_passive = false

  ## Exchange durability can be either "transient" or "durable".
  # exchange_durability = "durable"

  ## Additional exchange arguments.
  # exchange_arguments = { }
  # exchange_arguments = {"hash_property" = "timestamp"}

  ## AMQP queue name
  queue = "telegraf"

  ## Binding Key
  binding_key = "#"

  ## Maximum number of messages server should give to the worker.
  prefetch_count = 50
  # prefetch_count = 50

  ## Auth method. PLAIN and EXTERNAL are supported
  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  ## described here: https://www.rabbitmq.com/plugins.html
  # auth_method = "PLAIN"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to consume.
@@ -108,22 +142,26 @@ func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {

func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
	// make new tls config
	tls, err := internal.GetTLSConfig(
		a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
	tls, err := a.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}

	// parse auth method
	var sasl []amqp.Authentication // nil by default

	var auth []amqp.Authentication
	if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
		sasl = []amqp.Authentication{&externalAuth{}}
		auth = []amqp.Authentication{&externalAuth{}}
	} else if a.Username != "" || a.Password != "" {
		auth = []amqp.Authentication{
			&amqp.PlainAuth{
				Username: a.Username,
				Password: a.Password,
			},
		}
	}

	config := amqp.Config{
		TLSClientConfig: tls,
		SASL:            sasl, // if nil, it will be PLAIN
		SASL:            auth, // if nil, it will be PLAIN
	}
	return &config, nil
}
@@ -145,23 +183,25 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
	go a.process(msgs, acc)

	go func() {
		err := <-a.conn.NotifyClose(make(chan *amqp.Error))
		if err == nil {
			return
		}

		log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
		for {
			msgs, err := a.connect(amqpConf)
			if err != nil {
				log.Printf("E! AMQP connection failed: %s", err)
				time.Sleep(10 * time.Second)
				continue
		for {
			err := <-a.conn.NotifyClose(make(chan *amqp.Error))
			if err == nil {
				break
			}

			a.wg.Add(1)
			go a.process(msgs, acc)
			break
			log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
			for {
				msgs, err := a.connect(amqpConf)
				if err != nil {
					log.Printf("E! AMQP connection failed: %s", err)
					time.Sleep(10 * time.Second)
					continue
				}

				a.wg.Add(1)
				go a.process(msgs, acc)
				break
			}
		}
	}()
@@ -169,28 +209,55 @@
}

func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
	conn, err := amqp.DialConfig(a.URL, *amqpConf)
	if err != nil {
		return nil, err
	brokers := a.Brokers
	if len(brokers) == 0 {
		brokers = []string{a.URL}
	}
	a.conn = conn

	ch, err := conn.Channel()
	p := rand.Perm(len(brokers))
	for _, n := range p {
		broker := brokers[n]
		log.Printf("D! [amqp_consumer] connecting to %q", broker)
		conn, err := amqp.DialConfig(broker, *amqpConf)
		if err == nil {
			a.conn = conn
			log.Printf("D! [amqp_consumer] connected to %q", broker)
			break
		}
		log.Printf("D! [amqp_consumer] error connecting to %q", broker)
	}

	if a.conn == nil {
		return nil, errors.New("could not connect to any broker")
	}

	ch, err := a.conn.Channel()
	if err != nil {
		return nil, fmt.Errorf("Failed to open a channel: %s", err)
	}

	err = ch.ExchangeDeclare(
		a.Exchange, // name
		"topic",    // type
		true,       // durable
		false,      // auto-deleted
		false,      // internal
		false,      // no-wait
		nil,        // arguments
	)
	var exchangeDurable = true
	switch a.ExchangeDurability {
	case "transient":
		exchangeDurable = false
	default:
		exchangeDurable = true
	}

	exchangeArgs := make(amqp.Table, len(a.ExchangeArguments))
	for k, v := range a.ExchangeArguments {
		exchangeArgs[k] = v
	}

	err = declareExchange(
		ch,
		a.Exchange,
		a.ExchangeType,
		a.ExchangePassive,
		exchangeDurable,
		exchangeArgs)
	if err != nil {
		return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
		return nil, err
	}

	q, err := ch.QueueDeclare(
@@ -242,6 +309,42 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
	return msgs, err
}

func declareExchange(
	channel *amqp.Channel,
	exchangeName string,
	exchangeType string,
	exchangePassive bool,
	exchangeDurable bool,
	exchangeArguments amqp.Table,
) error {
	var err error
	if exchangePassive {
		err = channel.ExchangeDeclarePassive(
			exchangeName,
			exchangeType,
			exchangeDurable,
			false, // delete when unused
			false, // internal
			false, // no-wait
			exchangeArguments,
		)
	} else {
		err = channel.ExchangeDeclare(
			exchangeName,
			exchangeType,
			exchangeDurable,
			false, // delete when unused
			false, // internal
			false, // no-wait
			exchangeArguments,
		)
	}
	if err != nil {
		return fmt.Errorf("error declaring exchange: %v", err)
	}
	return nil
}

// Read messages from queue and add them to the Accumulator
func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
	defer a.wg.Done()
@@ -273,8 +376,11 @@ func (a *AMQPConsumer) Stop() {
func init() {
	inputs.Add("amqp_consumer", func() telegraf.Input {
		return &AMQPConsumer{
			AuthMethod:    DefaultAuthMethod,
			PrefetchCount: DefaultPrefetchCount,
			URL:                DefaultBroker,
			AuthMethod:         DefaultAuthMethod,
			ExchangeType:       DefaultExchangeType,
			ExchangeDurability: DefaultExchangeDurability,
			PrefetchCount:      DefaultPrefetchCount,
		}
	})
}
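The broker selection in `connect` above walks a random permutation and takes the first broker that dials successfully. A self-contained sketch of the same pattern, with a stubbed dial function and hypothetical broker URLs:

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// pickBroker tries brokers in a random order and returns the first one
// that "connects"; dial is stubbed out here for illustration.
func pickBroker(brokers []string, dial func(string) error) (string, error) {
	for _, n := range rand.Perm(len(brokers)) {
		if err := dial(brokers[n]); err == nil {
			return brokers[n], nil
		}
	}
	return "", errors.New("could not connect to any broker")
}

func main() {
	brokers := []string{"amqp://a:5672/", "amqp://b:5672/"} // hypothetical
	b, err := pickBroker(brokers, func(string) error { return nil })
	fmt.Println(b, err)
}
```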
@@ -2,7 +2,7 @@

The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).

Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server reference the [module documenation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server reference the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).

### Configuration:

@@ -21,11 +21,11 @@ Typically, the `mod_status` module is configured to expose a page at the `/serve
  ## Maximum time to receive response.
  # response_timeout = "5s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
```
@@ -13,6 +13,7 @@ import (

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -21,14 +22,7 @@ type Apache struct {
	Username        string
	Password        string
	ResponseTimeout internal.Duration
	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool
	tls.ClientConfig

	client *http.Client
}
@@ -46,11 +40,11 @@ var sampleConfig = `
  ## Maximum time to receive response.
  # response_timeout = "5s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`

@@ -63,6 +57,8 @@ func (n *Apache) Description() string {
}

func (n *Apache) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup

	if len(n.Urls) == 0 {
		n.Urls = []string{"http://localhost/server-status?auto"}
	}
@@ -78,8 +74,6 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
		n.client = client
	}

	var wg sync.WaitGroup
	wg.Add(len(n.Urls))
	for _, u := range n.Urls {
		addr, err := url.Parse(u)
		if err != nil {
@@ -87,6 +81,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
			continue
		}

		wg.Add(1)
		go func(addr *url.URL) {
			defer wg.Done()
			acc.AddError(n.gatherUrl(addr, acc))
@@ -98,8 +93,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
}

func (n *Apache) createHttpClient() (*http.Client, error) {
	tlsCfg, err := internal.GetTLSConfig(
		n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
	tlsCfg, err := n.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}
plugins/inputs/aurora/README.md (new file, 63 lines; diff suppressed because one or more lines are too long)
plugins/inputs/aurora/aurora.go (new file, 280 lines)
@@ -0,0 +1,280 @@
package aurora

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type RoleType int

const (
	Unknown RoleType = iota
	Leader
	Follower
)

func (r RoleType) String() string {
	switch r {
	case Leader:
		return "leader"
	case Follower:
		return "follower"
	default:
		return "unknown"
	}
}

var (
	defaultTimeout = 5 * time.Second
	defaultRoles   = []string{"leader", "follower"}
)

type Vars map[string]interface{}

type Aurora struct {
	Schedulers []string          `toml:"schedulers"`
	Roles      []string          `toml:"roles"`
	Timeout    internal.Duration `toml:"timeout"`
	Username   string            `toml:"username"`
	Password   string            `toml:"password"`
	tls.ClientConfig

	client *http.Client
	urls   []*url.URL
}

var sampleConfig = `
  ## Schedulers are the base addresses of your Aurora Schedulers
  schedulers = ["http://127.0.0.1:8081"]

  ## Set of role types to collect metrics from.
  ##
  ## The scheduler roles are checked each interval by contacting the
  ## scheduler nodes; zookeeper is not contacted.
  # roles = ["leader", "follower"]

  ## Timeout is the max time for total network operations.
  # timeout = "5s"

  ## Username and password are sent using HTTP Basic Auth.
  # username = "username"
  # password = "pa$$word"

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`

func (a *Aurora) SampleConfig() string {
	return sampleConfig
}

func (a *Aurora) Description() string {
	return "Gather metrics from Apache Aurora schedulers"
}

func (a *Aurora) Gather(acc telegraf.Accumulator) error {
	if a.client == nil {
		err := a.initialize()
		if err != nil {
			return err
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration)
	defer cancel()

	var wg sync.WaitGroup
	for _, u := range a.urls {
		wg.Add(1)
		go func(u *url.URL) {
			defer wg.Done()
			role, err := a.gatherRole(ctx, u)
			if err != nil {
				acc.AddError(fmt.Errorf("%s: %v", u, err))
				return
			}

			if !a.roleEnabled(role) {
				return
			}

			err = a.gatherScheduler(ctx, u, role, acc)
			if err != nil {
				acc.AddError(fmt.Errorf("%s: %v", u, err))
			}
		}(u)
	}
	wg.Wait()

	return nil
}

func (a *Aurora) initialize() error {
	tlsCfg, err := a.ClientConfig.TLSConfig()
	if err != nil {
		return err
	}

	client := &http.Client{
		Transport: &http.Transport{
			Proxy:           http.ProxyFromEnvironment,
			TLSClientConfig: tlsCfg,
		},
	}

	urls := make([]*url.URL, 0, len(a.Schedulers))
	for _, s := range a.Schedulers {
		loc, err := url.Parse(s)
		if err != nil {
			return err
		}

		urls = append(urls, loc)
	}

	if a.Timeout.Duration < time.Second {
		a.Timeout.Duration = defaultTimeout
	}

	if len(a.Roles) == 0 {
		a.Roles = defaultRoles
	}

	a.client = client
	a.urls = urls
	return nil
}

func (a *Aurora) roleEnabled(role RoleType) bool {
	if len(a.Roles) == 0 {
		return true
	}

	for _, v := range a.Roles {
		if role.String() == v {
			return true
		}
	}
	return false
}

func (a *Aurora) gatherRole(ctx context.Context, origin *url.URL) (RoleType, error) {
	loc := *origin
	loc.Path = "leaderhealth"
	req, err := http.NewRequest("GET", loc.String(), nil)
	if err != nil {
		return Unknown, err
	}

	if a.Username != "" || a.Password != "" {
		req.SetBasicAuth(a.Username, a.Password)
	}
	req.Header.Add("Accept", "text/plain")

	resp, err := a.client.Do(req.WithContext(ctx))
	if err != nil {
		return Unknown, err
	}
	resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		return Leader, nil
	case http.StatusBadGateway:
		fallthrough
	case http.StatusServiceUnavailable:
		return Follower, nil
	default:
		return Unknown, fmt.Errorf("%v", resp.Status)
	}
}

func (a *Aurora) gatherScheduler(
	ctx context.Context, origin *url.URL, role RoleType, acc telegraf.Accumulator,
) error {
	loc := *origin
	loc.Path = "vars.json"
	req, err := http.NewRequest("GET", loc.String(), nil)
	if err != nil {
		return err
	}

	if a.Username != "" || a.Password != "" {
		req.SetBasicAuth(a.Username, a.Password)
	}
	req.Header.Add("Accept", "application/json")

	resp, err := a.client.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%v", resp.Status)
	}

	var vars Vars
	decoder := json.NewDecoder(resp.Body)
	decoder.UseNumber()
	err = decoder.Decode(&vars)
	if err != nil {
		return fmt.Errorf("decoding response: %v", err)
	}

	var fields = make(map[string]interface{}, len(vars))
	for k, v := range vars {
		switch v := v.(type) {
		case json.Number:
			// Aurora encodes numbers as you would specify them as a literal,
			// use this to determine if a value is a float or int.
			if strings.ContainsAny(v.String(), ".eE") {
				fv, err := v.Float64()
				if err != nil {
					acc.AddError(err)
					continue
				}
				fields[k] = fv
			} else {
				fi, err := v.Int64()
				if err != nil {
					acc.AddError(err)
					continue
				}
				fields[k] = fi
			}
		default:
			continue
		}
	}

	acc.AddFields("aurora",
		fields,
		map[string]string{
			"scheduler": origin.String(),
			"role":      role.String(),
		},
	)
	return nil
}

func init() {
	inputs.Add("aurora", func() telegraf.Input {
		return &Aurora{}
	})
}
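The literal-syntax trick in `gatherScheduler` above (keying off `.`, `e`, or `E` in a `json.Number` to decide between int and float) can be exercised standalone; a minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// UseNumber keeps the raw literal so ints and floats stay distinguishable.
	dec := json.NewDecoder(strings.NewReader(`{"a": 42, "b": 1.5, "c": 2e3}`))
	dec.UseNumber()

	var vars map[string]interface{}
	if err := dec.Decode(&vars); err != nil {
		panic(err)
	}

	for k, v := range vars {
		n := v.(json.Number)
		if strings.ContainsAny(n.String(), ".eE") {
			f, _ := n.Float64()
			fmt.Println(k, "float:", f) // b and c decode as floats
		} else {
			i, _ := n.Int64()
			fmt.Println(k, "int:", i) // a decodes as an int
		}
	}
}
```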
plugins/inputs/aurora/aurora_test.go (new file, 259 lines)
@@ -0,0 +1,259 @@
package aurora

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

type (
	TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
	CheckFunc       func(t *testing.T, err error, acc *testutil.Accumulator)
)

func TestAurora(t *testing.T) {
	ts := httptest.NewServer(http.NotFoundHandler())
	defer ts.Close()

	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
	require.NoError(t, err)

	tests := []struct {
		name         string
		plugin       *Aurora
		schedulers   []string
		roles        []string
		leaderhealth TestHandlerFunc
		varsjson     TestHandlerFunc
		check        CheckFunc
	}{
		{
			name: "minimal",
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
			varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				body := `{
					"variable_scrape_events": 2958,
					"variable_scrape_events_per_sec": 1.0,
					"variable_scrape_micros_per_event": 1484.0,
					"variable_scrape_micros_total": 4401084,
					"variable_scrape_micros_total_per_sec": 1485.0
				}`
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(body))
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.Equal(t, 1, len(acc.Metrics))
				acc.AssertContainsTaggedFields(t,
					"aurora",
					map[string]interface{}{
						"variable_scrape_events":               int64(2958),
						"variable_scrape_events_per_sec":       1.0,
						"variable_scrape_micros_per_event":     1484.0,
						"variable_scrape_micros_total":         int64(4401084),
						"variable_scrape_micros_total_per_sec": 1485.0,
					},
					map[string]string{
						"scheduler": u.String(),
						"role":      "leader",
					},
				)
			},
		},
		{
			name:  "disabled role",
			roles: []string{"leader"},
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusServiceUnavailable)
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.NoError(t, acc.FirstError())
				require.Equal(t, 0, len(acc.Metrics))
			},
		},
		{
			name: "no metrics available",
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
			varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
				w.Write([]byte("{}"))
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.NoError(t, acc.FirstError())
				require.Equal(t, 0, len(acc.Metrics))
			},
		},
		{
			name: "string metrics skipped",
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
			varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				body := `{
					"foo": "bar"
				}`
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(body))
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.NoError(t, acc.FirstError())
				require.Equal(t, 0, len(acc.Metrics))
			},
		},
		{
			name: "float64 unparseable",
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
			varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				// too large
				body := `{
					"foo": 1e309
				}`
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(body))
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.Error(t, acc.FirstError())
				require.Equal(t, 0, len(acc.Metrics))
			},
		},
		{
			name: "int64 unparseable",
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
			varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				// too large
				body := `{
					"foo": 9223372036854775808
				}`
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(body))
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.Error(t, acc.FirstError())
				require.Equal(t, 0, len(acc.Metrics))
			},
		},
		{
			name: "bad json",
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
			varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				body := `{]`
				w.WriteHeader(http.StatusOK)
				w.Write([]byte(body))
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.Error(t, acc.FirstError())
				require.Equal(t, 0, len(acc.Metrics))
			},
		},
		{
			name: "wrong status code",
			leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			},
			varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
				body := `{
					"value": 42
				}`
				w.WriteHeader(http.StatusServiceUnavailable)
				w.Write([]byte(body))
			},
			check: func(t *testing.T, err error, acc *testutil.Accumulator) {
				require.NoError(t, err)
				require.Error(t, acc.FirstError())
				require.Equal(t, 0, len(acc.Metrics))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				switch r.URL.Path {
				case "/leaderhealth":
					tt.leaderhealth(t, w, r)
				case "/vars.json":
					tt.varsjson(t, w, r)
				default:
					w.WriteHeader(http.StatusNotFound)
				}
			})

			var acc testutil.Accumulator
			plugin := &Aurora{}
			plugin.Schedulers = []string{u.String()}
			plugin.Roles = tt.roles
			err := plugin.Gather(&acc)
			tt.check(t, err, &acc)
		})
	}
}

func TestBasicAuth(t *testing.T) {
	ts := httptest.NewServer(http.NotFoundHandler())
	defer ts.Close()

	u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
	require.NoError(t, err)

	tests := []struct {
		name     string
		username string
		password string
	}{
		{
			name: "no auth",
		},
		{
			name:     "basic auth",
			username: "username",
			password: "pa$$word",
		},
		{
			name:     "username only",
			username: "username",
		},
		{
			name:     "password only",
			password: "pa$$word",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				username, password, _ := r.BasicAuth()
				require.Equal(t, tt.username, username)
				require.Equal(t, tt.password, password)
				w.WriteHeader(http.StatusOK)
				w.Write([]byte("{}"))
			})

			var acc testutil.Accumulator
			plugin := &Aurora{}
			plugin.Schedulers = []string{u.String()}
|
||||
plugin.Username = tt.username
|
||||
plugin.Password = tt.password
|
||||
err := plugin.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,7 @@
# Bond Input Plugin

The Bond Input plugin collects bond interface status, bond's slaves interfaces
status and failures count of bond's slaves interfaces.
The Bond input plugin collects network bond interface status for both the
network bond interface as well as slave interfaces.
The plugin collects these metrics from `/proc/net/bonding/*` files.

### Configuration:
100  plugins/inputs/burrow/README.md  Normal file
@@ -0,0 +1,100 @@
# Telegraf Plugin: Burrow

Collect Kafka topic, consumer and partition status
via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint).

Supported Burrow version: `1.x`

### Configuration

```toml
[[inputs.burrow]]
  ## Burrow API endpoints in format "schema://host:port".
  ## Default is "http://localhost:8000".
  servers = ["http://localhost:8000"]

  ## Override Burrow API prefix.
  ## Useful when Burrow is behind a reverse proxy.
  # api_prefix = "/v3/kafka"

  ## Maximum time to receive a response.
  # response_timeout = "5s"

  ## Limit per-server concurrent connections.
  ## Useful in case of a large number of topics or consumer groups.
  # concurrent_connections = 20

  ## Filter clusters, default is no filtering.
  ## Values can be specified as glob patterns.
  # clusters_include = []
  # clusters_exclude = []

  ## Filter consumer groups, default is no filtering.
  ## Values can be specified as glob patterns.
  # groups_include = []
  # groups_exclude = []

  ## Filter topics, default is no filtering.
  ## Values can be specified as glob patterns.
  # topics_include = []
  # topics_exclude = []

  ## Credentials for basic HTTP authentication.
  # username = ""
  # password = ""

  ## Optional SSL config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  # insecure_skip_verify = false
```

### Partition Status mappings

* `OK` = 1
* `NOT_FOUND` = 2
* `WARN` = 3
* `ERR` = 4
* `STOP` = 5
* `STALL` = 6

> Unknown values are mapped to 0.
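For reference, the numeric `status_code` is derived from the status string with a plain lookup; a minimal sketch of the mapping (it mirrors `mapStatusToCode` in `burrow.go`, shown later in this diff):

```go
// statusCode maps a Burrow status string to the numeric code reported
// in the status_code field; anything unrecognised becomes 0, because a
// missing map key yields the int zero value.
func statusCode(status string) int {
	codes := map[string]int{
		"OK": 1, "NOT_FOUND": 2, "WARN": 3,
		"ERR": 4, "STOP": 5, "STALL": 6,
	}
	return codes[status]
}
```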
### Fields

* `burrow_group` (one event per consumer group)
  - status (string, see Partition Status mappings)
  - status_code (int, `1..6`, see Partition Status mappings)
  - partition_count (int, `number of partitions`)
  - total_lag (int64, `totallag`)
  - lag (int64, `maxlag.current_lag || 0`)

* `burrow_partition` (one event per topic partition)
  - status (string, see Partition Status mappings)
  - status_code (int, `1..6`, see Partition Status mappings)
  - lag (int64, `current_lag || 0`)
  - offset (int64, `end.offset`)
  - timestamp (int64, `end.timestamp`)

* `burrow_topic` (one event per topic offset)
  - offset (int64)


### Tags

* `burrow_group`
  - cluster (string)
  - group (string)

* `burrow_partition`
  - cluster (string)
  - group (string)
  - topic (string)
  - partition (int)
  - owner (string)

* `burrow_topic`
  - cluster (string)
  - topic (string)
  - partition (int)
487  plugins/inputs/burrow/burrow.go  Normal file
@@ -0,0 +1,487 @@
package burrow

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
)

const (
	defaultBurrowPrefix          = "/v3/kafka"
	defaultConcurrentConnections = 20
	defaultResponseTimeout       = time.Second * 5
	defaultServer                = "http://localhost:8000"
)

const configSample = `
  ## Burrow API endpoints in format "schema://host:port".
  ## Default is "http://localhost:8000".
  servers = ["http://localhost:8000"]

  ## Override Burrow API prefix.
  ## Useful when Burrow is behind a reverse proxy.
  # api_prefix = "/v3/kafka"

  ## Maximum time to receive a response.
  # response_timeout = "5s"

  ## Limit per-server concurrent connections.
  ## Useful in case of a large number of topics or consumer groups.
  # concurrent_connections = 20

  ## Filter clusters, default is no filtering.
  ## Values can be specified as glob patterns.
  # clusters_include = []
  # clusters_exclude = []

  ## Filter consumer groups, default is no filtering.
  ## Values can be specified as glob patterns.
  # groups_include = []
  # groups_exclude = []

  ## Filter topics, default is no filtering.
  ## Values can be specified as glob patterns.
  # topics_include = []
  # topics_exclude = []

  ## Credentials for basic HTTP authentication.
  # username = ""
  # password = ""

  ## Optional SSL config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  # insecure_skip_verify = false
`

type (
	burrow struct {
		tls.ClientConfig

		Servers               []string
		Username              string
		Password              string
		ResponseTimeout       internal.Duration
		ConcurrentConnections int

		APIPrefix       string `toml:"api_prefix"`
		ClustersExclude []string
		ClustersInclude []string
		GroupsExclude   []string
		GroupsInclude   []string
		TopicsExclude   []string
		TopicsInclude   []string

		client         *http.Client
		filterClusters filter.Filter
		filterGroups   filter.Filter
		filterTopics   filter.Filter
	}

	// response
	apiResponse struct {
		Clusters []string          `json:"clusters"`
		Groups   []string          `json:"consumers"`
		Topics   []string          `json:"topics"`
		Offsets  []int64           `json:"offsets"`
		Status   apiStatusResponse `json:"status"`
	}

	// response: status field
	apiStatusResponse struct {
		Partitions     []apiStatusResponseLag `json:"partitions"`
		Status         string                 `json:"status"`
		PartitionCount int                    `json:"partition_count"`
		Maxlag         *apiStatusResponseLag  `json:"maxlag"`
		TotalLag       int64                  `json:"totallag"`
	}

	// response: lag field
	apiStatusResponseLag struct {
		Topic      string                   `json:"topic"`
		Partition  int32                    `json:"partition"`
		Status     string                   `json:"status"`
		Start      apiStatusResponseLagItem `json:"start"`
		End        apiStatusResponseLagItem `json:"end"`
		CurrentLag int64                    `json:"current_lag"`
		Owner      string                   `json:"owner"`
	}

	// response: lag field item
	apiStatusResponseLagItem struct {
		Offset    int64 `json:"offset"`
		Timestamp int64 `json:"timestamp"`
		Lag       int64 `json:"lag"`
	}
)

func init() {
	inputs.Add("burrow", func() telegraf.Input {
		return &burrow{}
	})
}

func (b *burrow) SampleConfig() string {
	return configSample
}

func (b *burrow) Description() string {
	return "Collect Kafka topics and consumers status from Burrow HTTP API."
}

func (b *burrow) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup

	if len(b.Servers) == 0 {
		b.Servers = []string{defaultServer}
	}

	if b.client == nil {
		b.setDefaults()
		if err := b.compileGlobs(); err != nil {
			return err
		}
		c, err := b.createClient()
		if err != nil {
			return err
		}
		b.client = c
	}

	for _, addr := range b.Servers {
		u, err := url.Parse(addr)
		if err != nil {
			acc.AddError(fmt.Errorf("unable to parse address '%s': %s", addr, err))
			continue
		}
		if u.Path == "" {
			u.Path = b.APIPrefix
		}

		wg.Add(1)
		go func(u *url.URL) {
			defer wg.Done()
			acc.AddError(b.gatherServer(u, acc))
		}(u)
	}

	wg.Wait()
	return nil
}

func (b *burrow) setDefaults() {
	if b.APIPrefix == "" {
		b.APIPrefix = defaultBurrowPrefix
	}
	if b.ConcurrentConnections < 1 {
		b.ConcurrentConnections = defaultConcurrentConnections
	}
	if b.ResponseTimeout.Duration < time.Second {
		b.ResponseTimeout = internal.Duration{
			Duration: defaultResponseTimeout,
		}
	}
}

func (b *burrow) compileGlobs() error {
	var err error

	// compile glob patterns
	b.filterClusters, err = filter.NewIncludeExcludeFilter(b.ClustersInclude, b.ClustersExclude)
	if err != nil {
		return err
	}
	b.filterGroups, err = filter.NewIncludeExcludeFilter(b.GroupsInclude, b.GroupsExclude)
	if err != nil {
		return err
	}
	b.filterTopics, err = filter.NewIncludeExcludeFilter(b.TopicsInclude, b.TopicsExclude)
	if err != nil {
		return err
	}
	return nil
}

func (b *burrow) createClient() (*http.Client, error) {
	tlsCfg, err := b.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: tlsCfg,
		},
		Timeout: b.ResponseTimeout.Duration,
	}

	return client, nil
}

func (b *burrow) getResponse(u *url.URL) (*apiResponse, error) {
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	if b.Username != "" {
		req.SetBasicAuth(b.Username, b.Password)
	}

	res, err := b.client.Do(req)
	if err != nil {
		return nil, err
	}

	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("wrong response: %d", res.StatusCode)
	}

	ares := &apiResponse{}
	dec := json.NewDecoder(res.Body)

	return ares, dec.Decode(ares)
}

func (b *burrow) gatherServer(src *url.URL, acc telegraf.Accumulator) error {
	var wg sync.WaitGroup

	r, err := b.getResponse(src)
	if err != nil {
		return err
	}

	// guard is a counting semaphore: workers block on sending into it,
	// capping concurrent requests at b.ConcurrentConnections per server
	guard := make(chan struct{}, b.ConcurrentConnections)
	for _, cluster := range r.Clusters {
		if !b.filterClusters.Match(cluster) {
			continue
		}

		wg.Add(1)
		go func(cluster string) {
			defer wg.Done()

			// fetch topic list
			// endpoint: <api_prefix>/(cluster)/topic
			ut := appendPathToURL(src, cluster, "topic")
			b.gatherTopics(guard, ut, cluster, acc)
		}(cluster)

		wg.Add(1)
		go func(cluster string) {
			defer wg.Done()

			// fetch consumer group list
			// endpoint: <api_prefix>/(cluster)/consumer
			uc := appendPathToURL(src, cluster, "consumer")
			b.gatherGroups(guard, uc, cluster, acc)
		}(cluster)
	}

	wg.Wait()
	return nil
}

func (b *burrow) gatherTopics(guard chan struct{}, src *url.URL, cluster string, acc telegraf.Accumulator) {
	var wg sync.WaitGroup

	r, err := b.getResponse(src)
	if err != nil {
		acc.AddError(err)
		return
	}

	for _, topic := range r.Topics {
		if !b.filterTopics.Match(topic) {
			continue
		}

		guard <- struct{}{}
		wg.Add(1)

		go func(topic string) {
			defer func() {
				<-guard
				wg.Done()
			}()

			// fetch topic offsets
			// endpoint: <api_prefix>/<cluster>/topic/<topic>
			tu := appendPathToURL(src, topic)
			tr, err := b.getResponse(tu)
			if err != nil {
				acc.AddError(err)
				return
			}

			b.genTopicMetrics(tr, cluster, topic, acc)
		}(topic)
	}

	wg.Wait()
}

func (b *burrow) genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) {
	for i, offset := range r.Offsets {
		tags := map[string]string{
			"cluster":   cluster,
			"topic":     topic,
			"partition": strconv.Itoa(i),
		}

		acc.AddFields(
			"burrow_topic",
			map[string]interface{}{
				"offset": offset,
			},
			tags,
		)
	}
}

func (b *burrow) gatherGroups(guard chan struct{}, src *url.URL, cluster string, acc telegraf.Accumulator) {
	var wg sync.WaitGroup

	r, err := b.getResponse(src)
	if err != nil {
		acc.AddError(err)
		return
	}

	for _, group := range r.Groups {
		if !b.filterGroups.Match(group) {
			continue
		}

		guard <- struct{}{}
		wg.Add(1)

		go func(group string) {
			defer func() {
				<-guard
				wg.Done()
			}()

			// fetch consumer group status
			// endpoint: <api_prefix>/<cluster>/consumer/<group>/lag
			gl := appendPathToURL(src, group, "lag")
			gr, err := b.getResponse(gl)
			if err != nil {
				acc.AddError(err)
				return
			}

			b.genGroupStatusMetrics(gr, cluster, group, acc)
			b.genGroupLagMetrics(gr, cluster, group, acc)
		}(group)
	}

	wg.Wait()
}

func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
	partitionCount := r.Status.PartitionCount
	if partitionCount == 0 {
		partitionCount = len(r.Status.Partitions)
	}

	// get max timestamp and offset from the partitions list
	offset := int64(0)
	timestamp := int64(0)
	for _, partition := range r.Status.Partitions {
		if partition.End.Offset > offset {
			offset = partition.End.Offset
		}
		if partition.End.Timestamp > timestamp {
			timestamp = partition.End.Timestamp
		}
	}

	lag := int64(0)
	if r.Status.Maxlag != nil {
		lag = r.Status.Maxlag.CurrentLag
	}

	acc.AddFields(
		"burrow_group",
		map[string]interface{}{
			"status":          r.Status.Status,
			"status_code":     mapStatusToCode(r.Status.Status),
			"partition_count": partitionCount,
			"total_lag":       r.Status.TotalLag,
			"lag":             lag,
			"offset":          offset,
			"timestamp":       timestamp,
		},
		map[string]string{
			"cluster": cluster,
			"group":   group,
		},
	)
}

func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
	for _, partition := range r.Status.Partitions {
		acc.AddFields(
			"burrow_partition",
			map[string]interface{}{
				"status":      partition.Status,
				"status_code": mapStatusToCode(partition.Status),
				"lag":         partition.CurrentLag,
				"offset":      partition.End.Offset,
				"timestamp":   partition.End.Timestamp,
			},
			map[string]string{
				"cluster":   cluster,
				"group":     group,
				"topic":     partition.Topic,
				"partition": strconv.FormatInt(int64(partition.Partition), 10),
				"owner":     partition.Owner,
			},
		)
	}
}

func appendPathToURL(src *url.URL, parts ...string) *url.URL {
	dst := new(url.URL)
	*dst = *src

	for i, part := range parts {
		parts[i] = url.PathEscape(part)
	}

	ext := strings.Join(parts, "/")
	dst.Path = fmt.Sprintf("%s/%s", src.Path, ext)
	return dst
}

func mapStatusToCode(src string) int {
	switch src {
	case "OK":
		return 1
	case "NOT_FOUND":
		return 2
	case "WARN":
		return 3
	case "ERR":
		return 4
	case "STOP":
		return 5
	case "STALL":
		return 6
	default:
		return 0
	}
}
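A quick illustration of how `appendPathToURL` composes the endpoints named in the comments above (a sketch assuming it runs in the same package; the host, cluster, and group names are the ones used in the test data):

```go
base, _ := url.Parse("http://localhost:8000/v3/kafka")
lag := appendPathToURL(base, "clustername1", "consumer", "group1", "lag")
fmt.Println(lag.String())
// http://localhost:8000/v3/kafka/clustername1/consumer/group1/lag
// each part is passed through url.PathEscape before joining
```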
285  plugins/inputs/burrow/burrow_test.go  Normal file
@@ -0,0 +1,285 @@
package burrow

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

// remap uri to json file, eg: /v3/kafka -> ./testdata/v3_kafka.json
func getResponseJSON(requestURI string) ([]byte, int) {
	uri := strings.TrimLeft(requestURI, "/")
	mappedFile := strings.Replace(uri, "/", "_", -1)
	jsonFile := fmt.Sprintf("./testdata/%s.json", mappedFile)

	code := http.StatusOK
	_, err := os.Stat(jsonFile)
	if err != nil {
		code = http.StatusNotFound
		jsonFile = "./testdata/error.json"
	}

	// respond with the file's contents
	b, _ := ioutil.ReadFile(jsonFile)
	return b, code
}

// return mocked HTTP server
func getHTTPServer() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, code := getResponseJSON(r.RequestURI)
		// headers must be set before WriteHeader is called
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(code)
		w.Write(body)
	}))
}

// return mocked HTTP server with basic auth
func getHTTPServerBasicAuth() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)

		username, password, authOK := r.BasicAuth()
		if !authOK {
			http.Error(w, "Not authorized", http.StatusUnauthorized)
			return
		}

		// reject the request unless both credentials match
		if username != "test" || password != "test" {
			http.Error(w, "Not authorized", http.StatusUnauthorized)
			return
		}

		// ok, continue
		body, code := getResponseJSON(r.RequestURI)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(code)
		w.Write(body)
	}))
}

// test burrow_topic measurement
func TestBurrowTopic(t *testing.T) {
	s := getHTTPServer()
	defer s.Close()

	plugin := &burrow{Servers: []string{s.URL}}
	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	fields := []map[string]interface{}{
		// topicA
		{"offset": int64(459178195)},
		{"offset": int64(459178022)},
		{"offset": int64(456491598)},
	}
	tags := []map[string]string{
		// topicA
		{"cluster": "clustername1", "topic": "topicA", "partition": "0"},
		{"cluster": "clustername1", "topic": "topicA", "partition": "1"},
		{"cluster": "clustername1", "topic": "topicA", "partition": "2"},
	}

	require.Empty(t, acc.Errors)
	require.Equal(t, true, acc.HasMeasurement("burrow_topic"))
	for i := 0; i < len(fields); i++ {
		acc.AssertContainsTaggedFields(t, "burrow_topic", fields[i], tags[i])
	}
}

// test burrow_partition measurement
func TestBurrowPartition(t *testing.T) {
	s := getHTTPServer()
	defer s.Close()

	plugin := &burrow{
		Servers: []string{s.URL},
	}
	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	fields := []map[string]interface{}{
		{
			"status":      "OK",
			"status_code": 1,
			"lag":         int64(0),
			"offset":      int64(431323195),
			"timestamp":   int64(1515609490008),
		},
		{
			"status":      "OK",
			"status_code": 1,
			"lag":         int64(0),
			"offset":      int64(431322962),
			"timestamp":   int64(1515609490008),
		},
		{
			"status":      "OK",
			"status_code": 1,
			"lag":         int64(0),
			"offset":      int64(428636563),
			"timestamp":   int64(1515609490008),
		},
	}
	tags := []map[string]string{
		{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0", "owner": "kafka1"},
		{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1", "owner": "kafka2"},
		{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2", "owner": "kafka3"},
	}

	require.Empty(t, acc.Errors)
	require.Equal(t, true, acc.HasMeasurement("burrow_partition"))

	for i := 0; i < len(fields); i++ {
		acc.AssertContainsTaggedFields(t, "burrow_partition", fields[i], tags[i])
	}
}

// test burrow_group measurement
func TestBurrowGroup(t *testing.T) {
	s := getHTTPServer()
	defer s.Close()

	plugin := &burrow{
		Servers: []string{s.URL},
	}
	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	fields := []map[string]interface{}{
		{
			"status":          "OK",
			"status_code":     1,
			"partition_count": 3,
			"total_lag":       int64(0),
			"lag":             int64(0),
			"offset":          int64(431323195),
			"timestamp":       int64(1515609490008),
		},
	}

	tags := []map[string]string{
		{"cluster": "clustername1", "group": "group1"},
	}

	require.Empty(t, acc.Errors)
	require.Equal(t, true, acc.HasMeasurement("burrow_group"))

	for i := 0; i < len(fields); i++ {
		acc.AssertContainsTaggedFields(t, "burrow_group", fields[i], tags[i])
	}
}

// collect from multiple servers
func TestMultipleServers(t *testing.T) {
	s1 := getHTTPServer()
	defer s1.Close()

	s2 := getHTTPServer()
	defer s2.Close()

	plugin := &burrow{
		Servers: []string{s1.URL, s2.URL},
	}
	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	require.Exactly(t, 14, len(acc.Metrics))
	require.Empty(t, acc.Errors)
}

// collect multiple times
func TestMultipleRuns(t *testing.T) {
	s := getHTTPServer()
	defer s.Close()

	plugin := &burrow{
		Servers: []string{s.URL},
	}
	for i := 0; i < 4; i++ {
		acc := &testutil.Accumulator{}
		plugin.Gather(acc)

		require.Exactly(t, 7, len(acc.Metrics))
		require.Empty(t, acc.Errors)
	}
}

// collect from an HTTP basic auth server
func TestBasicAuthConfig(t *testing.T) {
	s := getHTTPServerBasicAuth()
	defer s.Close()

	plugin := &burrow{
		Servers:  []string{s.URL},
		Username: "test",
		Password: "test",
	}

	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	require.Exactly(t, 7, len(acc.Metrics))
	require.Empty(t, acc.Errors)
}

// collect from whitelisted clusters
func TestFilterClusters(t *testing.T) {
	s := getHTTPServer()
	defer s.Close()

	plugin := &burrow{
		Servers:         []string{s.URL},
		ClustersInclude: []string{"wrongname*"}, // clustername1 -> no match
	}

	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	// no match by cluster
	require.Exactly(t, 0, len(acc.Metrics))
	require.Empty(t, acc.Errors)
}

// collect from whitelisted groups
func TestFilterGroups(t *testing.T) {
	s := getHTTPServer()
	defer s.Close()

	plugin := &burrow{
		Servers:       []string{s.URL},
		GroupsInclude: []string{"group?"}, // group1 -> match
		TopicsExclude: []string{"*"},      // exclude all
	}

	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	require.Exactly(t, 4, len(acc.Metrics))
	require.Empty(t, acc.Errors)
}

// collect from whitelisted topics
func TestFilterTopics(t *testing.T) {
	s := getHTTPServer()
	defer s.Close()

	plugin := &burrow{
		Servers:       []string{s.URL},
		TopicsInclude: []string{"topic?"}, // topicA -> match
		GroupsExclude: []string{"*"},      // exclude all
	}

	acc := &testutil.Accumulator{}
	plugin.Gather(acc)

	require.Exactly(t, 3, len(acc.Metrics))
	require.Empty(t, acc.Errors)
}
11  plugins/inputs/burrow/testdata/error.json  vendored  Normal file
@@ -0,0 +1,11 @@
{
  "error": true,
  "message": "Detailed error message",
  "request": {
    "uri": "/invalid/request",
    "host": "responding.host.example.com",
    "cluster": "",
    "group": "",
    "topic": ""
  }
}
11  plugins/inputs/burrow/testdata/v3_kafka.json  vendored  Normal file
@@ -0,0 +1,11 @@
{
  "error": false,
  "message": "cluster list returned",
  "clusters": [
    "clustername1"
  ],
  "request": {
    "url": "/v3/kafka",
    "host": "example.com"
  }
}
11  plugins/inputs/burrow/testdata/v3_kafka_clustername1_consumer.json  vendored  Normal file
@@ -0,0 +1,11 @@
{
  "error": false,
  "message": "consumer list returned",
  "consumers": [
    "group1"
  ],
  "request": {
    "url": "/v3/kafka/clustername1/consumer",
    "host": "example.com"
  }
}
90  plugins/inputs/burrow/testdata/v3_kafka_clustername1_consumer_group1_lag.json  vendored  Normal file
@@ -0,0 +1,90 @@
{
  "error": false,
  "message": "consumer status returned",
  "status": {
    "cluster": "clustername1",
    "group": "group1",
    "status": "OK",
    "complete": 1,
    "partitions": [
      {
        "topic": "topicA",
        "partition": 0,
        "owner": "kafka1",
        "status": "OK",
        "start": {
          "offset": 431323195,
          "timestamp": 1515609445004,
          "lag": 0
        },
        "end": {
          "offset": 431323195,
          "timestamp": 1515609490008,
          "lag": 0
        },
        "current_lag": 0,
        "complete": 1
      },
      {
        "topic": "topicA",
        "partition": 1,
        "owner": "kafka2",
        "status": "OK",
        "start": {
          "offset": 431322962,
          "timestamp": 1515609445004,
          "lag": 0
        },
        "end": {
          "offset": 431322962,
          "timestamp": 1515609490008,
          "lag": 0
        },
        "current_lag": 0,
        "complete": 1
      },
      {
        "topic": "topicA",
        "partition": 2,
        "owner": "kafka3",
        "status": "OK",
        "start": {
          "offset": 428636563,
          "timestamp": 1515609445004,
          "lag": 0
        },
        "end": {
          "offset": 428636563,
          "timestamp": 1515609490008,
          "lag": 0
        },
        "current_lag": 0,
        "complete": 1
      }
    ],
    "partition_count": 3,
    "maxlag": {
      "topic": "topicA",
      "partition": 0,
      "owner": "kafka",
      "status": "OK",
      "start": {
        "offset": 431323195,
        "timestamp": 1515609445004,
        "lag": 0
      },
      "end": {
        "offset": 431323195,
        "timestamp": 1515609490008,
        "lag": 0
      },
      "current_lag": 0,
      "complete": 1
    },
    "totallag": 0
  },
  "request": {
    "url": "/v3/kafka/clustername1/consumer/group1/lag",
    "host": "example.com"
  }
}
11  plugins/inputs/burrow/testdata/v3_kafka_clustername1_topic.json  vendored  Normal file
@@ -0,0 +1,11 @@
{
  "error": false,
  "message": "topic list returned",
  "topics": [
    "topicA"
  ],
  "request": {
    "url": "/v3/kafka/clustername1/topic",
    "host": "example.com"
  }
}
13  plugins/inputs/burrow/testdata/v3_kafka_clustername1_topic_topicA.json  vendored  Normal file
@@ -0,0 +1,13 @@
{
  "error": false,
  "message": "topic offsets returned",
  "offsets": [
    459178195,
    459178022,
    456491598
  ],
  "request": {
    "url": "/v3/kafka/clustername1/topic/topicA",
    "host": "example.com"
  }
}
@@ -1,5 +1,8 @@

# Telegraf plugin: Cassandra

### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.

#### Plugin arguments:
- **context** string: Context root used for jolokia url
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
@@ -4,12 +4,14 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type JolokiaClient interface {
@@ -60,7 +62,8 @@ func newCassandraMetric(host string, metric string,
func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
	mname string) {
	for k, v := range values {
		if v != nil {
			switch v.(type) {
			case int64, float64, string, bool:
				fields[mname+"_"+k] = v
			}
		}
@@ -117,7 +120,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
	switch t := values.(type) {
	case map[string]interface{}:
		addValuesAsFields(values.(map[string]interface{}), fields, attribute)
	case interface{}:
	case int64, float64, string, bool:
		fields[attribute] = t
	}
	j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
@@ -172,7 +175,11 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {

func (j *Cassandra) SampleConfig() string {
	return `
	# This is the context root used to compose the jolokia url
	## DEPRECATED: The cassandra plugin has been deprecated. Please use the
	## jolokia2 plugin instead.
	##
	## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2

	context = "/jolokia/read"
	## List of cassandra servers exposing jolokia read service
	servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
@@ -256,6 +263,16 @@ func parseServerTokens(server string) map[string]string {
	return serverTokens
}

func (c *Cassandra) Start(acc telegraf.Accumulator) error {
	log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " +
		"Please use the jolokia2 plugin instead. " +
		"https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2")
	return nil
}

func (c *Cassandra) Stop() {
}

func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
	context := c.Context
	servers := c.Servers
@@ -3,14 +3,15 @@
package conntrack

import (
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
)

func restoreDflts(savedFiles, savedDirs []string) {
@@ -39,6 +40,7 @@ func TestDefaultsUsed(t *testing.T) {

	tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count")
	assert.NoError(t, err)
	defer os.Remove(tmpFile.Name())

	dfltDirs = []string{tmpdir}
	fname := path.Base(tmpFile.Name())
@@ -63,6 +65,8 @@ func TestConfigsUsed(t *testing.T) {
	cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count")
	maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max")
	assert.NoError(t, err)
	defer os.Remove(cntFile.Name())
	defer os.Remove(maxFile.Name())

	dfltDirs = []string{tmpdir}
	cntFname := path.Base(cntFile.Name())
@@ -1,43 +1,59 @@
# Telegraf Input Plugin: Consul
# Consul Input Plugin

This plugin will collect statistics about all health checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed.
This plugin will collect statistics about all health checks registered in
Consul. It uses the [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the
[telemetry](https://www.consul.io/docs/agent/telemetry.html), but Consul can
already report those stats using the StatsD protocol if needed.

## Configuration:
### Configuration:

```
```toml
# Gather health check statuses from services registered in Consul
[[inputs.consul]]
  ## Most of these values defaults to the one configured on a Consul's agent level.
  ## Optional Consul server address (default: "")
  # address = ""
  ## Optional URI scheme for the Consul server (default: "")
  # scheme = ""
  ## Optional ACL token used in every request (default: "")
  ## Consul server address
  # address = "localhost"

  ## URI scheme for the Consul server, one of "http", "https"
  # scheme = "http"

  ## ACL token used in every request
  # token = ""
  ## Optional username used for request HTTP Basic Authentication (default: "")

  ## HTTP Basic Authentication username and password.
  # username = ""
  ## Optional password used for HTTP Basic Authentication (default: "")
  # password = ""
  ## Optional data centre to query the health checks from (default: "")

  ## Data centre to query the health checks from
  # datacentre = ""

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = true

  ## Consul checks' tag splitting
  # When tags are formatted like "key:value" with ":" as a delimiter then
  # they will be split and reported as proper key:value pairs in Telegraf
  # tag_delimiter = ":"
```
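The `tag_delimiter` option above splits each service tag at most once on the delimiter; a minimal sketch of that behaviour (it mirrors the `strings.SplitN` logic in `consul.go` later in this diff, and the tag values come from the plugin's test data):

```go
package main

import (
	"fmt"
	"strings"
)

// splitTag turns a check tag like "env:sandbox" into a key/value pair;
// a tag without the delimiter is kept as-is (key equals value).
func splitTag(tag, delimiter string) (string, string) {
	parts := strings.SplitN(tag, delimiter, 2)
	if len(parts) == 2 {
		return parts[0], parts[1]
	}
	return tag, tag
}

func main() {
	fmt.Println(splitTag("env:sandbox", ":"))             // env sandbox
	fmt.Println(splitTag("tagkey:value:stillvalue", ":")) // tagkey value:stillvalue
	fmt.Println(splitTag("bar", ":"))                     // bar bar
}
```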

## Measurements:
### Metrics:

### Consul:
Tags:
- node: on which node check/service is registered on
- service_name: name of the service (this is the service name not the service ID)
- check_id

Fields:
- check_name
- service_id
- status
- passing
- critical
- warning
- consul_health_checks
  - tags:
    - node (node that check/service is registered on)
    - service_name
    - check_id
  - fields:
    - check_name
    - service_id
    - status
    - passing (integer)
    - critical (integer)
    - warning (integer)

`passing`, `critical`, and `warning` are integer representations of the health
check state. A value of `1` represents that the status was the state of the
@@ -46,8 +62,6 @@ the health check at this sample.
## Example output

```
$ telegraf --config ./telegraf.conf --input-filter consul --test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
```
@@ -2,10 +2,11 @@ package consul

import (
	"net/http"
	"strings"

	"github.com/hashicorp/consul/api"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -16,34 +17,41 @@ type Consul struct {
	Username   string
	Password   string
	Datacentre string

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool
	tls.ClientConfig
	TagDelimiter string

	// client used to connect to the Consul agent
	client *api.Client
}

var sampleConfig = `
  ## Most of these values defaults to the one configured on a Consul's agent level.
  ## Optional Consul server address (default: "localhost")
  ## Consul server address
  # address = "localhost"
  ## Optional URI scheme for the Consul server (default: "http")

  ## URI scheme for the Consul server, one of "http", "https"
  # scheme = "http"
  ## Optional ACL token used in every request (default: "")

  ## ACL token used in every request
  # token = ""
  ## Optional username used for request HTTP Basic Authentication (default: "")

  ## HTTP Basic Authentication username and password.
  # username = ""
  ## Optional password used for HTTP Basic Authentication (default: "")
  # password = ""
  ## Optional data centre to query the health checks from (default: "")

  ## Data centre to query the health checks from
  # datacentre = ""

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = true

  ## Consul checks' tag splitting
  # When tags are formatted like "key:value" with ":" as a delimiter then
  # they will be split and reported as proper key:value pairs in Telegraf
  # tag_delimiter = ":"
`

func (c *Consul) Description() string {
@@ -80,14 +88,12 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
	}
	}

	tlsCfg, err := internal.GetTLSConfig(
		c.SSLCert, c.SSLKey, c.SSLCA, c.InsecureSkipVerify)

	tlsCfg, err := c.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}

	config.HttpClient.Transport = &http.Transport{
	config.Transport = &http.Transport{
		TLSClientConfig: tlsCfg,
	}

@@ -112,6 +118,19 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt
	tags["service_name"] = check.ServiceName
	tags["check_id"] = check.CheckID

	for _, checkTag := range check.ServiceTags {
		if c.TagDelimiter != "" {
			splittedTag := strings.SplitN(checkTag, c.TagDelimiter, 2)
			if len(splittedTag) == 1 {
				tags[checkTag] = checkTag
			} else if len(splittedTag) == 2 {
				tags[splittedTag[0]] = splittedTag[1]
			}
		} else {
			tags[checkTag] = checkTag
		}
	}

	acc.AddFields("consul_health_checks", record, tags)
}
}
@@ -17,6 +17,7 @@ var sampleChecks = []*api.HealthCheck{
		Output:      "OK",
		ServiceID:   "foo.123",
		ServiceName: "foo",
		ServiceTags: []string{"bar", "env:sandbox", "tagkey:value:stillvalue"},
	},
}

@@ -31,9 +32,12 @@ func TestGatherHealthCheck(t *testing.T) {
	}

	expectedTags := map[string]string{
		"node":         "localhost",
		"service_name": "foo",
		"check_id":     "foo.health123",
		"node":                    "localhost",
		"service_name":            "foo",
		"check_id":                "foo.health123",
		"bar":                     "bar",
		"env:sandbox":             "env:sandbox",
		"tagkey:value:stillvalue": "tagkey:value:stillvalue",
	}

	var acc testutil.Accumulator
@@ -43,3 +47,32 @@ func TestGatherHealthCheck(t *testing.T) {

	acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
}

func TestGatherHealthCheckWithDelimitedTags(t *testing.T) {
	expectedFields := map[string]interface{}{
		"check_name": "foo.health",
		"status":     "passing",
		"passing":    1,
		"critical":   0,
		"warning":    0,
		"service_id": "foo.123",
	}

	expectedTags := map[string]string{
		"node":         "localhost",
		"service_name": "foo",
		"check_id":     "foo.health123",
		"bar":          "bar",
		"env":          "sandbox",
		"tagkey":       "value:stillvalue",
	}

	var acc testutil.Accumulator

	consul := &Consul{
		TagDelimiter: ":",
	}
	consul.GatherHealthCheck(&acc, sampleChecks)

	acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
}
@@ -8,11 +8,18 @@ Depending on the work load of your DC/OS cluster, this plugin can quickly
create a high number of series which, when unchecked, can cause high load on
your database.

- Use [measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) liberally to exclude unneeded metrics as well as the node, container, and app include/exclude options.
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#retention-policy-rp).
- Limit the number of series allowed in your database using the `max-series-per-database` and `max-values-per-tag` settings.
- Consider enabling the [TSI](https://docs.influxdata.com/influxdb/v1.3/about_the_project/releasenotes-changelog/#release-notes-8) engine.
- Monitor your [series cardinality](https://docs.influxdata.com/influxdb/v1.3/troubleshooting/frequently-asked-questions/#how-can-i-query-for-series-cardinality).
- Use the
  [measurement filtering](https://docs.influxdata.com/telegraf/latest/administration/configuration/#measurement-filtering)
  options to exclude unneeded tags (see the sketch after this list).
- Write to a database with an appropriate
  [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/).
- Limit series cardinality in your database using the
  [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and
  [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings.
- Consider using the
  [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/).
- Monitor your databases'
  [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality).
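To make the filtering advice concrete, a hedged sketch using Telegraf's standard measurement-filtering options on this plugin (the measurement kept and the tag dropped below are illustrative choices, not prescribed values):

```toml
[[inputs.dcos]]
  ## ...plugin options as documented below...

  ## Standard Telegraf filtering: keep only the measurements you need
  ## and drop high-cardinality tags.
  namepass = ["dcos_node"]
  tagexclude = ["container_id"]
```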
### Configuration:
```toml
@@ -47,10 +54,10 @@ your database.
  ## Maximum time to receive a response from cluster.
  # response_timeout = "20s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## If false, skip chain & host verification
  # insecure_skip_verify = true

@@ -106,7 +113,7 @@ the cluster. For more information on this technique reference
### Metrics:

Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/)
for details on interprete field interpretation.
for details about field interpretation.

- dcos_node
  - tags:
@@ -31,6 +31,7 @@ type Client interface {
}

type APIError struct {
	URL         string
	StatusCode  int
	Title       string
	Description string
@@ -105,9 +106,9 @@ type claims struct {

func (e APIError) Error() string {
	if e.Description != "" {
		return fmt.Sprintf("%s: %s", e.Title, e.Description)
		return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description)
	}
	return e.Title
	return fmt.Sprintf("[%s] %s", e.URL, e.Title)
}

func NewClusterClient(
@@ -156,7 +157,8 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
		return nil, err
	}

	req, err := http.NewRequest("POST", c.url("/acs/api/v1/auth/login"), bytes.NewBuffer(octets))
	loc := c.url("/acs/api/v1/auth/login")
	req, err := http.NewRequest("POST", loc, bytes.NewBuffer(octets))
	if err != nil {
		return nil, err
	}
@@ -189,6 +191,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
	err = dec.Decode(loginError)
	if err != nil {
		err := &APIError{
			URL:        loc,
			StatusCode: resp.StatusCode,
			Title:      resp.Status,
		}
@@ -196,6 +199,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
	}

	err = &APIError{
		URL:         loc,
		StatusCode:  resp.StatusCode,
		Title:       loginError.Title,
		Description: loginError.Description,
@@ -301,6 +305,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return &APIError{
			URL:        url,
			StatusCode: resp.StatusCode,
			Title:      resp.Status,
		}
@@ -315,7 +320,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er
}

func (c *ClusterClient) url(path string) string {
	url := c.clusterURL
	url := *c.clusterURL
	url.Path = path
	return url.String()
}
@@ -325,7 +330,7 @@ func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
		UID: sa.AccountID,
		StandardClaims: jwt.StandardClaims{
			// How long we have to login with this token
			ExpiresAt: int64(5 * time.Minute / time.Second),
			ExpiresAt: time.Now().Add(5 * time.Minute).Unix(),
		},
	})
	return token.SignedString(sa.PrivateKey)
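A note on the `ExpiresAt` change in the last hunk above: the JWT `exp` claim is an absolute Unix timestamp, not a duration. The old expression evaluates to 300, i.e. 00:05:00 on 1970-01-01, so the login token was effectively always expired; the new expression stamps it five minutes into the future. A minimal sketch of the difference:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Old value: a duration mistakenly used as a timestamp.
	old := int64(5 * time.Minute / time.Second) // 300
	// New value: an absolute expiry, five minutes from now.
	fresh := time.Now().Add(5 * time.Minute).Unix()

	fmt.Println(time.Unix(old, 0).UTC())   // 1970-01-01 00:05:00 +0000 UTC
	fmt.Println(time.Unix(fresh, 0).UTC()) // roughly now + 5m
}
```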
@@ -9,28 +9,16 @@ import (
|
||||
"testing"
|
||||
|
||||
jwt "github.com/dgrijalva/jwt-go"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
privateKey = `-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
|
||||
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
|
||||
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
|
||||
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
|
||||
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
|
||||
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
|
||||
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
|
||||
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
|
||||
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
|
||||
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
|
||||
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
|
||||
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
|
||||
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
|
||||
-----END RSA PRIVATE KEY-----`
|
||||
)
|
||||
var privateKey = testutil.NewPKI("../../../testutil/pki").ReadServerKey()
|
||||
|
||||
func TestLogin(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
defer ts.Close()
|
||||
|
||||
var tests = []struct {
|
||||
name string
|
||||
responseCode int
|
||||
@@ -40,16 +28,21 @@ func TestLogin(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "Login successful",
|
||||
responseCode: 200,
|
||||
responseCode: http.StatusOK,
|
||||
responseBody: `{"token": "XXX.YYY.ZZZ"}`,
|
||||
expectedError: nil,
|
||||
expectedToken: "XXX.YYY.ZZZ",
|
||||
},
|
||||
{
|
||||
name: "Unauthorized Error",
|
||||
responseCode: http.StatusUnauthorized,
|
||||
responseBody: `{"title": "x", "description": "y"}`,
|
||||
expectedError: &APIError{http.StatusUnauthorized, "x", "y"},
|
||||
name: "Unauthorized Error",
|
||||
responseCode: http.StatusUnauthorized,
|
||||
responseBody: `{"title": "x", "description": "y"}`,
|
||||
expectedError: &APIError{
|
||||
URL: ts.URL + "/acs/api/v1/auth/login",
|
||||
StatusCode: http.StatusUnauthorized,
|
||||
Title: "x",
|
||||
Description: "y",
|
||||
},
|
||||
expectedToken: "",
|
||||
},
|
||||
}
|
||||
@@ -59,11 +52,11 @@ func TestLogin(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(tt.responseCode)
|
||||
fmt.Fprintln(w, tt.responseBody)
|
||||
})
|
||||
ts := httptest.NewServer(handler)
|
||||
|
||||
u, err := url.Parse(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -82,13 +75,14 @@ func TestLogin(t *testing.T) {
|
||||
} else {
|
||||
require.Nil(t, auth)
|
||||
}
|
||||
|
||||
ts.Close()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSummary(t *testing.T) {
+    ts := httptest.NewServer(http.NotFoundHandler())
+    defer ts.Close()

    var tests = []struct {
        name         string
        responseCode int
@@ -98,7 +92,7 @@ func TestGetSummary(t *testing.T) {
    }{
        {
            name:          "No nodes",
-            responseCode:  200,
+            responseCode:  http.StatusOK,
            responseBody:  `{"cluster": "a", "slaves": []}`,
            expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
            expectedError: nil,
@@ -108,11 +102,15 @@ func TestGetSummary(t *testing.T) {
            responseCode:  http.StatusUnauthorized,
            responseBody:  `<html></html>`,
            expectedValue: nil,
-            expectedError: &APIError{StatusCode: http.StatusUnauthorized, Title: "401 Unauthorized"},
+            expectedError: &APIError{
+                URL:        ts.URL + "/mesos/master/state-summary",
+                StatusCode: http.StatusUnauthorized,
+                Title:      "401 Unauthorized",
+            },
        },
        {
            name:         "Has nodes",
-            responseCode: 200,
+            responseCode: http.StatusOK,
            responseBody: `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
            expectedValue: &Summary{
                Cluster: "a",
@@ -127,12 +125,12 @@ func TestGetSummary(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
-            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
-            ts := httptest.NewServer(handler)

            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

@@ -142,14 +140,15 @@ func TestGetSummary(t *testing.T) {

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, summary)

-            ts.Close()
        })
    }

}

func TestGetNodeMetrics(t *testing.T) {
+    ts := httptest.NewServer(http.NotFoundHandler())
+    defer ts.Close()

    var tests = []struct {
        name         string
        responseCode int
@@ -159,7 +158,7 @@ func TestGetNodeMetrics(t *testing.T) {
    }{
        {
            name:          "Empty Body",
-            responseCode:  200,
+            responseCode:  http.StatusOK,
            responseBody:  `{}`,
            expectedValue: &Metrics{},
            expectedError: nil,
@@ -168,12 +167,12 @@ func TestGetNodeMetrics(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
-            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
-            ts := httptest.NewServer(handler)

            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

@@ -183,14 +182,15 @@ func TestGetNodeMetrics(t *testing.T) {

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, m)

-            ts.Close()
        })
    }

}

func TestGetContainerMetrics(t *testing.T) {
+    ts := httptest.NewServer(http.NotFoundHandler())
+    defer ts.Close()

    var tests = []struct {
        name         string
        responseCode int
@@ -199,8 +199,8 @@ func TestGetContainerMetrics(t *testing.T) {
        expectedError error
    }{
        {
-            name:          "204 No Contents",
-            responseCode:  204,
+            name:          "204 No Content",
+            responseCode:  http.StatusNoContent,
            responseBody:  ``,
            expectedValue: &Metrics{},
            expectedError: nil,
@@ -209,12 +209,12 @@ func TestGetContainerMetrics(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
-            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
-            ts := httptest.NewServer(handler)

            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

@@ -224,8 +224,6 @@ func TestGetContainerMetrics(t *testing.T) {

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, m)

-            ts.Close()
        })
    }

@@ -13,6 +13,7 @@ import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/filter"
    "github.com/influxdata/telegraf/internal"
+    "github.com/influxdata/telegraf/internal/tls"
    "github.com/influxdata/telegraf/plugins/inputs"
)

@@ -56,11 +57,7 @@ type DCOS struct {

    MaxConnections  int
    ResponseTimeout internal.Duration

-    SSLCA              string `toml:"ssl_ca"`
-    SSLCert            string `toml:"ssl_cert"`
-    SSLKey             string `toml:"ssl_key"`
-    InsecureSkipVerify bool   `toml:"insecure_skip_verify"`
+    tls.ClientConfig

    client Client
    creds  Credentials
@@ -107,10 +104,10 @@ var sampleConfig = `
  ## Maximum time to receive a response from cluster.
  # response_timeout = "20s"

-  ## Optional SSL Config
-  # ssl_ca = "/etc/telegraf/ca.pem"
-  # ssl_cert = "/etc/telegraf/cert.pem"
-  # ssl_key = "/etc/telegraf/key.pem"
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
  ## If false, skip chain & host verification
  # insecure_skip_verify = true

@@ -351,8 +348,7 @@ func (d *DCOS) init() error {
}

func (d *DCOS) createClient() (Client, error) {
-    tlsCfg, err := internal.GetTLSConfig(
-        d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
+    tlsCfg, err := d.ClientConfig.TLSConfig()
    if err != nil {
        return nil, err
    }

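The change above swaps four plugin-local SSL fields for an embedded `tls.ClientConfig` whose `TLSConfig()` method builds the `*tls.Config`. A rough, self-contained sketch of what such an embedded config looks like — the field names follow the new `tls_*` options, but this is an illustration, not Telegraf's actual `internal/tls` implementation:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

// ClientConfig is a minimal stand-in for an embeddable TLS config struct.
type ClientConfig struct {
	TLSCA              string `toml:"tls_ca"`
	TLSCert            string `toml:"tls_cert"`
	TLSKey             string `toml:"tls_key"`
	InsecureSkipVerify bool   `toml:"insecure_skip_verify"`
}

// TLSConfig returns nil when nothing is configured, so callers can
// fall back to a plain (non-TLS) HTTP client.
func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
	if c.TLSCA == "" && c.TLSCert == "" && c.TLSKey == "" && !c.InsecureSkipVerify {
		return nil, nil
	}
	cfg := &tls.Config{InsecureSkipVerify: c.InsecureSkipVerify}
	if c.TLSCA != "" {
		pem, err := os.ReadFile(c.TLSCA)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(pem) {
			return nil, fmt.Errorf("could not parse CA %q", c.TLSCA)
		}
		cfg.RootCAs = pool
	}
	if c.TLSCert != "" && c.TLSKey != "" {
		cert, err := tls.LoadX509KeyPair(c.TLSCert, c.TLSKey)
		if err != nil {
			return nil, err
		}
		cfg.Certificates = []tls.Certificate{cert}
	}
	return cfg, nil
}

func main() {
	cfg, err := (&ClientConfig{InsecureSkipVerify: true}).TLSConfig()
	fmt.Println(cfg != nil, err)
}
```

Embedding one shared struct is what lets every plugin expose identical `tls_*` options instead of re-declaring `ssl_*` fields per plugin.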
@@ -3,9 +3,8 @@
The DNS plugin gathers DNS query times in milliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\))

### Configuration:

-```
-# Sample Config:
+```toml
# Query given DNS server and gives statistics
[[inputs.dns_query]]
  ## servers to query
  servers = ["8.8.8.8"]
@@ -27,29 +26,20 @@ The DNS plugin gathers DNS query times in milliseconds - like [Dig](https://en.wi
  # timeout = 2
```

-For querying more than one record type make:
+### Metrics:

+- dns_query
+  - tags:
+    - server
+    - domain
+    - record_type
+    - result
+  - fields:
+    - query_time_ms (float)
+    - result_code (int, success = 0, timeout = 1, error = 2)

+### Example Output:

```
-[[inputs.dns_query]]
-  domains = ["mjasion.pl"]
-  servers = ["8.8.8.8", "8.8.4.4"]
-  record_type = "A"

-[[inputs.dns_query]]
-  domains = ["mjasion.pl"]
-  servers = ["8.8.8.8", "8.8.4.4"]
-  record_type = "MX"
-```

-### Tags:

-- server
-- domain
-- record_type

-### Example output:

-```
-telegraf --input-filter dns_query --test
-> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
+dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
```

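For context on where `query_time_ms` comes from: the plugin times the DNS exchange itself using the github.com/miekg/dns client, which reports the round-trip time directly. A standalone sketch under that assumption — the server address, domain, and timeout below are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.ReadTimeout = 2 * time.Second // mirrors the plugin's timeout option

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	// Exchange returns the round-trip time alongside the response;
	// that duration is what becomes the query_time_ms field.
	r, rtt, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Printf("rcode=%s query_time_ms=%f answers=%d\n",
		dns.RcodeToString[r.Rcode],
		float64(rtt)/float64(time.Millisecond),
		len(r.Answer))
}
```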
@@ -13,6 +13,14 @@ import (
    "github.com/influxdata/telegraf/plugins/inputs"
)

+type ResultType uint64
+
+const (
+    Success ResultType = 0
+    Timeout            = 1
+    Error              = 2
+)

type DnsQuery struct {
    // Domains or subdomains to query
    Domains []string
@@ -66,15 +74,24 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {

    for _, domain := range d.Domains {
        for _, server := range d.Servers {
-            dnsQueryTime, err := d.getDnsQueryTime(domain, server)
-            acc.AddError(err)
+            fields := make(map[string]interface{}, 2)
            tags := map[string]string{
                "server":      server,
                "domain":      domain,
                "record_type": d.RecordType,
            }

-            fields := map[string]interface{}{"query_time_ms": dnsQueryTime}
+            dnsQueryTime, err := d.getDnsQueryTime(domain, server)
+            if err == nil {
+                setResult(Success, fields, tags)
+                fields["query_time_ms"] = dnsQueryTime
+            } else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
+                setResult(Timeout, fields, tags)
+            } else if err != nil {
+                setResult(Error, fields, tags)
+                acc.AddError(err)
+            }

            acc.AddFields("dns_query", fields, tags)
        }
    }
@@ -165,6 +182,21 @@ func (d *DnsQuery) parseRecordType() (uint16, error) {
    return recordType, error
}

+func setResult(result ResultType, fields map[string]interface{}, tags map[string]string) {
+    var tag string
+    switch result {
+    case Success:
+        tag = "success"
+    case Timeout:
+        tag = "timeout"
+    case Error:
+        tag = "error"
+    }
+
+    tags["result"] = tag
+    fields["result_code"] = uint64(result)
+}

func init() {
    inputs.Add("dns_query", func() telegraf.Input {
        return &DnsQuery{}

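One nit on the constant block added above: as written, only `Success` carries the `ResultType` type; `Timeout` and `Error` are untyped integer constants (the code still compiles, since untyped constants convert implicitly where a `ResultType` is expected). A slightly more idiomatic spelling using `iota`, offered here as a suggestion rather than what the PR contains:

```go
package main

import "fmt"

type ResultType uint64

const (
	Success ResultType = iota // 0
	Timeout                   // 1
	Error                     // 2
)

func main() {
	fmt.Println(Success, Timeout, Error) // 0 1 2
}
```

With `iota`, every constant in the block inherits the `ResultType` type and the values stay in sync if a new result kind is inserted.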
@@ -4,12 +4,11 @@ The docker plugin uses the Docker Engine API to gather metrics on running
docker containers.

The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
-to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
-[Library Documentation](https://godoc.org/github.com/moby/moby/client)
+to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/).

### Configuration:

-```
+```toml
# Read metrics about docker containers
[[inputs.docker]]
  ## Docker Endpoint
@@ -31,6 +30,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
  container_name_include = []
  container_name_exclude = []

+  ## Container states to include and exclude. Globs accepted.
+  ## When empty only containers in the "running" state will be captured.
+  # container_state_include = []
+  # container_state_exclude = []
+
  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

@@ -49,11 +53,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
  ## Which environment variables should we use as a tag
  tag_env = ["JAVA_HOME", "HEAP_SIZE"]

-  ## Optional SSL Config
-  # ssl_ca = "/etc/telegraf/ca.pem"
-  # ssl_cert = "/etc/telegraf/cert.pem"
-  # ssl_key = "/etc/telegraf/key.pem"
-  ## Use SSL but skip chain & host verification
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
```

@@ -71,15 +75,58 @@ may prefer to exclude them:
```


-### Measurements & Fields:
+### Metrics:

-Every effort was made to preserve the names based on the JSON response from the
-docker API.

-Note that the docker_container_cpu metric may appear multiple times per collection,
-based on the availability of per-cpu stats on your system.
+- docker
+  - tags:
+    - unit
+    - engine_host
+    - server_version
+  - fields:
+    - n_used_file_descriptors
+    - n_cpus
+    - n_containers
+    - n_containers_running
+    - n_containers_stopped
+    - n_containers_paused
+    - n_images
+    - n_goroutines
+    - n_listener_events
+    - memory_total
+    - pool_blocksize

+- docker_data
+  - tags:
+    - unit
+    - engine_host
+    - server_version
+  - fields:
+    - available
+    - total
+    - used

+- docker_metadata
+  - tags:
+    - unit
+    - engine_host
+    - server_version
+  - fields:
+    - available
+    - total
+    - used

- docker_container_mem
+  - tags:
+    - engine_host
+    - server_version
+    - container_image
+    - container_name
+    - container_status
+    - container_version
+  - fields:
    - total_pgmafault
    - cache
    - mapped_file
@@ -114,7 +161,17 @@ based on the availability of per-cpu stats on your system.
    - failcnt
    - limit
    - container_id

- docker_container_cpu
+  - tags:
+    - engine_host
+    - server_version
+    - container_image
+    - container_name
+    - container_status
+    - container_version
+    - cpu
+  - fields:
    - throttling_periods
    - throttling_throttled_periods
    - throttling_throttled_time
@@ -124,7 +181,17 @@ based on the availability of per-cpu stats on your system.
    - usage_total
    - usage_percent
    - container_id

- docker_container_net
+  - tags:
+    - engine_host
+    - server_version
+    - container_image
+    - container_name
+    - container_status
+    - container_version
+    - network
+  - fields:
    - rx_dropped
    - rx_bytes
    - rx_errors
@@ -134,7 +201,17 @@ based on the availability of per-cpu stats on your system.
    - tx_errors
    - tx_bytes
    - container_id

- docker_container_blkio
+  - tags:
+    - engine_host
+    - server_version
+    - container_image
+    - container_name
+    - container_status
+    - container_version
+    - device
+  - fields:
    - io_service_bytes_recursive_async
    - io_service_bytes_recursive_read
    - io_service_bytes_recursive_sync
@@ -146,115 +223,54 @@ based on the availability of per-cpu stats on your system.
    - io_serviced_recursive_total
    - io_serviced_recursive_write
    - container_id
-- docker_
-    - n_used_file_descriptors
-    - n_cpus
-    - n_containers
-    - n_containers_running
-    - n_containers_stopped
-    - n_containers_paused
-    - n_images
-    - n_goroutines
-    - n_listener_events
-    - memory_total
-    - pool_blocksize
-- docker_data
-    - available
-    - total
-    - used
-- docker_metadata
-    - available
-    - total
-    - used
-- docker_swarm
-    - tasks_desired
-    - tasks_running

-### Tags:
-
-#### Docker Engine tags
-- docker (memory_total)
-    - unit=bytes
-    - engine_host
-- docker (pool_blocksize)
-    - unit=bytes
-    - engine_host
-- docker_data
-    - unit=bytes
-    - engine_host
-- docker_metadata
-    - unit=bytes
-    - engine_host

-#### Docker Container tags
-- Tags on all containers:
+- docker_container_health
+  - tags:
    - engine_host
    - server_version
    - container_image
    - container_name
    - container_status
    - container_version
-- docker_container_mem specific:
-- docker_container_cpu specific:
-    - cpu
-- docker_container_net specific:
-    - network
-- docker_container_blkio specific:
-    - device
-- docker_swarm specific:
+  - fields:
+    - health_status (string)
+    - failing_streak (integer)

+- docker_container_status
+  - tags:
+    - engine_host
+    - server_version
+    - container_image
+    - container_name
+    - container_status
+    - container_version
+  - fields:
+    - oomkilled (boolean)
+    - pid (integer)
+    - exitcode (integer)
+    - started_at (integer)
+    - finished_at (integer)

+- docker_swarm
+  - tags:
+    - service_id
+    - service_name
+    - service_mode
+  - fields:
+    - tasks_desired
+    - tasks_running

### Example Output:

```
-% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
-* Plugin: docker, Collection 1
-> docker n_cpus=8i 1456926671065383978
-> docker n_used_file_descriptors=15i 1456926671065383978
-> docker n_containers=7i 1456926671065383978
-> docker n_containers_running=7i 1456926671065383978
-> docker n_containers_stopped=3i 1456926671065383978
-> docker n_containers_paused=0i 1456926671065383978
-> docker n_images=152i 1456926671065383978
-> docker n_goroutines=36i 1456926671065383978
-> docker n_listener_events=0i 1456926671065383978
-> docker,unit=bytes memory_total=18935443456i 1456926671065383978
-> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
-> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
-> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
-> docker_container_mem,
-container_image=spotify/kafka,container_name=kafka \
-active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
-hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
-inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
-max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
-pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
-total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
-total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
-total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
-total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
-total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
-> docker_container_cpu,
-container_image=spotify/kafka,container_name=kafka,cpu=cpu-total \
-throttling_periods=0i,throttling_throttled_periods=0i,\
-throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
-usage_in_usermode=2290000000i,usage_system=84795360000000i,\
-usage_total=6628208865i 1453409536840126713
-> docker_container_cpu,
-container_image=spotify/kafka,container_name=kafka,cpu=cpu0 \
-usage_total=6628208865i 1453409536840126713
-> docker_container_net,\
-container_image=spotify/kafka,container_name=kafka,network=eth0 \
-rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
-tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
-> docker_container_blkio,
-container_image=spotify/kafka,container_name=kafka,device=8:0 \
-io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
-io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
-io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
-io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
-io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
->docker_swarm,
-service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
-tasks_desired=3,tasks_running=3 1508968160000000000
-```
+docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
+docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
+docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
+docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
+docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
+docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
+docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
+docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
+docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
+docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
```

Some files were not shown because too many files have changed in this diff.