# Compare commits: 1.4.2...hugepages-

468 commits
| SHA1 |
|---|
| af5017d3dc |
| 32dd1b3725 |
| 1b0e87a8b0 |
| efa9095829 |
| 89974d96d7 |
| 8c51d629eb |
| ea0be51985 |
| 5639d5608d |
| 9a1d69a2ae |
| b7a68eef56 |
| be688ec761 |
| 3208fc32ee |
| 1f87c10dd4 |
| 281f4d3688 |
| 3dcf66aed6 |
| 01479af096 |
| 23933e1139 |
| a7571d5730 |
| 12d62e60b3 |
| 4153d2ca42 |
| 8c8c9200e7 |
| 426360d61f |
| 86e08e6ce7 |
| a462b555a7 |
| a2635573a8 |
| ec8e923fda |
| d43e8262b7 |
| 7b365180d0 |
| 32732d42f8 |
| 10e51e4b49 |
| 3a85e7b1f0 |
| 5d87ad85a1 |
| c28d0e1b16 |
| 1b0a4e49cd |
| f9c48ee2f0 |
| 1b84ac08ab |
| bcefe90846 |
| da12c64791 |
| de03ee3caa |
| fbd3544a9d |
| fb947e8fe7 |
| 5b130b6ea0 |
| 48092ed598 |
| efb9d5b4cb |
| c17427631d |
| 8527a1b7b8 |
| d831dbc51d |
| f9c0aa1e23 |
| 3e4c91880a |
| 899c3a2ae1 |
| 4558aeddeb |
| 36c9113917 |
| 5270aa451c |
| 91fc2765b1 |
| ef776f120b |
| 5bac08662e |
| 601dc99606 |
| 0f55d9eba2 |
| f374a295d9 |
| 548157852c |
| 822cfbc8e8 |
| fa5f1bf6d9 |
| ad921a3840 |
| 9d559292a5 |
| 6e24056757 |
| 87830a1c38 |
| d188b78d9e |
| 6e4650da3a |
| 7ab0d50116 |
| 97f6c9d8e1 |
| 666eb47613 |
| 90b6b760d1 |
| f3147cc44d |
| 3cf0ba1ccf |
| 2b972dcd56 |
| ce06d0cee0 |
| 24ae3293bc |
| 0ddb1d26a0 |
| 317de40ac4 |
| 9cfa3b292b |
| 0bf63a29f1 |
| 1d86064fb7 |
| 53e7537c5c |
| 6dd5c3b2c0 |
| 2938c2fa79 |
| 35f1b9f500 |
| ae848e9539 |
| 163f18f959 |
| 37757b7782 |
| 315fd1e987 |
| b0c2bb870e |
| 11c6a7f9c9 |
| 92acef1664 |
| 5397c02570 |
| 87f1d45ee0 |
| 07cb749e04 |
| acea7109d4 |
| 009b649a13 |
| b900967b78 |
| 81f42e8b17 |
| 56be3d3236 |
| a440ed8d8c |
| 06c21fb9f7 |
| 4f7afb8cb5 |
| ef6e5c5a85 |
| 005face7c0 |
| 1011cd0c94 |
| 6c075c4346 |
| 7f3f556b39 |
| 6639f44c17 |
| 801a248668 |
| 496452144c |
| 3029d58cad |
| fcc9c82d34 |
| 4f1ea13ebf |
| b90ee4a43c |
| 4537eb2c5d |
| d6fd9ce738 |
| 5b40173bcb |
| 6638fc68de |
| 9ad0297b1f |
| 15266bb7eb |
| d935dfa9ed |
| 8785c7d78d |
| fb3d66cdd3 |
| de180d1e56 |
| df9c7590b3 |
| d7d224d511 |
| abcad439eb |
| 8484de6c12 |
| ab8376de03 |
| ff634c5056 |
| 14b31a2354 |
| 663a5b1f50 |
| 93d16a4603 |
| 88746b01c3 |
| 37095ef47d |
| 4f42d8a298 |
| 574034c301 |
| 654e953a89 |
| 4d91162abd |
| 177e7e2c73 |
| d8966d5067 |
| bdda6ceb70 |
| ca8911fec0 |
| 2c5a5373f6 |
| cabe10b88a |
| 7f66863b87 |
| e400ec2b57 |
| 44320a5421 |
| a9951710b3 |
| 6426bca1f8 |
| f92a4f528f |
| 3ba5458220 |
| beb9d7560d |
| 24d82aebe6 |
| 7dc256e845 |
| 297897ae0a |
| 414a7e34fb |
| bf65e19486 |
| 2c70958c24 |
| d727a6f85c |
| 4e9b19f7a6 |
| 132fb50150 |
| d1ba75176d |
| 76240b9f18 |
| 06e22ee7ac |
| a18eedb970 |
| 6514399baf |
| 27994abcb5 |
| a9ada5f65b |
| f758d0c6c3 |
| 7442b5645f |
| d5bd426e0c |
| 154b263f14 |
| 92ca661662 |
| 54b0b9e727 |
| dc2c8791d0 |
| 367bbdeb7e |
| e544d742f9 |
| 393c4c6c2d |
| 4d1bc620b2 |
| db8e767f1f |
| afe05fcfef |
| 9422cca2cc |
| a06ee58785 |
| b13eea89b1 |
| b813e2ecae |
| 8364417009 |
| 136c15ba33 |
| 19839c0167 |
| 72682973bd |
| a411306fba |
| cbd346117a |
| 181a56018f |
| 6ee6d55751 |
| ebd73b7279 |
| 6a57395731 |
| be13f69305 |
| 62ec3e50d9 |
| 07297e80a8 |
| f0578b8c83 |
| 493af043d3 |
| 47d013132a |
| dcff769fed |
| 5141f8a2a0 |
| bb14589469 |
| b81bea658f |
| 2c2dc97702 |
| cbbdf1043b |
| c55f285de0 |
| e1295c41c8 |
| e0df62c27b |
| fdf12ce6b4 |
| e5a265c8c7 |
| 112955a9f5 |
| da0ca8a870 |
| 6e6aefe5da |
| ae2635b547 |
| c14478f025 |
| 2c31345c70 |
| 4a9fa7ef4b |
| 7db06d2aa4 |
| 871fae6eb3 |
| 8e587e74f5 |
| 440918a03b |
| f64b23b724 |
| c11739d143 |
| 883696c224 |
| 0ea0519e89 |
| 4596ae70a9 |
| 92caf33fff |
| a987118b01 |
| a6ada03b91 |
| 8ed00af10a |
| 86961cc814 |
| ba462f5c94 |
| 1d1d5e6089 |
| 8560c2f88d |
| 5d135cece3 |
| 9b0af4478b |
| 26ccc1f205 |
| 76ed70340b |
| 5f215c22fe |
| 63842d48fd |
| 777b84d1dc |
| c116af35c7 |
| fcfcc803b1 |
| 4d5de8698b |
| 23ad959d71 |
| d9fa916711 |
| 53b13a20d0 |
| ffa8a4a716 |
| 8b4708c82a |
| 88ec171293 |
| 5885ef2c1c |
| a519abf13f |
| 6ea61b55d9 |
| 206397d475 |
| a6797a44d5 |
| 13c1f1524a |
| 9a062498e7 |
| f64cf89db1 |
| 6d1777276c |
| 65580759fc |
| d2f9fc7d8c |
| 77cc071796 |
| 4deb6238a3 |
| 7088d98304 |
| 4243403432 |
| 3bbc2beeed |
| 0e6a70b199 |
| ec4efe5b03 |
| adb1f5588c |
| 6e5915c59f |
| 9b59cdd10e |
| 02baa696c3 |
| a4fa19252f |
| 7ba376964c |
| a75ab3e190 |
| 2208657d73 |
| 9d8e935734 |
| f5a9d1bc75 |
| 4b05edea53 |
| 246ffab3e0 |
| 3ea41e885c |
| 1f348037b7 |
| 86f19dee2b |
| a1796989f7 |
| 6b67fedfdc |
| 5cd3327d5f |
| bf9f94eb9d |
| 0f9f757da7 |
| 2f8d0f4d47 |
| 024dea2ff9 |
| fa25e123d8 |
| bed14e5037 |
| c74c29b164 |
| 4e0c8e6026 |
| d7ea83f39b |
| b641f06552 |
| c7a6d4eaa4 |
| 61b0336d97 |
| 761544f56d |
| 0f452ad0df |
| 4093bc98b7 |
| 75567d5b51 |
| 59bb31e765 |
| 13c7802b84 |
| cce40c515a |
| 6e1fa559a3 |
| f56dda0ac8 |
| 4fab572b6b |
| b9f319529f |
| 0bb32570ba |
| a4ea4c7a25 |
| e69c3f9d1c |
| 002ccf3295 |
| a163effa6d |
| 93ff811358 |
| dd4299e925 |
| b610276485 |
| 6aee40fac1 |
| 79f66dc5b3 |
| 0a55ab42b4 |
| aba269e94c |
| f67350107d |
| 8e3ed96d6f |
| 771fbc311a |
| d7b88b10ad |
| cdca81c999 |
| ed6f438c9d |
| 366f3f560c |
| e4f5547d37 |
| e1bf655ef9 |
| 29b6f4168c |
| 3d62e045af |
| ad4a5aa7a0 |
| f2cb1da7cf |
| c3d15f0aff |
| b2453e3ec3 |
| c3b11f9cfb |
| cd1791494a |
| 402460f038 |
| f85db90780 |
| 9bddd50a64 |
| b8a0b8461a |
| ee26191eb5 |
| cadafa6405 |
| 22a9ffbb9d |
| 2e1457a496 |
| 8614445235 |
| f23d1eb078 |
| ef5c12bd86 |
| c013cc1497 |
| bb665cf013 |
| 5dff5932fd |
| f823fc73f6 |
| fd702e6bb8 |
| 50024c1860 |
| a4b8805f7f |
| 837e6b1a32 |
| 063f3f68df |
| b24663b0bd |
| 366bda45c3 |
| c010fb1c3c |
| 08c197f73a |
| cafb22d145 |
| 73df179bd6 |
| c3bea59f3b |
| 52393582d2 |
| ce29ca78e3 |
| 6e6ed075dc |
| c0a4bd99a1 |
| decb09e760 |
| a63f80e017 |
| daee48c861 |
| dea8bf7ac0 |
| 292c5229bf |
| 0048bf2120 |
| b8e134cd37 |
| 0339dc7faf |
| 575a07c985 |
| b94cda6b46 |
| 73372872c2 |
| 103ae3b710 |
| 171332c579 |
| 875ab3c4b7 |
| 1c5ebd4be3 |
| 103d24bfba |
| d5f48e3e96 |
| 7a41d2c586 |
| fa1982323a |
| cdf63c5776 |
| 0a8c2e0b3b |
| 9197a59cdb |
| 9c8f4afa37 |
| eebee9759f |
| ee85f9275e |
| 4e53464fe2 |
| 2163981872 |
| c5cfde667a |
| 8a68e7424c |
| cc63b3b667 |
| 5488f4b3ac |
| 14a4b108b4 |
| 32f313a6a6 |
| c720200883 |
| f62e543003 |
| be83c8c8f0 |
| c809debfd4 |
| 247c2e71fd |
| 7b08f9d099 |
| d0b690f040 |
| 98ca22597d |
| 99dfc69fbb |
| 144862354a |
| 402a0f16e1 |
| 5d4eec606f |
| ab1c11b06d |
| 864ea1efaf |
| 4fb1c3a2bc |
| 9796d3c99d |
| 98e784faf3 |
| 16d6011ca1 |
| f43af72785 |
| 28d16188b3 |
| 19f3264073 |
| 8225bd0173 |
| 3806424aab |
| ef8876b70b |
| 5fd8ab36d3 |
| ac1fa05672 |
| 73d57c8a02 |
| 95fe0e43f5 |
| 02f7b0d030 |
| a9a40cbf87 |
| a98496591a |
| 0a6541dfa8 |
| 8ecc58639a |
| 6abecd0ac7 |
| 0502b65316 |
| e400fcf5da |
| d449833de9 |
| 58751fa4df |
| 656ce31d98 |
| 485e273187 |
| f95c239a3f |
| ae24a0754b |
| f253623231 |
| f0db4fd901 |
| 8c68bd9ddb |
| 9fc7220c2e |
| 6597b55477 |
| 1f4a997164 |
| 5224b526f4 |
| 371638ce56 |
| 53c5d3a290 |
| b480022330 |
| ccf17a9f93 |
| 13a6b917c3 |
| 1f1e9cc49f |
| 70c2b83f00 |
| 4de264ffc8 |
| 36c2c88fd2 |
| e31d91f0f9 |
| 3006ccbf2f |
| 8b588ea37f |
### .circleci/config.yml (new file, +49)

```diff
@@ -0,0 +1,49 @@
+---
+defaults: &defaults
+  docker:
+    - image: 'circleci/golang:1.9.2'
+  working_directory: '/go/src/github.com/influxdata/telegraf'
+version: 2
+jobs:
+  build:
+    <<: *defaults
+    steps:
+      - checkout
+      - run: 'make ci-test'
+  release:
+    <<: *defaults
+    steps:
+      - checkout
+      - run: './scripts/release.sh'
+      - store_artifacts:
+          path: './artifacts'
+          destination: '.'
+  nightly:
+    <<: *defaults
+    steps:
+      - checkout
+      - run: './scripts/release.sh'
+      - store_artifacts:
+          path: './artifacts'
+          destination: '.'
+workflows:
+  version: 2
+  build_and_release:
+    jobs:
+      - 'build'
+      - 'release':
+          requires:
+            - 'build'
+  nightly:
+    jobs:
+      - 'build'
+      - 'nightly':
+          requires:
+            - 'build'
+    triggers:
+      - schedule:
+          cron: "0 18 * * *"
+          filters:
+            branches:
+              only:
+                - master
```
### .gitignore (vendored, 4 changed lines)

```diff
@@ -1,7 +1,5 @@
 build
-tivan
 .vagrant
 /telegraf
-.idea
+/telegraf.gz
 *~
-*#
```
### CHANGELOG.md (274 changed lines)

```diff
@@ -1,4 +1,265 @@
-## v1.4 [unreleased]
+## v1.6 [unreleased]
+
+### Release Notes
+
+- The `mysql` input plugin has been updated to convert values to the
+  correct data type. This may cause a `field type error` when inserting into
+  InfluxDB due the change of types. It is recommended to drop the `mysql`,
+  `mysql_variables`, and `mysql_innodb`:
+  ```
+  DROP MEASUREMENT mysql
+  DROP MEASUREMENT mysql_variables
+  DROP MEASUREMENT mysql_innodb
+  ```
+
+- The `postgresql` plugins now defaults to using a persistent connection to the database.
+  In environments where TCP connections are terminated the `max_lifetime`
+  setting should be set less than the collection `interval` to prevent errors.
+
+- The `sqlserver` input plugin has a new query and data model that can be enabled
+  by setting `query_version = 2`. It is encouraged to migrate to the new
+  model when possible as the old version is deprecated and will be removed in
+  a future version.
+
+- An option has been added to the `openldap` input plugin that reverses metric
+  name to improve grouping. This change is enabled when `reverse_metric_names = true`
+  is set. It is encouraged to enable this option when possible as the old
+  ordering is deprecated.
+
+### New Plugins
+
+- [ipset](./plugins/inputs/ipset/README.md) - Thanks to @sajoupa
+- [nats](./plugins/inputs/nats/README.md) - Thanks to @mjs & @levex
+
+### Features
+
+- [#3551](https://github.com/influxdata/telegraf/pull/3551): Add health status mapping from string to int in elasticsearch input.
+- [#3580](https://github.com/influxdata/telegraf/pull/3580): Add control over which stats to gather in basicstats aggregator.
+- [#3596](https://github.com/influxdata/telegraf/pull/3596): Add messages_delivered_get to rabbitmq input.
+- [#3632](https://github.com/influxdata/telegraf/pull/3632): Add wired field to mem input.
+- [#3619](https://github.com/influxdata/telegraf/pull/3619): Add support for gathering exchange metrics to the rabbitmq input.
+- [#3565](https://github.com/influxdata/telegraf/pull/3565): Add support for additional metrics on Linux in zfs input.
+- [#3524](https://github.com/influxdata/telegraf/pull/3524): Add available_entropy field to kernel input plugin.
+- [#3643](https://github.com/influxdata/telegraf/pull/3643): Add user privilege level setting to IPMI sensors.
+- [#2701](https://github.com/influxdata/telegraf/pull/2701): Use persistent connection to postgresql database.
+- [#2846](https://github.com/influxdata/telegraf/pull/2846): Add support for dropwizard input format.
+- [#3666](https://github.com/influxdata/telegraf/pull/3666): Add container health metrics to docker input.
+- [#3687](https://github.com/influxdata/telegraf/pull/3687): Add support for using globs in devices list of diskio input plugin.
+- [#2754](https://github.com/influxdata/telegraf/pull/2754): Allow running as console application on Windows.
+- [#3703](https://github.com/influxdata/telegraf/pull/3703): Add listener counts and node running status to rabbitmq input.
+- [#3674](https://github.com/influxdata/telegraf/pull/3674): Add NATS Monitoring Input Plugin.
+- [#3702](https://github.com/influxdata/telegraf/pull/3702): Add ability to select which queues will be gathered in rabbitmq input.
+- [#3726](https://github.com/influxdata/telegraf/pull/3726): Add support for setting bsd source address to the ping input.
+- [#3346](https://github.com/influxdata/telegraf/pull/3346): Add Ipset input plugin.
+- [#3719](https://github.com/influxdata/telegraf/pull/3719): Add TLS and HTTP basic auth to prometheus_client output.
+- [#3618](https://github.com/influxdata/telegraf/pull/3618): Add new sqlserver output data model.
+- [#3559](https://github.com/influxdata/telegraf/pull/3559): Add native Go method for finding pids to procstat.
+- [#3722](https://github.com/influxdata/telegraf/pull/3722): Add additional metrics and reverse metric names option to openldap.
+- [#3769](https://github.com/influxdata/telegraf/pull/3769): Add TLS support to the mesos input plugin.
+
+### Bugfixes
+
+- [#1896](https://github.com/influxdata/telegraf/issues/1896): Fix various mysql data type conversions.
+
+## v1.5.3 [unreleased]
+
+### Bugfixes
+
+- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
+- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
+
+## v1.5.2 [2018-01-30]
+
+### Bugfixes
+
+- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
+- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
+- [#3680](https://github.com/influxdata/telegraf/pull/3680): Reconnect before sending graphite metrics if disconnected.
+- [#3693](https://github.com/influxdata/telegraf/pull/3693): Align aggregator period with internal ticker to avoid skipping metrics.
+- [#3629](https://github.com/influxdata/telegraf/issues/3629): Fix a potential deadlock when using aggregators.
+- [#3697](https://github.com/influxdata/telegraf/issues/3697): Limit wait time for writes in mqtt output.
+- [#3698](https://github.com/influxdata/telegraf/issues/3698): Revert change in graphite output where dot in field key was replaced by underscore.
+- [#3710](https://github.com/influxdata/telegraf/issues/3710): Add timeout to wavefront output write.
+- [#3725](https://github.com/influxdata/telegraf/issues/3725): Exclude master_replid fields from redis input.
+
+## v1.5.1 [2018-01-10]
+
+### Bugfixes
+
+- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
+- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
+- [#3593](https://github.com/influxdata/telegraf/pull/3593): Set Content-Type charset in influxdb output and allow it be overridden.
+- [#3594](https://github.com/influxdata/telegraf/pull/3594): Document permissions setup for postfix input.
+- [#3633](https://github.com/influxdata/telegraf/pull/3633): Fix deliver_get field in rabbitmq input.
+- [#3607](https://github.com/influxdata/telegraf/issues/3607): Escape environment variables during config toml parsing.
+
+## v1.5 [2017-12-14]
+
+### New Plugins
+- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
+- [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv
+- [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge
+- [dcos](./plugins/inputs/dcos/README.md) - Thanks to @influxdata
+- [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
+- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
+- [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
+- [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
+- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
+- [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
+- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
+- [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello
+- [teamspeak](./plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1
+- [unbound](./plugins/inputs/unbound/README.md) - Thanks to @aromeyer
+- [wavefront](./plugins/outputs/wavefront/README.md) - Thanks to @puckpuck
+
+### Release Notes
+
+- In the `kinesis` output, use of the `partition_key` and
+  `use_random_partitionkey` options has been deprecated in favor of the
+  `partition` subtable. This allows for more flexible methods to set the
+  partition key such as by metric name or by tag.
+
+- With the release of the new improved `jolokia2` input, the legacy `jolokia`
+  plugin is deprecated and will be removed in a future release. Users of this
+  plugin are encouraged to update to the new `jolokia2` plugin.
+
+- In the `postgresql` and `postgresql_extensible` plugins, the type of the oid
+  data type has changed from string to integer. It is recommended to drop
+  affected fields until a new shard is started. For details on how to
+  workaround this issue please see [#3622](https://github.com/influxdata/telegraf/issues/3622).
+
+### Features
+
+- [#3170](https://github.com/influxdata/telegraf/pull/3170): Add support for sharding based on metric name.
+- [#3196](https://github.com/influxdata/telegraf/pull/3196): Add Kafka output plugin topic_suffix option.
+- [#3027](https://github.com/influxdata/telegraf/pull/3027): Include mount mode option in disk metrics.
+- [#3191](https://github.com/influxdata/telegraf/pull/3191): TLS and MTLS enhancements to HTTPListener input plugin.
+- [#3213](https://github.com/influxdata/telegraf/pull/3213): Add polling method to logparser and tail inputs.
+- [#3211](https://github.com/influxdata/telegraf/pull/3211): Add timeout option for kubernetes input.
+- [#3234](https://github.com/influxdata/telegraf/pull/3234): Add support for timing sums in statsd input.
+- [#2617](https://github.com/influxdata/telegraf/issues/2617): Add resource limit monitoring to procstat.
+- [#3236](https://github.com/influxdata/telegraf/pull/3236): Add support for k8s service DNS discovery to prometheus input.
+- [#3245](https://github.com/influxdata/telegraf/pull/3245): Add configurable metrics endpoint to prometheus output.
+- [#3214](https://github.com/influxdata/telegraf/pull/3214): Add new nginx_plus input plugin.
+- [#3215](https://github.com/influxdata/telegraf/pull/3215): Add support for NSQLookupd to nsq_consumer.
+- [#2278](https://github.com/influxdata/telegraf/pull/2278): Add redesigned Jolokia input plugin.
+- [#3106](https://github.com/influxdata/telegraf/pull/3106): Add configurable separator for metrics and fields in opentsdb output.
+- [#1692](https://github.com/influxdata/telegraf/pull/1692): Add support for the rollbar occurrence webhook event.
+- [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin.
+- [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input.
+- [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin.
+- [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data.
+- [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input.
+- [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input.
+- [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator.
+- [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input.
+- [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields.
+- [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier.
+- [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins.
+- [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents.
+- [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result.
+- [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin.
+- [#3406](https://github.com/influxdata/telegraf/pull/3406): Add support for SSL settings to ElasticSearch output plugin.
+- [#3315](https://github.com/influxdata/telegraf/pull/3315): Add Teamspeak 3 input plugin.
+- [#3305](https://github.com/influxdata/telegraf/pull/3305): Add modification_time field to filestat input plugin.
+- [#2019](https://github.com/influxdata/telegraf/pull/2019): Add Solr input plugin.
+- [#3210](https://github.com/influxdata/telegraf/pull/3210): Add CrateDB output plugin.
+- [#3459](https://github.com/influxdata/telegraf/pull/3459): Add systemd unit pid and cgroup matching to procstat.
+- [#3477](https://github.com/influxdata/telegraf/pull/3477): Add Particle Webhook Plugin.
+- [#3471](https://github.com/influxdata/telegraf/pull/3471): Use MAX() instead of SUM() for latency measurements in sqlserver.
+- [#3490](https://github.com/influxdata/telegraf/pull/3490): Add index by week number to Elasticsearch output.
+- [#3434](https://github.com/influxdata/telegraf/pull/3434): Add unbound input plugin.
+- [#3449](https://github.com/influxdata/telegraf/pull/3449): Add opensmtpd input plugin.
+- [#3470](https://github.com/influxdata/telegraf/pull/3470): Add support for tags in the index name in elasticsearch output.
+- [#2553](https://github.com/influxdata/telegraf/pull/2553): Add postfix input plugin.
+- [#3424](https://github.com/influxdata/telegraf/pull/3424): Add bond input plugin.
+- [#3518](https://github.com/influxdata/telegraf/pull/3518): Add slab to mem plugin.
+- [#3519](https://github.com/influxdata/telegraf/pull/3519): Add input plugin for DC/OS.
+- [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
+- [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
+- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
+- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.
+
+### Bugfixes
+
+- [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload.
+- [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock.
+- [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions.
+- [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types.
+- [#3430](https://github.com/influxdata/telegraf/issues/3430): Always ignore autofs filesystems in disk input.
+- [#3326](https://github.com/influxdata/telegraf/issues/3326): Fail metrics parsing on unescaped quotes.
+- [#3473](https://github.com/influxdata/telegraf/pull/3473): Whitelist allowed char classes for graphite output.
+- [#3488](https://github.com/influxdata/telegraf/pull/3488): Use hexadecimal ids and lowercase names in zipkin input.
+- [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
+- [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
+- [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
+- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
+- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
+- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.
+
+## v1.4.5 [2017-12-01]
+
+### Bugfixes
+
+- [#3500](https://github.com/influxdata/telegraf/issues/3500): Fix global variable collection when using interval_slow option in mysql input.
+- [#3486](https://github.com/influxdata/telegraf/issues/3486): Fix error getting net connections info in netstat input.
+- [#3529](https://github.com/influxdata/telegraf/issues/3529): Fix HOST_MOUNT_PREFIX in docker with disk input.
+
+## v1.4.4 [2017-11-08]
+
+### Bugfixes
+
+- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
+- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
+- [#3311](https://github.com/influxdata/telegraf/issues/3311): Fix error getting pids in netstat input.
+- [#3339](https://github.com/influxdata/telegraf/issues/3339): Support HOST_VAR envvar to locate /var in system input.
+- [#3383](https://github.com/influxdata/telegraf/issues/3383): Use current time if docker container read time is zero value.
+
+## v1.4.3 [2017-10-25]
+
+### Bugfixes
+
+- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
+- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
+- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query.
+- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux.
+- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb.
+- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output.
+- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit.
+- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value.
+- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin.
+- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6.
+- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems.
+
+## v1.4.2 [2017-10-10]
+
+### Bugfixes
+
+- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
+- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
+- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
+- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
+- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
+- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
+- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
+- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
+- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
+
+## v1.4.1 [2017-09-26]
+
+### Bugfixes
+
+- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
+- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
+- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
+- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
+- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
+- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
+- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
+- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
+
+## v1.4 [2017-09-05]

 ### Release Notes

@@ -62,6 +323,7 @@
 - [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
 - [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
 - [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
+- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.

 ### Bugfixes

@@ -93,6 +355,16 @@
 - [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilcation of logparser and tail on solaris.
 - [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
 - [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
+- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
+- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
+- [#3153](https://github.com/influxdata/telegraf/issues/3053): Enable hddtemp input on all platforms.
+- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
+- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input
+- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
+- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
+- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
+- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
+- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.

 ## v1.3.5 [2017-07-26]
```
### CONTRIBUTING.md

```diff
@@ -12,7 +12,7 @@ but any information you can provide on how the data will look is appreciated.
 See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
 for a good example.
 1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
-1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
+1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).

 ## GoDoc

@@ -52,7 +52,7 @@ See below for a quick example.
 * Input Plugins must be added to the
   `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
 * The `SampleConfig` function should return valid toml that describes how the
-  plugin can be configured. This is include in `telegraf -sample-config`.
+  plugin can be configured. This is include in `telegraf config`.
 * The `Description` function should say in one line what this plugin does.

 Let's say you've written a plugin that emits metrics about processes on the
@@ -79,7 +79,10 @@ func (s *Simple) Description() string {
 }

 func (s *Simple) SampleConfig() string {
-	return "ok = true # indicate if everything is fine"
+	return `
+  ## Indicate if everything is fine
+  ok = true
+`
 }

 func (s *Simple) Gather(acc telegraf.Accumulator) error {
@@ -183,7 +186,7 @@ See below for a quick example.
 * To be available within Telegraf itself, plugins must add themselves to the
   `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
 * The `SampleConfig` function should return valid toml that describes how the
-  output can be configured. This is include in `telegraf -sample-config`.
+  output can be configured. This is include in `telegraf config`.
 * The `Description` function should say in one line what this output does.

 ### Output Example
@@ -207,7 +210,9 @@ func (s *Simple) Description() string {
 }

 func (s *Simple) SampleConfig() string {
-	return "url = localhost"
+	return `
+  ok = true
+`
 }

 func (s *Simple) Connect() error {
@@ -287,7 +292,7 @@ See below for a quick example.
 * To be available within Telegraf itself, plugins must add themselves to the
   `github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
 * The `SampleConfig` function should return valid toml that describes how the
-  processor can be configured. This is include in `telegraf -sample-config`.
+  processor can be configured. This is include in the output of `telegraf config`.
 * The `Description` function should say in one line what this processor does.

 ### Processor Example
@@ -344,7 +349,7 @@ See below for a quick example.
 * To be available within Telegraf itself, plugins must add themselves to the
   `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
 * The `SampleConfig` function should return valid toml that describes how the
-  aggregator can be configured. This is include in `telegraf -sample-config`.
+  aggregator can be configured. This is include in `telegraf config`.
 * The `Description` function should say in one line what this aggregator does.
 * The Aggregator plugin will need to keep caches of metrics that have passed
   through it. This should be done using the builtin `HashID()` function of each
@@ -457,29 +462,28 @@ func init() {

 ## Unit Tests

+Before opening a pull request you should run the linter checks and
+the short tests.
+
 ### Execute linter

 execute `make lint`

 ### Execute short tests

-execute `make test-short`
+execute `make test`

-### Execute long tests
+### Execute integration tests

-As Telegraf collects metrics from several third-party services it becomes a
-difficult task to mock each service as some of them have complicated protocols
-which would take some time to replicate.
+Running the integration tests requires several docker containers to be
+running. You can start the containers with:
+```
+make docker-run
+```

-To overcome this situation we've decided to use docker containers to provide a
-fast and reproducible environment to test those services which require it.
-For other situations
-(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
-a simple mock will suffice.
+And run the full test suite with:
+```
+make test-all
+```

-To execute Telegraf tests follow these simple steps:
-
-- Install docker following [these](https://docs.docker.com/installation/)
-instructions
-- execute `make test`

 ### Unit test troubleshooting

-Try cleaning up your test environment by executing `make docker-kill` and
-re-running
+Use `make docker-kill` to stop the containers.
```
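Taken together, the fragments above describe a complete input plugin. A minimal sketch, assuming the `telegraf` and `plugins/inputs` packages of this era of the repository (the `Simple` plugin name and `ok` option mirror the example in the diff; the measurement and field names are illustrative):

```go
package simple

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Simple demonstrates the plugin shape described in CONTRIBUTING.md: a
// struct whose exported fields are the TOML options, plus Description,
// SampleConfig, and Gather methods.
type Simple struct {
	Ok bool `toml:"ok"`
}

func (s *Simple) Description() string {
	return "a demonstration plugin"
}

// SampleConfig uses the multi-line style this change introduces.
func (s *Simple) SampleConfig() string {
	return `
  ## Indicate if everything is fine
  ok = true
`
}

// Gather is called on every collection interval and adds one point.
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	fields := map[string]interface{}{"ok": s.Ok}
	acc.AddFields("simple", fields, nil)
	return nil
}

// init registers the plugin so it is compiled in via plugins/inputs/all.
func init() {
	inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```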
### Godeps (19 changed lines)

```diff
@@ -4,18 +4,19 @@ github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
 github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
 github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
+github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
 github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
 github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
 github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
 github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
 github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
 github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
 github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
 github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
 github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
-github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
+github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
+github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
 github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
 github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
@@ -26,13 +27,14 @@ github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
 github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
 github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
 github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
 github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
 github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
 github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
-github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
+github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
 github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
 github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
@@ -40,11 +42,14 @@ github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
 github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
 github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
 github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
+github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
+github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
+github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
 github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
 github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
-github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
+github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
 github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
 github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
 github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
@@ -60,15 +65,17 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
 github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
 github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
-github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
+github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
 github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
-github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
+github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
 github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
 github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
 github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
 github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
 github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
 github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
 github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
 github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
```
### Makefile (116 changed lines)

```diff
@@ -2,6 +2,9 @@ PREFIX := /usr/local
 VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
 BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
+COMMIT := $(shell git rev-parse --short HEAD)
+GOFILES ?= $(shell git ls-files '*.go')
+GOFMT ?= $(shell gofmt -l $(GOFILES))

 ifdef GOBIN
 PATH := $(GOBIN):$(PATH)
 else
@@ -15,17 +18,24 @@ ifdef VERSION
 	LDFLAGS += -X main.version=$(VERSION)
 endif

 all:
+	$(MAKE) fmtcheck
 	$(MAKE) deps
 	$(MAKE) telegraf

+ci-test:
+	$(MAKE) deps
+	$(MAKE) fmtcheck
+	$(MAKE) vet
+	$(MAKE) test
+
 deps:
 	go get -u github.com/golang/lint/golint
 	go get github.com/sparrc/gdm
 	gdm restore

 telegraf:
-	go build -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
+	go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go

 go-install:
 	go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
@@ -37,81 +47,53 @@ install: telegraf
 test:
 	go test -short ./...

+fmt:
+	@gofmt -w $(GOFILES)
+
+fmtcheck:
+	@echo '[INFO] running gofmt to identify incorrectly formatted code...'
+	@if [ ! -z $(GOFMT) ]; then \
+		echo "[ERROR] gofmt has found errors in the following files:" ; \
+		echo "$(GOFMT)" ; \
+		echo "" ;\
+		echo "Run make fmt to fix them." ; \
+		exit 1 ;\
+	fi
+	@echo '[INFO] done.'
+
+lint:
+	golint ./...
+
 test-windows:
 	go test ./plugins/inputs/ping/...
 	go test ./plugins/inputs/win_perf_counters/...
 	go test ./plugins/inputs/win_services/...
 	go test ./plugins/inputs/procstat/...

-lint:
-	go vet ./...
+# vet runs the Go source code static analysis tool `vet` to find
+# any common errors.
+vet:
+	@echo 'go vet $$(go list ./...)'
+	@go vet $$(go list ./...) ; if [ $$? -eq 1 ]; then \
+		echo ""; \
+		echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
+		echo "to fix them before submitting code for review."; \
+		exit 1; \
+	fi

-test-all: lint
+test-all: vet
 	go test ./...

 package:
-	./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
+	./scripts/build.py --package --platform=all --arch=all

 clean:
-	-rm -f telegraf
-	-rm -f telegraf.exe
+	rm -f telegraf
+	rm -f telegraf.exe

-# Run all docker containers necessary for integration tests
-docker-run:
-	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
-	docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
-	docker run --name kafka \
-		--link zookeeper:zookeeper \
-		-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-		-e KAFKA_ADVERTISED_PORT=9092 \
-		-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-		-e KAFKA_CREATE_TOPICS="test:1:1" \
-		-p "9092:9092" \
-		-d wurstmeister/kafka
-	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
-	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
-	docker run --name memcached -p "11211:11211" -d memcached
-	docker run --name postgres -p "5432:5432" -d postgres
-	docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
-	docker run --name redis -p "6379:6379" -d redis
-	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
-	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
-	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
-	docker run --name nats -p "4222:4222" -d nats
-	docker run --name openldap \
-		-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-		-e SLAPD_CONFIG_ROOTPW="secret" \
-		-p "389:389" -p "636:636" \
-		-d cobaugh/openldap-alpine
+docker-image:
+	./scripts/build.py --package --platform=linux --arch=amd64
+	cp build/telegraf*$(COMMIT)*.deb .
+	docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .

-# Run docker containers necessary for integration tests; skipping services provided
-# by CircleCI
-docker-run-circle:
-	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
-	docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
-	docker run --name kafka \
-		--link zookeeper:zookeeper \
-		-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-		-e KAFKA_ADVERTISED_PORT=9092 \
-		-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-		-e KAFKA_CREATE_TOPICS="test:1:1" \
-		-p "9092:9092" \
-		-d wurstmeister/kafka
-	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
-	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
-	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
-	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
-	docker run --name nats -p "4222:4222" -d nats
-	docker run --name openldap \
-		-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-		-e SLAPD_CONFIG_ROOTPW="secret" \
-		-p "389:389" -p "636:636" \
-		-d cobaugh/openldap-alpine

-docker-kill:
-	-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
-		openldap postgres rabbitmq redis riemann zookeeper
-	-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
-		openldap postgres rabbitmq redis riemann zookeeper

-.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
-	package clean docker-run docker-run-circle docker-kill
+.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck
```
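Since the `test` target runs `go test -short ./...`, tests that depend on one of the docker services above conventionally skip themselves in short mode so the short suite stays free of external dependencies. A sketch of that pattern (the package and test names are illustrative, not taken from the diff):

```go
package redis

import "testing"

// TestGatherIntegration needs a live redis container (started via the
// docker targets above); under `make test` (go test -short) it skips
// itself, and only `make test-all` exercises it.
func TestGatherIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	// ... dial the container on localhost:6379 and exercise Gather ...
}
```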
57
README.md
57
README.md
@@ -5,8 +5,7 @@ and writing metrics.
|
||||
|
||||
Design goals are to have a minimal memory footprint with a plugin system so
|
||||
that developers in the community can easily add support for collecting metrics
|
||||
from well known services (like Hadoop, Postgres, or Redis) and third party
|
||||
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
|
||||
from local or remote services.
|
||||
|
||||
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
|
||||
|
||||
@@ -52,6 +51,33 @@ which is installed by the Makefile if you don't have it already.
|
||||
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
|
||||
5. Run `make`
|
||||
|
||||
### Nightly Builds
|
||||
|
||||
These builds are generated from the master branch:
|
||||
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
|
||||
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
|
||||
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
|
||||
- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
|
||||
- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
|
||||
- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
|
||||
- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
|
||||
- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
|
||||
- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
|
||||
- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
|
||||
- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
|
||||
- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
|
||||
- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
|
||||
- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
|
||||
- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
|
||||
- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
|
||||
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
|
||||
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
|
||||
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
|
||||
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
|
||||
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
|
||||
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
|
||||
- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
|
||||
|
||||
## How to use it:
|
||||
|
||||
See usage with:
|
||||
@@ -103,6 +129,7 @@ configuration options.
|
||||
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
@@ -111,6 +138,7 @@ configuration options.
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [DC/OS](./plugins/inputs/dcos)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
@@ -131,7 +159,9 @@ configuration options.
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
@@ -142,17 +172,22 @@ configuration options.
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [nats](./plugins/inputs/nats)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postgresql](./plugins/inputs/postgresql)
* [postfix](./plugins/inputs/postfix)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
@@ -164,15 +199,20 @@ configuration options.
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [smart](./plugins/inputs/smart)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [teamspeak](./plugins/inputs/teamspeak)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
    * cpu
@@ -204,8 +244,9 @@ Telegraf can also collect metrics via the following service plugins:
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [particle](./plugins/inputs/webhooks/particle)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [zipkin](./plugins/inputs/zipkin)

Telegraf is able to parse the following input data formats into metrics, these
@@ -217,6 +258,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
* [Dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard)

## Processor Plugins

@@ -224,6 +266,7 @@ formats may be used with input plugins supporting the `data_format` option:

## Aggregator Plugins

* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)

@@ -234,6 +277,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
@@ -253,3 +297,4 @@ formats may be used with input plugins supporting the `data_format` option:
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
* [wavefront](./plugins/outputs/wavefront)
@@ -28,6 +28,18 @@ type Accumulator interface {
        tags map[string]string,
        t ...time.Time)

    // AddSummary is the same as AddFields, but will add the metric as a "Summary" type
    AddSummary(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    // AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
    AddHistogram(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    SetPrecision(precision, interval time.Duration)

    AddError(err error)
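To make the new Summary and Histogram accumulator methods concrete, here is a minimal sketch of an input plugin calling them. The plugin type, measurement, and values are invented for illustration; only the method signatures come from the interface above.

```go
// Gather is a hypothetical input plugin's collection callback.
func (s *SamplePlugin) Gather(acc telegraf.Accumulator) error {
    fields := map[string]interface{}{"p50": 0.11, "p95": 0.42}
    tags := map[string]string{"handler": "api"}
    // No explicit time.Time is passed, so the accumulator stamps the
    // metric with the current time at its configured precision.
    acc.AddHistogram("request_latency", fields, tags)
    return nil
}
```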
@@ -26,7 +26,7 @@ type MetricMaker interface {
func NewAccumulator(
    maker MetricMaker,
    metrics chan telegraf.Metric,
) *accumulator {
) telegraf.Accumulator {
    acc := accumulator{
        maker:   maker,
        metrics: metrics,
@@ -76,6 +76,28 @@ func (ac *accumulator) AddCounter(
    }
}

func (ac *accumulator) AddSummary(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

func (ac *accumulator) AddHistogram(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {
@@ -115,15 +115,15 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
@@ -147,15 +147,15 @@ func TestAddDisablePrecision(t *testing.T) {
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
@@ -179,15 +179,15 @@ func TestAddNoPrecisionWithInterval(t *testing.T) {
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
@@ -204,7 +204,7 @@ func TestDifferentPrecisions(t *testing.T) {
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm := <-a.metrics
    testm := <-metrics
    actual := testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
@@ -214,7 +214,7 @@ func TestDifferentPrecisions(t *testing.T) {
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
@@ -224,7 +224,7 @@ func TestDifferentPrecisions(t *testing.T) {
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
@@ -234,7 +234,7 @@ func TestDifferentPrecisions(t *testing.T) {
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
@@ -143,7 +143,7 @@ func (a *Agent) gatherer(
func gatherWithTimeout(
    shutdown chan struct{},
    input *models.RunningInput,
    acc *accumulator,
    acc telegraf.Accumulator,
    timeout time.Duration,
) {
    ticker := time.NewTicker(timeout)
@@ -252,7 +252,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
    // the flusher will flush after metrics are collected.
    time.Sleep(time.Millisecond * 300)

    // create an output metric channel and a gorouting that continously passes
    // create an output metric channel and a gorouting that continuously passes
    // each metric onto the output plugins & aggregators.
    outMetricC := make(chan telegraf.Metric, 100)
    var wg sync.WaitGroup
@@ -308,7 +308,13 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
            metrics = processor.Apply(metrics...)
        }
        for _, m := range metrics {
            outMetricC <- m
            for i, o := range a.Config.Outputs {
                if i == len(a.Config.Outputs)-1 {
                    o.AddMetric(m)
                } else {
                    o.AddMetric(m.Copy())
                }
            }
        }
    }
}
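The loop above replaces the shared output channel with a direct fan-out: the last output receives the original metric and every other output receives a copy, so no two outputs can mutate the same instance. A standalone sketch of the pattern, with a hypothetical `fanOut` helper (the `models.RunningOutput` element type is assumed; only `AddMetric` and `Copy` come from the diff):

```go
// fanOut hands one metric to every output. All but the final output
// get independent copies, so each consumer owns what it receives.
func fanOut(m telegraf.Metric, outputs []*models.RunningOutput) {
    for i, o := range outputs {
        if i == len(outputs)-1 {
            o.AddMetric(m) // the last output may keep the original
        } else {
            o.AddMetric(m.Copy())
        }
    }
}
```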
@@ -364,8 +370,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
    metricC := make(chan telegraf.Metric, 100)
    aggC := make(chan telegraf.Metric, 100)

    now := time.Now()

    // Start all ServicePlugins
    for _, input := range a.Config.Inputs {
        input.SetDefaultTags(a.Config.Tags)
@@ -406,7 +410,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
            acc := NewAccumulator(agg, aggC)
            acc.SetPrecision(a.Config.Agent.Precision.Duration,
                a.Config.Agent.Interval.Duration)
            agg.Run(acc, now, shutdown)
            agg.Run(acc, shutdown)
        }(aggregator)
    }

@@ -12,18 +12,19 @@ platform: x64

install:
  - IF NOT EXIST "C:\Cache" mkdir C:\Cache
  - IF NOT EXIST "C:\Cache\go1.8.1.msi" curl -o "C:\Cache\go1.8.1.msi" https://storage.googleapis.com/golang/go1.8.1.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
  - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
  - IF EXIST "C:\Go" rmdir /S /Q C:\Go
  - msiexec.exe /i "C:\Cache\go1.8.1.msi" /quiet
  - msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
  - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
  - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
  - go version
  - go env

build_script:
  - cmd: C:\GnuWin32\bin\make
  - cmd: C:\GnuWin32\bin\make deps
  - cmd: C:\GnuWin32\bin\make telegraf

test_script:
  - cmd: C:\GnuWin32\bin\make test-windows
circle.yml (deleted, 16 lines)
@@ -1,16 +0,0 @@
machine:
  go:
    version: 1.8.1
  services:
    - docker
    - memcached
    - redis
    - rabbitmq-server

dependencies:
  override:
    - docker info

test:
  override:
    - bash scripts/circle-test.sh
@@ -54,12 +54,10 @@ var fUsage = flag.String("usage", "",
    "print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
    "operate on the service")

// Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")

var (
    nextVersion = "1.4.0"
    nextVersion = "1.6.0"
    version     string
    commit      string
    branch      string
@@ -268,7 +266,7 @@ func (p *program) Stop(s service.Service) error {

func displayVersion() string {
    if version == "" {
        return fmt.Sprintf("v%s~pre%s", nextVersion, commit)
        return fmt.Sprintf("v%s~%s", nextVersion, commit)
    }
    return "v" + version
}
@@ -361,7 +359,7 @@ func main() {
        return
    }

    if runtime.GOOS == "windows" {
    if runtime.GOOS == "windows" && !(*fRunAsConsole) {
        svcConfig := &service.Config{
            Name:        "telegraf",
            DisplayName: "Telegraf Data Collector Service",

docker-compose.yml (new file, 93 lines)
@@ -0,0 +1,93 @@
version: '3'

services:
  aerospike:
    image: aerospike/aerospike-server:3.9.0
    ports:
      - "3000:3000"
  zookeeper:
    image: wurstmeister/zookeeper
    environment:
      - JAVA_OPTS="-Xms256m -Xmx256m"
    ports:
      - "2181:2181"
  kafka:
    image: wurstmeister/kafka
    environment:
      - KAFKA_ADVERTISED_HOST_NAME=localhost
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_CREATE_TOPICS="test:1:1"
      - JAVA_OPTS="-Xms256m -Xmx256m"
    ports:
      - "9092:9092"
    depends_on:
      - zookeeper
  elasticsearch:
    image: elasticsearch:5
    environment:
      - JAVA_OPTS="-Xms256m -Xmx256m"
    ports:
      - "9200:9200"
      - "9300:9300"
  mysql:
    image: mysql
    environment:
      - MYSQL_ALLOW_EMPTY_PASSWORD=yes
    ports:
      - "3306:3306"
  memcached:
    image: memcached
    ports:
      - "11211:11211"
  postgres:
    image: postgres:alpine
    ports:
      - "5432:5432"
  rabbitmq:
    image: rabbitmq:3-management
    ports:
      - "15672:15672"
      - "5672:5672"
  redis:
    image: redis:alpine
    ports:
      - "6379:6379"
  nsq:
    image: nsqio/nsq
    ports:
      - "4150:4150"
    command: "/nsqd"
  mqtt:
    image: ncarlier/mqtt
    ports:
      - "1883:1883"
  riemann:
    image: stealthly/docker-riemann
    ports:
      - "5555:5555"
  nats:
    image: nats
    ports:
      - "4222:4222"
  openldap:
    image: cobaugh/openldap-alpine
    environment:
      - SLAPD_CONFIG_ROOTDN="cn=manager,cn=config"
      - SLAPD_CONFIG_ROOTPW="secret"
    ports:
      - "389:389"
      - "636:636"
  crate:
    image: crate/crate
    ports:
      - "4200:4200"
      - "4230:4230"
    command:
      - crate
      - -Cnetwork.host=0.0.0.0
      - -Ctransport.host=localhost
      - -Clicense.enterprise=false
    environment:
      - CRATE_HEAP_SIZE=128m
      - JAVA_OPTS='-Xms256m -Xmx256m'
@@ -39,6 +39,11 @@ metrics as they pass through Telegraf:

Both Aggregators and Processors analyze metrics as they pass through Telegraf.

Use [measurement filtering](CONFIGURATION.md#measurement-filtering)
to control which metrics are passed through a processor or aggregator. If a
metric is filtered out, it bypasses the plugin and is passed downstream
to the next plugin.

**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.
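As a minimal sketch of that bypass behavior (the printer processor is used here purely as an example), only `cpu` metrics are handled by the processor while everything else flows past it untouched:

```toml
[[processors.printer]]
  ## Only metrics named "cpu" are handled by this processor;
  ## all other metrics bypass it and continue downstream.
  namepass = "cpu"
```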
@@ -24,11 +24,17 @@ Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR).

When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
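A short sketch of both substitution forms; the variable names and the influxdb output section are just examples:

```toml
[[outputs.influxdb]]
  urls = ["$INFLUX_URL"]            # string value: the variable is quoted

[agent]
  metric_batch_size = $BATCH_SIZE   # number: left bare
  debug = $DEBUG_MODE               # boolean: left bare
```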
## Configuration file locations

The location of the configuration file can be set via the `--config` command
line flag. Telegraf will also pick up all files matching the pattern `*.conf` if
the `-config-directory` command line flag is used.
line flag.

When the `--config-directory` command line flag is used, files ending with
`.conf` in the specified directory will also be included in the Telegraf
configuration.
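For example, assuming the stock packaging layout described below, both flags can be combined on one command line:

```sh
telegraf --config /etc/telegraf/telegraf.conf \
         --config-directory /etc/telegraf/telegraf.d
```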

On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
@@ -92,9 +98,13 @@ you can configure that here.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.

The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the input plugin.

## Output Configuration

There are no generic configuration options available for all outputs.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the output plugin.

## Aggregator Configuration
@@ -115,6 +125,10 @@ aggregator and will not get sent to the output plugins.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.

The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.

## Processor Configuration

The following config parameters are available for all processors:
@@ -122,6 +136,10 @@ The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.

The [measurement filtering](#measurement-filtering) parameters can be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.

#### Measurement Filtering

Filters can be configured per input, output, processor, or aggregator,
@@ -371,3 +389,15 @@ to the system load metrics due to the `namepass` parameter.
[[outputs.file]]
    files = ["stdout"]
```

#### Processor Configuration Examples:

Print only the metrics with `cpu` as the measurement name; all metrics are
passed to the output:
```toml
[[processors.printer]]
  namepass = "cpu"

[[outputs.file]]
  files = ["/tmp/metrics.out"]
```

@@ -8,6 +8,7 @@ Telegraf is able to parse the following input data formats into metrics:
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -479,3 +480,176 @@ You can also change the path to the typesdb or add additional typesdb using
## Path to TypesDB specifications
collectd_typesdb = ["/usr/share/collectd/types.db"]
```
# Dropwizard:

The dropwizard format can parse the JSON representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`), which can be overridden by defining custom [measurement & tag templates](./DATA_FORMATS_INPUT.md#measurement--tag-templates). All field value types are supported: `string`, `number` and `boolean`.

A typical JSON of a dropwizard metric registry:

```json
{
    "version": "3.0.0",
    "counters" : {
        "measurement,tag1=green" : {
            "count" : 1
        }
    },
    "meters" : {
        "measurement" : {
            "count" : 1,
            "m15_rate" : 1.0,
            "m1_rate" : 1.0,
            "m5_rate" : 1.0,
            "mean_rate" : 1.0,
            "units" : "events/second"
        }
    },
    "gauges" : {
        "measurement" : {
            "value" : 1
        }
    },
    "histograms" : {
        "measurement" : {
            "count" : 1,
            "max" : 1.0,
            "mean" : 1.0,
            "min" : 1.0,
            "p50" : 1.0,
            "p75" : 1.0,
            "p95" : 1.0,
            "p98" : 1.0,
            "p99" : 1.0,
            "p999" : 1.0,
            "stddev" : 1.0
        }
    },
    "timers" : {
        "measurement" : {
            "count" : 1,
            "max" : 1.0,
            "mean" : 1.0,
            "min" : 1.0,
            "p50" : 1.0,
            "p75" : 1.0,
            "p95" : 1.0,
            "p98" : 1.0,
            "p99" : 1.0,
            "p999" : 1.0,
            "stddev" : 1.0,
            "m15_rate" : 1.0,
            "m1_rate" : 1.0,
            "m5_rate" : 1.0,
            "mean_rate" : 1.0,
            "duration_units" : "seconds",
            "rate_units" : "calls/second"
        }
    }
}
```

Would get translated into 4 different measurements:

```
measurement,metric_type=counter,tag1=green count=1
measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
measurement,metric_type=gauge value=1
measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0
measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
```

You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field.
Eg. to parse the following JSON document:

```json
{
    "time" : "2017-02-22T14:33:03.662+02:00",
    "tags" : {
        "tag1" : "green",
        "tag2" : "yellow"
    },
    "metrics" : {
        "counters" : {
            "measurement" : {
                "count" : 1
            }
        },
        "meters" : {},
        "gauges" : {},
        "histograms" : {},
        "timers" : {}
    }
}
```
and translate it into:

```
measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000
```

you simply need to use the following additional configuration properties:

```toml
dropwizard_metric_registry_path = "metrics"
dropwizard_time_path = "time"
dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
dropwizard_tags_path = "tags"
## tag paths per tag are supported too, eg.
#[inputs.yourinput.dropwizard_tag_paths]
#  tag1 = "tags.tag1"
#  tag2 = "tags.tag2"
```

For more information about the dropwizard json format see
[here](http://metrics.dropwizard.io/3.1.0/manual/json/).

#### Dropwizard Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["curl http://localhost:8080/sys/metrics"]
  timeout = "5s"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "dropwizard"

  ## Used by the templating engine to join matched values when cardinality is > 1
  separator = "_"

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template and separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support the formats below:
  ## 1. filter + template
  ## 2. filter + template + extra tag(s)
  ## 3. filter + template with field key
  ## 4. default template
  ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>)
  templates = []

  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
  ## to locate the metric registry within the JSON document
  # dropwizard_metric_registry_path = "metrics"

  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
  ## to locate the default time of the measurements within the JSON document
  # dropwizard_time_path = "time"
  # dropwizard_time_format = "2006-01-02T15:04:05Z07:00"

  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
  ## to locate the tags map within the JSON document
  # dropwizard_tags_path = "tags"

  ## You may even use tag paths per tag
  # [inputs.exec.dropwizard_tag_paths]
  #   tag1 = "tags.tag1"
  #   tag2 = "tags.tag2"
```
@@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).

### Graphite Configuration:

```toml
docs/FAQ.md (new file, 46 lines)
@@ -0,0 +1,46 @@
# Frequently Asked Questions

### Q: How can I monitor the Docker Engine Host from within a container?

You will need to set up several volume mounts as well as some environment
variables:
```
docker run --name telegraf \
    -v /:/hostfs:ro \
    -v /etc:/hostfs/etc:ro \
    -v /proc:/hostfs/proc:ro \
    -v /sys:/hostfs/sys:ro \
    -v /var/run/utmp:/var/run/utmp:ro \
    -e HOST_ETC=/hostfs/etc \
    -e HOST_PROC=/hostfs/proc \
    -e HOST_SYS=/hostfs/sys \
    -e HOST_MOUNT_PREFIX=/hostfs \
    telegraf
```

### Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve?

Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
This resolver behaves differently than the C library functions but is more
efficient when used with the Go runtime.

If you encounter problems or want to use more advanced name resolution methods
that are unsupported by the pure Go resolver, you can switch to the cgo
resolver.

If running manually, set:
```
export GODEBUG=netdns=cgo
```

If running as a service, add the environment variable to `/etc/default/telegraf`:
```
GODEBUG=netdns=cgo
```

### Q: When will the next version be released?

The latest release date estimate can be viewed on the
[milestones](https://github.com/influxdata/telegraf/milestones) page.
@@ -54,6 +54,7 @@ following works:
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/gnatsd [MIT](https://github.com/nats-io/gnatsd/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
@@ -82,6 +83,10 @@ following works:
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/tidwall/gjson [MIT](https://github.com/tidwall/gjson/blob/master/LICENSE)
- github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
@@ -38,7 +38,7 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service stop` | Stop the telegraf service |

Trobleshooting common error #1067
Troubleshooting common error #1067

When installing as a service on Windows, always double check that you specify the full path to the config file; otherwise the Windows service will fail to start.

(File diff suppressed because it is too large)
@@ -63,8 +63,8 @@
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
@@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter {
    }
    return &out
}

type IncludeExcludeFilter struct {
    include Filter
    exclude Filter
}

func NewIncludeExcludeFilter(
    include []string,
    exclude []string,
) (Filter, error) {
    in, err := Compile(include)
    if err != nil {
        return nil, err
    }

    ex, err := Compile(exclude)
    if err != nil {
        return nil, err
    }

    return &IncludeExcludeFilter{in, ex}, nil
}

func (f *IncludeExcludeFilter) Match(s string) bool {
    if f.include != nil {
        if !f.include.Match(s) {
            return false
        }
    }

    if f.exclude != nil {
        if f.exclude.Match(s) {
            return false
        }
    }
    return true
}
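A hedged usage sketch of the new filter; the patterns are invented, and glob matching is assumed to come from the existing `Compile`:

```go
// Accept any "cpu*" series except "cpu_guest".
f, err := filter.NewIncludeExcludeFilter([]string{"cpu*"}, []string{"cpu_guest"})
if err != nil {
    log.Fatal(err)
}
fmt.Println(f.Match("cpu_idle"))  // true: included, not excluded
fmt.Println(f.Match("cpu_guest")) // false: matches the exclude list
fmt.Println(f.Match("mem_free"))  // false: fails the include list
```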
@@ -40,6 +40,11 @@ var (

    // envVarRe is a regex to find environment variables in the config file
    envVarRe = regexp.MustCompile(`\$\w+`)

    envVarEscaper = strings.NewReplacer(
        `"`, `\"`,
        `\`, `\\`,
    )
)

// Config specifies the URL/user/password for the database that telegraf
@@ -126,7 +131,7 @@ type AgentConfig struct {

    // TODO(cam): Remove UTC and parameter, they are no longer
    // valid for the agent config. Leaving them here for now for backwards-
    // compatability
    // compatibility
    UTC bool `toml:"utc"`

    // Debug is the option for running in debug mode
@@ -683,12 +688,17 @@ func (c *Config) LoadConfig(path string) error {
}

// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatability only.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
    return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
    return envVarEscaper.Replace(value)
}

// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
@@ -702,8 +712,9 @@ func parseFile(fpath string) (*ast.Table, error) {

    env_vars := envVarRe.FindAll(contents, -1)
    for _, env_var := range env_vars {
        env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
        if env_val != "" {
        env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
        if ok {
            env_val = escapeEnv(env_val)
            contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
        }
    }
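The switch from `os.Getenv` to `os.LookupEnv` is what makes a variable that is set to the empty string substitutable; previously it was indistinguishable from an unset variable. A small sketch of the distinction:

```go
os.Setenv("EMPTY_VAR", "")

v := os.Getenv("EMPTY_VAR")         // "" : looks the same as unset
v2, ok := os.LookupEnv("EMPTY_VAR") // "" with ok == true : set but empty
_, ok2 := os.LookupEnv("NEVER_SET") // ok2 == false : genuinely unset
fmt.Println(v, v2, ok, ok2)
```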
@@ -1261,6 +1272,47 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
        }
    }

    if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DropwizardMetricRegistryPath = str.Value
            }
        }
    }
    if node, ok := tbl.Fields["dropwizard_time_path"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DropwizardTimePath = str.Value
            }
        }
    }
    if node, ok := tbl.Fields["dropwizard_time_format"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DropwizardTimeFormat = str.Value
            }
        }
    }
    if node, ok := tbl.Fields["dropwizard_tags_path"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DropwizardTagsPath = str.Value
            }
        }
    }
    c.DropwizardTagPathsMap = make(map[string]string)
    if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            for name, val := range subtbl.Fields {
                if kv, ok := val.(*ast.KeyValue); ok {
                    if str, ok := kv.Value.(*ast.String); ok {
                        c.DropwizardTagPathsMap[name] = str.Value
                    }
                }
            }
        }
    }

    c.MetricName = name

    delete(tbl.Fields, "data_format")
@@ -1271,6 +1323,11 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
    delete(tbl.Fields, "collectd_auth_file")
    delete(tbl.Fields, "collectd_security_level")
    delete(tbl.Fields, "collectd_typesdb")
    delete(tbl.Fields, "dropwizard_metric_registry_path")
    delete(tbl.Fields, "dropwizard_time_path")
    delete(tbl.Fields, "dropwizard_time_format")
    delete(tbl.Fields, "dropwizard_tags_path")
    delete(tbl.Fields, "dropwizard_tag_paths")

    return parsers.NewParser(c)
}
@@ -40,9 +40,13 @@ func TestSnakeCase(t *testing.T) {
var (
    sleepbin, _ = exec.LookPath("sleep")
    echobin, _  = exec.LookPath("echo")
    shell, _    = exec.LookPath("sh")
)

func TestRunTimeout(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping test due to random failures.")
    }
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    }
@@ -57,6 +61,8 @@ func TestRunTimeout(t *testing.T) {
}

func TestCombinedOutputTimeout(t *testing.T) {
    // TODO: Fix this test
    t.Skip("Test failing too often, skip for now and revisit later.")
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    }
@@ -84,13 +90,13 @@ func TestCombinedOutput(t *testing.T) {
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    if shell == "" {
        t.Skip("'sh' binary not available on OS, skipping.")
    }
    cmd := exec.Command(sleepbin, "foo")
    cmd := exec.Command(shell, "-c", "false")
    expected, err := cmd.CombinedOutput()

    cmd2 := exec.Command(sleepbin, "foo")
    cmd2 := exec.Command(shell, "-c", "false")
    actual, err := CombinedOutputTimeout(cmd2, time.Second)

    assert.Error(t, err)
@@ -98,16 +104,18 @@ func TestCombinedOutputError(t *testing.T) {
}

func TestRunError(t *testing.T) {
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    if shell == "" {
        t.Skip("'sh' binary not available on OS, skipping.")
    }
    cmd := exec.Command(sleepbin, "foo")
    cmd := exec.Command(shell, "-c", "false")
    err := RunTimeout(cmd, time.Second)

    assert.Error(t, err)
}

func TestRandomSleep(t *testing.T) {
    // TODO: Fix this test
    t.Skip("Test failing too often, skip for now and revisit later.")
    // test that zero max returns immediately
    s := time.Now()
    RandomSleep(time.Duration(0), make(chan struct{}))
@@ -150,12 +150,6 @@ func makemetric(
            continue
        }
    case string:
        if strings.HasSuffix(val, `\`) {
            log.Printf("D! Measurement [%s] field [%s] has a value "+
                "ending with a backslash, skipping", measurement, k)
            delete(fields, k)
            continue
        }
        fields[k] = v
    default:
        fields[k] = v
@@ -114,7 +114,6 @@ func (r *RunningAggregator) reset() {
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
    acc telegraf.Accumulator,
    now time.Time,
    shutdown chan struct{},
) {
    // The start of the period is truncated to the nearest second.
@@ -133,6 +132,7 @@ func (r *RunningAggregator) Run(
    // 2nd interval: 00:10 - 00:20.5
    // etc.
    //
    now := time.Now()
    r.periodStart = now.Truncate(time.Second)
    truncation := now.Sub(r.periodStart)
    r.periodEnd = r.periodStart.Add(r.Config.Period)
@@ -24,7 +24,7 @@ func TestAdd(t *testing.T) {
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    go ra.Run(&acc, time.Now(), make(chan struct{}))
    go ra.Run(&acc, make(chan struct{}))

    m := ra.MakeMetric(
        "RITest",
@@ -55,7 +55,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    go ra.Run(&acc, time.Now(), make(chan struct{}))
    go ra.Run(&acc, make(chan struct{}))

    // metric before current period
    m := ra.MakeMetric(
@@ -113,7 +113,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
    wg.Add(1)
    go func() {
        defer wg.Done()
        ra.Run(&acc, time.Now(), shutdown)
        ra.Run(&acc, shutdown)
    }()

    m := ra.MakeMetric(
@@ -370,16 +370,17 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
            expectedTags: map[string]string{},
        },
        {
            name:        "Field value with trailing slash dropped",
            name:        "Field value with trailing slash okay",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "value": int64(42),
                "bad":   `xyzzy\`,
                "ok":    `xyzzy\`,
            },
            tags:                map[string]string{},
            expectedMeasurement: `cpu`,
            expectedFields: map[string]interface{}{
                "value": int64(42),
                "ok":    `xyzzy\`,
            },
            expectedTags: map[string]string{},
        },
@@ -387,7 +388,7 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
            name:        "Must have one field after dropped",
            measurement: `cpu`,
            fields: map[string]interface{}{
                "bad": `xyzzy\`,
                "bad": math.NaN(),
            },
            tags:        map[string]string{},
            expectedNil: true,
internal/templating/engine.go (new file, 86 lines)
@@ -0,0 +1,86 @@
package templating

import (
    "sort"
    "strings"
)

const (
    // DefaultSeparator is the default separation character to use when separating template parts.
    DefaultSeparator = "."
)

// Engine uses a Matcher to retrieve the appropriate template and applies the template
// to the input string
type Engine struct {
    joiner  string
    matcher *matcher
}

// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
func (e *Engine) Apply(line string) (string, map[string]string, string, error) {
    return e.matcher.match(line).Apply(line, e.joiner)
}

// NewEngine creates a new templating engine
func NewEngine(joiner string, defaultTemplate *Template, templates []string) (*Engine, error) {
    engine := Engine{
        joiner:  joiner,
        matcher: newMatcher(defaultTemplate),
    }
    templateSpecs := parseTemplateSpecs(templates)

    for _, templateSpec := range templateSpecs {
        if err := engine.matcher.addSpec(templateSpec); err != nil {
            return nil, err
        }
    }

    return &engine, nil
}

func parseTemplateSpecs(templates []string) templateSpecs {
    tmplts := templateSpecs{}
    for _, pattern := range templates {
        tmplt := templateSpec{
            separator: DefaultSeparator,
        }

        // Format is [separator] [filter] <template> [tag1=value1,tag2=value2]
        parts := strings.Fields(pattern)
        partsLength := len(parts)
        if partsLength < 1 {
            // ignore
            continue
        }
        if partsLength == 1 {
            tmplt.template = pattern
        } else if partsLength == 4 {
            tmplt.separator = parts[0]
            tmplt.filter = parts[1]
            tmplt.template = parts[2]
            tmplt.tagstring = parts[3]
        } else {
            hasTagstring := strings.Contains(parts[partsLength-1], "=")
            if hasTagstring {
                tmplt.tagstring = parts[partsLength-1]
                tmplt.template = parts[partsLength-2]
                if partsLength == 3 {
                    tmplt.filter = parts[0]
                }
            } else {
                tmplt.template = parts[partsLength-1]
                if partsLength == 2 {
                    tmplt.filter = parts[0]
                } else { // length == 3
                    tmplt.separator = parts[0]
                    tmplt.filter = parts[1]
                }
            }
        }
        tmplts = append(tmplts, tmplt)
    }
    sort.Sort(tmplts)
    return tmplts
}
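A rough usage sketch of the engine; the spec string follows the `[separator] [filter] <template> [tags]` format parsed above, and the input line is invented:

```go
defaultTmpl, _ := templating.NewDefaultTemplateWithPattern("measurement*")
engine, err := templating.NewEngine("_", defaultTmpl, []string{
    // filter "servers.*", template ".host.measurement*", one extra tag
    "servers.* .host.measurement* region=us-east",
})
if err != nil {
    log.Fatal(err)
}
// "servers.localhost.cpu.load" matches the filter; position 0 is skipped,
// position 1 becomes the host tag, the rest joins into the measurement.
name, tags, field, err := engine.Apply("servers.localhost.cpu.load")
fmt.Println(name, tags, field, err) // cpu_load map[host:localhost region:us-east]  <nil>
```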
internal/templating/matcher.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package templating

import (
    "strings"
)

// matcher determines which template should be applied to a given metric
// based on a filter tree.
type matcher struct {
    root            *node
    defaultTemplate *Template
}

// newMatcher creates a new matcher.
func newMatcher(defaultTemplate *Template) *matcher {
    return &matcher{
        root:            &node{},
        defaultTemplate: defaultTemplate,
    }
}

func (m *matcher) addSpec(tmplt templateSpec) error {
    // Parse out the default tags specific to this template
    tags := map[string]string{}
    if tmplt.tagstring != "" {
        for _, kv := range strings.Split(tmplt.tagstring, ",") {
            parts := strings.Split(kv, "=")
            tags[parts[0]] = parts[1]
        }
    }

    tmpl, err := NewTemplate(tmplt.separator, tmplt.template, tags)
    if err != nil {
        return err
    }
    m.add(tmplt.filter, tmpl)
    return nil
}

// add inserts the template in the filter tree based on the given filter
func (m *matcher) add(filter string, template *Template) {
    if filter == "" {
        m.defaultTemplate = template
        m.root.separator = template.separator
        return
    }
    m.root.insert(filter, template)
}

// match returns the template that matches the given measurement line.
// If no template matches, the default template is returned.
func (m *matcher) match(line string) *Template {
    tmpl := m.root.search(line)
    if tmpl != nil {
        return tmpl
    }
    return m.defaultTemplate
}
internal/templating/node.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package templating

import (
    "sort"
    "strings"
)

// node is an item in a sorted k-ary tree of filter parts. Each child is sorted by its part value.
// The special value "*" is always sorted last.
type node struct {
    separator string
    value     string
    children  nodes
    template  *Template
}

// insert inserts the given string template into the tree. The filter string is separated
// on the template separator and each part is used as the path in the tree.
func (n *node) insert(filter string, template *Template) {
    n.separator = template.separator
    n.recursiveInsert(strings.Split(filter, n.separator), template)
}

// recursiveInsert does the actual recursive insertion
func (n *node) recursiveInsert(values []string, template *Template) {
    // At the end, set the template
    if len(values) == 0 {
        n.template = template
        return
    }

    // See if the current element already exists in the tree. If so, insert
    // into that sub-tree
    for _, v := range n.children {
        if v.value == values[0] {
            v.recursiveInsert(values[1:], template)
            return
        }
    }

    // New element, add it to the tree and sort the children
    newNode := &node{value: values[0]}
    n.children = append(n.children, newNode)
    sort.Sort(&n.children)

    // Now insert the rest of the tree into the new element
    newNode.recursiveInsert(values[1:], template)
}

// search searches for a template matching the input string
func (n *node) search(line string) *Template {
    separator := n.separator
    return n.recursiveSearch(strings.Split(line, separator))
}

// recursiveSearch performs the actual recursive search
func (n *node) recursiveSearch(lineParts []string) *Template {
    // Nothing to search
    if len(lineParts) == 0 || len(n.children) == 0 {
        return n.template
    }

    // If the last element is a wildcard, don't include it in this search since it's sorted
    // to the end but lexicographically it would not always be and sort.Search assumes
    // the slice is sorted.
    length := len(n.children)
    if n.children[length-1].value == "*" {
        length--
    }

    // Find the index of child with an exact match
    i := sort.Search(length, func(i int) bool {
        return n.children[i].value >= lineParts[0]
    })

    // Found an exact match, so search that child sub-tree
    if i < len(n.children) && n.children[i].value == lineParts[0] {
        return n.children[i].recursiveSearch(lineParts[1:])
    }
    // Not an exact match, see if we have a wildcard child to search
    if n.children[len(n.children)-1].value == "*" {
        return n.children[len(n.children)-1].recursiveSearch(lineParts[1:])
    }
    return n.template
}

// nodes is simply an array of nodes implementing the sorting interface.
type nodes []*node

// Less returns a boolean indicating whether the filter at position j
// is less than the filter at position k. Filters are ordered by string
// comparison of each component part. A wildcard value "*" is never
// less than a non-wildcard value.
//
// For example, the filters:
//     "*.*"
//     "servers.*"
//     "servers.localhost"
//     "*.localhost"
//
// Would be sorted as:
//     "servers.localhost"
//     "servers.*"
//     "*.localhost"
//     "*.*"
func (n *nodes) Less(j, k int) bool {
    if (*n)[j].value == "*" && (*n)[k].value != "*" {
        return false
    }

    if (*n)[j].value != "*" && (*n)[k].value == "*" {
        return true
    }

    return (*n)[j].value < (*n)[k].value
}

// Swap swaps two elements of the array
func (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }

// Len returns the length of the array
func (n *nodes) Len() int { return len(*n) }
internal/templating/template.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package templating

import (
    "fmt"
    "strings"
)

// Template represents a pattern and tags to map a metric string to an influxdb Point
type Template struct {
    separator         string
    parts             []string
    defaultTags       map[string]string
    greedyField       bool
    greedyMeasurement bool
}

// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
func (t *Template) Apply(line string, joiner string) (string, map[string]string, string, error) {
    fields := strings.Split(line, t.separator)
    var (
        measurement []string
        tags        = make(map[string][]string)
        field       []string
    )

    // Set any default tags
    for k, v := range t.defaultTags {
        tags[k] = append(tags[k], v)
    }

    // See if an invalid combination has been specified in the template:
    for _, tag := range t.parts {
        if tag == "measurement*" {
            t.greedyMeasurement = true
        } else if tag == "field*" {
            t.greedyField = true
        }
    }
    if t.greedyField && t.greedyMeasurement {
        return "", nil, "",
            fmt.Errorf("either 'field*' or 'measurement*' can be used in each "+
                "template (but not both together): %q",
                strings.Join(t.parts, joiner))
    }

    for i, tag := range t.parts {
        if i >= len(fields) {
            continue
        }
        if tag == "" {
            continue
        }

        switch tag {
        case "measurement":
            measurement = append(measurement, fields[i])
        case "field":
            field = append(field, fields[i])
        case "field*":
            field = append(field, fields[i:]...)
            break
        case "measurement*":
            measurement = append(measurement, fields[i:]...)
            break
        default:
            tags[tag] = append(tags[tag], fields[i])
        }
    }

    // Convert to map of strings.
    outtags := make(map[string]string)
    for k, values := range tags {
        outtags[k] = strings.Join(values, joiner)
    }

    return strings.Join(measurement, joiner), outtags, strings.Join(field, joiner), nil
}

func NewDefaultTemplateWithPattern(pattern string) (*Template, error) {
    return NewTemplate(DefaultSeparator, pattern, nil)
}

// NewTemplate returns a new template ensuring it has a measurement
// specified.
func NewTemplate(separator string, pattern string, defaultTags map[string]string) (*Template, error) {
    parts := strings.Split(pattern, separator)
    hasMeasurement := false
    template := &Template{
        separator:   separator,
        parts:       parts,
        defaultTags: defaultTags,
    }

    for _, part := range parts {
        if strings.HasPrefix(part, "measurement") {
            hasMeasurement = true
        }
        if part == "measurement*" {
            template.greedyMeasurement = true
        } else if part == "field*" {
            template.greedyField = true
        }
    }

    if !hasMeasurement {
        return nil, fmt.Errorf("no measurement specified for template. %q", pattern)
    }

    return template, nil
}

// templateSpec is a template string split in its constituent parts
type templateSpec struct {
    separator string
    filter    string
    template  string
    tagstring string
}

// templateSpecs is simply an array of template specs implementing the sorting interface
type templateSpecs []templateSpec

// Less reports whether the element with
// index j should sort before the element with index k.
func (e templateSpecs) Less(j, k int) bool {
    if len(e[j].filter) == 0 && len(e[k].filter) == 0 {
        jlength := len(strings.Split(e[j].template, e[j].separator))
        klength := len(strings.Split(e[k].template, e[k].separator))
        return jlength < klength
    }
    if len(e[j].filter) == 0 {
        return true
    }
    if len(e[k].filter) == 0 {
        return false
    }

    jlength := len(strings.Split(e[j].template, e[j].separator))
    klength := len(strings.Split(e[k].template, e[k].separator))
    return jlength < klength
}

// Swap swaps the elements with indexes i and j.
func (e templateSpecs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Len is the number of elements in the collection.
func (e templateSpecs) Len() int { return len(e) }
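To make the `Apply` contract concrete, a small sketch of a template used directly, with an invented dot-separated line:

```go
tmpl, err := templating.NewTemplate(".", "host.measurement.field", nil)
if err != nil {
    log.Fatal(err)
}
// "web01.cpu.load" -> measurement "cpu", tags {host: web01}, field "load"
name, tags, field, _ := tmpl.Apply("web01.cpu.load", "_")
fmt.Println(name, tags, field)
```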
@@ -13,6 +13,8 @@ const (
    Counter
    Gauge
    Untyped
    Summary
    Histogram
)

type Metric interface {

@@ -20,8 +20,14 @@ var (

    // stringFieldEscaper is for escaping string field values only.
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
    stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
    stringFieldEscaper = strings.NewReplacer(
        `"`, `\"`,
        `\`, `\\`,
    )
    stringFieldUnEscaper = strings.NewReplacer(
        `\"`, `"`,
        `\\`, `\`,
    )
)

func escape(s string, t string) string {
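The practical effect of escaping the backslash as well as the quote is that string field values now round-trip; a sketch using the same replacer pairs as the diff (the sample value is invented):

```go
esc := strings.NewReplacer(`"`, `\"`, `\`, `\\`)
unesc := strings.NewReplacer(`\"`, `"`, `\\`, `\`)

s := `path C:\tmp and "quoted"`
fmt.Println(esc.Replace(s))                     // path C:\\tmp and \"quoted\"
fmt.Println(unesc.Replace(esc.Replace(s)) == s) // true: round-trips
```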
@@ -21,14 +21,14 @@ func New(
	t time.Time,
	mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
	if len(fields) == 0 {
		return nil, fmt.Errorf("Metric cannot be made without any fields")
	}
	if len(name) == 0 {
		return nil, fmt.Errorf("Metric cannot be made with an empty name")
		return nil, fmt.Errorf("missing measurement name")
	}
	if len(fields) == 0 {
		return nil, fmt.Errorf("%s: must have one or more fields", name)
	}
	if strings.HasSuffix(name, `\`) {
		return nil, fmt.Errorf("Metric cannot have measurement name ending with a backslash")
		return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
	}

	var thisType telegraf.ValueType
@@ -49,10 +49,10 @@ func New(
	taglen := 0
	for k, v := range tags {
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("Metric cannot have tag key ending with a backslash")
			return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
		}
		if strings.HasSuffix(v, `\`) {
			return nil, fmt.Errorf("Metric cannot have tag value ending with a backslash")
			return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
		}

		if len(k) == 0 || len(v) == 0 {
@@ -77,15 +77,9 @@ func New(

	// pre-allocate capacity of the fields slice
	fieldlen := 0
	for k, v := range fields {
	for k, _ := range fields {
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
		}
		switch val := v.(type) {
		case string:
			if strings.HasSuffix(val, `\`) {
				return nil, fmt.Errorf("Metric cannot have field value ending with a backslash")
			}
			return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
		}

	// 10 bytes is completely arbitrary, but will at least prevent some
@@ -108,7 +102,8 @@ func New(
}

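The net effect of the reworked validation is that errors now carry the measurement name. A short in-package sketch (the values are illustrative):

```go
m, err := New("cpu", map[string]string{"host": "a"},
	map[string]interface{}{"value": 42.0}, time.Now())
// err == nil, m is usable

_, err = New(`cpu\`, nil,
	map[string]interface{}{"value": 1.0}, time.Now())
// err: `cpu\: measurement name cannot end with a backslash`
```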
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
	var keyi int
	for {
@@ -128,6 +123,46 @@ func indexUnescapedByte(buf []byte, b byte) int {
	return keyi
}

// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
	var keyi int
	for {
		i := bytes.IndexByte(buf[keyi:], b)
		if i == -1 {
			return -1
		} else if i == 0 {
			break
		}
		keyi += i
		if countBackslashes(buf, keyi-1)%2 == 0 {
			break
		} else {
			keyi++
		}
	}
	return keyi
}

// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
	var count int
	for {
		if index < 0 {
			return count
		}
		if buf[index] == '\\' {
			count++
			index--
		} else {
			break
		}
	}
	return count
}

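The parity check is the crux: a byte is escaped iff an odd number of backslashes immediately precede it. A standalone sketch (the helper is re-declared locally so the snippet runs on its own):

```go
package main

import "fmt"

// countBackslashes counts consecutive backslashes ending at index,
// scanning leftwards, mirroring the helper above.
func countBackslashes(buf []byte, index int) int {
	count := 0
	for index >= 0 && buf[index] == '\\' {
		count++
		index--
	}
	return count
}

func main() {
	buf := []byte(`a\"b\\"c`)
	// The quote at index 2 has one preceding backslash (odd): escaped.
	fmt.Println(countBackslashes(buf, 1)%2 == 0) // false
	// The quote at index 6 has two preceding backslashes (even): unescaped,
	// because the second backslash escapes the first, not the quote.
	fmt.Println(countBackslashes(buf, 5)%2 == 0) // true
}
```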
type metric struct {
	name []byte
	tags []byte
@@ -289,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
	// end index of field value
	var i3 int
	if m.fields[i:][i2] == '"' {
		i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
		i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
		if i3 == -1 {
			i3 = len(m.fields[i:])
		}

@@ -31,7 +31,7 @@ func TestNewMetric(t *testing.T) {
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

@@ -257,6 +257,8 @@ func TestNewMetric_Fields(t *testing.T) {
		"string":                 "test",
		"quote_string":           `x"y`,
		"backslash_quote_string": `x\"y`,
		"backslash":              `x\y`,
		"ends_with_backslash":    `x\`,
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
@@ -412,7 +414,7 @@ func TestNewGaugeMetric(t *testing.T) {
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

@@ -434,7 +436,7 @@ func TestNewCounterMetric(t *testing.T) {
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

@@ -708,12 +710,6 @@ func TestNewMetric_TrailingSlash(t *testing.T) {
				`value\`: "x",
			},
		},
		{
			name: "cpu",
			fields: map[string]interface{}{
				"value": `x\`,
			},
		},
		{
			name: "cpu",
			tags: map[string]string{

@@ -326,7 +326,9 @@ func scanTagsValue(buf []byte, i int) (int, int, error) {
func scanFields(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start
	quoted := false

	// track how many '"' we've seen since last '='
	quotes := 0

	// tracks how many '=' we've seen
	equals := 0
@@ -350,13 +352,17 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
		// Only quote values in the field value since quotes are not significant
		// in the field key
		if buf[i] == '"' && equals > commas {
			quoted = !quoted
			i++
			quotes++
			if quotes > 2 {
				break
			}
			continue
		}

		// If we see an =, ensure that there is at least on char before and after it
		if buf[i] == '=' && !quoted {
		if buf[i] == '=' && quotes != 1 {
			quotes = 0
			equals++

			// check for "... =123" but allow "a\ =123"
@@ -398,18 +404,18 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
		}
	}

	if buf[i] == ',' && !quoted {
	if buf[i] == ',' && quotes != 1 {
		commas++
	}

	// reached end of block?
	if buf[i] == ' ' && !quoted {
	if buf[i] == ' ' && quotes != 1 {
		break
	}
	i++
}

if quoted {
if quotes != 0 && quotes != 2 {
	return i, buf[start:i], makeError("unbalanced quotes", buf, i)
}

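In short, the boolean `quoted` toggle is replaced by a per-value quote counter, which lets the scanner reject a stray third quote. Illustrative line protocol, not from the diff:

```
cpu value="ok" 0        valid: quotes == 2 when the value ends
cpu value="bad"x" 0     rejected: a third quote trips "unbalanced quotes"
```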
@@ -647,7 +653,7 @@ func skipWhitespace(buf []byte, i int) int {
}

// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occured.
// reason is the reason why the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {

@@ -4,6 +4,7 @@ import (
	"io"
	"io/ioutil"
	"regexp"
	"strings"
	"testing"
	"time"

@@ -180,7 +181,7 @@ func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
	}
}

// Regresssion test for when a metric requires to be split and one of the
// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly causing a panic.
@@ -217,7 +218,7 @@ func TestMetricReader_SplitOverflowOversized(t *testing.T) {
	}
}

// Regresssion test for when a split metric exactly fits in the buffer.
// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
@@ -620,6 +621,83 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
	}
}

func TestReader_Read(t *testing.T) {
	epoch := time.Unix(0, 0)

	type args struct {
		name   string
		tags   map[string]string
		fields map[string]interface{}
		t      time.Time
		mType  []telegraf.ValueType
	}
	tests := []struct {
		name     string
		args     args
		expected []byte
	}{
		{
			name: "escape backslashes in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\" 0`),
		},
		{
			name: "escape quote in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test"`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\"" 0`),
		},
		{
			name: "escape quote and backslash in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\"`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\\"" 0`),
		},
		{
			name: "escape multiple backslash in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\\`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\\\" 0`),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf := make([]byte, 512)
			m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
			require.NoError(t, err)

			r := NewReader([]telegraf.Metric{m})
			num, err := r.Read(buf)
			if err != io.EOF {
				require.NoError(t, err)
			}
			line := string(buf[:num])
			// This is done so that we can use raw strings in the test spec
			noeol := strings.TrimRight(line, "\n")
			require.Equal(t, string(tt.expected), noeol)
			require.Equal(t, len(tt.expected)+1, num)
		})
	}
}

func TestMetricRoundtrip(t *testing.T) {
|
||||
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=
24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
|
||||
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
|
||||
|
||||
@@ -1,6 +1,7 @@
package all

import (
	_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
	_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
	_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

55
plugins/aggregators/basicstats/README.md
Normal file
@@ -0,0 +1,55 @@
# BasicStats Aggregator Plugin

The BasicStats aggregator plugin gives count, max, min, mean, s2 (variance),
and stdev for a set of values, emitting the aggregate every `period` seconds.

### Configuration:

```toml
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]

  ## General Aggregator Arguments:

  ## The period on which to flush & clear the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## BasicStats Arguments:

  ## Configures which basic stats to push as fields
  stats = ["count","min","max","mean","stdev","s2"]
```

- stats
  - If not specified, all stats are aggregated and pushed as fields
  - If an empty array, no stats are aggregated

### Measurements & Fields:

- measurement1
  - field1_count
  - field1_max
  - field1_min
  - field1_mean
  - field1_s2 (variance)
  - field1_stdev (standard deviation)

### Tags:

No tags are applied by this aggregator.

### Example Output:

```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414214 1475584040000000000
```

246
plugins/aggregators/basicstats/basicstats.go
Normal file
@@ -0,0 +1,246 @@
|
||||
package basicstats
|
||||
|
||||
import (
|
||||
"log"
|
||||
"math"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/aggregators"
|
||||
)
|
||||
|
||||
type BasicStats struct {
|
||||
Stats []string `toml:"stats"`
|
||||
|
||||
cache map[uint64]aggregate
|
||||
statsConfig *configuredStats
|
||||
}
|
||||
|
||||
type configuredStats struct {
|
||||
count bool
|
||||
min bool
|
||||
max bool
|
||||
mean bool
|
||||
variance bool
|
||||
stdev bool
|
||||
}
|
||||
|
||||
func NewBasicStats() *BasicStats {
|
||||
mm := &BasicStats{}
|
||||
mm.Reset()
|
||||
return mm
|
||||
}
|
||||
|
||||
type aggregate struct {
|
||||
fields map[string]basicstats
|
||||
name string
|
||||
tags map[string]string
|
||||
}
|
||||
|
||||
type basicstats struct {
|
||||
count float64
|
||||
min float64
|
||||
max float64
|
||||
mean float64
|
||||
M2 float64 //intermediate value for variance/stdev
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## General Aggregator Arguments:
|
||||
## The period on which to flush & clear the aggregator.
|
||||
period = "30s"
|
||||
## If true, the original metric will be dropped by the
|
||||
## aggregator and will not get sent to the output plugins.
|
||||
drop_original = false
|
||||
`
|
||||
|
||||
func (m *BasicStats) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (m *BasicStats) Description() string {
|
||||
return "Keep the aggregate basicstats of each metric passing through."
|
||||
}
|
||||
|
||||
func (m *BasicStats) Add(in telegraf.Metric) {
|
||||
id := in.HashID()
|
||||
if _, ok := m.cache[id]; !ok {
|
||||
// hit an uncached metric, create caches for first time:
|
||||
a := aggregate{
|
||||
name: in.Name(),
|
||||
tags: in.Tags(),
|
||||
fields: make(map[string]basicstats),
|
||||
}
|
||||
for k, v := range in.Fields() {
|
||||
if fv, ok := convert(v); ok {
|
||||
a.fields[k] = basicstats{
|
||||
count: 1,
|
||||
min: fv,
|
||||
max: fv,
|
||||
mean: fv,
|
||||
M2: 0.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
m.cache[id] = a
|
||||
} else {
|
||||
for k, v := range in.Fields() {
|
||||
if fv, ok := convert(v); ok {
|
||||
if _, ok := m.cache[id].fields[k]; !ok {
|
||||
// hit an uncached field of a cached metric
|
||||
m.cache[id].fields[k] = basicstats{
|
||||
count: 1,
|
||||
min: fv,
|
||||
max: fv,
|
||||
mean: fv,
|
||||
M2: 0.0,
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
tmp := m.cache[id].fields[k]
|
||||
//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
|
||||
//variable initialization
|
||||
x := fv
|
||||
mean := tmp.mean
|
||||
M2 := tmp.M2
|
||||
//counter compute
|
||||
n := tmp.count + 1
|
||||
tmp.count = n
|
||||
//mean compute
|
||||
delta := x - mean
|
||||
mean = mean + delta/n
|
||||
tmp.mean = mean
|
||||
//variance/stdev compute
|
||||
M2 = M2 + delta*(x-mean)
|
||||
tmp.M2 = M2
|
||||
//max/min compute
|
||||
if fv < tmp.min {
|
||||
tmp.min = fv
|
||||
} else if fv > tmp.max {
|
||||
tmp.max = fv
|
||||
}
|
||||
//store final data
|
||||
m.cache[id].fields[k] = tmp
|
||||
}
|
||||
}
|
||||
}
|
||||
}
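The update loop in `Add` is Welford's online algorithm. A standalone distillation, showing that it reproduces the README example (values 1 and 3):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	var n, mean, m2 float64
	for _, x := range []float64{1, 3} {
		n++
		delta := x - mean
		mean += delta / n
		m2 += delta * (x - mean)
	}
	variance := m2 / (n - 1) // sample variance, the "s2" field
	fmt.Println(mean, variance, math.Sqrt(variance))
	// Output: 2 2 1.4142135623730951
}
```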
|
||||
|
||||
func (m *BasicStats) Push(acc telegraf.Accumulator) {
|
||||
|
||||
config := getConfiguredStats(m)
|
||||
|
||||
for _, aggregate := range m.cache {
|
||||
fields := map[string]interface{}{}
|
||||
for k, v := range aggregate.fields {
|
||||
|
||||
if config.count {
|
||||
fields[k+"_count"] = v.count
|
||||
}
|
||||
if config.min {
|
||||
fields[k+"_min"] = v.min
|
||||
}
|
||||
if config.max {
|
||||
fields[k+"_max"] = v.max
|
||||
}
|
||||
if config.mean {
|
||||
fields[k+"_mean"] = v.mean
|
||||
}
|
||||
|
||||
//v.count always >=1
|
||||
if v.count > 1 {
|
||||
variance := v.M2 / (v.count - 1)
|
||||
|
||||
if config.variance {
|
||||
fields[k+"_s2"] = variance
|
||||
}
|
||||
if config.stdev {
|
||||
fields[k+"_stdev"] = math.Sqrt(variance)
|
||||
}
|
||||
}
|
||||
//if count == 1, variance and stdev are undefined, so they are not sent
|
||||
}
|
||||
|
||||
if len(fields) > 0 {
|
||||
acc.AddFields(aggregate.name, fields, aggregate.tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseStats(names []string) *configuredStats {
|
||||
|
||||
parsed := &configuredStats{}
|
||||
|
||||
for _, name := range names {
|
||||
|
||||
switch name {
|
||||
|
||||
case "count":
|
||||
parsed.count = true
|
||||
case "min":
|
||||
parsed.min = true
|
||||
case "max":
|
||||
parsed.max = true
|
||||
case "mean":
|
||||
parsed.mean = true
|
||||
case "s2":
|
||||
parsed.variance = true
|
||||
case "stdev":
|
||||
parsed.stdev = true
|
||||
|
||||
default:
|
||||
log.Printf("W! Unrecognized basic stat '%s', ignoring", name)
|
||||
}
|
||||
}
|
||||
|
||||
return parsed
|
||||
}
|
||||
|
||||
func defaultStats() *configuredStats {
|
||||
|
||||
defaults := &configuredStats{}
|
||||
|
||||
defaults.count = true
|
||||
defaults.min = true
|
||||
defaults.max = true
|
||||
defaults.mean = true
|
||||
defaults.variance = true
|
||||
defaults.stdev = true
|
||||
|
||||
return defaults
|
||||
}
|
||||
|
||||
func getConfiguredStats(m *BasicStats) *configuredStats {
|
||||
|
||||
if m.statsConfig == nil {
|
||||
|
||||
if m.Stats == nil {
|
||||
m.statsConfig = defaultStats()
|
||||
} else {
|
||||
m.statsConfig = parseStats(m.Stats)
|
||||
}
|
||||
}
|
||||
|
||||
return m.statsConfig
|
||||
}
|
||||
|
||||
func (m *BasicStats) Reset() {
|
||||
m.cache = make(map[uint64]aggregate)
|
||||
}
|
||||
|
||||
func convert(in interface{}) (float64, bool) {
|
||||
switch v := in.(type) {
|
||||
case float64:
|
||||
return v, true
|
||||
case int64:
|
||||
return float64(v), true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
aggregators.Add("basicstats", func() telegraf.Aggregator {
|
||||
return NewBasicStats()
|
||||
})
|
||||
}
|
||||
359
plugins/aggregators/basicstats/basicstats_test.go
Normal file
@@ -0,0 +1,359 @@
|
||||
package basicstats
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
var m1, _ = metric.New("m1",
|
||||
map[string]string{"foo": "bar"},
|
||||
map[string]interface{}{
|
||||
"a": int64(1),
|
||||
"b": int64(1),
|
||||
"c": float64(2),
|
||||
"d": float64(2),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
var m2, _ = metric.New("m1",
|
||||
map[string]string{"foo": "bar"},
|
||||
map[string]interface{}{
|
||||
"a": int64(1),
|
||||
"b": int64(3),
|
||||
"c": float64(4),
|
||||
"d": float64(6),
|
||||
"e": float64(200),
|
||||
"ignoreme": "string",
|
||||
"andme": true,
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
func BenchmarkApply(b *testing.B) {
|
||||
minmax := NewBasicStats()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
minmax.Add(m1)
|
||||
minmax.Add(m2)
|
||||
}
|
||||
}
|
||||
|
||||
// Test two metrics getting added.
|
||||
func TestBasicStatsWithPeriod(t *testing.T) {
|
||||
acc := testutil.Accumulator{}
|
||||
minmax := NewBasicStats()
|
||||
|
||||
minmax.Add(m1)
|
||||
minmax.Add(m2)
|
||||
minmax.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_count": float64(2), //a
|
||||
"a_max": float64(1),
|
||||
"a_min": float64(1),
|
||||
"a_mean": float64(1),
|
||||
"a_stdev": float64(0),
|
||||
"a_s2": float64(0),
|
||||
"b_count": float64(2), //b
|
||||
"b_max": float64(3),
|
||||
"b_min": float64(1),
|
||||
"b_mean": float64(2),
|
||||
"b_s2": float64(2),
|
||||
"b_stdev": math.Sqrt(2),
|
||||
"c_count": float64(2), //c
|
||||
"c_max": float64(4),
|
||||
"c_min": float64(2),
|
||||
"c_mean": float64(3),
|
||||
"c_s2": float64(2),
|
||||
"c_stdev": math.Sqrt(2),
|
||||
"d_count": float64(2), //d
|
||||
"d_max": float64(6),
|
||||
"d_min": float64(2),
|
||||
"d_mean": float64(4),
|
||||
"d_s2": float64(8),
|
||||
"d_stdev": math.Sqrt(8),
|
||||
"e_count": float64(1), //e
|
||||
"e_max": float64(200),
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test two metrics getting added with a push/reset in between (simulates
|
||||
// getting added in different periods.)
|
||||
func TestBasicStatsDifferentPeriods(t *testing.T) {
|
||||
acc := testutil.Accumulator{}
|
||||
minmax := NewBasicStats()
|
||||
|
||||
minmax.Add(m1)
|
||||
minmax.Push(&acc)
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_count": float64(1), //a
|
||||
"a_max": float64(1),
|
||||
"a_min": float64(1),
|
||||
"a_mean": float64(1),
|
||||
"b_count": float64(1), //b
|
||||
"b_max": float64(1),
|
||||
"b_min": float64(1),
|
||||
"b_mean": float64(1),
|
||||
"c_count": float64(1), //c
|
||||
"c_max": float64(2),
|
||||
"c_min": float64(2),
|
||||
"c_mean": float64(2),
|
||||
"d_count": float64(1), //d
|
||||
"d_max": float64(2),
|
||||
"d_min": float64(2),
|
||||
"d_mean": float64(2),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
|
||||
acc.ClearMetrics()
|
||||
minmax.Reset()
|
||||
minmax.Add(m2)
|
||||
minmax.Push(&acc)
|
||||
expectedFields = map[string]interface{}{
|
||||
"a_count": float64(1), //a
|
||||
"a_max": float64(1),
|
||||
"a_min": float64(1),
|
||||
"a_mean": float64(1),
|
||||
"b_count": float64(1), //b
|
||||
"b_max": float64(3),
|
||||
"b_min": float64(3),
|
||||
"b_mean": float64(3),
|
||||
"c_count": float64(1), //c
|
||||
"c_max": float64(4),
|
||||
"c_min": float64(4),
|
||||
"c_mean": float64(4),
|
||||
"d_count": float64(1), //d
|
||||
"d_max": float64(6),
|
||||
"d_min": float64(6),
|
||||
"d_mean": float64(6),
|
||||
"e_count": float64(1), //e
|
||||
"e_max": float64(200),
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
}
|
||||
expectedTags = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating count
|
||||
func TestBasicStatsWithOnlyCount(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"count"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_count": float64(2),
|
||||
"b_count": float64(2),
|
||||
"c_count": float64(2),
|
||||
"d_count": float64(2),
|
||||
"e_count": float64(1),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating minimum
|
||||
func TestBasicStatsWithOnlyMin(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"min"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_min": float64(1),
|
||||
"b_min": float64(1),
|
||||
"c_min": float64(2),
|
||||
"d_min": float64(2),
|
||||
"e_min": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating maximum
|
||||
func TestBasicStatsWithOnlyMax(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"max"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_max": float64(1),
|
||||
"b_max": float64(3),
|
||||
"c_max": float64(4),
|
||||
"d_max": float64(6),
|
||||
"e_max": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating mean
|
||||
func TestBasicStatsWithOnlyMean(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"mean"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_mean": float64(1),
|
||||
"b_mean": float64(2),
|
||||
"c_mean": float64(3),
|
||||
"d_mean": float64(4),
|
||||
"e_mean": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating variance
|
||||
func TestBasicStatsWithOnlyVariance(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"s2"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_s2": float64(0),
|
||||
"b_s2": float64(2),
|
||||
"c_s2": float64(2),
|
||||
"d_s2": float64(8),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating standard deviation
|
||||
func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"stdev"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_stdev": float64(0),
|
||||
"b_stdev": math.Sqrt(2),
|
||||
"c_stdev": math.Sqrt(2),
|
||||
"d_stdev": math.Sqrt(8),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating minimum and maximum
|
||||
func TestBasicStatsWithMinAndMax(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"min", "max"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_max": float64(1), //a
|
||||
"a_min": float64(1),
|
||||
"b_max": float64(3), //b
|
||||
"b_min": float64(1),
|
||||
"c_max": float64(4), //c
|
||||
"c_min": float64(2),
|
||||
"d_max": float64(6), //d
|
||||
"d_min": float64(2),
|
||||
"e_max": float64(200), //e
|
||||
"e_min": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test that if an empty array is passed, no points are pushed
|
||||
func TestBasicStatsWithNoStats(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
acc.AssertDoesNotContainMeasurement(t, "m1")
|
||||
}
|
||||
|
||||
// Test that if an unknown stat is configured, it doesn't explode
|
||||
func TestBasicStatsWithUnknownStat(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"crazy"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
acc.AssertDoesNotContainMeasurement(t, "m1")
|
||||
}
|
||||
@@ -1,38 +1,25 @@
# Histogram Aggregator Plugin

#### Goal
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.

This plugin was added for ability to build histograms.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).

#### Description
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts however are not reset between periods and will be non-strictly
increasing while Telegraf is running.

The histogram aggregator plugin aggregates values of specified metric's
fields. The metric is emitted every `period` seconds. All you need to do
is to specify borders of histogram buckets and fields, for which you want
to aggregate histogram.
#### Design

#### How it works

The each metric is passed to the aggregator and this aggregator searches
Each metric is passed to the aggregator and this aggregator searches
histogram buckets for those fields, which have been specified in the
config. If buckets are found, the aggregator will put +1 to appropriate
bucket. Otherwise, nothing will happen. Every `period` seconds these data
will be pushed to output.
config. If buckets are found, the aggregator will increment +1 to the appropriate
bucket otherwise it will be added to the `+Inf` bucket. Every `period`
seconds this data will be forwarded to the outputs.

Note, that the all hits of current bucket will be also added to all next
buckets in final result of distribution. Why does it work this way? In
configuration you define right borders for each bucket in a ascending
sequence. Internally buckets are presented as ranges with borders
(0..bucketBorder]: 0..1, 0..10, 0..50, …, 0..+Inf. So the value "+1" will be
put into those buckets, in which the metric value fell with such ranges of
buckets.

This plugin creates cumulative histograms. It means, that the hits in the
buckets will always increase from the moment of telegraf start. But if you
restart telegraf, all hits in the buckets will be reset to 0.

Also, the algorithm of hit counting to buckets was implemented on the base
of the algorithm, which is implemented in the Prometheus
The algorithm of hit counting to buckets was implemented on the base
of the algorithm which is implemented in the Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).

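To make the cumulative bucketing concrete, a small standalone sketch (the borders and value come from the `le` example later in this README):

```go
package main

import "fmt"

func main() {
	borders := []float64{5, 10, 30, 70, 100} // right borders, ascending
	counts := make([]int, len(borders)+1)    // last slot is the +Inf bucket

	// A value increments every bucket whose right border it does not exceed,
	// plus +Inf, which yields a cumulative distribution.
	add := func(v float64) {
		for i, b := range borders {
			if v <= b {
				counts[i]++
			}
		}
		counts[len(borders)]++
	}

	add(10)
	fmt.Println(counts) // [0 1 1 1 1 1]: the value lands in le=10 and above
}
```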
### Configuration
@@ -40,61 +27,44 @@ of the algorithm, which is implemented in the Prometheus
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  ## The period in which to flush the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## The example of config to aggregate histogram for all fields of specified metric.
  [[aggregators.histogram.config]]
    ## The set of buckets.
    buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
    ## The name of metric.
    metric_name = "cpu"
  ## Example config that aggregates all fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
  #   ## The name of metric.
  #   measurement_name = "cpu"

  ## The example of config to aggregate histogram for concrete fields of specified metric.
  [[aggregators.histogram.config]]
    ## The set of buckets.
    buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    ## The name of metric.
    metric_name = "diskio"
    ## The concrete fields of metric.
    metric_fields = ["io_time", "read_time", "write_time"]
  ## Example config that aggregates only specific fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
  #   ## The name of metric.
  #   measurement_name = "diskio"
  #   ## The concrete fields of metric
  #   fields = ["io_time", "read_time", "write_time"]
```

#### Explanation
The user is responsible for defining the bounds of the histogram bucket as
well as the measurement name and fields to aggregate.

The field `metric_fields` is the list of metric fields. For example, the
metric `cpu` has the following fields: usage_user, usage_system,
usage_idle, usage_nice, usage_iowait, usage_irq, usage_softirq, usage_steal,
usage_guest, usage_guest_nice.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.

Note that histogram metrics will be pushed every `period` seconds.
As you know telegraf calls aggregator `Reset()` func each `period` seconds.
Histogram aggregator ignores `Reset()` and continues to count hits.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.

#### Use cases

You can specify fields using two cases:

1. The specifying only metric name. In this case all fields of metric
will be aggregated.
2. The specifying metric name and concrete field.

#### Some rules

- The setting of each histogram must be in separate section with title
`aggregators.histogram.config`.

- The each value of bucket must be float value.

- Don\`t include the border bucket `+Inf`. It will be done automatically.

### Measurements & Fields:

The postfix `bucket` will be added to each field.
The postfix `bucket` will be added to each field key.

- measurement1
  - field1_bucket
@@ -102,16 +72,15 @@ The postfix `bucket` will be added to each field.

### Tags:

All measurements have tag `le`. This tag has the border value of bucket. It
means that the metric value is less or equal to the value of this tag. For
example, let assume that we have the metric value 10 and the following
buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value 10,
because the metrics value is passed into bucket with right border value `10`.
All measurements are given the tag `le`. This tag has the border value of
bucket. It means that the metric value is less than or equal to the value of
this tag. For example, let assume that we have the metric value 10 and the
following buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value
10, because the metrics value is passed into bucket with right border value
`10`.

### Example Output:

The following output will return to the Prometheus client.

```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000

@@ -24,8 +24,8 @@ type HistogramAggregator struct {

// config is the config, which contains name, field of metric and histogram buckets.
type config struct {
	Metric string   `toml:"metric_name"`
	Fields []string `toml:"metric_fields"`
	Metric  string   `toml:"measurement_name"`
	Fields  []string `toml:"fields"`
	Buckets buckets  `toml:"buckets"`
}

@@ -65,28 +65,28 @@ func NewHistogramAggregator() telegraf.Aggregator {
}

var sampleConfig = `
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  ## The period in which to flush the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## The example of config to aggregate histogram for all fields of specified metric.
  [[aggregators.histogram.config]]
    ## The set of buckets.
    buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
    ## The name of metric.
    metric_name = "cpu"
  ## Example config that aggregates all fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
  #   ## The name of metric.
  #   measurement_name = "cpu"

  ## The example of config to aggregate for specified fields of metric.
  [[aggregators.histogram.config]]
    ## The set of buckets.
    buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    ## The name of metric.
    metric_name = "diskio"
    ## The concrete fields of metric
    metric_fields = ["io_time", "read_time", "write_time"]
  ## Example config that aggregates only specific fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
  #   ## The name of metric.
  #   measurement_name = "diskio"
  #   ## The concrete fields of metric
  #   fields = ["io_time", "read_time", "write_time"]
`

// SampleConfig returns sample of config
@@ -96,7 +96,7 @@ func (h *HistogramAggregator) SampleConfig() string {

// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
	return "Keep the aggregate histogram of each metric passing through."
	return "Create aggregate histograms."
}

// Add adds new hit to the buckets

@@ -6,30 +6,37 @@ additional information can be found.

### Configuration:

This section contains the default TOML to configure the plugin. You can
generate it using `telegraf --usage <plugin-name>`.

```toml
# Description
[[inputs.example]]
  # SampleConfig
  example_option = "example_value"
```

### Measurements & Fields:
### Metrics:

Here you should add an optional description and links to where the user can
get more information about the measurements.

If the output is determined dynamically based on the input source, or there
are more metrics than can reasonably be listed, describe how the input is
mapped to the output.

- measurement1
  - field1 (type, unit)
  - field2 (float, percent)
- measurement2
  - field3 (integer, bytes)

### Tags:

- All measurements have the following tags:
- tags:
  - tag1 (optional description)
  - tag2
- measurement2 has the following tags:
- fields:
  - field1 (type, unit)
  - field2 (float, percent)

- measurement2
  - tags:
    - tag3
  - fields:
    - field3 (integer, bytes)

### Sample Queries:

@@ -44,6 +51,10 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A

### Example Output:

This section shows example output in Line Protocol format. You can often use
`telegraf --input-filter <plugin-name> --test` or use the `file` output to get
this information.

```
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455

@@ -5,6 +5,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
	_ "github.com/influxdata/telegraf/plugins/inputs/bond"
	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
	_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
	_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
@@ -14,6 +15,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/consul"
	_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
	_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
	_ "github.com/influxdata/telegraf/plugins/inputs/dcos"
	_ "github.com/influxdata/telegraf/plugins/inputs/disque"
	_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
	_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
@@ -34,8 +36,10 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
	_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
	_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
	_ "github.com/influxdata/telegraf/plugins/inputs/ipset"
	_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
	_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
	_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
@@ -50,17 +54,22 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
	_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
	_ "github.com/influxdata/telegraf/plugins/inputs/nats"
	_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
	_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
	_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
	_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
	_ "github.com/influxdata/telegraf/plugins/inputs/pf"
	_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
	_ "github.com/influxdata/telegraf/plugins/inputs/ping"
	_ "github.com/influxdata/telegraf/plugins/inputs/postfix"
	_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
	_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
	_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
@@ -74,19 +83,23 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/riak"
	_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
	_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
	_ "github.com/influxdata/telegraf/plugins/inputs/smart"
	_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
	_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
	_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/solr"
	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
	_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/system"
	_ "github.com/influxdata/telegraf/plugins/inputs/tail"
	_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
	_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
	_ "github.com/influxdata/telegraf/plugins/inputs/trig"
	_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
	_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/unbound"
	_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
	_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"

@@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to output.
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

@@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to output.
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`
}

@@ -2,7 +2,7 @@

The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).

Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server reference the [module documenation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server reference the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).

### Configuration:

85
plugins/inputs/bond/README.md
Normal file
@@ -0,0 +1,85 @@
# Bond Input Plugin

The Bond input plugin collects network bond interface status for both the
network bond interface as well as slave interfaces.
The plugin collects these metrics from `/proc/net/bonding/*` files.

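For reference, an abridged, illustrative `/proc/net/bonding/bond0` in the standard kernel format; the plugin splits this at the first `Slave Interface:` line into a bond part and a slave part, as the `gatherBondInterface` code below shows:

```
Ethernet Channel Bonding Driver: v3.7.1

Bonding Mode: fault-tolerance (active-backup)
Currently Active Slave: eth0
MII Status: up

Slave Interface: eth0
MII Status: up
Link Failure Count: 0

Slave Interface: eth1
MII Status: up
Link Failure Count: 0
```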
### Configuration:

```toml
[[inputs.bond]]
  ## Sets 'proc' directory path
  ## If not specified, then default is /proc
  # host_proc = "/proc"

  ## By default, telegraf gathers stats for all bond interfaces
  ## Setting interfaces will restrict the stats to the specified
  ## bond interfaces.
  # bond_interfaces = ["bond0"]
```

### Measurements & Fields:

- bond
  - active_slave (for active-backup mode)
  - status

- bond_slave
  - failures
  - status

### Description:

```
active_slave
  Currently active slave interface for active-backup mode.

status
  Status of bond interface or bond's slave interface (down = 0, up = 1).

failures
  Number of failures for bond's slave interface.
```

### Tags:

- bond
  - bond

- bond_slave
  - bond
  - interface

### Example output:

Configuration:

```
[[inputs.bond]]
  ## Sets 'proc' directory path
  ## If not specified, then default is /proc
  host_proc = "/proc"

  ## By default, telegraf gathers stats for all bond interfaces
  ## Setting interfaces will restrict the stats to the specified
  ## bond interfaces.
  bond_interfaces = ["bond0", "bond1"]
```

Run:

```
telegraf --config telegraf.conf --input-filter bond --test
```

Output:

```
* Plugin: inputs.bond, Collection 1
> bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
> bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
> bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000
> bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
```

204	plugins/inputs/bond/bond.go	Normal file
@@ -0,0 +1,204 @@
package bond

import (
    "bufio"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// default host proc path
const defaultHostProc = "/proc"

// env host proc variable name
const envProc = "HOST_PROC"

type Bond struct {
    HostProc       string   `toml:"host_proc"`
    BondInterfaces []string `toml:"bond_interfaces"`
}

var sampleConfig = `
  ## Sets 'proc' directory path
  ## If not specified, then default is /proc
  # host_proc = "/proc"

  ## By default, telegraf gathers stats for all bond interfaces
  ## Setting interfaces will restrict the stats to the specified
  ## bond interfaces.
  # bond_interfaces = ["bond0"]
`

func (bond *Bond) Description() string {
    return "Collect bond interface status, slave statuses and failures count"
}

func (bond *Bond) SampleConfig() string {
    return sampleConfig
}

func (bond *Bond) Gather(acc telegraf.Accumulator) error {
    // load proc path, get default value if config value and env variable are empty
    bond.loadPath()
    // list bond interfaces from bonding directory or gather all interfaces.
    bondNames, err := bond.listInterfaces()
    if err != nil {
        return err
    }
    for _, bondName := range bondNames {
        bondAbsPath := bond.HostProc + "/net/bonding/" + bondName
        file, err := ioutil.ReadFile(bondAbsPath)
        if err != nil {
            acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err))
            continue
        }
        rawFile := strings.TrimSpace(string(file))
        err = bond.gatherBondInterface(bondName, rawFile, acc)
        if err != nil {
            acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err))
        }
    }
    return nil
}

func (bond *Bond) gatherBondInterface(bondName string, rawFile string, acc telegraf.Accumulator) error {
    // the bond summary comes first; everything from the first
    // "Slave Interface:" marker onwards describes the slaves
    splitIndex := strings.Index(rawFile, "Slave Interface:")
    if splitIndex == -1 {
        splitIndex = len(rawFile)
    }
    bondPart := rawFile[:splitIndex]
    slavePart := rawFile[splitIndex:]

    err := bond.gatherBondPart(bondName, bondPart, acc)
    if err != nil {
        return err
    }
    err = bond.gatherSlavePart(bondName, slavePart, acc)
    if err != nil {
        return err
    }
    return nil
}

func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.Accumulator) error {
    fields := make(map[string]interface{})
    tags := map[string]string{
        "bond": bondName,
    }

    scanner := bufio.NewScanner(strings.NewReader(rawFile))
    for scanner.Scan() {
        line := scanner.Text()
        stats := strings.Split(line, ":")
        if len(stats) < 2 {
            continue
        }
        name := strings.TrimSpace(stats[0])
        value := strings.TrimSpace(stats[1])
        if strings.Contains(name, "Currently Active Slave") {
            fields["active_slave"] = value
        }
        if strings.Contains(name, "MII Status") {
            fields["status"] = 0
            if value == "up" {
                fields["status"] = 1
            }
            acc.AddFields("bond", fields, tags)
            return nil
        }
    }
    if err := scanner.Err(); err != nil {
        return err
    }
    return fmt.Errorf("couldn't find status info for '%s'", bondName)
}

func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error {
    var slave string
    var status int

    scanner := bufio.NewScanner(strings.NewReader(rawFile))
    for scanner.Scan() {
        line := scanner.Text()
        stats := strings.Split(line, ":")
        if len(stats) < 2 {
            continue
        }
        name := strings.TrimSpace(stats[0])
        value := strings.TrimSpace(stats[1])
        if strings.Contains(name, "Slave Interface") {
            slave = value
        }
        if strings.Contains(name, "MII Status") {
            status = 0
            if value == "up" {
                status = 1
            }
        }
        if strings.Contains(name, "Link Failure Count") {
            count, err := strconv.Atoi(value)
            if err != nil {
                return err
            }
            fields := map[string]interface{}{
                "status":   status,
                "failures": count,
            }
            tags := map[string]string{
                "bond":      bondName,
                "interface": slave,
            }
            acc.AddFields("bond_slave", fields, tags)
        }
    }
    if err := scanner.Err(); err != nil {
        return err
    }
    return nil
}

// loadPath reads the proc path from the config first;
// if it is empty, it falls back to the env variable
func (bond *Bond) loadPath() {
    if bond.HostProc == "" {
        bond.HostProc = proc(envProc, defaultHostProc)
    }
}

// proc can be used to read file paths from env
func proc(env, path string) string {
    // try to read full file path
    if p := os.Getenv(env); p != "" {
        return p
    }
    // return default path
    return path
}

func (bond *Bond) listInterfaces() ([]string, error) {
    var interfaces []string
    if len(bond.BondInterfaces) > 0 {
        interfaces = bond.BondInterfaces
    } else {
        paths, err := filepath.Glob(bond.HostProc + "/net/bonding/*")
        if err != nil {
            return nil, err
        }
        for _, p := range paths {
            interfaces = append(interfaces, filepath.Base(p))
        }
    }
    return interfaces, nil
}

func init() {
    inputs.Add("bond", func() telegraf.Input {
        return &Bond{}
    })
}
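As a usage note on the `loadPath`/`proc` fallback above: the `HOST_PROC` env variable only applies when `host_proc` is unset in the config. A small sketch of that precedence (the container path is illustrative):

```go
package main

import (
    "fmt"
    "os"
)

// proc mirrors the helper above: the env variable wins, otherwise the default.
func proc(env, def string) string {
    if p := os.Getenv(env); p != "" {
        return p
    }
    return def
}

func main() {
    fmt.Println(proc("HOST_PROC", "/proc")) // "/proc" unless HOST_PROC is set
    os.Setenv("HOST_PROC", "/rootfs/proc")  // e.g. when the host /proc is bind-mounted into a container
    fmt.Println(proc("HOST_PROC", "/proc")) // "/rootfs/proc"
}
```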
77	plugins/inputs/bond/bond_test.go	Normal file
@@ -0,0 +1,77 @@
package bond

import (
    "testing"

    "github.com/influxdata/telegraf/testutil"
)

var sampleTest802 = `
Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008)

Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0

802.3ad info
LACP rate: fast
Aggregator selection policy (ad_select): stable
bond bond0 has no active aggregator

Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:0c:29:f5:b7:11
Aggregator ID: N/A

Slave Interface: eth2
MII Status: up
Link Failure Count: 3
Permanent HW addr: 00:0c:29:f5:b7:1b
Aggregator ID: N/A
`

var sampleTestAB = `
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)

Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth2 (primary_reselect always)
Currently Active Slave: eth2
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0

Slave Interface: eth3
MII Status: down
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr:
Slave queue ID: 0

Slave Interface: eth2
MII Status: up
Speed: 100 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr:
`

func TestGatherBondInterface(t *testing.T) {
    var acc testutil.Accumulator
    bond := &Bond{}

    bond.gatherBondInterface("bond802", sampleTest802, &acc)
    acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"})
    acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"})
    acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"})

    bond.gatherBondInterface("bondAB", sampleTestAB, &acc)
    acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"})
    acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"})
    acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"})
}
@@ -26,7 +26,7 @@ func TestParseSockId(t *testing.T) {

 func TestParseMonDump(t *testing.T) {
 	dump, err := parseDump(monPerfDump)
 	assert.NoError(t, err)
-	assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
+	assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
 	assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
 }
@@ -225,7 +225,7 @@ var fileFormats = [...]fileFormat{
 }

 func numberOrString(s string) interface{} {
-	i, err := strconv.Atoi(s)
+	i, err := strconv.ParseInt(s, 10, 64)
 	if err == nil {
 		return i
 	}
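A note on why this change matters: `strconv.Atoi` returns the platform-dependent `int` type, so on 32-bit builds large counters fail with a range error, while `strconv.ParseInt(s, 10, 64)` always yields an `int64`. A minimal sketch of the behavior difference (the sample value is illustrative):

```go
package main

import (
    "fmt"
    "strconv"
)

func main() {
    s := "5678670180" // larger than a 32-bit int can hold
    if i, err := strconv.Atoi(s); err != nil {
        // on 32-bit platforms Atoi fails here with a range error
        fmt.Println("Atoi:", err)
    } else {
        fmt.Println("Atoi:", i)
    }
    // ParseInt with bitSize 64 parses the same string on any platform
    i64, err := strconv.ParseInt(s, 10, 64)
    fmt.Println("ParseInt:", i64, err)
}
```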
@@ -31,17 +31,17 @@ func TestCgroupStatistics_1(t *testing.T) {
 		"path": "testdata/memory",
 	}
 	fields := map[string]interface{}{
-		"memory.stat.cache":           1739362304123123123,
-		"memory.stat.rss":             1775325184,
-		"memory.stat.rss_huge":        778043392,
-		"memory.stat.mapped_file":     421036032,
-		"memory.stat.dirty":           -307200,
-		"memory.max_usage_in_bytes.0": 0,
-		"memory.max_usage_in_bytes.1": -1,
-		"memory.max_usage_in_bytes.2": 2,
-		"memory.limit_in_bytes":       223372036854771712,
+		"memory.stat.cache":           int64(1739362304123123123),
+		"memory.stat.rss":             int64(1775325184),
+		"memory.stat.rss_huge":        int64(778043392),
+		"memory.stat.mapped_file":     int64(421036032),
+		"memory.stat.dirty":           int64(-307200),
+		"memory.max_usage_in_bytes.0": int64(0),
+		"memory.max_usage_in_bytes.1": int64(-1),
+		"memory.max_usage_in_bytes.2": int64(2),
+		"memory.limit_in_bytes":       int64(223372036854771712),
 		"memory.use_hierarchy":        "12-781",
-		"notify_on_release":           0,
+		"notify_on_release":           int64(0),
 	}
 	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
 }

@@ -63,10 +63,10 @@ func TestCgroupStatistics_2(t *testing.T) {
 		"path": "testdata/cpu",
 	}
 	fields := map[string]interface{}{
-		"cpuacct.usage_percpu.0": -1452543795404,
-		"cpuacct.usage_percpu.1": 1376681271659,
-		"cpuacct.usage_percpu.2": 1450950799997,
-		"cpuacct.usage_percpu.3": -1473113374257,
+		"cpuacct.usage_percpu.0": int64(-1452543795404),
+		"cpuacct.usage_percpu.1": int64(1376681271659),
+		"cpuacct.usage_percpu.2": int64(1450950799997),
+		"cpuacct.usage_percpu.3": int64(-1473113374257),
 	}
 	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
 }

@@ -88,7 +88,7 @@ func TestCgroupStatistics_3(t *testing.T) {
 		"path": "testdata/memory/group_1",
 	}
 	fields := map[string]interface{}{
-		"memory.limit_in_bytes": 223372036854771712,
+		"memory.limit_in_bytes": int64(223372036854771712),
 	}
 	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

@@ -115,7 +115,7 @@ func TestCgroupStatistics_4(t *testing.T) {
 		"path": "testdata/memory/group_1/group_1_1",
 	}
 	fields := map[string]interface{}{
-		"memory.limit_in_bytes": 223372036854771712,
+		"memory.limit_in_bytes": int64(223372036854771712),
 	}
 	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

@@ -147,7 +147,7 @@ func TestCgroupStatistics_5(t *testing.T) {
 		"path": "testdata/memory/group_1/group_1_1",
 	}
 	fields := map[string]interface{}{
-		"memory.limit_in_bytes": 223372036854771712,
+		"memory.limit_in_bytes": int64(223372036854771712),
 	}
 	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

@@ -174,9 +174,9 @@ func TestCgroupStatistics_6(t *testing.T) {
 		"path": "testdata/memory",
 	}
 	fields := map[string]interface{}{
-		"memory.usage_in_bytes":      3513667584,
+		"memory.usage_in_bytes":      int64(3513667584),
 		"memory.use_hierarchy":       "12-781",
-		"memory.kmem.limit_in_bytes": 9223372036854771712,
+		"memory.kmem.limit_in_bytes": int64(9223372036854771712),
 	}
 	acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
 }
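Why the `int64(...)` wrappers matter in these tests: the accumulator stores fields as `interface{}`, and Go's reflection-based equality treats `int` and `int64` as different values even when numerically equal, so the assertions must use exactly the type the plugin now produces. A quick illustration (on a 64-bit platform, where the untyped constant fits an `int`):

```go
package main

import (
    "fmt"
    "reflect"
)

func main() {
    a := map[string]interface{}{"memory.limit_in_bytes": 223372036854771712}        // dynamic type int
    b := map[string]interface{}{"memory.limit_in_bytes": int64(223372036854771712)} // dynamic type int64

    // false: the dynamic types differ, so DeepEqual rejects the match
    fmt.Println(reflect.DeepEqual(a, b))
}
```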
@@ -1,5 +1,3 @@
-// +build linux
-
 package chrony

 import (

@@ -1,3 +0,0 @@
-// +build !linux
-
-package chrony

@@ -1,5 +1,3 @@
-// +build linux
-
 package chrony

 import (
@@ -92,7 +92,7 @@ func (c *CloudWatch) SampleConfig() string {
   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
   delay = "5m"

-  ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
+  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
   ## gaps or overlap in pulled data
   interval = "5m"
@@ -69,6 +69,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
 		config.Datacenter = c.Datacentre
 	}

+	if c.Token != "" {
+		config.Token = c.Token
+	}
+
 	if c.Username != "" {
 		config.HttpAuth = &api.HttpBasicAuth{
 			Username: c.Username,
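For reference, this is how the added `Token` option reaches Consul: the plugin copies it onto the client config before the client is built, and the upstream `consul/api` client then attaches it to every request. A minimal sketch using that package (the token value is a placeholder):

```go
package main

import (
    "fmt"

    "github.com/hashicorp/consul/api"
)

func main() {
    config := api.DefaultConfig()
    config.Token = "00000000-0000-0000-0000-000000000000" // placeholder ACL token

    client, err := api.NewClient(config)
    if err != nil {
        panic(err)
    }
    // every request made through this client now carries the ACL token
    fmt.Println(client != nil)
}
```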
@@ -20,7 +20,7 @@ var sampleChecks = []*api.HealthCheck{
 	},
 }

-func TestGatherHealtCheck(t *testing.T) {
+func TestGatherHealthCheck(t *testing.T) {
 	expectedFields := map[string]interface{}{
 		"check_name": "foo.health",
 		"status":     "passing",
@@ -21,7 +21,7 @@ var sampleConfig = `
   ## http://admin:secret@couchbase-0.example.com:8091/
   ##
   ## If no servers are specified, then localhost is used as the host.
-  ## If no protocol is specifed, HTTP is used.
+  ## If no protocol is specified, HTTP is used.
   ## If no port is specified, 8091 is used.
   servers = ["http://localhost:8091"]
 `
209	plugins/inputs/dcos/README.md	Normal file
@@ -0,0 +1,209 @@
# DC/OS Input Plugin

This input plugin gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/).

**Series Cardinality Warning**

Depending on the work load of your DC/OS cluster, this plugin can quickly
create a high number of series which, when unchecked, can cause high load on
your database.

- Use [measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) liberally to exclude unneeded metrics, as well as the node, container, and app include/exclude options.
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#retention-policy-rp).
- Limit the number of series allowed in your database using the `max-series-per-database` and `max-values-per-tag` settings.
- Consider enabling the [TSI](https://docs.influxdata.com/influxdb/v1.3/about_the_project/releasenotes-changelog/#release-notes-8) engine.
- Monitor your [series cardinality](https://docs.influxdata.com/influxdb/v1.3/troubleshooting/frequently-asked-questions/#how-can-i-query-for-series-cardinality).

### Configuration:
```toml
[[inputs.dcos]]
  ## The DC/OS cluster URL.
  cluster_url = "https://dcos-master-1"

  ## The ID of the service account.
  service_account_id = "telegraf"
  ## The private key file for the service account.
  service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"

  ## Path containing login token. If set, will read on every gather.
  # token_file = "/home/dcos/.dcos/token"

  ## In all filter options if both include and exclude are empty all items
  ## will be collected. Arrays may contain glob patterns.
  ##
  ## Node IDs to collect metrics from. If a node is excluded, no metrics will
  ## be collected for its containers or apps.
  # node_include = []
  # node_exclude = []
  ## Container IDs to collect container metrics from.
  # container_include = []
  # container_exclude = []
  ## Container IDs to collect app metrics from.
  # app_include = []
  # app_exclude = []

  ## Maximum concurrent connections to the cluster.
  # max_connections = 10
  ## Maximum time to receive a response from cluster.
  # response_timeout = "20s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## If false, skip chain & host verification
  # insecure_skip_verify = true

  ## Recommended filtering to reduce series cardinality.
  # [inputs.dcos.tagdrop]
  #   path = ["/var/lib/mesos/slave/slaves/*"]
```

#### Enterprise Authentication

When using Enterprise DC/OS, it is recommended to use a service account to
authenticate with the cluster.

The plugin requires the following permissions:
```
dcos:adminrouter:ops:system-metrics full
dcos:adminrouter:ops:mesos full
```

Follow the directions to [create a service account and assign permissions](https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/).

Quick configuration using the Enterprise CLI:
```
dcos security org service-accounts keypair telegraf-sa-key.pem telegraf-sa-cert.pem
dcos security org service-accounts create -p telegraf-sa-cert.pem -d "Telegraf DC/OS input plugin" telegraf
dcos security org users grant telegraf dcos:adminrouter:ops:system-metrics full
dcos security org users grant telegraf dcos:adminrouter:ops:mesos full
```

#### Open Source Authentication

Open Source DC/OS does not provide service accounts. Instead you can use one
of the following options:

1. [Disable authentication](https://dcos.io/docs/1.10/security/managing-authentication/#authentication-opt-out)
2. Use the `token_file` parameter to read an authentication token from a file.

Then `token_file` can be set by using the `dcos` CLI to login periodically.
The CLI can stay logged in for at most XXX days; you will need to ensure the
CLI performs a new login before this time expires.
```
dcos auth login --username foo --password bar
dcos config show core.dcos_acs_token > ~/.dcos/token
```

Another option to create a `token_file` is to generate a token using the
cluster secret. This will allow you to set the expiration date manually or
even create a never expiring token. However, if the cluster secret or the
token is compromised it cannot be revoked and may require a full reinstall of
the cluster. For more information on this technique reference
[this blog post](https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add).

### Metrics:

Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/)
for details about field interpretation.

- dcos_node
  - tags:
    - cluster
    - hostname
    - path (filesystem fields only)
    - interface (network fields only)
  - fields:
    - system_uptime (float)
    - cpu_cores (float)
    - cpu_total (float)
    - cpu_user (float)
    - cpu_system (float)
    - cpu_idle (float)
    - cpu_wait (float)
    - load_1min (float)
    - load_5min (float)
    - load_15min (float)
    - filesystem_capacity_total_bytes (int)
    - filesystem_capacity_used_bytes (int)
    - filesystem_capacity_free_bytes (int)
    - filesystem_inode_total (float)
    - filesystem_inode_used (float)
    - filesystem_inode_free (float)
    - memory_total_bytes (int)
    - memory_free_bytes (int)
    - memory_buffers_bytes (int)
    - memory_cached_bytes (int)
    - swap_total_bytes (int)
    - swap_free_bytes (int)
    - swap_used_bytes (int)
    - network_in_bytes (int)
    - network_out_bytes (int)
    - network_in_packets (float)
    - network_out_packets (float)
    - network_in_dropped (float)
    - network_out_dropped (float)
    - network_in_errors (float)
    - network_out_errors (float)
    - process_count (float)

- dcos_container
  - tags:
    - cluster
    - hostname
    - container_id
    - task_name
  - fields:
    - cpus_limit (float)
    - cpus_system_time (float)
    - cpus_throttled_time (float)
    - cpus_user_time (float)
    - disk_limit_bytes (int)
    - disk_used_bytes (int)
    - mem_limit_bytes (int)
    - mem_total_bytes (int)
    - net_rx_bytes (int)
    - net_rx_dropped (float)
    - net_rx_errors (float)
    - net_rx_packets (float)
    - net_tx_bytes (int)
    - net_tx_dropped (float)
    - net_tx_errors (float)
    - net_tx_packets (float)

- dcos_app
  - tags:
    - cluster
    - hostname
    - container_id
    - task_name
  - fields:
    - fields are application specific

### Example Output:

```
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/boot filesystem_capacity_free_bytes=918188032i,filesystem_capacity_total_bytes=1063256064i,filesystem_capacity_used_bytes=145068032i,filesystem_inode_free=523958,filesystem_inode_total=524288,filesystem_inode_used=330 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=dummy0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=docker0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18 cpu_cores=2,cpu_idle=81.62,cpu_system=4.19,cpu_total=13.670000000000002,cpu_user=9.48,cpu_wait=0,load_15min=0.7,load_1min=0.22,load_5min=0.6,memory_buffers_bytes=970752i,memory_cached_bytes=1830473728i,memory_free_bytes=1178636288i,memory_total_bytes=3975073792i,process_count=198,swap_free_bytes=859828224i,swap_total_bytes=859828224i,swap_used_bytes=0i,system_uptime=18874 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=lo network_in_bytes=1090992450i,network_in_dropped=0,network_in_errors=0,network_in_packets=1546938,network_out_bytes=1090992450i,network_out_dropped=0,network_out_errors=0,network_out_packets=1546938 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/ filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=minuteman network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=eth0 network_in_bytes=539886216i,network_in_dropped=1,network_in_errors=0,network_in_packets=979808,network_out_bytes=112395836i,network_out_dropped=0,network_out_errors=0,network_out_packets=891239 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=spartan network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/overlay filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=vtep1024 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/plugins filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=d-dcos network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=9a78d34a-3bbf-467e-81cf-a57737f154ee,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 cpus_limit=0.3,cpus_system_time=307.31,cpus_throttled_time=102.029930607,cpus_user_time=268.57,disk_limit_bytes=268435456i,disk_used_bytes=30953472i,mem_limit_bytes=570425344i,mem_total_bytes=13316096i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18,task_name=hello-world cpus_limit=0.6,cpus_system_time=25.6,cpus_throttled_time=327.977109217,cpus_user_time=566.54,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=1107296256i,mem_total_bytes=335941632i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=c76e1488-4fb7-4010-a4cf-25725f8173f9,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbe0b2f9-061f-44ac-8f15-4844229e8231,hostname=192.168.122.18,task_name=telegraf cpus_limit=0.2,cpus_system_time=8.109999999,cpus_throttled_time=93.183916045,cpus_user_time=17.97,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=167772160i,mem_total_bytes=0i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 cpus_limit=0.2,cpus_system_time=2.69,cpus_throttled_time=20.064861214,cpus_user_time=6.56,disk_limit_bytes=268435456i,disk_used_bytes=29360128i,mem_limit_bytes=297795584i,mem_total_bytes=13733888i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
```
332	plugins/inputs/dcos/client.go	Normal file
@@ -0,0 +1,332 @@
package dcos

import (
    "bytes"
    "context"
    "crypto/tls"
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "time"

    jwt "github.com/dgrijalva/jwt-go"
)

const (
    // How long to stay logged in for
    loginDuration = 65 * time.Minute
)

// Client is an interface for communicating with the DC/OS API.
type Client interface {
    SetToken(token string)

    Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
    GetSummary(ctx context.Context) (*Summary, error)
    GetContainers(ctx context.Context, node string) ([]Container, error)
    GetNodeMetrics(ctx context.Context, node string) (*Metrics, error)
    GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error)
    GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error)
}

type APIError struct {
    StatusCode  int
    Title       string
    Description string
}

// Login is request data for logging in.
type Login struct {
    UID   string `json:"uid"`
    Exp   int64  `json:"exp"`
    Token string `json:"token"`
}

// LoginError is the response when login fails.
type LoginError struct {
    Title       string `json:"title"`
    Description string `json:"description"`
}

// LoginAuth is the response to a successful login.
type LoginAuth struct {
    Token string `json:"token"`
}

// Slave is a node in the cluster.
type Slave struct {
    ID string `json:"id"`
}

// Summary provides high level cluster wide information.
type Summary struct {
    Cluster string
    Slaves  []Slave
}

// Container is a container on a node.
type Container struct {
    ID string
}

type DataPoint struct {
    Name  string            `json:"name"`
    Tags  map[string]string `json:"tags"`
    Unit  string            `json:"unit"`
    Value float64           `json:"value"`
}

// Metrics are the DC/OS metrics
type Metrics struct {
    Datapoints []DataPoint            `json:"datapoints"`
    Dimensions map[string]interface{} `json:"dimensions"`
}

// AuthToken is the authentication token.
type AuthToken struct {
    Text   string
    Expire time.Time
}

// ClusterClient is a Client that uses the cluster URL.
type ClusterClient struct {
    clusterURL  *url.URL
    httpClient  *http.Client
    credentials *Credentials
    token       string
    semaphore   chan struct{}
}

type claims struct {
    UID string `json:"uid"`
    jwt.StandardClaims
}

func (e APIError) Error() string {
    if e.Description != "" {
        return fmt.Sprintf("%s: %s", e.Title, e.Description)
    }
    return e.Title
}

func NewClusterClient(
    clusterURL *url.URL,
    timeout time.Duration,
    maxConns int,
    tlsConfig *tls.Config,
) *ClusterClient {
    httpClient := &http.Client{
        Transport: &http.Transport{
            MaxIdleConns:    maxConns,
            TLSClientConfig: tlsConfig,
        },
        Timeout: timeout,
    }
    semaphore := make(chan struct{}, maxConns)

    c := &ClusterClient{
        clusterURL: clusterURL,
        httpClient: httpClient,
        semaphore:  semaphore,
    }
    return c
}

func (c *ClusterClient) SetToken(token string) {
    c.token = token
}

func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
    token, err := c.createLoginToken(sa)
    if err != nil {
        return nil, err
    }

    exp := time.Now().Add(loginDuration)

    body := &Login{
        UID:   sa.AccountID,
        Exp:   exp.Unix(),
        Token: token,
    }

    octets, err := json.Marshal(body)
    if err != nil {
        return nil, err
    }

    req, err := http.NewRequest("POST", c.url("/acs/api/v1/auth/login"), bytes.NewBuffer(octets))
    if err != nil {
        return nil, err
    }
    req.Header.Add("Content-Type", "application/json")

    req = req.WithContext(ctx)
    resp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode == http.StatusOK {
        auth := &LoginAuth{}
        dec := json.NewDecoder(resp.Body)
        err = dec.Decode(auth)
        if err != nil {
            return nil, err
        }

        token := &AuthToken{
            Text:   auth.Token,
            Expire: exp,
        }
        return token, nil
    }

    loginError := &LoginError{}
    dec := json.NewDecoder(resp.Body)
    err = dec.Decode(loginError)
    if err != nil {
        err := &APIError{
            StatusCode: resp.StatusCode,
            Title:      resp.Status,
        }
        return nil, err
    }

    err = &APIError{
        StatusCode:  resp.StatusCode,
        Title:       loginError.Title,
        Description: loginError.Description,
    }
    return nil, err
}

func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) {
    summary := &Summary{}
    err := c.doGet(ctx, c.url("/mesos/master/state-summary"), summary)
    if err != nil {
        return nil, err
    }

    return summary, nil
}

func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
    list := []string{}

    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node)
    err := c.doGet(ctx, c.url(path), &list)
    if err != nil {
        return nil, err
    }

    containers := make([]Container, 0, len(list))
    for _, c := range list {
        containers = append(containers, Container{ID: c})
    }

    return containers, nil
}

func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, error) {
    metrics := &Metrics{}

    err := c.doGet(ctx, url, metrics)
    if err != nil {
        return nil, err
    }

    return metrics, nil
}

func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node)
    return c.getMetrics(ctx, c.url(path))
}

func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container)
    return c.getMetrics(ctx, c.url(path))
}

func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container)
    return c.getMetrics(ctx, c.url(path))
}

func createGetRequest(url string, token string) (*http.Request, error) {
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }

    if token != "" {
        req.Header.Add("Authorization", "token="+token)
    }
    req.Header.Add("Accept", "application/json")

    return req, nil
}

func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) error {
    req, err := createGetRequest(url, c.token)
    if err != nil {
        return err
    }

    // acquire a slot from the connection-limiting semaphore, or give up
    // if the context is cancelled first
    select {
    case c.semaphore <- struct{}{}:
        break
    case <-ctx.Done():
        return ctx.Err()
    }

    resp, err := c.httpClient.Do(req.WithContext(ctx))
    if err != nil {
        <-c.semaphore
        return err
    }
    defer func() {
        resp.Body.Close()
        <-c.semaphore
    }()

    // Clear invalid token if unauthorized
    if resp.StatusCode == http.StatusUnauthorized {
        c.token = ""
    }

    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
        return &APIError{
            StatusCode: resp.StatusCode,
            Title:      resp.Status,
        }
    }

    if resp.StatusCode == http.StatusNoContent {
        return nil
    }

    err = json.NewDecoder(resp.Body).Decode(v)
    return err
}

func (c *ClusterClient) url(path string) string {
    // copy the URL value so setting Path does not mutate the shared
    // clusterURL across concurrent requests
    url := *c.clusterURL
    url.Path = path
    return url.String()
}

func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
    token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{
        UID: sa.AccountID,
        StandardClaims: jwt.StandardClaims{
            // How long we have to login with this token
            ExpiresAt: time.Now().Add(5 * time.Minute).Unix(),
        },
    })
    return token.SignedString(sa.PrivateKey)
}
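The `semaphore` field above is the standard buffered-channel pattern for capping concurrency: each request sends into a channel of capacity `maxConns` before dialing and receives from it when done. A stripped-down sketch of just that mechanism (the worker body is illustrative):

```go
package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    const maxConns = 3
    sem := make(chan struct{}, maxConns) // at most maxConns requests in flight
    var wg sync.WaitGroup

    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            sem <- struct{}{}        // acquire: blocks while maxConns slots are taken
            defer func() { <-sem }() // release on completion
            fmt.Println("request", id, "running")
            time.Sleep(100 * time.Millisecond) // stand-in for the HTTP round trip
        }(i)
    }
    wg.Wait()
}
```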
232	plugins/inputs/dcos/client_test.go	Normal file
@@ -0,0 +1,232 @@
package dcos

import (
    "context"
    "fmt"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    jwt "github.com/dgrijalva/jwt-go"
    "github.com/stretchr/testify/require"
)

const (
    privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
)

func TestLogin(t *testing.T) {
    var tests = []struct {
        name          string
        responseCode  int
        responseBody  string
        expectedError error
        expectedToken string
    }{
        {
            name:          "Login successful",
            responseCode:  200,
            responseBody:  `{"token": "XXX.YYY.ZZZ"}`,
            expectedError: nil,
            expectedToken: "XXX.YYY.ZZZ",
        },
        {
            name:          "Unauthorized Error",
            responseCode:  http.StatusUnauthorized,
            responseBody:  `{"title": "x", "description": "y"}`,
            expectedError: &APIError{http.StatusUnauthorized, "x", "y"},
            expectedToken: "",
        },
    }

    key, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(privateKey))
    require.NoError(t, err)

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)
            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

            ctx := context.Background()
            sa := &ServiceAccount{
                AccountID:  "telegraf",
                PrivateKey: key,
            }
            client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
            auth, err := client.Login(ctx, sa)

            require.Equal(t, tt.expectedError, err)

            if tt.expectedToken != "" {
                require.Equal(t, tt.expectedToken, auth.Text)
            } else {
                require.Nil(t, auth)
            }

            ts.Close()
        })
    }
}

func TestGetSummary(t *testing.T) {
    var tests = []struct {
        name          string
        responseCode  int
        responseBody  string
        expectedValue *Summary
        expectedError error
    }{
        {
            name:          "No nodes",
            responseCode:  200,
            responseBody:  `{"cluster": "a", "slaves": []}`,
            expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
            expectedError: nil,
        },
        {
            name:          "Unauthorized Error",
            responseCode:  http.StatusUnauthorized,
            responseBody:  `<html></html>`,
            expectedValue: nil,
            expectedError: &APIError{StatusCode: http.StatusUnauthorized, Title: "401 Unauthorized"},
        },
        {
            name:         "Has nodes",
            responseCode: 200,
            responseBody: `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
            expectedValue: &Summary{
                Cluster: "a",
                Slaves: []Slave{
                    Slave{ID: "a"},
                    Slave{ID: "b"},
                },
            },
            expectedError: nil,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)
            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

            ctx := context.Background()
            client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
            summary, err := client.GetSummary(ctx)

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, summary)

            ts.Close()
        })
    }
}

func TestGetNodeMetrics(t *testing.T) {
    var tests = []struct {
        name          string
        responseCode  int
        responseBody  string
        expectedValue *Metrics
        expectedError error
    }{
        {
            name:          "Empty Body",
            responseCode:  200,
            responseBody:  `{}`,
            expectedValue: &Metrics{},
            expectedError: nil,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)
            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

            ctx := context.Background()
            client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
            m, err := client.GetNodeMetrics(ctx, "foo")

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, m)

            ts.Close()
        })
    }
}

func TestGetContainerMetrics(t *testing.T) {
    var tests = []struct {
        name          string
        responseCode  int
        responseBody  string
        expectedValue *Metrics
        expectedError error
    }{
        {
            name:          "204 No Contents",
            responseCode:  204,
            responseBody:  ``,
            expectedValue: &Metrics{},
            expectedError: nil,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // check the path
                w.WriteHeader(tt.responseCode)
                fmt.Fprintln(w, tt.responseBody)
            })
            ts := httptest.NewServer(handler)
            u, err := url.Parse(ts.URL)
            require.NoError(t, err)

            ctx := context.Background()
            client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
            m, err := client.GetContainerMetrics(ctx, "foo", "bar")

            require.Equal(t, tt.expectedError, err)
            require.Equal(t, tt.expectedValue, m)

            ts.Close()
        })
    }
}
72	plugins/inputs/dcos/creds.go	Normal file
@@ -0,0 +1,72 @@
package dcos

import (
    "context"
    "crypto/rsa"
    "fmt"
    "io/ioutil"
    "strings"
    "time"
    "unicode/utf8"
)

const (
    // How long before expiration to renew token
    relogDuration = 5 * time.Minute
)

type Credentials interface {
    Token(ctx context.Context, client Client) (string, error)
    IsExpired() bool
}

type ServiceAccount struct {
    AccountID  string
    PrivateKey *rsa.PrivateKey

    auth *AuthToken
}

type TokenCreds struct {
    Path string
}

type NullCreds struct {
}

func (c *ServiceAccount) Token(ctx context.Context, client Client) (string, error) {
    auth, err := client.Login(ctx, c)
    if err != nil {
        return "", err
    }
    c.auth = auth
    return auth.Text, nil
}

func (c *ServiceAccount) IsExpired() bool {
    // expired when no token has been acquired yet, or when the token is
    // within relogDuration of its expiration time
    return c.auth == nil || c.auth.Text == "" || time.Now().After(c.auth.Expire.Add(-relogDuration))
}

func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) {
    octets, err := ioutil.ReadFile(c.Path)
    if err != nil {
        return "", fmt.Errorf("error reading token file %q: %s", c.Path, err)
    }
    if !utf8.Valid(octets) {
        return "", fmt.Errorf("token file does not contain utf-8 encoded text: %s", c.Path)
    }
    token := strings.TrimSpace(string(octets))
    return token, nil
}

func (c *TokenCreds) IsExpired() bool {
    return true
}

func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) {
    return "", nil
}

func (c *NullCreds) IsExpired() bool {
    return true
}
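The `relogDuration` above gives a renewal window: a token is treated as expired a few minutes before it actually lapses, so the plugin re-logs in ahead of time rather than racing the deadline. A toy illustration of that check (the times are made up):

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    relogDuration := 5 * time.Minute
    expire := time.Now().Add(3 * time.Minute) // token nominally valid for 3 more minutes

    // still valid in absolute terms, but inside the renewal window,
    // so the credentials report themselves expired and trigger a re-login
    expired := time.Now().After(expire.Add(-relogDuration))
    fmt.Println(expired) // true
}
```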
435	plugins/inputs/dcos/dcos.go	Normal file
@@ -0,0 +1,435 @@
|
||||
package dcos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
jwt "github.com/dgrijalva/jwt-go"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxConnections = 10
|
||||
defaultResponseTimeout = 20 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
nodeDimensions = []string{
|
||||
"hostname",
|
||||
"path",
|
||||
"interface",
|
||||
}
|
||||
containerDimensions = []string{
|
||||
"hostname",
|
||||
"container_id",
|
||||
"task_name",
|
||||
}
|
||||
appDimensions = []string{
|
||||
"hostname",
|
||||
"container_id",
|
||||
"task_name",
|
||||
}
|
||||
)
|
||||
|
||||
type DCOS struct {
|
||||
ClusterURL string `toml:"cluster_url"`
|
||||
|
||||
ServiceAccountID string `toml:"service_account_id"`
|
||||
ServiceAccountPrivateKey string
|
||||
|
||||
TokenFile string
|
||||
|
||||
NodeInclude []string
|
||||
NodeExclude []string
|
||||
ContainerInclude []string
|
||||
ContainerExclude []string
|
||||
AppInclude []string
|
||||
AppExclude []string
|
||||
|
||||
MaxConnections int
|
||||
ResponseTimeout internal.Duration
|
||||
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
|
||||
|
||||
client Client
|
||||
creds Credentials
|
||||
|
||||
initialized bool
|
||||
nodeFilter filter.Filter
|
||||
containerFilter filter.Filter
|
||||
appFilter filter.Filter
|
||||
taskNameFilter filter.Filter
|
||||
}
|
||||
|
||||
func (d *DCOS) Description() string {
|
||||
return "Input plugin for DC/OS metrics"
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## The DC/OS cluster URL.
|
||||
cluster_url = "https://dcos-ee-master-1"
|
||||
|
||||
## The ID of the service account.
|
||||
service_account_id = "telegraf"
|
||||
## The private key file for the service account.
|
||||
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
|
||||
|
||||
## Path containing login token. If set, will read on every gather.
|
||||
# token_file = "/home/dcos/.dcos/token"
|
||||
|
||||
## In all filter options if both include and exclude are empty all items
|
||||
## will be collected. Arrays may contain glob patterns.
|
||||
##
|
||||
## Node IDs to collect metrics from. If a node is excluded, no metrics will
|
||||
## be collected for its containers or apps.
|
||||
# node_include = []
|
||||
# node_exclude = []
|
||||
## Container IDs to collect container metrics from.
|
||||
# container_include = []
|
||||
# container_exclude = []
|
||||
## Container IDs to collect app metrics from.
|
||||
# app_include = []
|
||||
# app_exclude = []
|
||||
|
||||
## Maximum concurrent connections to the cluster.
|
||||
# max_connections = 10
|
||||
## Maximum time to receive a response from cluster.
|
||||
# response_timeout = "20s"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
|
||||
## Recommended filtering to reduce series cardinality.
|
||||
# [inputs.dcos.tagdrop]
|
||||
# path = ["/var/lib/mesos/slave/slaves/*"]
|
||||
`
|
||||
|
||||
func (d *DCOS) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (d *DCOS) Gather(acc telegraf.Accumulator) error {
|
||||
err := d.init()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
token, err := d.creds.Token(ctx, d.client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.client.SetToken(token)
|
||||
|
||||
summary, err := d.client.GetSummary(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, node := range summary.Slaves {
|
||||
wg.Add(1)
|
||||
go func(node string) {
|
||||
defer wg.Done()
|
||||
d.GatherNode(ctx, acc, summary.Cluster, node)
|
||||
}(node.ID)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DCOS) GatherNode(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
|
||||
if !d.nodeFilter.Match(node) {
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
m, err := d.client.GetNodeMetrics(ctx, node)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
d.addNodeMetrics(acc, cluster, m)
|
||||
}()
|
||||
|
||||
d.GatherContainers(ctx, acc, cluster, node)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (d *DCOS) GatherContainers(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
|
||||
containers, err := d.client.GetContainers(ctx, node)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, container := range containers {
|
||||
if d.containerFilter.Match(container.ID) {
|
||||
wg.Add(1)
|
||||
go func(container string) {
|
||||
defer wg.Done()
|
||||
m, err := d.client.GetContainerMetrics(ctx, node, container)
|
||||
if err != nil {
|
||||
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
|
||||
return
|
||||
}
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
d.addContainerMetrics(acc, cluster, m)
|
||||
}(container.ID)
|
||||
}
|
||||
|
||||
if d.appFilter.Match(container.ID) {
|
||||
wg.Add(1)
|
||||
go func(container string) {
|
||||
defer wg.Done()
|
||||
m, err := d.client.GetAppMetrics(ctx, node, container)
|
||||
if err != nil {
|
||||
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
|
||||
return
|
||||
}
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
d.addAppMetrics(acc, cluster, m)
|
||||
}(container.ID)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
type point struct {
|
||||
tags map[string]string
|
||||
labels map[string]string
|
||||
fields map[string]interface{}
|
||||
}
|
||||
|
||||
func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point {
|
||||
points := make(map[string]*point)
|
||||
for _, dp := range m.Datapoints {
|
||||
fieldKey := strings.Replace(dp.Name, ".", "_", -1)
|
||||
|
||||
tags := dp.Tags
|
||||
if tags == nil {
|
||||
tags = make(map[string]string)
|
||||
}
|
||||
|
||||
if dp.Unit == "bytes" && !strings.HasSuffix(fieldKey, "_bytes") {
|
||||
fieldKey = fieldKey + "_bytes"
|
||||
}
|
||||
|
||||
if strings.HasPrefix(fieldKey, "dcos_metrics_module_") {
|
||||
fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_")
|
||||
}
|
||||
|
||||
tagset := make([]string, 0, len(tags))
|
||||
for k, v := range tags {
|
||||
tagset = append(tagset, k+"="+v)
|
||||
}
|
||||
sort.Strings(tagset)
|
||||
seriesParts := make([]string, 0, len(tagset))
|
||||
seriesParts = append(seriesParts, tagset...)
|
||||
seriesKey := strings.Join(seriesParts, ",")
|
||||
|
||||
p, ok := points[seriesKey]
|
||||
if !ok {
|
||||
p = &point{}
|
||||
p.tags = tags
|
||||
p.labels = make(map[string]string)
|
||||
p.fields = make(map[string]interface{})
|
||||
points[seriesKey] = p
|
||||
}
|
||||
|
||||
if dp.Unit == "bytes" {
|
||||
p.fields[fieldKey] = int64(dp.Value)
|
||||
} else {
|
||||
p.fields[fieldKey] = dp.Value
|
||||
}
|
||||
}
|
||||
|
||||
results := make([]*point, 0, len(points))
|
||||
for _, p := range points {
|
||||
for k, v := range m.Dimensions {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
p.tags[k] = v
|
||||
case map[string]string:
|
||||
if k == "labels" {
|
||||
for k, v := range v {
|
||||
p.labels[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
results = append(results, p)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) {
|
||||
tm := time.Now()
|
||||
|
||||
points := d.createPoints(acc, m)
|
||||
|
||||
for _, p := range points {
|
||||
tags := make(map[string]string)
|
||||
tags["cluster"] = cluster
|
||||
for _, tagkey := range tagDimensions {
|
||||
v, ok := p.tags[tagkey]
|
||||
if ok {
|
||||
tags[tagkey] = v
|
||||
}
|
||||
}
|
||||
for k, v := range p.labels {
|
||||
tags[k] = v
|
||||
}
|
||||
|
||||
acc.AddFields(mname, p.fields, tags, tm)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
|
||||
d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions)
|
||||
}
|
||||
|
||||
func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
|
||||
d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions)
|
||||
}
|
||||
|
||||
func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
|
||||
d.addMetrics(acc, cluster, "dcos_app", m, appDimensions)
|
||||
}
|
||||
|
||||
func (d *DCOS) init() error {
|
||||
if !d.initialized {
|
||||
err := d.createFilters()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if d.client == nil {
|
||||
client, err := d.createClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.client = client
|
||||
}
|
||||
|
||||
if d.creds == nil {
|
||||
creds, err := d.createCredentials()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.creds = creds
|
||||
}
|
||||
|
||||
d.initialized = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DCOS) createClient() (Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url, err := url.Parse(d.ClusterURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := NewClusterClient(
|
||||
url,
|
||||
d.ResponseTimeout.Duration,
|
||||
d.MaxConnections,
|
||||
tlsCfg,
|
||||
)
|
||||
|
||||
return client, nil
|
||||
}
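
// createCredentials picks an auth strategy: a service account with an RSA
// private key when both are configured, a token file when given, and
// otherwise unauthenticated access via NullCreds.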
func (d *DCOS) createCredentials() (Credentials, error) {
|
||||
if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" {
|
||||
bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(bs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
creds := &ServiceAccount{
|
||||
AccountID: d.ServiceAccountID,
|
||||
PrivateKey: privateKey,
|
||||
}
|
||||
return creds, nil
|
||||
} else if d.TokenFile != "" {
|
||||
creds := &TokenCreds{
|
||||
Path: d.TokenFile,
|
||||
}
|
||||
return creds, nil
|
||||
} else {
|
||||
creds := &NullCreds{}
|
||||
return creds, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DCOS) createFilters() error {
|
||||
var err error
|
||||
d.nodeFilter, err = filter.NewIncludeExcludeFilter(
|
||||
d.NodeInclude, d.NodeExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.containerFilter, err = filter.NewIncludeExcludeFilter(
|
||||
d.ContainerInclude, d.ContainerExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.appFilter, err = filter.NewIncludeExcludeFilter(
|
||||
d.AppInclude, d.AppExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("dcos", func() telegraf.Input {
|
||||
return &DCOS{
|
||||
MaxConnections: defaultMaxConnections,
|
||||
ResponseTimeout: internal.Duration{
|
||||
Duration: defaultResponseTimeout,
|
||||
},
|
||||
}
|
||||
})
|
||||
}
plugins/inputs/dcos/dcos_test.go (new file, 441 lines)
@@ -0,0 +1,441 @@
|
||||
package dcos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
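
// mockClient satisfies the Client interface with settable function
// fields, so each test can stub only the calls it needs.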
type mockClient struct {
|
||||
SetTokenF func(token string)
|
||||
LoginF func(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
|
||||
GetSummaryF func(ctx context.Context) (*Summary, error)
|
||||
GetContainersF func(ctx context.Context, node string) ([]Container, error)
|
||||
GetNodeMetricsF func(ctx context.Context, node string) (*Metrics, error)
|
||||
GetContainerMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
|
||||
GetAppMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
|
||||
}
|
||||
|
||||
func (c *mockClient) SetToken(token string) {
|
||||
c.SetTokenF(token)
|
||||
}
|
||||
|
||||
func (c *mockClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
|
||||
return c.LoginF(ctx, sa)
|
||||
}
|
||||
|
||||
func (c *mockClient) GetSummary(ctx context.Context) (*Summary, error) {
|
||||
return c.GetSummaryF(ctx)
|
||||
}
|
||||
|
||||
func (c *mockClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
|
||||
return c.GetContainersF(ctx, node)
|
||||
}
|
||||
|
||||
func (c *mockClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
|
||||
return c.GetNodeMetricsF(ctx, node)
|
||||
}
|
||||
|
||||
func (c *mockClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
|
||||
return c.GetContainerMetricsF(ctx, node, container)
|
||||
}
|
||||
|
||||
func (c *mockClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
|
||||
return c.GetAppMetricsF(ctx, node, container)
|
||||
}
|
||||
|
||||
func TestAddNodeMetrics(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
metrics *Metrics
|
||||
check func(*testutil.Accumulator) []bool
|
||||
}{
|
||||
{
|
||||
name: "basic datapoint conversion",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "process.count",
|
||||
Unit: "count",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{acc.HasPoint(
|
||||
"dcos_node",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
},
|
||||
"process_count", 42.0,
|
||||
)}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "path added as tag",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "filesystem.inode.free",
|
||||
Tags: map[string]string{
|
||||
"path": "/var/lib",
|
||||
},
|
||||
Unit: "count",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{acc.HasPoint(
|
||||
"dcos_node",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"path": "/var/lib",
|
||||
},
|
||||
"filesystem_inode_free", 42.0,
|
||||
)}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "interface added as tag",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "network.out.dropped",
|
||||
Tags: map[string]string{
|
||||
"interface": "eth0",
|
||||
},
|
||||
Unit: "count",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{acc.HasPoint(
|
||||
"dcos_node",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"interface": "eth0",
|
||||
},
|
||||
"network_out_dropped", 42.0,
|
||||
)}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bytes unit appended to fieldkey",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "network.in",
|
||||
Tags: map[string]string{
|
||||
"interface": "eth0",
|
||||
},
|
||||
Unit: "bytes",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{acc.HasPoint(
|
||||
"dcos_node",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"interface": "eth0",
|
||||
},
|
||||
"network_in_bytes", int64(42),
|
||||
)}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "dimensions added as tags",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "process.count",
|
||||
Tags: map[string]string{},
|
||||
Unit: "count",
|
||||
Value: 42.0,
|
||||
},
|
||||
{
|
||||
Name: "memory.total",
|
||||
Tags: map[string]string{},
|
||||
Unit: "bytes",
|
||||
Value: 42,
|
||||
},
|
||||
},
|
||||
Dimensions: map[string]interface{}{
|
||||
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
|
||||
"hostname": "192.168.122.18",
|
||||
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{
|
||||
acc.HasPoint(
|
||||
"dcos_node",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"hostname": "192.168.122.18",
|
||||
},
|
||||
"process_count", 42.0),
|
||||
acc.HasPoint(
|
||||
"dcos_node",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"hostname": "192.168.122.18",
|
||||
},
|
||||
"memory_total_bytes", int64(42)),
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
dcos := &DCOS{}
|
||||
dcos.addNodeMetrics(&acc, "a", tt.metrics)
|
||||
for i, ok := range tt.check(&acc) {
|
||||
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAddContainerMetrics(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
metrics *Metrics
|
||||
check func(*testutil.Accumulator) []bool
|
||||
}{
|
||||
{
|
||||
name: "container",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "net.rx.errors",
|
||||
Tags: map[string]string{
|
||||
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
|
||||
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
|
||||
"executor_name": "Command Executor (Task: telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a) (Command: NO EXECUTABLE)",
|
||||
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
|
||||
"source": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
|
||||
},
|
||||
Unit: "count",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
Dimensions: map[string]interface{}{
|
||||
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
|
||||
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
|
||||
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
|
||||
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
|
||||
"framework_name": "marathon",
|
||||
"framework_principal": "dcos_marathon",
|
||||
"framework_role": "slave_public",
|
||||
"hostname": "192.168.122.18",
|
||||
"labels": map[string]string{
|
||||
"DCOS_SPACE": "/telegraf",
|
||||
},
|
||||
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
|
||||
"task_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
|
||||
"task_name": "telegraf",
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{
|
||||
acc.HasPoint(
|
||||
"dcos_container",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
|
||||
"hostname": "192.168.122.18",
|
||||
"task_name": "telegraf",
|
||||
"DCOS_SPACE": "/telegraf",
|
||||
},
|
||||
"net_rx_errors",
|
||||
42.0,
|
||||
),
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
dcos := &DCOS{}
|
||||
dcos.addContainerMetrics(&acc, "a", tt.metrics)
|
||||
for i, ok := range tt.check(&acc) {
|
||||
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddAppMetrics(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
metrics *Metrics
|
||||
check func(*testutil.Accumulator) []bool
|
||||
}{
|
||||
{
|
||||
name: "tags are optional",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
|
||||
Unit: "",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{
|
||||
acc.HasPoint(
|
||||
"dcos_app",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
},
|
||||
"container_throttled_bytes_per_sec", 42.0,
|
||||
),
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "dimensions are tagged",
|
||||
metrics: &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
|
||||
Unit: "",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
Dimensions: map[string]interface{}{
|
||||
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
|
||||
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
|
||||
"hostname": "192.168.122.18",
|
||||
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{
|
||||
acc.HasPoint(
|
||||
"dcos_app",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
|
||||
"hostname": "192.168.122.18",
|
||||
},
|
||||
"container_throttled_bytes_per_sec", 42.0,
|
||||
),
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
dcos := &DCOS{}
|
||||
dcos.addAppMetrics(&acc, "a", tt.metrics)
|
||||
for i, ok := range tt.check(&acc) {
|
||||
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGatherFilterNode(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
nodeInclude []string
|
||||
nodeExclude []string
|
||||
client Client
|
||||
check func(*testutil.Accumulator) []bool
|
||||
}{
|
||||
{
|
||||
name: "cluster without nodes has no metrics",
|
||||
client: &mockClient{
|
||||
SetTokenF: func(token string) {},
|
||||
GetSummaryF: func(ctx context.Context) (*Summary, error) {
|
||||
return &Summary{
|
||||
Cluster: "a",
|
||||
Slaves: []Slave{},
|
||||
}, nil
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{
|
||||
acc.NMetrics() == 0,
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "node include",
|
||||
nodeInclude: []string{"x"},
|
||||
client: &mockClient{
|
||||
SetTokenF: func(token string) {},
|
||||
GetSummaryF: func(ctx context.Context) (*Summary, error) {
|
||||
return &Summary{
|
||||
Cluster: "a",
|
||||
Slaves: []Slave{
|
||||
Slave{ID: "x"},
|
||||
Slave{ID: "y"},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
GetContainersF: func(ctx context.Context, node string) ([]Container, error) {
|
||||
return []Container{}, nil
|
||||
},
|
||||
GetNodeMetricsF: func(ctx context.Context, node string) (*Metrics, error) {
|
||||
return &Metrics{
|
||||
Datapoints: []DataPoint{
|
||||
{
|
||||
Name: "value",
|
||||
Value: 42.0,
|
||||
},
|
||||
},
|
||||
Dimensions: map[string]interface{}{
|
||||
"hostname": "x",
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
},
|
||||
check: func(acc *testutil.Accumulator) []bool {
|
||||
return []bool{
|
||||
acc.HasPoint(
|
||||
"dcos_node",
|
||||
map[string]string{
|
||||
"cluster": "a",
|
||||
"hostname": "x",
|
||||
},
|
||||
"value", 42.0,
|
||||
),
|
||||
acc.NMetrics() == 1,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
dcos := &DCOS{
|
||||
NodeInclude: tt.nodeInclude,
|
||||
NodeExclude: tt.nodeExclude,
|
||||
client: tt.client,
|
||||
}
|
||||
err := dcos.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
for i, ok := range tt.check(&acc) {
|
||||
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -16,21 +16,21 @@ const metricName = "dmcache"
|
||||
|
||||
type cacheStatus struct {
|
||||
device string
|
||||
length int
|
||||
length int64
|
||||
target string
|
||||
metadataBlocksize int
|
||||
metadataUsed int
|
||||
metadataTotal int
|
||||
cacheBlocksize int
|
||||
cacheUsed int
|
||||
cacheTotal int
|
||||
readHits int
|
||||
readMisses int
|
||||
writeHits int
|
||||
writeMisses int
|
||||
demotions int
|
||||
promotions int
|
||||
dirty int
|
||||
metadataBlocksize int64
|
||||
metadataUsed int64
|
||||
metadataTotal int64
|
||||
cacheBlocksize int64
|
||||
cacheUsed int64
|
||||
cacheTotal int64
|
||||
readHits int64
|
||||
readMisses int64
|
||||
writeHits int64
|
||||
writeMisses int64
|
||||
demotions int64
|
||||
promotions int64
|
||||
dirty int64
|
||||
}
|
||||
|
||||
func (c *DMCache) Gather(acc telegraf.Accumulator) error {
|
||||
@@ -69,12 +69,12 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
|
||||
}
|
||||
|
||||
status.device = strings.TrimRight(values[0], ":")
|
||||
status.length, err = strconv.Atoi(values[2])
|
||||
status.length, err = strconv.ParseInt(values[2], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.target = values[3]
|
||||
status.metadataBlocksize, err = strconv.Atoi(values[4])
|
||||
status.metadataBlocksize, err = strconv.ParseInt(values[4], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
@@ -82,15 +82,15 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
|
||||
if len(metadata) != 2 {
|
||||
return cacheStatus{}, parseError
|
||||
}
|
||||
status.metadataUsed, err = strconv.Atoi(metadata[0])
|
||||
status.metadataUsed, err = strconv.ParseInt(metadata[0], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.metadataTotal, err = strconv.Atoi(metadata[1])
|
||||
status.metadataTotal, err = strconv.ParseInt(metadata[1], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.cacheBlocksize, err = strconv.Atoi(values[6])
|
||||
status.cacheBlocksize, err = strconv.ParseInt(values[6], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
@@ -98,39 +98,39 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
|
||||
if len(cache) != 2 {
|
||||
return cacheStatus{}, parseError
|
||||
}
|
||||
status.cacheUsed, err = strconv.Atoi(cache[0])
|
||||
status.cacheUsed, err = strconv.ParseInt(cache[0], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.cacheTotal, err = strconv.Atoi(cache[1])
|
||||
status.cacheTotal, err = strconv.ParseInt(cache[1], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.readHits, err = strconv.Atoi(values[8])
|
||||
status.readHits, err = strconv.ParseInt(values[8], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.readMisses, err = strconv.Atoi(values[9])
|
||||
status.readMisses, err = strconv.ParseInt(values[9], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.writeHits, err = strconv.Atoi(values[10])
|
||||
status.writeHits, err = strconv.ParseInt(values[10], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.writeMisses, err = strconv.Atoi(values[11])
|
||||
status.writeMisses, err = strconv.ParseInt(values[11], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.demotions, err = strconv.Atoi(values[12])
|
||||
status.demotions, err = strconv.ParseInt(values[12], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.promotions, err = strconv.Atoi(values[13])
|
||||
status.promotions, err = strconv.ParseInt(values[13], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.dirty, err = strconv.Atoi(values[14])
|
||||
status.dirty, err = strconv.ParseInt(values[14], 10, 64)
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build linux
|
||||
|
||||
package dmcache
|
||||
|
||||
import (
|
||||
@@ -33,20 +35,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
|
||||
"device": "cs-1",
|
||||
}
|
||||
fields1 := map[string]interface{}{
|
||||
"length": 4883791872,
|
||||
"metadata_blocksize": 8,
|
||||
"metadata_used": 1018,
|
||||
"metadata_total": 1501122,
|
||||
"cache_blocksize": 512,
|
||||
"cache_used": 7,
|
||||
"cache_total": 464962,
|
||||
"read_hits": 139,
|
||||
"read_misses": 352643,
|
||||
"write_hits": 15,
|
||||
"write_misses": 46,
|
||||
"demotions": 0,
|
||||
"promotions": 7,
|
||||
"dirty": 0,
|
||||
"length": int64(4883791872),
|
||||
"metadata_blocksize": int64(8),
|
||||
"metadata_used": int64(1018),
|
||||
"metadata_total": int64(1501122),
|
||||
"cache_blocksize": int64(512),
|
||||
"cache_used": int64(7),
|
||||
"cache_total": int64(464962),
|
||||
"read_hits": int64(139),
|
||||
"read_misses": int64(352643),
|
||||
"write_hits": int64(15),
|
||||
"write_misses": int64(46),
|
||||
"demotions": int64(0),
|
||||
"promotions": int64(7),
|
||||
"dirty": int64(0),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
@@ -54,20 +56,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
|
||||
"device": "cs-2",
|
||||
}
|
||||
fields2 := map[string]interface{}{
|
||||
"length": 4294967296,
|
||||
"metadata_blocksize": 8,
|
||||
"metadata_used": 72352,
|
||||
"metadata_total": 1310720,
|
||||
"cache_blocksize": 128,
|
||||
"cache_used": 26,
|
||||
"cache_total": 24327168,
|
||||
"read_hits": 2409,
|
||||
"read_misses": 286,
|
||||
"write_hits": 265,
|
||||
"write_misses": 524682,
|
||||
"demotions": 0,
|
||||
"promotions": 0,
|
||||
"dirty": 0,
|
||||
"length": int64(4294967296),
|
||||
"metadata_blocksize": int64(8),
|
||||
"metadata_used": int64(72352),
|
||||
"metadata_total": int64(1310720),
|
||||
"cache_blocksize": int64(128),
|
||||
"cache_used": int64(26),
|
||||
"cache_total": int64(24327168),
|
||||
"read_hits": int64(2409),
|
||||
"read_misses": int64(286),
|
||||
"write_hits": int64(265),
|
||||
"write_misses": int64(524682),
|
||||
"demotions": int64(0),
|
||||
"promotions": int64(0),
|
||||
"dirty": int64(0),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
|
||||
|
||||
@@ -76,20 +78,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
|
||||
}
|
||||
|
||||
fields3 := map[string]interface{}{
|
||||
"length": 9178759168,
|
||||
"metadata_blocksize": 16,
|
||||
"metadata_used": 73370,
|
||||
"metadata_total": 2811842,
|
||||
"cache_blocksize": 640,
|
||||
"cache_used": 33,
|
||||
"cache_total": 24792130,
|
||||
"read_hits": 2548,
|
||||
"read_misses": 352929,
|
||||
"write_hits": 280,
|
||||
"write_misses": 524728,
|
||||
"demotions": 0,
|
||||
"promotions": 7,
|
||||
"dirty": 0,
|
||||
"length": int64(9178759168),
|
||||
"metadata_blocksize": int64(16),
|
||||
"metadata_used": int64(73370),
|
||||
"metadata_total": int64(2811842),
|
||||
"cache_blocksize": int64(640),
|
||||
"cache_used": int64(33),
|
||||
"cache_total": int64(24792130),
|
||||
"read_hits": int64(2548),
|
||||
"read_misses": int64(352929),
|
||||
"write_hits": int64(280),
|
||||
"write_misses": int64(524728),
|
||||
"demotions": int64(0),
|
||||
"promotions": int64(7),
|
||||
"dirty": int64(0),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
|
||||
}
|
||||
@@ -111,20 +113,20 @@ func TestNotPerDeviceGoodOutput(t *testing.T) {
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"length": 9178759168,
|
||||
"metadata_blocksize": 16,
|
||||
"metadata_used": 73370,
|
||||
"metadata_total": 2811842,
|
||||
"cache_blocksize": 640,
|
||||
"cache_used": 33,
|
||||
"cache_total": 24792130,
|
||||
"read_hits": 2548,
|
||||
"read_misses": 352929,
|
||||
"write_hits": 280,
|
||||
"write_misses": 524728,
|
||||
"demotions": 0,
|
||||
"promotions": 7,
|
||||
"dirty": 0,
|
||||
"length": int64(9178759168),
|
||||
"metadata_blocksize": int64(16),
|
||||
"metadata_used": int64(73370),
|
||||
"metadata_total": int64(2811842),
|
||||
"cache_blocksize": int64(640),
|
||||
"cache_used": int64(33),
|
||||
"cache_total": int64(24792130),
|
||||
"read_hits": int64(2548),
|
||||
"read_misses": int64(352929),
|
||||
"write_hits": int64(280),
|
||||
"write_misses": int64(524728),
|
||||
"demotions": int64(0),
|
||||
"promotions": int64(7),
|
||||
"dirty": int64(0),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields, tags)
|
||||
}
|
||||
@@ -17,7 +17,7 @@ type DnsQuery struct {
|
||||
// Domains or subdomains to query
|
||||
Domains []string
|
||||
|
||||
// Network protocl name
|
||||
// Network protocol name
|
||||
Network string
|
||||
|
||||
// Server to query
|
||||
|
||||
@@ -17,6 +17,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
|
||||
## To use environment variables (i.e., docker-machine), set endpoint = "ENV"
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
|
||||
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
## Note: configure this on only one of the manager nodes in a Swarm cluster;
## configuring it on multiple Swarm managers results in duplicated metrics.
gather_services = false
|
||||
|
||||
## Only collect metrics for these containers. Values will be appended to
|
||||
## container_name_include.
|
||||
## Deprecated (1.4.0), use container_name_include
|
||||
@@ -57,6 +62,15 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
|
||||
When using the `"ENV"` endpoint, the connection is configured using the
|
||||
[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).
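
For example (the address and cert path below are illustrative), the standard
docker-machine variables are honored when `endpoint = "ENV"`:

```
export DOCKER_HOST=tcp://127.0.0.1:2376
export DOCKER_TLS_VERIFY=1
export DOCKER_CERT_PATH=$HOME/.docker/machine/machines/default
```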
#### Kubernetes Labels
|
||||
|
||||
Kubernetes may add many labels to your containers; if they are not needed, you
may prefer to exclude them:
```
|
||||
docker_label_exclude = ["annotation.kubernetes*"]
|
||||
```
|
||||
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
Every effort was made to preserve the names based on the JSON response from the
|
||||
@@ -152,6 +166,9 @@ based on the availability of per-cpu stats on your system.
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
- docker_swarm
|
||||
- tasks_desired
|
||||
- tasks_running
|
||||
|
||||
|
||||
### Tags:
|
||||
@@ -182,6 +199,13 @@ based on the availability of per-cpu stats on your system.
|
||||
- network
|
||||
- docker_container_blkio specific:
|
||||
- device
|
||||
- docker_container_health specific:
|
||||
- health_status
|
||||
- failing_streak
|
||||
- docker_swarm specific:
|
||||
- service_id
|
||||
- service_name
|
||||
- service_mode
|
||||
|
||||
### Example Output:
|
||||
|
||||
@@ -233,4 +257,7 @@ io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888
|
||||
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
|
||||
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
|
||||
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
|
||||
> docker_swarm,\
|
||||
service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
|
||||
tasks_desired=3,tasks_running=3 1508968160000000000
|
||||
```
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
docker "github.com/docker/docker/client"
|
||||
"github.com/docker/go-connections/sockets"
|
||||
)
|
||||
@@ -20,6 +21,9 @@ type Client interface {
|
||||
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
|
||||
ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
|
||||
ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
|
||||
ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
|
||||
TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
|
||||
NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
|
||||
}
|
||||
|
||||
func NewEnvClient() (Client, error) {
|
||||
@@ -65,3 +69,12 @@ func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, s
|
||||
func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
|
||||
return c.client.ContainerInspect(ctx, containerID)
|
||||
}
|
||||
func (c *SocketClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
|
||||
return c.client.ServiceList(ctx, options)
|
||||
}
|
||||
func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
|
||||
return c.client.TaskList(ctx, options)
|
||||
}
|
||||
func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
|
||||
return c.client.NodeList(ctx, options)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
@@ -14,38 +15,29 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type DockerLabelFilter struct {
|
||||
labelInclude filter.Filter
|
||||
labelExclude filter.Filter
|
||||
}
|
||||
|
||||
type DockerContainerFilter struct {
|
||||
containerInclude filter.Filter
|
||||
containerExclude filter.Filter
|
||||
}
|
||||
|
||||
// Docker object
|
||||
type Docker struct {
|
||||
Endpoint string
|
||||
ContainerNames []string
|
||||
|
||||
GatherServices bool `toml:"gather_services"`
|
||||
|
||||
Timeout internal.Duration
|
||||
PerDevice bool `toml:"perdevice"`
|
||||
Total bool `toml:"total"`
|
||||
TagEnvironment []string `toml:"tag_env"`
|
||||
LabelInclude []string `toml:"docker_label_include"`
|
||||
LabelExclude []string `toml:"docker_label_exclude"`
|
||||
LabelFilter DockerLabelFilter
|
||||
|
||||
ContainerInclude []string `toml:"container_name_include"`
|
||||
ContainerExclude []string `toml:"container_name_exclude"`
|
||||
ContainerFilter DockerContainerFilter
|
||||
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
@@ -55,10 +47,12 @@ type Docker struct {
|
||||
newEnvClient func() (Client, error)
|
||||
newClient func(string, *tls.Config) (Client, error)
|
||||
|
||||
client Client
|
||||
httpClient *http.Client
|
||||
engine_host string
|
||||
filtersCreated bool
|
||||
client Client
|
||||
httpClient *http.Client
|
||||
engine_host string
|
||||
filtersCreated bool
|
||||
labelFilter filter.Filter
|
||||
containerFilter filter.Filter
|
||||
}
|
||||
|
||||
// KB, MB, GB, TB, PB...human friendly
|
||||
@@ -82,6 +76,9 @@ var sampleConfig = `
|
||||
## To use environment variables (i.e., docker-machine), set endpoint = "ENV"
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
|
||||
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
gather_services = false
|
||||
|
||||
## Only collect metrics for these containers, collect all if empty
|
||||
container_names = []
|
||||
|
||||
@@ -160,6 +157,13 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
acc.AddError(err)
|
||||
}
|
||||
|
||||
if d.GatherServices {
|
||||
err := d.gatherSwarmInfo(acc)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// List containers
|
||||
opts := types.ContainerListOptions{}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||
@@ -187,6 +191,75 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||
defer cancel()
|
||||
services, err := d.client.ServiceList(ctx, types.ServiceListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(services) > 0 {
|
||||
|
||||
tasks, err := d.client.TaskList(ctx, types.TaskListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := d.client.NodeList(ctx, types.NodeListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
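
// Per-service task counters: running counts tasks currently in the
// running state; tasksNoShutdown counts tasks whose desired state is not
// shutdown, which stands in for the desired count of global services.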
running := map[string]int{}
|
||||
tasksNoShutdown := map[string]int{}
|
||||
|
||||
activeNodes := make(map[string]struct{})
|
||||
for _, n := range nodes {
|
||||
if n.Status.State != swarm.NodeStateDown {
|
||||
activeNodes[n.ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for _, task := range tasks {
|
||||
if task.DesiredState != swarm.TaskStateShutdown {
|
||||
tasksNoShutdown[task.ServiceID]++
|
||||
}
|
||||
|
||||
if task.Status.State == swarm.TaskStateRunning {
|
||||
running[task.ServiceID]++
|
||||
}
|
||||
}
|
||||
|
||||
for _, service := range services {
|
||||
tags := map[string]string{}
|
||||
fields := make(map[string]interface{})
|
||||
now := time.Now()
|
||||
tags["service_id"] = service.ID
|
||||
tags["service_name"] = service.Spec.Name
|
||||
if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
|
||||
tags["service_mode"] = "replicated"
|
||||
fields["tasks_running"] = running[service.ID]
|
||||
fields["tasks_desired"] = *service.Spec.Mode.Replicated.Replicas
|
||||
} else if service.Spec.Mode.Global != nil {
|
||||
tags["service_mode"] = "global"
|
||||
fields["tasks_running"] = running[service.ID]
|
||||
fields["tasks_desired"] = tasksNoShutdown[service.ID]
|
||||
} else {
|
||||
log.Printf("E! Unknow Replicas Mode")
|
||||
}
|
||||
// Add metrics
|
||||
acc.AddFields("docker_swarm",
|
||||
fields,
|
||||
tags,
|
||||
now)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
// Init vars
|
||||
dataFields := make(map[string]interface{})
|
||||
@@ -291,12 +364,8 @@ func (d *Docker) gatherContainer(
|
||||
"container_version": imageVersion,
|
||||
}
|
||||
|
||||
if len(d.ContainerInclude) > 0 || len(d.ContainerExclude) > 0 {
|
||||
if len(d.ContainerInclude) == 0 || !d.ContainerFilter.containerInclude.Match(cname) {
|
||||
if len(d.ContainerExclude) == 0 || d.ContainerFilter.containerExclude.Match(cname) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if !d.containerFilter.Match(cname) {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||
@@ -317,19 +386,18 @@ func (d *Docker) gatherContainer(
|
||||
|
||||
// Add labels to tags
|
||||
for k, label := range container.Labels {
|
||||
if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) {
|
||||
if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) {
|
||||
tags[k] = label
|
||||
}
|
||||
if d.labelFilter.Match(k) {
|
||||
tags[k] = label
|
||||
}
|
||||
}
|
||||
|
||||
info, err := d.client.ContainerInspect(ctx, container.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error inspecting docker container: %s", err.Error())
|
||||
}
|
||||
|
||||
// Add whitelisted environment variables to tags
|
||||
if len(d.TagEnvironment) > 0 {
|
||||
info, err := d.client.ContainerInspect(ctx, container.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error inspecting docker container: %s", err.Error())
|
||||
}
|
||||
for _, envvar := range info.Config.Env {
|
||||
for _, configvar := range d.TagEnvironment {
|
||||
dock_env := strings.SplitN(envvar, "=", 2)
|
||||
@@ -341,6 +409,14 @@ func (d *Docker) gatherContainer(
|
||||
}
|
||||
}
|
||||
|
||||
if info.State.Health != nil {
|
||||
healthfields := map[string]interface{}{
|
||||
"health_status": info.State.Health.Status,
|
||||
"failing_streak": info.ContainerJSONBase.State.Health.FailingStreak,
|
||||
}
|
||||
acc.AddFields("docker_container_health", healthfields, tags, time.Now())
|
||||
}
|
||||
|
||||
gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType)
|
||||
|
||||
return nil
|
||||
@@ -355,7 +431,11 @@ func gatherContainerStats(
|
||||
total bool,
|
||||
daemonOSType string,
|
||||
) {
|
||||
now := stat.Read
|
||||
tm := stat.Read
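// Guard against an unset (pre-epoch) read time from the daemon by
// falling back to the current time.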
if tm.Before(time.Unix(0, 0)) {
|
||||
tm = time.Now()
|
||||
}
|
||||
|
||||
memfields := map[string]interface{}{
|
||||
"container_id": id,
|
||||
@@ -415,7 +495,7 @@ func gatherContainerStats(
|
||||
memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
|
||||
}
|
||||
|
||||
acc.AddFields("docker_container_mem", memfields, tags, now)
|
||||
acc.AddFields("docker_container_mem", memfields, tags, tm)
|
||||
|
||||
cpufields := map[string]interface{}{
|
||||
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
|
||||
@@ -440,7 +520,7 @@ func gatherContainerStats(
|
||||
|
||||
cputags := copyTags(tags)
|
||||
cputags["cpu"] = "cpu-total"
|
||||
acc.AddFields("docker_container_cpu", cpufields, cputags, now)
|
||||
acc.AddFields("docker_container_cpu", cpufields, cputags, tm)
|
||||
|
||||
// If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
|
||||
// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
|
||||
@@ -458,7 +538,7 @@ func gatherContainerStats(
|
||||
"usage_total": percpu,
|
||||
"container_id": id,
|
||||
}
|
||||
acc.AddFields("docker_container_cpu", fields, percputags, now)
|
||||
acc.AddFields("docker_container_cpu", fields, percputags, tm)
|
||||
}
|
||||
|
||||
totalNetworkStatMap := make(map[string]interface{})
|
||||
@@ -478,7 +558,7 @@ func gatherContainerStats(
|
||||
if perDevice {
|
||||
nettags := copyTags(tags)
|
||||
nettags["network"] = network
|
||||
acc.AddFields("docker_container_net", netfields, nettags, now)
|
||||
acc.AddFields("docker_container_net", netfields, nettags, tm)
|
||||
}
|
||||
if total {
|
||||
for field, value := range netfields {
|
||||
@@ -511,17 +591,17 @@ func gatherContainerStats(
|
||||
nettags := copyTags(tags)
|
||||
nettags["network"] = "total"
|
||||
totalNetworkStatMap["container_id"] = id
|
||||
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, now)
|
||||
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm)
|
||||
}
|
||||
|
||||
gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
|
||||
gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total)
|
||||
}
|
||||
|
||||
func gatherBlockIOMetrics(
|
||||
stat *types.StatsJSON,
|
||||
acc telegraf.Accumulator,
|
||||
tags map[string]string,
|
||||
now time.Time,
|
||||
tm time.Time,
|
||||
id string,
|
||||
perDevice bool,
|
||||
total bool,
|
||||
@@ -592,7 +672,7 @@ func gatherBlockIOMetrics(
|
||||
if perDevice {
|
||||
iotags := copyTags(tags)
|
||||
iotags["device"] = device
|
||||
acc.AddFields("docker_container_blkio", fields, iotags, now)
|
||||
acc.AddFields("docker_container_blkio", fields, iotags, tm)
|
||||
}
|
||||
if total {
|
||||
for field, value := range fields {
|
||||
@@ -623,7 +703,7 @@ func gatherBlockIOMetrics(
|
||||
totalStatMap["container_id"] = id
|
||||
iotags := copyTags(tags)
|
||||
iotags["device"] = "total"
|
||||
acc.AddFields("docker_container_blkio", totalStatMap, iotags, now)
|
||||
acc.AddFields("docker_container_blkio", totalStatMap, iotags, tm)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -666,46 +746,25 @@ func parseSize(sizeStr string) (int64, error) {
|
||||
}
|
||||
|
||||
func (d *Docker) createContainerFilters() error {
|
||||
// Backwards compatibility for deprecated `container_names` parameter.
|
||||
if len(d.ContainerNames) > 0 {
|
||||
d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
|
||||
}
|
||||
|
||||
if len(d.ContainerInclude) != 0 {
|
||||
var err error
|
||||
d.ContainerFilter.containerInclude, err = filter.Compile(d.ContainerInclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(d.ContainerExclude) != 0 {
|
||||
var err error
|
||||
d.ContainerFilter.containerExclude, err = filter.Compile(d.ContainerExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.containerFilter = filter
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Docker) createLabelFilters() error {
|
||||
if len(d.LabelInclude) != 0 {
|
||||
var err error
|
||||
d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(d.LabelExclude) != 0 {
|
||||
var err error
|
||||
d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.labelFilter = filter
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -16,6 +17,9 @@ type MockClient struct {
|
||||
ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
|
||||
ContainerStatsF func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
|
||||
ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error)
|
||||
ServiceListF func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
|
||||
TaskListF func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
|
||||
NodeListF func(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
|
||||
}
|
||||
|
||||
func (c *MockClient) Info(ctx context.Context) (types.Info, error) {
|
||||
@@ -44,21 +48,53 @@ func (c *MockClient) ContainerInspect(
|
||||
return c.ContainerInspectF(ctx, containerID)
|
||||
}
|
||||
|
||||
func (c *MockClient) ServiceList(
|
||||
ctx context.Context,
|
||||
options types.ServiceListOptions,
|
||||
) ([]swarm.Service, error) {
|
||||
return c.ServiceListF(ctx, options)
|
||||
}
|
||||
|
||||
func (c *MockClient) TaskList(
|
||||
ctx context.Context,
|
||||
options types.TaskListOptions,
|
||||
) ([]swarm.Task, error) {
|
||||
return c.TaskListF(ctx, options)
|
||||
}
|
||||
|
||||
func (c *MockClient) NodeList(
|
||||
ctx context.Context,
|
||||
options types.NodeListOptions,
|
||||
) ([]swarm.Node, error) {
|
||||
return c.NodeListF(ctx, options)
|
||||
}
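
// baseClient returns canned happy-path data for every call; tests copy it
// and override individual function fields as needed.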
var baseClient = MockClient{
|
||||
InfoF: func(context.Context) (types.Info, error) {
|
||||
return info, nil
|
||||
},
|
||||
ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
|
||||
return containerList, nil
|
||||
},
|
||||
ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
|
||||
return containerStats(), nil
|
||||
},
|
||||
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
|
||||
return containerInspect, nil
|
||||
},
|
||||
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
|
||||
return ServiceList, nil
|
||||
},
|
||||
TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) {
|
||||
return TaskList, nil
|
||||
},
|
||||
NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) {
|
||||
return NodeList, nil
|
||||
},
|
||||
}
|
||||
|
||||
func newClient(host string, tlsConfig *tls.Config) (Client, error) {
|
||||
return &MockClient{
|
||||
InfoF: func(context.Context) (types.Info, error) {
|
||||
return info, nil
|
||||
},
|
||||
ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
|
||||
return containerList, nil
|
||||
},
|
||||
ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
|
||||
return containerStats(), nil
|
||||
},
|
||||
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
|
||||
return containerInspect, nil
|
||||
},
|
||||
}, nil
|
||||
return &baseClient, nil
|
||||
}
|
||||
|
||||
func TestDockerGatherContainerStats(t *testing.T) {
|
||||
@@ -227,6 +263,15 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
|
||||
ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
|
||||
return containerInspect, nil
|
||||
},
|
||||
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
|
||||
return ServiceList, nil
|
||||
},
|
||||
TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) {
|
||||
return TaskList, nil
|
||||
},
|
||||
NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) {
|
||||
return NodeList, nil
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
@@ -234,82 +279,291 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestDockerGatherLabels(t *testing.T) {
|
||||
var gatherLabelsTests = []struct {
|
||||
include []string
|
||||
exclude []string
|
||||
expected []string
|
||||
notexpected []string
|
||||
func TestContainerLabels(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
container types.Container
|
||||
include []string
|
||||
exclude []string
|
||||
expected map[string]string
|
||||
}{
|
||||
{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
|
||||
{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
|
||||
{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
|
||||
{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
|
||||
{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
|
||||
{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
|
||||
{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
|
||||
{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
|
||||
{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
|
||||
{
|
||||
name: "Nil filters matches all",
|
||||
container: types.Container{
|
||||
Labels: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
include: nil,
|
||||
exclude: nil,
|
||||
expected: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Empty filters matches all",
|
||||
container: types.Container{
|
||||
Labels: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
include: []string{},
|
||||
exclude: []string{},
|
||||
expected: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Must match include",
|
||||
container: types.Container{
|
||||
Labels: map[string]string{
|
||||
"a": "x",
|
||||
"b": "y",
|
||||
},
|
||||
},
|
||||
include: []string{"a"},
|
||||
exclude: []string{},
|
||||
expected: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Must not match exclude",
|
||||
container: types.Container{
|
||||
Labels: map[string]string{
|
||||
"a": "x",
|
||||
"b": "y",
|
||||
},
|
||||
},
|
||||
include: []string{},
|
||||
exclude: []string{"b"},
|
||||
expected: map[string]string{
|
||||
"a": "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Include Glob",
|
||||
container: types.Container{
|
||||
Labels: map[string]string{
|
||||
"aa": "x",
|
||||
"ab": "y",
|
||||
"bb": "z",
|
||||
},
|
||||
},
|
||||
include: []string{"a*"},
|
||||
exclude: []string{},
|
||||
expected: map[string]string{
|
||||
"aa": "x",
|
||||
"ab": "y",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Exclude Glob",
|
||||
container: types.Container{
|
||||
Labels: map[string]string{
|
||||
"aa": "x",
|
||||
"ab": "y",
|
||||
"bb": "z",
|
||||
},
|
||||
},
|
||||
include: []string{},
|
||||
exclude: []string{"a*"},
|
||||
expected: map[string]string{
|
||||
"bb": "z",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Excluded Includes",
|
||||
container: types.Container{
|
||||
Labels: map[string]string{
|
||||
"aa": "x",
|
||||
"ab": "y",
|
||||
"bb": "z",
|
||||
},
|
||||
},
|
||||
include: []string{"a*"},
|
||||
exclude: []string{"*b"},
|
||||
expected: map[string]string{
|
||||
"aa": "x",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range gatherLabelsTests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
d := Docker{
|
||||
newClient: newClient,
|
||||
|
||||
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
|
||||
client := baseClient
|
||||
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
|
||||
return []types.Container{tt.container}, nil
|
||||
}
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
for _, label := range tt.include {
|
||||
d.LabelInclude = append(d.LabelInclude, label)
|
||||
}
|
||||
for _, label := range tt.exclude {
|
||||
d.LabelExclude = append(d.LabelExclude, label)
|
||||
d := Docker{
|
||||
newClient: newClientFunc,
|
||||
LabelInclude: tt.include,
|
||||
LabelExclude: tt.exclude,
|
||||
}
|
||||
|
||||
err := d.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, label := range tt.expected {
|
||||
if !acc.HasTag("docker_container_cpu", label) {
|
||||
t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
|
||||
label, tt.include, tt.exclude)
|
||||
// Grab tags from a container metric
|
||||
var actual map[string]string
|
||||
for _, metric := range acc.Metrics {
|
||||
if metric.Measurement == "docker_container_cpu" {
|
||||
actual = metric.Tags
|
||||
}
|
||||
}
|
||||
|
||||
for _, label := range tt.notexpected {
|
||||
if acc.HasTag("docker_container_cpu", label) {
|
||||
t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
|
||||
label, tt.include, tt.exclude)
|
||||
}
|
||||
for k, v := range tt.expected {
|
||||
require.Equal(t, v, actual[k])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerNames(t *testing.T) {
|
||||
var gatherContainerNames = []struct {
|
||||
include []string
|
||||
exclude []string
|
||||
expected []string
|
||||
notexpected []string
|
||||
var tests = []struct {
|
||||
name string
|
||||
containers [][]string
|
||||
include []string
|
||||
exclude []string
|
||||
expected []string
|
||||
}{
|
||||
{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
|
||||
{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
|
||||
{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
|
||||
{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
|
||||
{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
|
||||
{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
|
||||
{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
|
||||
{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
|
||||
{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
|
||||
{
|
||||
name: "Nil filters matches all",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: nil,
|
||||
exclude: nil,
|
||||
expected: []string{"etcd", "etcd2"},
|
||||
},
|
||||
{
|
||||
name: "Empty filters matches all",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{},
|
||||
exclude: []string{},
|
||||
expected: []string{"etcd", "etcd2"},
|
||||
},
|
||||
{
|
||||
name: "Match all containers",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{"*"},
|
||||
exclude: []string{},
|
||||
expected: []string{"etcd", "etcd2"},
|
||||
},
|
||||
{
|
||||
name: "Include prefix match",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{"etc*"},
|
||||
exclude: []string{},
|
||||
expected: []string{"etcd", "etcd2"},
|
||||
},
|
||||
{
|
||||
name: "Exact match",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{"etcd"},
|
||||
exclude: []string{},
|
||||
expected: []string{"etcd"},
|
||||
},
|
||||
{
|
||||
name: "Star matches zero length",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{"etcd2*"},
|
||||
exclude: []string{},
|
||||
expected: []string{"etcd2"},
|
||||
},
|
||||
{
|
||||
name: "Exclude matches all",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{},
|
||||
exclude: []string{"etc*"},
|
||||
expected: []string{},
|
||||
},
|
||||
{
|
||||
name: "Exclude single",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{},
|
||||
exclude: []string{"etcd"},
|
||||
expected: []string{"etcd2"},
|
||||
},
|
||||
{
|
||||
name: "Exclude all",
|
||||
containers: [][]string{
|
||||
{"/etcd"},
|
||||
{"/etcd2"},
|
||||
},
|
||||
include: []string{"*"},
|
||||
exclude: []string{"*"},
|
||||
expected: []string{},
|
||||
},
|
||||
{
|
||||
name: "Exclude item matching include",
|
||||
containers: [][]string{
|
||||
{"acme"},
|
||||
{"foo"},
|
||||
{"acme-test"},
|
||||
},
|
||||
include: []string{"acme*"},
|
||||
exclude: []string{"*test*"},
|
||||
expected: []string{"acme"},
|
||||
},
|
||||
{
|
||||
name: "Exclude item no wildcards",
|
||||
containers: [][]string{
|
||||
{"acme"},
|
||||
{"acme-test"},
|
||||
},
|
||||
include: []string{"acme*"},
|
||||
exclude: []string{"test"},
|
||||
expected: []string{"acme", "acme-test"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range gatherContainerNames {
|
||||
t.Run("", func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
|
||||
client := baseClient
|
||||
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
|
||||
var containers []types.Container
|
||||
for _, names := range tt.containers {
|
||||
containers = append(containers, types.Container{
|
||||
Names: names,
|
||||
})
|
||||
}
|
||||
return containers, nil
|
||||
}
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
d := Docker{
|
||||
newClient: newClient,
|
||||
newClient: newClientFunc,
|
||||
ContainerInclude: tt.include,
|
||||
ContainerExclude: tt.exclude,
|
||||
}
|
||||
@@ -317,39 +571,21 @@ func TestContainerNames(t *testing.T) {
|
||||
err := d.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set of expected names
|
||||
var expected = make(map[string]bool)
|
||||
for _, v := range tt.expected {
|
||||
expected[v] = true
|
||||
}
|
||||
|
||||
// Set of actual names
|
||||
var actual = make(map[string]bool)
|
||||
for _, metric := range acc.Metrics {
|
||||
if metric.Measurement == "docker_container_cpu" {
|
||||
if val, ok := metric.Tags["container_name"]; ok {
|
||||
var found bool = false
|
||||
for _, cname := range tt.expected {
|
||||
if val == cname {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
|
||||
}
|
||||
}
|
||||
if name, ok := metric.Tags["container_name"]; ok {
|
||||
actual[name] = true
|
||||
}
|
||||
}
|
||||
|
||||
for _, metric := range acc.Metrics {
|
||||
if metric.Measurement == "docker_container_cpu" {
|
||||
if val, ok := metric.Tags["container_name"]; ok {
|
||||
var found bool = false
|
||||
for _, cname := range tt.notexpected {
|
||||
if val == cname {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -436,3 +672,42 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestDockerGatherSwarmInfo(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
d := Docker{
|
||||
newClient: newClient,
|
||||
}
|
||||
|
||||
err := acc.GatherError(d.Gather)
|
||||
require.NoError(t, err)
|
||||
|
||||
d.gatherSwarmInfo(&acc)
|
||||
|
||||
// test docker_container_net measurement
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
"docker_swarm",
|
||||
map[string]interface{}{
|
||||
"tasks_running": int(2),
|
||||
"tasks_desired": uint64(2),
|
||||
},
|
||||
map[string]string{
|
||||
"service_id": "qolkls9g5iasdiuihcyz9rnx2",
|
||||
"service_name": "test1",
|
||||
"service_mode": "replicated",
|
||||
},
|
||||
)
|
||||
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
"docker_swarm",
|
||||
map[string]interface{}{
|
||||
"tasks_running": int(1),
|
||||
"tasks_desired": int(1),
|
||||
},
|
||||
map[string]string{
|
||||
"service_id": "qolkls9g5iasdiuihcyz9rn3",
|
||||
"service_name": "test2",
|
||||
"service_mode": "global",
|
||||
},
|
||||
)
|
||||
}

@@ -8,6 +8,7 @@ import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/api/types/swarm"
)

var info = types.Info{
@@ -133,6 +134,79 @@ var containerList = []types.Container{
	},
}

var two = uint64(2)
var ServiceList = []swarm.Service{
	swarm.Service{
		ID: "qolkls9g5iasdiuihcyz9rnx2",
		Spec: swarm.ServiceSpec{
			Annotations: swarm.Annotations{
				Name: "test1",
			},
			Mode: swarm.ServiceMode{
				Replicated: &swarm.ReplicatedService{
					Replicas: &two,
				},
			},
		},
	},
	swarm.Service{
		ID: "qolkls9g5iasdiuihcyz9rn3",
		Spec: swarm.ServiceSpec{
			Annotations: swarm.Annotations{
				Name: "test2",
			},
			Mode: swarm.ServiceMode{
				Global: &swarm.GlobalService{},
			},
		},
	},
}

var TaskList = []swarm.Task{
	swarm.Task{
		ID:        "kwh0lv7hwwbh",
		ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
		NodeID:    "0cl4jturcyd1ks3fwpd010kor",
		Status: swarm.TaskStatus{
			State: "running",
		},
		DesiredState: "running",
	},
	swarm.Task{
		ID:        "u78m5ojbivc3",
		ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
		NodeID:    "0cl4jturcyd1ks3fwpd010kor",
		Status: swarm.TaskStatus{
			State: "running",
		},
		DesiredState: "running",
	},
	swarm.Task{
		ID:        "1n1uilkhr98l",
		ServiceID: "qolkls9g5iasdiuihcyz9rn3",
		NodeID:    "0cl4jturcyd1ks3fwpd010kor",
		Status: swarm.TaskStatus{
			State: "running",
		},
		DesiredState: "running",
	},
}

var NodeList = []swarm.Node{
	swarm.Node{
		ID: "0cl4jturcyd1ks3fwpd010kor",
		Status: swarm.NodeStatus{
			State: "ready",
		},
	},
	swarm.Node{
		ID: "0cl4jturcyd1ks3fwpd010kor",
		Status: swarm.NodeStatus{
			State: "ready",
		},
	},
}

func containerStats() types.ContainerStats {
	var stat types.ContainerStats
	jsonStat := `
@@ -403,4 +477,12 @@ var containerInspect = types.ContainerJSON{
			"PATH=/bin:/sbin",
		},
	},
	ContainerJSONBase: &types.ContainerJSONBase{
		State: &types.ContainerState{
			Health: &types.Health{
				FailingStreak: 1,
				Status:        "Unhealthy",
			},
		},
	},
}

@@ -23,10 +23,21 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
  ## Set cluster_health to true when you want to also obtain cluster health stats
  cluster_health = false

  ## Set cluster_stats to true when you want to obtain cluster stats from the
  ## Master node.
  ## Adjust cluster_health_level when you want to also obtain detailed health stats
  ## The options are
  ##   - indices (default)
  ##   - cluster
  # cluster_health_level = "indices"

  ## Set cluster_stats to true when you want to also obtain cluster stats from the
  ## Master node.
  cluster_stats = false

  ## node_stats is a list of sub-stats that you want to have gathered. Valid options
  ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
  ## "breakers". Per default, all stats are gathered.
  # node_stats = ["jvm", "http"]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
@@ -35,6 +46,17 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
  # insecure_skip_verify = false
```

### Status mappings

When reporting health (green/yellow/red), an additional `status_code` field
is reported. This field maps the string `status` to an integer `status_code`
using the following rules:

* `green` - 1
* `yellow` - 2
* `red` - 3
* `unknown` - 0

### Measurements & Fields:

field data circuit breaker measurement names:

@@ -3,17 +3,16 @@ package elasticsearch
import (
	"encoding/json"
	"fmt"
	"net/http"
	"regexp"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
	"io/ioutil"
	"net/http"
	"regexp"
	"strings"
	"sync"
	"time"
)

// mask for masking username/password from error messages
@@ -94,10 +93,21 @@ const sampleConfig = `
  ## Set cluster_health to true when you want to also obtain cluster health stats
  cluster_health = false

  ## Adjust cluster_health_level when you want to also obtain detailed health stats
  ## The options are
  ##   - indices (default)
  ##   - cluster
  # cluster_health_level = "indices"

  ## Set cluster_stats to true when you want to also obtain cluster stats from the
  ## Master node.
  cluster_stats = false

  ## node_stats is a list of sub-stats that you want to have gathered. Valid options
  ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
  ## "breakers". Per default, all stats are gathered.
  # node_stats = ["jvm", "http"]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
@@ -113,7 +123,9 @@ type Elasticsearch struct {
	Servers            []string
	HttpTimeout        internal.Duration
	ClusterHealth      bool
	ClusterHealthLevel string
	ClusterStats       bool
	NodeStats          []string
	SSLCA              string `toml:"ssl_ca"`   // Path to CA file
	SSLCert            string `toml:"ssl_cert"` // Path to host cert file
	SSLKey             string `toml:"ssl_key"`  // Path to cert key file
@@ -126,10 +138,24 @@ type Elasticsearch struct {
// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
	return &Elasticsearch{
		HttpTimeout:        internal.Duration{Duration: time.Second * 5},
		HttpTimeout:        internal.Duration{Duration: time.Second * 5},
		ClusterHealthLevel: "indices",
	}
}

// perform status mapping
func mapHealthStatusToCode(s string) int {
	switch strings.ToLower(s) {
	case "green":
		return 1
	case "yellow":
		return 2
	case "red":
		return 3
	}
	return 0
}
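
A small illustration of the new helper's behavior (comments only; the mapping follows directly from the switch above):

```go
mapHealthStatusToCode("green")  // 1
mapHealthStatusToCode("Yellow") // 2 -- strings.ToLower makes this case-insensitive
mapHealthStatusToCode("RED")    // 3
mapHealthStatusToCode("purple") // 0 (anything unrecognized maps to "unknown")
```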

// SampleConfig returns sample configuration for this plugin.
func (e *Elasticsearch) SampleConfig() string {
	return sampleConfig
@@ -158,12 +184,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
	for _, serv := range e.Servers {
		go func(s string, acc telegraf.Accumulator) {
			defer wg.Done()
			var url string
			if e.Local {
				url = s + statsPathLocal
			} else {
				url = s + statsPath
			}
			url := e.nodeStatsUrl(s)
			e.isMaster = false

			if e.ClusterStats {
@@ -182,7 +203,10 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
			}

			if e.ClusterHealth {
				url = s + "/_cluster/health?level=indices"
				url = s + "/_cluster/health"
				if e.ClusterHealthLevel != "" {
					url = url + "?level=" + e.ClusterHealthLevel
				}
				if err := e.gatherClusterHealth(url, acc); err != nil {
					acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
					return
@@ -219,6 +243,22 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
	return client, nil
}

func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string {
	var url string

	if e.Local {
		url = baseUrl + statsPathLocal
	} else {
		url = baseUrl + statsPath
	}

	if len(e.NodeStats) == 0 {
		return url
	}

	return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ","))
}
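
For illustration, and assuming the plugin's usual path constants (`statsPath = "/_nodes/stats"`, `statsPathLocal = "/_nodes/_local/stats"` — not shown in this diff), the builder yields URLs like:

```go
e := NewElasticsearch()
e.NodeStats = []string{"jvm", "process"}
e.nodeStatsUrl("http://localhost:9200")
// http://localhost:9200/_nodes/stats/jvm,process

e.Local = true
e.nodeStatsUrl("http://localhost:9200")
// http://localhost:9200/_nodes/_local/stats/jvm,process
```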

func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
	nodeStats := &struct {
		ClusterName string `json:"cluster_name"`
@@ -259,6 +299,11 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er

	now := time.Now()
	for p, s := range stats {
		// if one of the individual node stats is not even in the
		// original result
		if s == nil {
			continue
		}
		f := jsonparser.JSONFlattener{}
		// parse Json, ignoring strings and bools
		err := f.FlattenJSON("", s)
@@ -279,6 +324,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator
	measurementTime := time.Now()
	clusterFields := map[string]interface{}{
		"status":               healthStats.Status,
		"status_code":          mapHealthStatusToCode(healthStats.Status),
		"timed_out":            healthStats.TimedOut,
		"number_of_nodes":      healthStats.NumberOfNodes,
		"number_of_data_nodes": healthStats.NumberOfDataNodes,
@@ -298,6 +344,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator
	for name, health := range healthStats.Indices {
		indexFields := map[string]interface{}{
			"status":                health.Status,
			"status_code":           mapHealthStatusToCode(health.Status),
			"number_of_shards":      health.NumberOfShards,
			"number_of_replicas":    health.NumberOfReplicas,
			"active_primary_shards": health.ActivePrimaryShards,

@@ -13,6 +13,16 @@ import (
	"github.com/stretchr/testify/require"
)

func defaultTags() map[string]string {
	return map[string]string{
		"cluster_name":          "es-testcluster",
		"node_attribute_master": "true",
		"node_id":               "SDFsfSDFsdfFSDSDfSFDSDF",
		"node_name":             "test.host.com",
		"node_host":             "test",
	}
}

type transportMock struct {
	statusCode int
	body       string
@@ -45,15 +55,9 @@ func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
		assert.Fail(t, msg)
	}
}
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
	tags := map[string]string{
		"cluster_name":          "es-testcluster",
		"node_attribute_master": "true",
		"node_id":               "SDFsfSDFsdfFSDSDfSFDSDF",
		"node_name":             "test.host.com",
		"node_host":             "test",
	}

func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
	tags := defaultTags()
	acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
@@ -79,6 +83,31 @@ func TestGather(t *testing.T) {
	checkNodeStatsResult(t, &acc)
}

func TestGatherIndividualStats(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.NodeStats = []string{"jvm", "process"}
	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess)

	var acc testutil.Accumulator
	if err := acc.GatherError(es.Gather); err != nil {
		t.Fatal(err)
	}

	checkIsMaster(es, false, t)

	tags := defaultTags()
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}

func TestGatherNodeStats(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
@@ -93,10 +122,11 @@ func TestGatherNodeStats(t *testing.T) {
	checkNodeStatsResult(t, &acc)
}

func TestGatherClusterHealth(t *testing.T) {
func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.ClusterHealth = true
	es.ClusterHealthLevel = ""
	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)

	var acc testutil.Accumulator
@@ -104,6 +134,56 @@ func TestGatherClusterHealth(t *testing.T) {

	checkIsMaster(es, false, t)

	acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
		clusterHealthExpected,
		map[string]string{"name": "elasticsearch_telegraf"})

	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
		v1IndexExpected,
		map[string]string{"index": "v1"})

	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
		v2IndexExpected,
		map[string]string{"index": "v2"})
}

func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.ClusterHealth = true
	es.ClusterHealthLevel = "cluster"
	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)

	var acc testutil.Accumulator
	require.NoError(t, es.gatherClusterHealth("junk", &acc))

	checkIsMaster(es, false, t)

	acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
		clusterHealthExpected,
		map[string]string{"name": "elasticsearch_telegraf"})

	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
		v1IndexExpected,
		map[string]string{"index": "v1"})

	acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
		v2IndexExpected,
		map[string]string{"index": "v2"})
}

func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.ClusterHealth = true
	es.ClusterHealthLevel = "indices"
	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices)

	var acc testutil.Accumulator
	require.NoError(t, es.gatherClusterHealth("junk", &acc))

	checkIsMaster(es, false, t)

	acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
		clusterHealthExpected,
		map[string]string{"name": "elasticsearch_telegraf"})
@@ -185,7 +265,6 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
	// ensure flag is clear so Cluster Stats would not be done
	checkIsMaster(es, false, t)
	checkNodeStatsResult(t, &acc)

}

func newElasticsearchWithClient() *Elasticsearch {
@@ -1,6 +1,21 @@
package elasticsearch

const clusterHealthResponse = `
{
	"cluster_name": "elasticsearch_telegraf",
	"status": "green",
	"timed_out": false,
	"number_of_nodes": 3,
	"number_of_data_nodes": 3,
	"active_primary_shards": 5,
	"active_shards": 15,
	"relocating_shards": 0,
	"initializing_shards": 0,
	"unassigned_shards": 0
}
`

const clusterHealthResponseWithIndices = `
{
	"cluster_name": "elasticsearch_telegraf",
	"status": "green",
@@ -39,6 +54,7 @@ const clusterHealthResponse = `

var clusterHealthExpected = map[string]interface{}{
	"status":               "green",
	"status_code":          1,
	"timed_out":            false,
	"number_of_nodes":      3,
	"number_of_data_nodes": 3,
@@ -51,6 +67,7 @@ var clusterHealthExpected = map[string]interface{}{

var v1IndexExpected = map[string]interface{}{
	"status":                "green",
	"status_code":           1,
	"number_of_shards":      10,
	"number_of_replicas":    1,
	"active_primary_shards": 10,
@@ -62,6 +79,7 @@ var v1IndexExpected = map[string]interface{}{

var v2IndexExpected = map[string]interface{}{
	"status":                "red",
	"status_code":           3,
	"number_of_shards":      10,
	"number_of_replicas":    1,
	"active_primary_shards": 0,
@@ -489,6 +507,100 @@ const nodeStatsResponse = `
}
`

const nodeStatsResponseJVMProcess = `
{
  "cluster_name": "es-testcluster",
  "nodes": {
    "SDFsfSDFsdfFSDSDfSFDSDF": {
      "timestamp": 1436365550135,
      "name": "test.host.com",
      "transport_address": "inet[/127.0.0.1:9300]",
      "host": "test",
      "ip": [
        "inet[/127.0.0.1:9300]",
        "NONE"
      ],
      "attributes": {
        "master": "true"
      },
      "process": {
        "timestamp": 1436460392945,
        "open_file_descriptors": 160,
        "cpu": {
          "percent": 2,
          "sys_in_millis": 1870,
          "user_in_millis": 13610,
          "total_in_millis": 15480
        },
        "mem": {
          "total_virtual_in_bytes": 4747890688
        }
      },
      "jvm": {
        "timestamp": 1436460392945,
        "uptime_in_millis": 202245,
        "mem": {
          "heap_used_in_bytes": 52709568,
          "heap_used_percent": 5,
          "heap_committed_in_bytes": 259522560,
          "heap_max_in_bytes": 1038876672,
          "non_heap_used_in_bytes": 39634576,
          "non_heap_committed_in_bytes": 40841216,
          "pools": {
            "young": {
              "used_in_bytes": 32685760,
              "max_in_bytes": 279183360,
              "peak_used_in_bytes": 71630848,
              "peak_max_in_bytes": 279183360
            },
            "survivor": {
              "used_in_bytes": 8912880,
              "max_in_bytes": 34865152,
              "peak_used_in_bytes": 8912888,
              "peak_max_in_bytes": 34865152
            },
            "old": {
              "used_in_bytes": 11110928,
              "max_in_bytes": 724828160,
              "peak_used_in_bytes": 14354608,
              "peak_max_in_bytes": 724828160
            }
          }
        },
        "threads": {
          "count": 44,
          "peak_count": 45
        },
        "gc": {
          "collectors": {
            "young": {
              "collection_count": 2,
              "collection_time_in_millis": 98
            },
            "old": {
              "collection_count": 1,
              "collection_time_in_millis": 24
            }
          }
        },
        "buffer_pools": {
          "direct": {
            "count": 40,
            "used_in_bytes": 6304239,
            "total_capacity_in_bytes": 6304239
          },
          "mapped": {
            "count": 0,
            "used_in_bytes": 0,
            "total_capacity_in_bytes": 0
          }
        }
      }
    }
  }
}
`

var nodestatsIndicesExpected = map[string]interface{}{
	"id_cache_memory_size_in_bytes": float64(0),
	"completion_size_in_bytes":      float64(0),
@@ -1,175 +1,57 @@
# Exec Input Plugin

Please also see: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md)
The `exec` plugin executes the `commands` on every interval and parses metrics from
their output in any one of the accepted [Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

### Example 1 - JSON
This plugin can be used to poll for custom metrics from any source.

#### Configuration

In this example a script called ```/tmp/test.sh```, a script called ```/tmp/test2.sh```, and
all scripts matching the glob pattern ```/tmp/collect_*.sh``` are configured for ```[[inputs.exec]]```
in JSON format. Glob patterns are matched on every run, so adding new scripts that match the pattern
will cause them to be picked up immediately.
### Configuration:

```toml
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
  # Shell/commands array
  # Full command line to executable with parameters, or a glob pattern to run all matching files.
  commands = ["/tmp/test.sh", "/tmp/test2.sh", "/tmp/collect_*.sh"]
  ## Commands array
  commands = [
    "/tmp/test.sh",
    "/usr/bin/mycollector --foo=bar",
    "/tmp/collect_*.sh"
  ]

  ## Timeout for each command to complete.
  timeout = "5s"

  # Data format to consume.
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "json"

  # measurement name suffix (for separating different commands)
  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"
```

Other options for modifying the measurement names are:

```
name_prefix = "prefix_"
```

Let's say that we have the above configuration, and mycollector outputs the
following JSON:

```json
{
    "a": 0.5,
    "b": {
        "c": 0.1,
        "d": 5
    }
}
```

The collected metrics will be stored as fields under the measurement
"exec_mycollector":

```
exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567
```
If using JSON, only numeric values are parsed and turned into floats. Booleans
and strings will be ignored.

### Example 2 - Influx Line-Protocol

In this example an application called ```/usr/bin/line_protocol_collector```
and a script called ```/tmp/test2.sh``` are configured for ```[[inputs.exec]]```
in influx line-protocol format.

#### Configuration

```toml
[[inputs.exec]]
  # Shell/commands array
  # compatible with old version
  # we can still use the old command configuration
  # command = "/usr/bin/line_protocol_collector"
  commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"]

  ## Timeout for each command to complete.
  timeout = "5s"

  # Data format to consume.
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

The line_protocol_collector application outputs the following line protocol:
Glob patterns in the `command` option are matched on every run, so adding new
scripts that match the pattern will cause them to be picked up immediately.

```
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
### Example:

This script produces static values; since no timestamp is specified, the values are recorded at the current time.
```sh
#!/bin/sh
echo 'example,tag1=a,tag2=b i=42i,j=43i,k=44i'
```

You will get data in InfluxDB exactly as it is defined above:
tags are cpu=cpuN, host=foo, and datacenter=us-east with fields usage_idle
and usage_busy. They will receive a timestamp at collection time.
Each line must end in \n, just as the Influx line protocol does.


### Example 3 - Graphite

We can also change the data_format to "graphite" to use metrics-collecting scripts that produce graphite-compatible output, such as:

* Nagios [Metrics Plugins](https://exchange.nagios.org/directory/Plugins)
* Sensu [Metrics Plugins](https://github.com/sensu-plugins)

In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format.

#### Configuration

It can be paired with the following configuration and will be run at the `interval` of the agent.
```toml
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
  # Shell/commands array
  commands = ["/tmp/test.sh","/tmp/test2.sh"]

  ## Timeout for each command to complete.
  commands = ["sh /tmp/test.sh"]
  timeout = "5s"

  # Data format to consume.
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "graphite"

  # measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Below configuration will be used for data_format = "graphite", can be ignored for other data_format
  ## If matching multiple measurement files, this string will be used to join the matched values.
  separator = "."

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template and separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support the following formats:
  ## 1. filter + template
  ## 2. filter + template + extra tag
  ## 3. filter + template with field key
  ## 4. default template
  templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=us-west,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
  ]
```
Graphite messages are in this format:

```
metric_path value timestamp\n
data_format = "influx"
```

__metric_path__ is the metric namespace that you want to populate.
### Common Issues:

__value__ is the value that you want to assign to the metric at this time.
#### Q: My script works when I run it by hand, but not when Telegraf is running as a service.

__timestamp__ is the unix epoch time.

And test.sh/test2.sh will output:

```
sensu.metric.net.server0.eth0.rx_packets 461295119435 1444234982
sensu.metric.net.server0.eth0.tx_bytes 1093086493388480 1444234982
sensu.metric.net.server0.eth0.rx_bytes 1015633926034834 1444234982
sensu.metric.net.server0.eth0.tx_errors 0 1444234982
sensu.metric.net.server0.eth0.rx_errors 0 1444234982
sensu.metric.net.server0.eth0.tx_dropped 0 1444234982
sensu.metric.net.server0.eth0.rx_dropped 0 1444234982
```
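
As a worked illustration (not from the original README, and assuming the usual graphite parser behavior): none of the filtered templates above match these dotted names, so they fall through to the default `measurement*` template, which joins every path part into the measurement name using `separator` and stores the number in a `value` field, roughly:

```
sensu.metric.net.server0.eth0.rx_packets 461295119435 1444234982
=> sensu.metric.net.server0.eth0.rx_packets value=461295119435
```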

The templates configuration will be used to parse the graphite metrics and map them onto the influxdb/opentsdb tagging store engines.

For more detailed information about templates, please refer to [The graphite Input](https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md).
This may be related to the Telegraf service running as a different user. The
official packages run Telegraf as the `telegraf` user and group on Linux
systems.
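
One quick check (assuming the stock packaging) is to run the script as the `telegraf` user and see whether permissions or environment differ:

```sh
sudo -u telegraf /tmp/test.sh
```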

@@ -1,19 +1,19 @@
# Fail2ban Plugin
# Fail2ban Input Plugin

The fail2ban plugin gathers counts of failed and banned ip addresses from fail2ban.
The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).

This plugin runs the fail2ban-client command, and fail2ban-client requires root access.
You have to grant telegraf permission to run fail2ban-client:
This plugin runs the `fail2ban-client` command which generally requires root access.
Acquiring the required permissions can be done using several methods:

- Run telegraf as root. (deprecated)
- Configure sudo to grant telegraf access to fail2ban-client.
- Use sudo to run fail2ban-client.
- Run telegraf as root. (not recommended)

### Using sudo

You may edit your sudo configuration with the following:

``` sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
```

### Configuration:
@@ -21,10 +21,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
``` toml
# Read metrics from fail2ban.
[[inputs.fail2ban]]
  ## fail2ban-client requires root access.
  ## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
  ## Users must configure sudo to allow the telegraf user to run fail2ban-client with no password.
  ## This plugin runs only "fail2ban-client status".
  ## Use sudo to run fail2ban-client
  use_sudo = false
```

@@ -38,7 +35,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *

- All measurements have the following tags:
  - jail


### Example Output:

```
@@ -55,6 +52,5 @@ Status for the jail: sshd
```

```
$ ./telegraf --config telegraf.conf --input-filter fail2ban --test
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
```

@@ -1,5 +1,3 @@
// +build linux

package fail2ban

import (
@@ -8,9 +6,10 @@ import (
	"os/exec"
	"strings"

	"strconv"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"strconv"
)

var (
@@ -23,10 +22,7 @@ type Fail2ban struct {
}

var sampleConfig = `
  ## fail2ban-client requires root access.
  ## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
  ## Users must configure sudo to allow the telegraf user to run fail2ban-client with no password.
  ## This plugin runs only "fail2ban-client status".
  ## Use sudo to run fail2ban-client
  use_sudo = false
`

@@ -1,3 +0,0 @@
// +build !linux

package fail2ban
@@ -20,6 +20,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
- filestat
    - exists (int, 0 | 1)
    - size_bytes (int, bytes)
    - modification_time (int, unixtime)
    - md5 (optional, string)

### Tags:
@@ -32,6 +33,6 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
```
$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test
* Plugin: filestat, Collection 1
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1461203374493128216
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i 1461203374493199335
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1507218518192154351
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i,modification_time=1507152973123456789i 1507218518192154351
```

@@ -86,6 +86,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
		fileName)
	} else {
		fields["size_bytes"] = fileInfo.Size()
		fields["modification_time"] = fileInfo.ModTime().UnixNano()
	}

	if f.Md5 {
@@ -5,6 +5,8 @@ import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
)
@@ -24,28 +26,19 @@ func TestGatherNoMd5(t *testing.T) {
	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
	require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
	require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))

	tags3 := map[string]string{
		"file": "/non/existant/file",
	}
	fields3 := map[string]interface{}{
		"exists": int64(0),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
	require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0)))
}

func TestGatherExplicitFiles(t *testing.T) {
@@ -64,30 +57,21 @@ func TestGatherExplicitFiles(t *testing.T) {
	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
	require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
	require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
	require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
	require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))

	tags3 := map[string]string{
		"file": "/non/existant/file",
	}
	fields3 := map[string]interface{}{
		"exists": int64(0),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
	require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0)))
}

func TestGatherGlob(t *testing.T) {
@@ -104,22 +88,16 @@ func TestGatherGlob(t *testing.T) {
	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
	require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
	require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
	require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
	require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
}

func TestGatherSuperAsterisk(t *testing.T) {
@@ -136,32 +114,57 @@ func TestGatherSuperAsterisk(t *testing.T) {
	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
	require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
	require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
	require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
	require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))

	tags3 := map[string]string{
		"file": dir + "test.conf",
	}
	fields3 := map[string]interface{}{
		"size_bytes": int64(104),
		"exists":     int64(1),
		"md5_sum":    "5a7e9b77fa25e7bb411dbd17cf403c1f",
	require.True(t, acc.HasPoint("filestat", tags3, "size_bytes", int64(104)))
	require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(1)))
	require.True(t, acc.HasPoint("filestat", tags3, "md5_sum", "5a7e9b77fa25e7bb411dbd17cf403c1f"))
}

func TestModificationTime(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
	fs.Files = []string{
		dir + "log1.log",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)

	acc := testutil.Accumulator{}
	acc.GatherError(fs.Gather)

	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
	require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
	require.True(t, acc.HasInt64Field("filestat", "modification_time"))
}

func TestNoModificationTime(t *testing.T) {
	fs := NewFileStat()
	fs.Files = []string{
		"/non/existant/file",
	}

	acc := testutil.Accumulator{}
	acc.GatherError(fs.Gather)

	tags1 := map[string]string{
		"file": "/non/existant/file",
	}
	require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(0)))
	require.False(t, acc.HasInt64Field("filestat", "modification_time"))
}

func TestGetMd5(t *testing.T) {
@@ -22,11 +22,11 @@ example configuration with the `@id` parameter for the http plugin:
[[inputs.fluentd]]
  ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
  ##
  ## Endpoint:
  ## Endpoint:
  ##   - only one URI is allowed
  ##   - https is not supported
  endpoint = "http://localhost:24220/api/plugins.json"


  ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
  exclude = [
    "monitor_agent",

@@ -18,11 +18,11 @@ const (
sampleConfig = `
  ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
  ##
  ## Endpoint:
  ## Endpoint:
  ##   - only one URI is allowed
  ##   - https is not supported
  endpoint = "http://localhost:24220/api/plugins.json"


  ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
  exclude = [
    "monitor_agent",
@@ -148,15 +148,15 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
	}

	if p.BufferQueueLength != nil {
		tmpFields["buffer_queue_length"] = p.BufferQueueLength
		tmpFields["buffer_queue_length"] = *p.BufferQueueLength

	}
	if p.RetryCount != nil {
		tmpFields["retry_count"] = p.RetryCount
		tmpFields["retry_count"] = *p.RetryCount
	}

	if p.BufferTotalQueuedSize != nil {
		tmpFields["buffer_total_queued_size"] = p.BufferTotalQueuedSize
		tmpFields["buffer_total_queued_size"] = *p.BufferTotalQueuedSize
	}

	if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
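
The new dereferences matter because field values are stored as `interface{}`; handing the accumulator a pointer records the pointer itself rather than the number. A standalone illustration (not the plugin's code):

```go
package main

import "fmt"

func main() {
	v := 3.0
	fields := map[string]interface{}{
		"ptr": &v, // *float64: serializes as an address, not a usable metric value
		"val": v,  // float64: what the output plugins expect
	}
	fmt.Println(fields["ptr"], fields["val"]) // e.g. 0xc0000140a0 3
}
```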

@@ -122,12 +122,6 @@ func Test_parse(t *testing.T) {
}

func Test_Gather(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping Gather function test")
	}

	t.Log("Testing Gather function")

	t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)

	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -157,13 +151,13 @@ func Test_Gather(t *testing.T) {
	assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
	assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
	assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
	assert.Equal(t, expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
	assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])

	assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
	assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
	assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
	assert.Equal(t, expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
	assert.Equal(t, expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
	assert.Equal(t, expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
	assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
	assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
	assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])

}

@@ -1,11 +1,14 @@
# HAproxy Input Plugin
# HAProxy Input Plugin

[HAproxy](http://www.haproxy.org/) input plugin gathers metrics directly from any running HAproxy instance. It can do so by using CSV generated by the HAproxy status page or from admin socket(s).
The [HAProxy](http://www.haproxy.org/) input plugin gathers
[statistics](https://cbonte.github.io/haproxy-dconv/1.9/intro.html#3.3.16)
using the [stats socket](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9.3)
or the [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9) of a HAProxy server.

### Configuration:

```toml
# SampleConfig
# Read metrics of HAProxy, via socket or HTTP stats page
[[inputs.haproxy]]
  ## An array of address to gather stats about. Specify an ip on hostname
  ## with optional port. ie localhost, 10.10.3.33:1936, etc.
@@ -23,7 +26,7 @@
  ## By default, some of the fields are renamed from what haproxy calls them.
  ## Setting this option to true results in the plugin keeping the original
  ## field names.
  # keep_field_names = true
  # keep_field_names = false

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
@@ -33,34 +36,77 @@
  # insecure_skip_verify = false
```

#### `servers`
Server addresses need to explicitly start with 'http' if you wish to use the HAproxy status page. Otherwise, the address will be assumed to be a UNIX socket and any protocol (if present) will be discarded.
#### HAProxy Configuration

For basic authentication you need to add a username and password in the URL: `http://user:password@1.2.3.4/haproxy?stats`.
The following information may be useful when getting started, but please
consult the HAProxy documentation for complete and up to date instructions.

The following examples will all resolve to the same socket:
The [`stats enable`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#4-stats%20enable)
option can be used to add unauthenticated access over HTTP using the default
settings. To enable the unix socket begin by reading about the
[`stats socket`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#3.1-stats%20socket)
option.


#### servers

Server addresses must explicitly start with 'http' if you wish to use the HAProxy
status page. Otherwise, addresses will be assumed to be a UNIX socket and
any protocol (if present) will be discarded.

When using socket names, wildcard expansion is supported so the plugin can gather
stats from multiple sockets at once.

To use HTTP Basic Auth add the username and password in the userinfo section
of the URL: `http://user:password@1.2.3.4/haproxy?stats`. The credentials are
sent via the `Authorization` header and not using the request URL.


#### keep_field_names

By default, some of the fields are renamed from what haproxy calls them.
Setting the `keep_field_names` parameter to `true` will result in the plugin
keeping the original field names.

The following renames are made (a sketch of applying them follows this list):
- `pxname` -> `proxy`
- `svname` -> `sv`
- `act` -> `active_servers`
- `bck` -> `backup_servers`
- `cli_abrt` -> `cli_abort`
- `srv_abrt` -> `srv_abort`
- `hrsp_1xx` -> `http_response.1xx`
- `hrsp_2xx` -> `http_response.2xx`
- `hrsp_3xx` -> `http_response.3xx`
- `hrsp_4xx` -> `http_response.4xx`
- `hrsp_5xx` -> `http_response.5xx`
- `hrsp_other` -> `http_response.other`
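
A minimal sketch of applying such a rename table (the variable and function names here are assumptions for illustration; the plugin's internals may differ):

```go
var fieldRenames = map[string]string{
	"pxname":     "proxy",
	"svname":     "sv",
	"act":        "active_servers",
	"bck":        "backup_servers",
	"cli_abrt":   "cli_abort",
	"srv_abrt":   "srv_abort",
	"hrsp_1xx":   "http_response.1xx",
	"hrsp_2xx":   "http_response.2xx",
	"hrsp_3xx":   "http_response.3xx",
	"hrsp_4xx":   "http_response.4xx",
	"hrsp_5xx":   "http_response.5xx",
	"hrsp_other": "http_response.other",
}

// fieldName returns the output field name, honoring keep_field_names.
func fieldName(csvName string, keepFieldNames bool) string {
	if keepFieldNames {
		return csvName
	}
	if renamed, ok := fieldRenames[csvName]; ok {
		return renamed
	}
	return csvName
}
```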

### Metrics:

For more details about collected metrics reference the [HAProxy CSV format
documentation](https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1).

- haproxy
  - tags:
    - `server` - address of the server data was gathered from
    - `proxy` - proxy name
    - `sv` - service name
    - `type` - proxy session type
  - fields:
    - `status` (string)
    - `check_status` (string)
    - `last_chk` (string)
    - `mode` (string)
    - `tracked` (string)
    - `agent_status` (string)
    - `last_agt` (string)
    - `addr` (string)
    - `cookie` (string)
    - `lastsess` (int)
    - **all other stats** (int)

### Example Output:
```
socket:/var/run/haproxy.sock
unix:/var/run/haproxy.sock
foo:/var/run/haproxy.sock
/var/run/haproxy.sock
haproxy,server=/run/haproxy/admin.sock,proxy=public,sv=FRONTEND,type=frontend http_response.other=0i,req_rate_max=1i,comp_byp=0i,status="OPEN",rate_lim=0i,dses=0i,req_rate=0i,comp_rsp=0i,bout=9287i,comp_in=0i,mode="http",smax=1i,slim=2000i,http_response.1xx=0i,conn_rate=0i,dreq=0i,ereq=0i,iid=2i,rate_max=1i,http_response.2xx=1i,comp_out=0i,intercepted=1i,stot=2i,pid=1i,http_response.5xx=1i,http_response.3xx=0i,http_response.4xx=0i,conn_rate_max=1i,conn_tot=2i,dcon=0i,bin=294i,rate=0i,sid=0i,req_tot=2i,scur=0i,dresp=0i 1513293519000000000
```

When using socket names, wildcard expansion is supported so the plugin can gather stats from multiple sockets at once.

If no servers are specified, then the default address of `http://127.0.0.1:1936/haproxy?stats` will be used.

#### `keep_field_names`
By default, some of the fields are renamed from what haproxy calls them. Setting the `keep_field_names` parameter to `true` will result in the plugin keeping the original field names.

### Measurements & Fields:

The plugin will gather the measurements outlined in the [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.7/management.html#9.1).

### Tags:

- All measurements have the following tags:
  - server - address of the server data is gathered from
  - proxy - proxy name as reported in `pxname`
  - sv - service name as reported in `svname`


@@ -54,7 +54,7 @@ var sampleConfig = `
  ## By default, some of the fields are renamed from what haproxy calls them.
  ## Setting this option to true results in the plugin keeping the original
  ## field names.
  # keep_field_names = true
  # keep_field_names = false

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"

@@ -1,5 +1,3 @@
// +build linux

package hddtemp

import (

@@ -1,3 +0,0 @@
// +build !linux

package hddtemp
@@ -8,6 +8,10 @@ The `/write` endpoint supports the `precision` query parameter and can be set to

When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.

Enable TLS by specifying the file names of a service TLS certificate and key.

Enable mutually authenticated TLS, and authorize client connections by their signing certificate authority, by including a list of allowed CA certificate file names in `tls_allowed_cacerts`.

See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).

**Example:**
@@ -28,4 +32,11 @@ This is a sample configuration for the plugin.
  ## timeouts
  read_timeout = "10s"
  write_timeout = "10s"

  ## HTTPS
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"

  ## MTLS
  tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
```
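
To exercise the mutual-TLS path end to end, a client must present a certificate signed by one of the allowed CAs. A minimal Go sketch (the file names and listener address are assumptions for illustration):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Trust the CA that signed the listener's service certificate.
	caPEM, err := ioutil.ReadFile("/etc/telegraf/clientca.pem")
	if err != nil {
		log.Fatal(err)
	}
	roots := x509.NewCertPool()
	roots.AppendCertsFromPEM(caPEM)

	// Present a client certificate signed by an allowed CA.
	cert, err := tls.LoadX509KeyPair("client.pem", "client-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{
			RootCAs:      roots,
			Certificates: []tls.Certificate{cert},
		},
	}}

	// Write one metric in line protocol to the listener.
	resp, err := client.Post("https://localhost:8186/write",
		"text/plain", strings.NewReader("cpu value=42\n"))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```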

@@ -3,7 +3,10 @@ package http_listener
import (
	"bytes"
	"compress/gzip"
	"crypto/tls"
	"crypto/x509"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/http"
@@ -37,6 +40,10 @@ type HTTPListener struct {
	MaxLineSize int
	Port        int

	TlsAllowedCacerts []string
	TlsCert           string
	TlsKey            string

	mu sync.Mutex
	wg sync.WaitGroup

@@ -75,6 +82,14 @@ const sampleConfig = `
  ## Maximum line size allowed to be sent in bytes.
  ## 0 means to use the default of 65536 bytes (64 kibibytes)
  max_line_size = 0

  ## Set one or more allowed client CA certificate file names to
  ## enable mutually authenticated TLS connections
  tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

  ## Add service certificate and key
  tls_cert = "/etc/telegraf/cert.pem"
  tls_key = "/etc/telegraf/key.pem"
`

func (h *HTTPListener) SampleConfig() string {
@@ -117,10 +132,33 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
		h.MaxLineSize = DEFAULT_MAX_LINE_SIZE
	}

	if h.ReadTimeout.Duration < time.Second {
		h.ReadTimeout.Duration = time.Second * 10
	}
	if h.WriteTimeout.Duration < time.Second {
		h.WriteTimeout.Duration = time.Second * 10
	}

	h.acc = acc
	h.pool = NewPool(200, h.MaxLineSize)

	var listener, err = net.Listen("tcp", h.ServiceAddress)
	tlsConf := h.getTLSConfig()

	server := &http.Server{
		Addr:         h.ServiceAddress,
		Handler:      h,
		ReadTimeout:  h.ReadTimeout.Duration,
		WriteTimeout: h.WriteTimeout.Duration,
		TLSConfig:    tlsConf,
	}

	var err error
	var listener net.Listener
	if tlsConf != nil {
		listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
	} else {
		listener, err = net.Listen("tcp", h.ServiceAddress)
	}
	if err != nil {
		return err
	}
@@ -130,7 +168,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
	h.wg.Add(1)
	go func() {
		defer h.wg.Done()
		h.httpListen()
		server.Serve(h.listener)
	}()

	log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress)
@@ -149,27 +187,6 @@ func (h *HTTPListener) Stop() {
	log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress)
}

// httpListen sets up an http.Server and calls server.Serve.
// like server.Serve, httpListen will always return a non-nil error, for this
// reason, the error returned should probably be ignored.
// see https://golang.org/pkg/net/http/#Server.Serve
func (h *HTTPListener) httpListen() error {
	if h.ReadTimeout.Duration < time.Second {
		h.ReadTimeout.Duration = time.Second * 10
	}
	if h.WriteTimeout.Duration < time.Second {
		h.WriteTimeout.Duration = time.Second * 10
	}

	var server = http.Server{
		Handler:      h,
		ReadTimeout:  h.ReadTimeout.Duration,
		WriteTimeout: h.WriteTimeout.Duration,
	}

	return server.Serve(h.listener)
}

func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	h.RequestsRecv.Incr(1)
	defer h.RequestsServed.Incr(1)
@@ -327,6 +344,38 @@ func badRequest(res http.ResponseWriter) {
	res.Write([]byte(`{"error":"http: bad request"}`))
}

func (h *HTTPListener) getTLSConfig() *tls.Config {
	tlsConf := &tls.Config{
		InsecureSkipVerify: false,
		Renegotiation:      tls.RenegotiateNever,
	}

	if len(h.TlsCert) == 0 || len(h.TlsKey) == 0 {
		return nil
	}

	cert, err := tls.LoadX509KeyPair(h.TlsCert, h.TlsKey)
	if err != nil {
		return nil
	}
	tlsConf.Certificates = []tls.Certificate{cert}

	if h.TlsAllowedCacerts != nil {
		tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
		clientPool := x509.NewCertPool()
		for _, ca := range h.TlsAllowedCacerts {
			c, err := ioutil.ReadFile(ca)
			if err != nil {
				continue
			}
			clientPool.AppendCertsFromPEM(c)
		}
		tlsConf.ClientCAs = clientPool
	}

	return tlsConf
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("http_listener", func() telegraf.Input {
|
||||
return &HTTPListener{
|
||||
|
||||
@@ -2,6 +2,9 @@ package http_listener
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -29,18 +32,166 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
	badMsg = "blahblahblah: 42\n"

	emptyMsg = ""

+	serviceRootPEM = `-----BEGIN CERTIFICATE-----
+MIIBxzCCATCgAwIBAgIJAJb7HqN2BzWWMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV
+BAMMC1RlbGVncmFmIENBMB4XDTE3MTEwNDA0MzEwN1oXDTI3MTEwMjA0MzEwN1ow
+FjEUMBIGA1UEAwwLVGVsZWdyYWYgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ
+AoGBANbkUkK6JQC3rbLcXhLJTS9SX6uXyFwl7bUfpAN5Hm5EqfvG3PnLrogfTGLr
+Tq5CRAu/gbbdcMoL9TLv/aaDVnrpV0FslKhqYmkOgT28bdmA7Qtr539aQpMKCfcW
+WCnoMcBD5u5h9MsRqpdq+0Mjlsf1H2hSf07jHk5R1T4l8RMXAgMBAAGjHTAbMAwG
+A1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4GBANSrwvpU
+t8ihIhpHqgJZ34DM92CZZ3ZHmH/KyqlnuGzjjpnVZiXVrLDTOzrA0ziVhmefY29w
+roHjENbFm54HW97ogxeURuO8HRHIVh2U0rkyVxOfGZiUdINHqsZdSnDY07bzCtSr
+Z/KsfWXM5llD1Ig1FyBHpKjyUvfzr73sjm/4
+-----END CERTIFICATE-----`
+	serviceCertPEM = `-----BEGIN CERTIFICATE-----
+MIIBzzCCATigAwIBAgIBATANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
+Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBQxEjAQBgNV
+BAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsJRss1af
+XKrcIjQoAp2kdJIpT2Ya+MRQXJ18b0PP7szh2lisY11kd/HCkd4D4efuIkpszHaN
+xwyTOZLOoplxp6fizzgOYjXsJ6SzbO1MQNmq8Ch/+uKiGgFwLX+YxOOsGSDIHNhF
+vcBi93cQtCWPBFz6QRQf9yfIAA5KKxUfJcMCAwEAAaMvMC0wCQYDVR0TBAIwADAL
+BgNVHQ8EBAMCBSAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQAD
+gYEAiC3WI4y9vfYz53gw7FKnNK7BBdwRc43x7Pd+5J/cclWyUZPdmcj1UNmv/3rj
+2qcMmX06UdgPoHppzNAJePvMVk0vjMBUe9MmYlafMz0h4ma/it5iuldXwmejFcdL
+6wWQp7gVTileCEmq9sNvfQN1FmT3EWf4IMdO2MNat/1If0g=
+-----END CERTIFICATE-----`
+	serviceKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
+XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
+4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
+AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
+I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
+bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
+hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
+dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
+PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
+EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
+BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
+Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
+P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
+-----END RSA PRIVATE KEY-----`
+	clientRootPEM = serviceRootPEM
+	clientCertPEM = `-----BEGIN CERTIFICATE-----
+MIIBzjCCATegAwIBAgIBAjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
+Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBMxETAPBgNV
+BAMMCHRlbGVncmFmMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDP2IMqyOqI
+sJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqMpBUTj3vLlOzsHfVVot1WRqc6
+3esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4UkJBWim8ArSbFqnZjcR19G3tG
+LUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQABoy8wLTAJBgNVHRMEAjAAMAsG
+A1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOB
+gQCHxMk38XNxL9nPFBYo3JqITJCFswu6/NLHwDBXCuZKl53rUuFWduiO+1OuScKQ
+sQ79W0jHsWRKGOUFrF5/Gdnh8AlkVaITVlcmhdAOFCEbeGpeEvLuuK6grckPitxy
+bRF5oM4TCLKKAha60Ir41rk2bomZM9+NZu+Bm+csDqCoxQ==
+-----END CERTIFICATE-----`
+	clientKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
+MIICXAIBAAKBgQDP2IMqyOqIsJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqM
+pBUTj3vLlOzsHfVVot1WRqc63esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4U
+kJBWim8ArSbFqnZjcR19G3tGLUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQAB
+AoGAFzb/r4+xYoMXEfgq5ZvXXTCY5cVNpR6+jCsqqYODPnn9XRLeCsdo8z5bfWms
+7NKLzHzca/6IPzL6Rf3vOxFq1YyIZfYVHH+d63/9blAm3Iajjp1W2yW5aj9BJjTb
+nm6F0RfuW/SjrZ9IXxTZhSpCklPmUzVZpzvwV3KGeVTVCEECQQDoavCeOwLuqDpt
+0aM9GMFUpOU7kLPDuicSwCDaTae4kN2rS17Zki41YXe8A8+509IEN7mK09Vq9HxY
+SX6EmV1FAkEA5O9QcCHEa8P12EmUC8oqD2bjq6o7JjUIRlKinwZTlooMJYZw98gA
+FVSngTUvLVCVIvSdjldXPOGgfYiccTZrFwJAfHS3gKOtAEuJbkEyHodhD4h1UB4+
+hPLr9Xh4ny2yQH0ilpV3px5GLEOTMFUCKUoqTiPg8VxaDjn5U/WXED5n2QJAR4J1
+NsFlcGACj+/TvacFYlA6N2nyFeokzoqLX28Ddxdh2erXqJ4hYIhT1ik9tkLggs2z
+1T1084BquCuO6lIcOwJBALX4xChoMUF9k0IxSQzlz//seQYDkQNsE7y9IgAOXkzp
+RaR4pzgPbnKj7atG+2dBnffWfE+1Mcy0INDAO6WxPg0=
+-----END RSA PRIVATE KEY-----`
)

+var (
+	initClient           sync.Once
+	client               *http.Client
+	initServiceCertFiles sync.Once
+	allowedCAFiles       []string
+	serviceCAFiles       []string
+	serviceCertFile      string
+	serviceKeyFile       string
+)
+
func newTestHTTPListener() *HTTPListener {
	listener := &HTTPListener{
-		ServiceAddress: ":0",
+		ServiceAddress: "localhost:0",
	}
	return listener
}

-func createURL(listener *HTTPListener, path string, rawquery string) string {
+func newTestHTTPSListener() *HTTPListener {
+	initServiceCertFiles.Do(func() {
+		acaf, err := ioutil.TempFile("", "allowedCAFile.crt")
+		if err != nil {
+			panic(err)
+		}
+		defer acaf.Close()
+		_, err = io.Copy(acaf, bytes.NewReader([]byte(clientRootPEM)))
+		allowedCAFiles = []string{acaf.Name()}
+
+		scaf, err := ioutil.TempFile("", "serviceCAFile.crt")
+		if err != nil {
+			panic(err)
+		}
+		defer scaf.Close()
+		_, err = io.Copy(scaf, bytes.NewReader([]byte(serviceRootPEM)))
+		serviceCAFiles = []string{scaf.Name()}
+
+		scf, err := ioutil.TempFile("", "serviceCertFile.crt")
+		if err != nil {
+			panic(err)
+		}
+		defer scf.Close()
+		_, err = io.Copy(scf, bytes.NewReader([]byte(serviceCertPEM)))
+		serviceCertFile = scf.Name()
+
+		skf, err := ioutil.TempFile("", "serviceKeyFile.crt")
+		if err != nil {
+			panic(err)
+		}
+		defer skf.Close()
+		_, err = io.Copy(skf, bytes.NewReader([]byte(serviceKeyPEM)))
+		serviceKeyFile = skf.Name()
+	})
+
+	listener := &HTTPListener{
+		ServiceAddress:    "localhost:0",
+		TlsAllowedCacerts: allowedCAFiles,
+		TlsCert:           serviceCertFile,
+		TlsKey:            serviceKeyFile,
+	}
+
+	return listener
+}
+
+func getHTTPSClient() *http.Client {
+	initClient.Do(func() {
+		cas := x509.NewCertPool()
+		cas.AppendCertsFromPEM([]byte(serviceRootPEM))
+		clientCert, err := tls.X509KeyPair([]byte(clientCertPEM), []byte(clientKeyPEM))
+		if err != nil {
+			panic(err)
+		}
+		client = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: &tls.Config{
+					RootCAs:            cas,
+					Certificates:       []tls.Certificate{clientCert},
+					MinVersion:         tls.VersionTLS12,
+					MaxVersion:         tls.VersionTLS12,
+					CipherSuites:       []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
+					Renegotiation:      tls.RenegotiateNever,
+					InsecureSkipVerify: false,
+				},
+			},
+		}
+	})
+	return client
+}
+
+func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string {
	u := url.URL{
-		Scheme:   "http",
+		Scheme:   scheme,
		Host:     "localhost:" + strconv.Itoa(listener.Port),
		Path:     path,
		RawQuery: rawquery,
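Review note: the PEM literals above are written out to temp files exactly once per test binary, because the plugin config takes file paths rather than in-memory certificates. A condensed, self-contained sketch of that sync.Once fixture pattern (the helper name and PEM placeholder are illustrative; as in the original, only the TempFile error is checked):

package main

import (
	"fmt"
	"io/ioutil"
	"sync"
)

const certPEM = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"

var (
	once     sync.Once
	certFile string
)

// fixtureCertFile lazily materializes certPEM as a temp file on first call
// and returns the same path on every later call.
func fixtureCertFile() string {
	once.Do(func() {
		f, err := ioutil.TempFile("", "serviceCertFile-*.crt")
		if err != nil {
			panic(err)
		}
		defer f.Close()
		if _, err := f.WriteString(certPEM); err != nil {
			panic(err)
		}
		certFile = f.Name()
	})
	return certFile
}

func main() {
	fmt.Println(fixtureCertFile())
}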
@@ -48,6 +199,45 @@ func createURL(listener *HTTPListener, path string, rawquery string) string {
	return u.String()
}

+func TestWriteHTTPSNoClientAuth(t *testing.T) {
+	listener := newTestHTTPSListener()
+	listener.TlsAllowedCacerts = nil
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	cas := x509.NewCertPool()
+	cas.AppendCertsFromPEM([]byte(serviceRootPEM))
+	noClientAuthClient := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{
+				RootCAs: cas,
+			},
+		},
+	}
+
+	// post single message to listener
+	resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
+	require.NoError(t, err)
+	resp.Body.Close()
+	require.EqualValues(t, 204, resp.StatusCode)
+}
+
+func TestWriteHTTPSWithClientAuth(t *testing.T) {
+	listener := newTestHTTPSListener()
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	// post single message to listener
+	resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
+	require.NoError(t, err)
+	resp.Body.Close()
+	require.EqualValues(t, 204, resp.StatusCode)
+}
+
func TestWriteHTTP(t *testing.T) {
	listener := newTestHTTPListener()

@@ -56,8 +246,9 @@ func TestWriteHTTP(t *testing.T) {
	defer listener.Stop()

	// post single message to listener
-	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
+	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)

	acc.Wait(1)
@@ -67,8 +258,9 @@ func TestWriteHTTP(t *testing.T) {
	)

	// post multiple message to listener
-	resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
+	resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)

	acc.Wait(2)
@@ -82,8 +274,9 @@ func TestWriteHTTP(t *testing.T) {
	}

	// Post a gigantic metric to the listener and verify that an error is returned:
-	resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
+	resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 400, resp.StatusCode)

	acc.Wait(3)
@@ -102,8 +295,9 @@ func TestWriteHTTPNoNewline(t *testing.T) {
	defer listener.Stop()

	// post single message to listener
-	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
+	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)

	acc.Wait(1)
@@ -115,7 +309,7 @@ func TestWriteHTTPNoNewline(t *testing.T) {

func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
	listener := &HTTPListener{
-		ServiceAddress: ":0",
+		ServiceAddress: "localhost:0",
		MaxLineSize:    128 * 1000,
	}

@@ -124,14 +318,15 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
	defer listener.Stop()

	// Post a gigantic metric to the listener and verify that it writes OK this time:
-	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
+	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)
}

func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
	listener := &HTTPListener{
-		ServiceAddress: ":0",
+		ServiceAddress: "localhost:0",
		MaxBodySize:    4096,
	}

@@ -139,14 +334,15 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
	require.NoError(t, listener.Start(acc))
	defer listener.Stop()

-	resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
+	resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 413, resp.StatusCode)
}

func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
	listener := &HTTPListener{
-		ServiceAddress: ":0",
+		ServiceAddress: "localhost:0",
		MaxLineSize:    70,
	}

@@ -154,8 +350,9 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
	require.NoError(t, listener.Start(acc))
	defer listener.Stop()

-	resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
+	resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)

	hostTags := []string{"server02", "server03",
@@ -171,7 +368,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {

func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
	listener := &HTTPListener{
-		ServiceAddress: ":0",
+		ServiceAddress: "localhost:0",
		MaxLineSize:    100,
	}

@@ -179,8 +376,9 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
	require.NoError(t, listener.Start(acc))
	defer listener.Stop()

-	resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
+	resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 400, resp.StatusCode)

	hostTags := []string{"server02", "server03",
@@ -205,7 +403,7 @@ func TestWriteHTTPGzippedData(t *testing.T) {
	data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
	require.NoError(t, err)

-	req, err := http.NewRequest("POST", createURL(listener, "/write", ""), bytes.NewBuffer(data))
+	req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data))
	require.NoError(t, err)
	req.Header.Set("Content-Encoding", "gzip")

@@ -240,8 +438,9 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
		go func(innerwg *sync.WaitGroup) {
			defer innerwg.Done()
			for i := 0; i < 500; i++ {
-				resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
+				resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
				require.NoError(t, err)
+				resp.Body.Close()
				require.EqualValues(t, 204, resp.StatusCode)
			}
		}(&wg)
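Review note: in the high-traffic test above, require.NoError runs inside spawned goroutines, but testify's require aborts via t.FailNow, which the testing package only guarantees to work from the goroutine running the test. A hedged sketch of a channel-based alternative that keeps all assertions on the main goroutine (httptest stands in for the plugin's listener; this is not part of the diff):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"sync"
)

func main() {
	// A stand-in server that answers 204 like the /write endpoint.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	defer srv.Close()

	const writers, posts = 10, 50
	errs := make(chan error, writers*posts)
	var wg sync.WaitGroup
	for i := 0; i < writers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < posts; j++ {
				resp, err := http.Post(srv.URL+"/write", "", strings.NewReader("cpu value=1\n"))
				if err == nil {
					resp.Body.Close()
				}
				errs <- err // report back instead of asserting here
			}
		}()
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		if err != nil {
			fmt.Println("write failed:", err) // in a real test: require.NoError(t, err)
		}
	}
}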
@@ -262,8 +461,9 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
	defer listener.Stop()

	// post single message to listener
-	resp, err := http.Post(createURL(listener, "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
+	resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 404, resp.StatusCode)
}

@@ -275,8 +475,9 @@ func TestWriteHTTPInvalid(t *testing.T) {
	defer listener.Stop()

	// post single message to listener
-	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
+	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 400, resp.StatusCode)
}

@@ -288,8 +489,9 @@ func TestWriteHTTPEmpty(t *testing.T) {
	defer listener.Stop()

	// post single message to listener
-	resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
+	resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)
}

@@ -302,13 +504,14 @@ func TestQueryAndPingHTTP(t *testing.T) {

	// post query to listener
	resp, err := http.Post(
-		createURL(listener, "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
+		createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
	require.NoError(t, err)
	require.EqualValues(t, 200, resp.StatusCode)

	// post ping to listener
-	resp, err = http.Post(createURL(listener, "/ping", ""), "", nil)
+	resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil)
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)
}

@@ -321,8 +524,9 @@ func TestWriteWithPrecision(t *testing.T) {

	msg := "xyzzy value=42 1422568543\n"
	resp, err := http.Post(
-		createURL(listener, "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
+		createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
	require.NoError(t, err)
+	resp.Body.Close()
	require.EqualValues(t, 204, resp.StatusCode)

	acc.Wait(1)