Compare commits
291 Commits
1.6.0-rc3
...
bugfix/433
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6f4bd9ad82 | ||
|
|
4ea618ea26 | ||
|
|
a7545e6cac | ||
|
|
58e815fdd1 | ||
|
|
06682c6350 | ||
|
|
839ca60b0e | ||
|
|
9fe90d71f4 | ||
|
|
2ee374cf50 | ||
|
|
220e6c5361 | ||
|
|
c7cfc2ec39 | ||
|
|
9bc63c2f7a | ||
|
|
56ea2eb57b | ||
|
|
210dfcee83 | ||
|
|
b7a02c73b3 | ||
|
|
73e2e6afc5 | ||
|
|
b2586a7eaf | ||
|
|
abfbf4f4f2 | ||
|
|
85eacf268b | ||
|
|
1a781a5851 | ||
|
|
ed2bc1151b | ||
|
|
b2e972cd81 | ||
|
|
54056f3808 | ||
|
|
5aa199e2b3 | ||
|
|
9bd5e10133 | ||
|
|
beaef8e3da | ||
|
|
a10262c5d6 | ||
|
|
8bf18d6ac7 | ||
|
|
23523ffd10 | ||
|
|
523d761f34 | ||
|
|
3f28add025 | ||
|
|
ee6e4b0afd | ||
|
|
16454e25ba | ||
|
|
2a1feb6db9 | ||
|
|
61e197d254 | ||
|
|
1bd41ef3ce | ||
|
|
d7c756e9ff | ||
|
|
39206677f8 | ||
|
|
b66eb2fec7 | ||
|
|
3ad10283ef | ||
|
|
84e9a5c97e | ||
|
|
c98b58dacc | ||
|
|
98d86df797 | ||
|
|
4e9e57e210 | ||
|
|
7781507c01 | ||
|
|
8482c40a91 | ||
|
|
0dda9b8319 | ||
|
|
4e69d10ff7 | ||
|
|
f689463e8e | ||
|
|
f217d12de5 | ||
|
|
886795063e | ||
|
|
30dc95fa78 | ||
|
|
40fac0a9b4 | ||
|
|
36df4c5ae5 | ||
|
|
70ffed3a4d | ||
|
|
bf59bcf721 | ||
|
|
a789f97feb | ||
|
|
d2e00a3205 | ||
|
|
daddd8bbac | ||
|
|
d16530677d | ||
|
|
1ea18ffd0a | ||
|
|
dd2223ae1c | ||
|
|
90eebd88af | ||
|
|
d2e729dfaf | ||
|
|
f64d612294 | ||
|
|
76ec90e66d | ||
|
|
1690f36b09 | ||
|
|
87f711a19a | ||
|
|
58895d6b03 | ||
|
|
cd9ad77038 | ||
|
|
8563238059 | ||
|
|
11335f5fee | ||
|
|
acba20af1a | ||
|
|
229b6bd944 | ||
|
|
7fe6e2f5ae | ||
|
|
a4214abfc4 | ||
|
|
5f0cbd1255 | ||
|
|
3ef4dff4ec | ||
|
|
dfe7b5eec2 | ||
|
|
92a8f795f5 | ||
|
|
b1d77ade55 | ||
|
|
7103077b3f | ||
|
|
7332ce0e95 | ||
|
|
2be32f0a80 | ||
|
|
701e157ef0 | ||
|
|
eb94bb29fd | ||
|
|
449bd5c3b9 | ||
|
|
96abff0660 | ||
|
|
9eab3572ff | ||
|
|
be8b87000c | ||
|
|
ff93c3c326 | ||
|
|
df1fe7a2b4 | ||
|
|
a04cfee349 | ||
|
|
da6ad34fc8 | ||
|
|
179bcfdcbb | ||
|
|
e3f1d28908 | ||
|
|
fcea745e99 | ||
|
|
90bcb5bc3c | ||
|
|
312116c101 | ||
|
|
2cc2913d81 | ||
|
|
b556eb8b2f | ||
|
|
8b28f40cc0 | ||
|
|
cabee8f8e0 | ||
|
|
e0071f365a | ||
|
|
5ae2b02f5d | ||
|
|
59f0a5354f | ||
|
|
c8b68430f0 | ||
|
|
1ac64596bf | ||
|
|
b78984554c | ||
|
|
2def31bc3d | ||
|
|
010e4f5b0b | ||
|
|
ce3b367dac | ||
|
|
f3f753310f | ||
|
|
50d721ae05 | ||
|
|
14d97e5416 | ||
|
|
44e3b9bee3 | ||
|
|
7f93911f43 | ||
|
|
f417cec036 | ||
|
|
dbd02ebb74 | ||
|
|
fbf09409e9 | ||
|
|
54728f54c6 | ||
|
|
83b03ecb18 | ||
|
|
8826cdc423 | ||
|
|
697d8ceae5 | ||
|
|
089eb2c8d6 | ||
|
|
fd22b1ef1f | ||
|
|
a86c2d54ad | ||
|
|
daacfc6368 | ||
|
|
795c8057ad | ||
|
|
6a21e23bcc | ||
|
|
0d21296aed | ||
|
|
7660315e45 | ||
|
|
703be4f124 | ||
|
|
ccc4a85fd6 | ||
|
|
3be9cad309 | ||
|
|
45c1a45f4a | ||
|
|
1a407ceaf9 | ||
|
|
61a0e500a8 | ||
|
|
7f46aafcd6 | ||
|
|
3072b5a493 | ||
|
|
81f5a41bc9 | ||
|
|
a688eefd1c | ||
|
|
1a8786712c | ||
|
|
339cebbc21 | ||
|
|
b62f7a3c68 | ||
|
|
cce4f520bd | ||
|
|
6d73cb85cc | ||
|
|
2948dec6f5 | ||
|
|
863af9d1d4 | ||
|
|
99033241c4 | ||
|
|
e45822e2e2 | ||
|
|
0eba72d2c0 | ||
|
|
d5f57715dc | ||
|
|
190a4128c5 | ||
|
|
d19a33dd6f | ||
|
|
0af40a8a5d | ||
|
|
558caf57de | ||
|
|
18db718d7f | ||
|
|
fdd899e9d4 | ||
|
|
7e0e664860 | ||
|
|
5030373a4c | ||
|
|
5b599337a3 | ||
|
|
2add516eee | ||
|
|
1f29612918 | ||
|
|
851efc9ca0 | ||
|
|
fa04e539ff | ||
|
|
3ef28e332f | ||
|
|
5953db88df | ||
|
|
2bf2b51039 | ||
|
|
2a2cc3212f | ||
|
|
b11468757c | ||
|
|
339c5d0312 | ||
|
|
1c6cfcfbab | ||
|
|
c16ecaa124 | ||
|
|
ce58926feb | ||
|
|
cff2aa1863 | ||
|
|
4790a21c04 | ||
|
|
21167a6232 | ||
|
|
2fe167b8a7 | ||
|
|
d96bcac3ec | ||
|
|
ac9b308cee | ||
|
|
4c35a56edd | ||
|
|
73c22a8189 | ||
|
|
de355b76d6 | ||
|
|
b2bb44363a | ||
|
|
8b687a8e21 | ||
|
|
81620c69c5 | ||
|
|
3ae0c20200 | ||
|
|
7c0754ebe5 | ||
|
|
757e23a5f2 | ||
|
|
fd63591b15 | ||
|
|
2108582b43 | ||
|
|
c125cb1d27 | ||
|
|
2fb3f7a585 | ||
|
|
9647ea88ea | ||
|
|
c1d4b0b154 | ||
|
|
239333ad90 | ||
|
|
fd64487be5 | ||
|
|
cff7ee8edf | ||
|
|
c03e8918a2 | ||
|
|
83345ec2b3 | ||
|
|
f094f83da5 | ||
|
|
0768022240 | ||
|
|
92956104d6 | ||
|
|
964856eb5f | ||
|
|
377547aa4c | ||
|
|
1662b6feb9 | ||
|
|
908170b207 | ||
|
|
ec47cab950 | ||
|
|
06671777e9 | ||
|
|
46a8bdbfe5 | ||
|
|
abdff033cc | ||
|
|
535e9e9a68 | ||
|
|
c256f17870 | ||
|
|
b8d5df2076 | ||
|
|
538baee8a4 | ||
|
|
d3d8d52e2f | ||
|
|
286f14f730 | ||
|
|
9f4752ba12 | ||
|
|
f639f994b5 | ||
|
|
911f0e4b57 | ||
|
|
86a3b8cad4 | ||
|
|
a3500cc33a | ||
|
|
bf0c59f56c | ||
|
|
c7b3667ac4 | ||
|
|
638853be05 | ||
|
|
ee9a2f73a1 | ||
|
|
648d7ae922 | ||
|
|
13937d511d | ||
|
|
fe4d3cd117 | ||
|
|
eacf11fcd8 | ||
|
|
3a8ca4d08d | ||
|
|
00e3363d45 | ||
|
|
29b37e67c2 | ||
|
|
42fee824f8 | ||
|
|
120be7e87b | ||
|
|
9e4a330ee5 | ||
|
|
78d4a95ce6 | ||
|
|
571ce86d10 | ||
|
|
dd2c60e620 | ||
|
|
1486ae25c0 | ||
|
|
da5b46e770 | ||
|
|
9ef902f4a1 | ||
|
|
058510464c | ||
|
|
0b4f4b089f | ||
|
|
7c592558d8 | ||
|
|
1e1d9e8acb | ||
|
|
3b3d16273d | ||
|
|
3046f957d5 | ||
|
|
bcf1cf59c1 | ||
|
|
c8d2ba2bc8 | ||
|
|
04ab9a4fe4 | ||
|
|
e4009234e9 | ||
|
|
8d516d26e9 | ||
|
|
0a02363c03 | ||
|
|
2c19d74829 | ||
|
|
3f4e1af222 | ||
|
|
10c7324d74 | ||
|
|
55cfc383f3 | ||
|
|
7b8f12b377 | ||
|
|
15f19375e7 | ||
|
|
93e2381f42 | ||
|
|
387bae9b9f | ||
|
|
34416e0da8 | ||
|
|
32f56140a3 | ||
|
|
64a23c0b18 | ||
|
|
af68975e2f | ||
|
|
0223b22b3e | ||
|
|
1890efbb70 | ||
|
|
e4f8a82ee6 | ||
|
|
a28de4b5cd | ||
|
|
caac224276 | ||
|
|
fe31ce9d7d | ||
|
|
01ede2ea0b | ||
|
|
fb6390e7ab | ||
|
|
ff40da6019 | ||
|
|
43a044542e | ||
|
|
00203fa889 | ||
|
|
7177e0473f | ||
|
|
252101b7c6 | ||
|
|
efdf36746c | ||
|
|
df78133bf3 | ||
|
|
bf915fa79c | ||
|
|
c160b56229 | ||
|
|
627f0e5d9d | ||
|
|
4551b4c5d2 | ||
|
|
a9afd2f030 | ||
|
|
caf860bc88 | ||
|
|
beeab2c509 | ||
|
|
a50acadc44 | ||
|
|
265d0e6d84 | ||
|
|
413cf6dd23 |
@@ -1,49 +1,95 @@
|
||||
---
|
||||
defaults: &defaults
|
||||
docker:
|
||||
- image: 'circleci/golang:1.9.4'
|
||||
working_directory: '/go/src/github.com/influxdata/telegraf'
|
||||
defaults:
|
||||
defaults: &defaults
|
||||
working_directory: '/go/src/github.com/influxdata/telegraf'
|
||||
go-1_9: &go-1_9
|
||||
docker:
|
||||
- image: 'circleci/golang:1.9.7'
|
||||
go-1_10: &go-1_10
|
||||
docker:
|
||||
- image: 'circleci/golang:1.10.3'
|
||||
|
||||
version: 2
|
||||
jobs:
|
||||
build:
|
||||
<<: *defaults
|
||||
deps:
|
||||
<<: [ *defaults, *go-1_10 ]
|
||||
steps:
|
||||
- checkout
|
||||
- restore_cache:
|
||||
key: vendor-{{ checksum "Gopkg.lock" }}
|
||||
- run: 'make deps'
|
||||
- run: 'make test-ci'
|
||||
release:
|
||||
<<: *defaults
|
||||
- save_cache:
|
||||
name: 'vendored deps'
|
||||
key: vendor-{{ checksum "Gopkg.lock" }}
|
||||
paths:
|
||||
- './vendor'
|
||||
- persist_to_workspace:
|
||||
root: '/go/src'
|
||||
paths:
|
||||
- '*'
|
||||
test-go-1.9:
|
||||
<<: [ *defaults, *go-1_9 ]
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: '/go/src'
|
||||
- run: 'make test-ci'
|
||||
test-go-1.10:
|
||||
<<: [ *defaults, *go-1_10 ]
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: '/go/src'
|
||||
- run: 'make test-ci'
|
||||
- run: 'GOARCH=386 make test-ci'
|
||||
release:
|
||||
<<: [ *defaults, *go-1_10 ]
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: '/go/src'
|
||||
- run: './scripts/release.sh'
|
||||
- store_artifacts:
|
||||
path: './artifacts'
|
||||
destination: '.'
|
||||
nightly:
|
||||
<<: *defaults
|
||||
<<: [ *defaults, *go-1_10 ]
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: '/go/src'
|
||||
- run: './scripts/release.sh'
|
||||
- store_artifacts:
|
||||
path: './artifacts'
|
||||
destination: '.'
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
build_and_release:
|
||||
jobs:
|
||||
- 'build'
|
||||
- 'deps'
|
||||
- 'test-go-1.9':
|
||||
requires:
|
||||
- 'deps'
|
||||
- 'test-go-1.10':
|
||||
requires:
|
||||
- 'deps'
|
||||
- 'release':
|
||||
requires:
|
||||
- 'build'
|
||||
- 'test-go-1.9'
|
||||
- 'test-go-1.10'
|
||||
nightly:
|
||||
jobs:
|
||||
- 'build'
|
||||
- 'deps'
|
||||
- 'test-go-1.9':
|
||||
requires:
|
||||
- 'deps'
|
||||
- 'test-go-1.10':
|
||||
requires:
|
||||
- 'deps'
|
||||
- 'nightly':
|
||||
requires:
|
||||
- 'build'
|
||||
- 'test-go-1.9'
|
||||
- 'test-go-1.10'
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 18 * * *"
|
||||
cron: "0 7 * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
|
||||
44
.github/ISSUE_TEMPLATE.md
vendored
44
.github/ISSUE_TEMPLATE.md
vendored
@@ -1,44 +0,0 @@
|
||||
## Directions
|
||||
|
||||
GitHub Issues are reserved for actionable bug reports and feature requests.
|
||||
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
|
||||
|
||||
Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
|
||||
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
|
||||
Erase the other section and everything on and above this line.
|
||||
|
||||
*Please note, the quickest way to fix a bug is to open a Pull Request.*
|
||||
|
||||
## Bug report
|
||||
|
||||
### Relevant telegraf.conf:
|
||||
|
||||
### System info:
|
||||
|
||||
[Include Telegraf version, operating system name, and other relevant details]
|
||||
|
||||
### Steps to reproduce:
|
||||
|
||||
1. ...
|
||||
2. ...
|
||||
|
||||
### Expected behavior:
|
||||
|
||||
### Actual behavior:
|
||||
|
||||
### Additional info:
|
||||
|
||||
[Include gist of relevant config, logs, etc.]
|
||||
|
||||
|
||||
## Feature Request
|
||||
|
||||
Opening a feature request kicks off a discussion.
|
||||
|
||||
### Proposal:
|
||||
|
||||
### Current behavior:
|
||||
|
||||
### Desired behavior:
|
||||
|
||||
### Use case: [Why is this important (helps with prioritizing requests)]
|
||||
24
.github/ISSUE_TEMPLATE/Bug_report.md
vendored
Normal file
24
.github/ISSUE_TEMPLATE/Bug_report.md
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
|
||||
---
|
||||
|
||||
### Relevant telegraf.conf:
|
||||
|
||||
### System info:
|
||||
|
||||
[Include Telegraf version, operating system name, and other relevant details]
|
||||
|
||||
### Steps to reproduce:
|
||||
|
||||
1. ...
|
||||
2. ...
|
||||
|
||||
### Expected behavior:
|
||||
|
||||
### Actual behavior:
|
||||
|
||||
### Additional info:
|
||||
|
||||
[Include gist of relevant config, logs, etc.]
|
||||
17
.github/ISSUE_TEMPLATE/Feature_request.md
vendored
Normal file
17
.github/ISSUE_TEMPLATE/Feature_request.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
|
||||
---
|
||||
|
||||
## Feature Request
|
||||
|
||||
Opening a feature request kicks off a discussion.
|
||||
|
||||
### Proposal:
|
||||
|
||||
### Current behavior:
|
||||
|
||||
### Desired behavior:
|
||||
|
||||
### Use case: [Why is this important (helps with prioritizing requests)]
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,3 +1,5 @@
|
||||
/build
|
||||
/telegraf
|
||||
/telegraf.exe
|
||||
/telegraf.gz
|
||||
/vendor
|
||||
|
||||
169
CHANGELOG.md
169
CHANGELOG.md
@@ -1,4 +1,168 @@
|
||||
## v1.6 [unreleased]
|
||||
## v1.8 [unreleased]
|
||||
|
||||
### Release Notes
|
||||
|
||||
### New Inputs
|
||||
|
||||
- [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu
|
||||
|
||||
### New Aggregators
|
||||
|
||||
- [valuecounter](./plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212
|
||||
|
||||
### Features
|
||||
|
||||
- [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input.
|
||||
- [#4160](https://github.com/influxdata/telegraf/pull/4160): Add tengine input plugin.
|
||||
- [#4262](https://github.com/influxdata/telegraf/pull/4262): Add power draw field to nvidia_smi plugin.
|
||||
- [#4271](https://github.com/influxdata/telegraf/pull/4271): Add support for solr 7 to the solr input.
|
||||
- [#4281](https://github.com/influxdata/telegraf/pull/4281): Add owner tag on partitions in burrow input.
|
||||
- [#4259](https://github.com/influxdata/telegraf/pull/4259): Add container status tag to docker input.
|
||||
- [#3523](https://github.com/influxdata/telegraf/pull/3523): Add valuecounter aggregator plugin.
|
||||
- [#4307](https://github.com/influxdata/telegraf/pull/4307): Add new measurement with results of pgrep lookup to procstat input.
|
||||
- [#4311](https://github.com/influxdata/telegraf/pull/4311): Add support for comma in logparser timestamp format.
|
||||
- [#4292](https://github.com/influxdata/telegraf/pull/4292): Add path tag to tail input plugin.
|
||||
- [#4322](https://github.com/influxdata/telegraf/pull/4322): Add log message when tail is added or removed from a file.
|
||||
- [#4267](https://github.com/influxdata/telegraf/pull/4267): Add option to use of counter time in win perf counters.
|
||||
- [#4343](https://github.com/influxdata/telegraf/pull/4343): Add energy and power field and device id tag to fibaro input.
|
||||
- [#4347](https://github.com/influxdata/telegraf/pull/4347): Add http path configuration for OpenTSDB output.
|
||||
|
||||
## v1.7.1 [unreleased]
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- [#4277](https://github.com/influxdata/telegraf/pull/4277): Treat sigterm as a clean shutdown signal.
|
||||
- [#4284](https://github.com/influxdata/telegraf/pull/4284): Fix selection of tags under nested objects in the JSON parser.
|
||||
- [#4135](https://github.com/influxdata/telegraf/issues/4135): Fix postfix input handling multi-level queues.
|
||||
- [#4334](https://github.com/influxdata/telegraf/pull/4334): Fix syslog timestamp parsing with single digit day of month.
|
||||
- [#2910](https://github.com/influxdata/telegraf/issues/2910): Handle mysql input variations in the user_statistics collecting.
|
||||
- [#4293](https://github.com/influxdata/telegraf/issues/4293): Fix minmax and basicstats aggregators to use uint64.
|
||||
- [#4290](https://github.com/influxdata/telegraf/issues/4290): Document swap input plugin.
|
||||
|
||||
## v1.7 [2018-06-12]
|
||||
|
||||
### Release Notes
|
||||
|
||||
- The `cassandra` input plugin has been deprecated in favor of the `jolokia2`
|
||||
input plugin which is much more configurable and more performant. There is
|
||||
an [example configuration](./plugins/inputs/jolokia2/examples) to help you
|
||||
get started.
|
||||
|
||||
- For plugins supporting TLS, you can now specify the certificate and keys
|
||||
using `tls_ca`, `tls_cert`, `tls_key`. These options behave the same as
|
||||
the, now deprecated, `ssl` forms.
|
||||
|
||||
### New Inputs
|
||||
|
||||
- [aurora](./plugins/inputs/aurora/README.md) - Contributed by @influxdata
|
||||
- [burrow](./plugins/inputs/burrow/README.md) - Contributed by @arkady-emelyanov
|
||||
- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
|
||||
- [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry/README.md) - Contributed by @ajhai
|
||||
- [mcrouter](./plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
|
||||
- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin
|
||||
- [syslog](./plugins/inputs/syslog/README.md) - Contributed by @influxdata
|
||||
|
||||
### New Processors
|
||||
|
||||
- [converter](./plugins/processors/converter/README.md) - Contributed by @influxdata
|
||||
- [regex](./plugins/processors/regex/README.md) - Contributed by @44px
|
||||
- [topk](./plugins/processors/topk/README.md) - Contributed by @mirath
|
||||
|
||||
### New Outputs
|
||||
|
||||
- [http](./plugins/outputs/http/README.md) - Contributed by @Dark0096
|
||||
- [application_insights](./plugins/outputs/application_insights/README.md): Contribute by @karolz-ms
|
||||
|
||||
### Features
|
||||
|
||||
- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
|
||||
- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
|
||||
- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
|
||||
- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
|
||||
- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non fixed length index suffixes.
|
||||
- [#4035](https://github.com/influxdata/telegraf/pull/4035): Add server version tag to docker input.
|
||||
- [#4044](https://github.com/influxdata/telegraf/pull/4044): Add support for LeoFS 1.4 to leofs input.
|
||||
- [#4068](https://github.com/influxdata/telegraf/pull/4068): Add parameter to force the interval of gather for sysstat.
|
||||
- [#3877](https://github.com/influxdata/telegraf/pull/3877): Support busybox ping in the ping input.
|
||||
- [#4077](https://github.com/influxdata/telegraf/pull/4077): Add input plugin for McRouter.
|
||||
- [#4096](https://github.com/influxdata/telegraf/pull/4096): Add topk processor plugin.
|
||||
- [#4114](https://github.com/influxdata/telegraf/pull/4114): Add cursor metrics to mongodb input.
|
||||
- [#3455](https://github.com/influxdata/telegraf/pull/3455): Add tag/integer pair for result to net_response.
|
||||
- [#4010](https://github.com/influxdata/telegraf/pull/3455): Add application_insights output plugin.
|
||||
- [#4167](https://github.com/influxdata/telegraf/pull/4167): Added several important elasticsearch cluster health metrics.
|
||||
- [#4094](https://github.com/influxdata/telegraf/pull/4094): Add batch mode to mqtt output.
|
||||
- [#4158](https://github.com/influxdata/telegraf/pull/4158): Add aurora input plugin.
|
||||
- [#3839](https://github.com/influxdata/telegraf/pull/3839): Add regex processor plugin.
|
||||
- [#4165](https://github.com/influxdata/telegraf/pull/4165): Add support for Graphite 1.1 tags.
|
||||
- [#4162](https://github.com/influxdata/telegraf/pull/4162): Add timeout option to sensors input.
|
||||
- [#3489](https://github.com/influxdata/telegraf/pull/3489): Add burrow input plugin.
|
||||
- [#3969](https://github.com/influxdata/telegraf/pull/3969): Add option to unbound module to use threads as tags.
|
||||
- [#4183](https://github.com/influxdata/telegraf/pull/4183): Add support for TLS and username/password auth to aerospike input.
|
||||
- [#4190](https://github.com/influxdata/telegraf/pull/4190): Add special syslog timestamp parser to grok parser that uses current year.
|
||||
- [#4181](https://github.com/influxdata/telegraf/pull/4181): Add syslog input plugin.
|
||||
- [#4212](https://github.com/influxdata/telegraf/pull/4212): Print the enabled aggregator and processor plugins on startup.
|
||||
- [#3994](https://github.com/influxdata/telegraf/pull/3994): Add static routing_key option to amqp output.
|
||||
- [#3995](https://github.com/influxdata/telegraf/pull/3995): Add passive mode exchange declaration option to amqp consumer input.
|
||||
- [#4216](https://github.com/influxdata/telegraf/pull/4216): Add counter fields to pf input.
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- [#4018](https://github.com/influxdata/telegraf/pull/4018): Write to working file outputs if any files are not writeable.
|
||||
- [#4036](https://github.com/influxdata/telegraf/pull/4036): Add all win_perf_counters fields for a series in a single metric.
|
||||
- [#4118](https://github.com/influxdata/telegraf/pull/4118): Report results of dns_query instead of 0ms on timeout.
|
||||
- [#4155](https://github.com/influxdata/telegraf/pull/4155): Add consul service tags to metric.
|
||||
- [#2879](https://github.com/influxdata/telegraf/issues/2879): Fix wildcards and multi instance processes in win_perf_counters.
|
||||
- [#2468](https://github.com/influxdata/telegraf/issues/2468): Fix crash on 32-bit Windows in win_perf_counters.
|
||||
- [#4198](https://github.com/influxdata/telegraf/issues/4198): Fix win_perf_counters not collecting at every interval.
|
||||
- [#4227](https://github.com/influxdata/telegraf/issues/4227): Use same flags for all BSD family ping variants.
|
||||
- [#4266](https://github.com/influxdata/telegraf/issues/4266): Remove tags with empty values from Wavefront output.
|
||||
|
||||
## v1.6.4 [2018-06-05]
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- [#4203](https://github.com/influxdata/telegraf/issues/4203): Fix snmp overriding of auto-configured table fields.
|
||||
- [#4218](https://github.com/influxdata/telegraf/issues/4218): Fix uint support in cloudwatch output.
|
||||
- [#4188](https://github.com/influxdata/telegraf/pull/4188): Fix documentation of instance_name option in varnish input.
|
||||
- [#4195](https://github.com/influxdata/telegraf/pull/4195): Revert to previous aerospike library version due to memory leak.
|
||||
|
||||
## v1.6.3 [2018-05-21]
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- [#4127](https://github.com/influxdata/telegraf/issues/4127): Fix intermittent panic in aerospike input.
|
||||
- [#4130](https://github.com/influxdata/telegraf/issues/4130): Fix connection leak in jolokia2_agent.
|
||||
- [#4136](https://github.com/influxdata/telegraf/pull/4130): Fix jolokia2 timeout parsing.
|
||||
- [#4142](https://github.com/influxdata/telegraf/pull/4142): Fix error parsing dropwizard metrics.
|
||||
- [#4149](https://github.com/influxdata/telegraf/issues/4149): Fix librato output support for uint and bool.
|
||||
- [#4176](https://github.com/influxdata/telegraf/pull/4176): Fix waitgroup deadlock if url is incorrect in apache input.
|
||||
|
||||
## v1.6.2 [2018-05-08]
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
|
||||
- [#4091](https://github.com/influxdata/telegraf/pull/4091): Fix handling of uint64 in datadog output.
|
||||
- [#4099](https://github.com/influxdata/telegraf/pull/4099): Ignore UTF8 BOM in JSON parser.
|
||||
- [#4104](https://github.com/influxdata/telegraf/issues/4104): Fix case for slave metrics in mysql input.
|
||||
- [#4110](https://github.com/influxdata/telegraf/issues/4110): Fix uint support in cratedb output.
|
||||
|
||||
## v1.6.1 [2018-04-23]
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- [#3835](https://github.com/influxdata/telegraf/issues/3835): Report mem input fields as gauges instead counters.
|
||||
- [#4030](https://github.com/influxdata/telegraf/issues/4030): Fix graphite outputs unsigned integers in wrong format.
|
||||
- [#4043](https://github.com/influxdata/telegraf/issues/4043): Report available fields if utmp is unreadable.
|
||||
- [#4039](https://github.com/influxdata/telegraf/issues/4039): Fix potential "no fields" error writing to outputs.
|
||||
- [#4037](https://github.com/influxdata/telegraf/issues/4037): Fix uptime reporting in system input when ran inside docker.
|
||||
- [#3750](https://github.com/influxdata/telegraf/issues/3750): Fix mem input "cannot allocate memory" error on FreeBSD based systems.
|
||||
- [#4056](https://github.com/influxdata/telegraf/pull/4056): Fix duplicate tags when overriding an existing tag.
|
||||
- [#4062](https://github.com/influxdata/telegraf/pull/4062): Add server argument as first argument in unbound input.
|
||||
- [#4063](https://github.com/influxdata/telegraf/issues/4063): Fix handling of floats with multiple leading zeroes.
|
||||
- [#4064](https://github.com/influxdata/telegraf/issues/4064): Return errors in mongodb SSL/TLS configuration.
|
||||
|
||||
## v1.6 [2018-04-16]
|
||||
|
||||
### Release Notes
|
||||
|
||||
@@ -105,6 +269,9 @@
|
||||
- [#3648](https://github.com/influxdata/telegraf/issues/3648): Fix InfluxDB output not able to reconnect when server address changes.
|
||||
- [#3957](https://github.com/influxdata/telegraf/issues/3957): Fix parsing of dos line endings in the smart input.
|
||||
- [#3754](https://github.com/influxdata/telegraf/issues/3754): Fix precision truncation when no timestamp included.
|
||||
- [#3655](https://github.com/influxdata/telegraf/issues/3655): Fix SNMPv3 connection with Cisco ASA 5515 in snmp input.
|
||||
- [#3981](https://github.com/influxdata/telegraf/pull/3981): Export all vars defined in /etc/default/telegraf.
|
||||
- [#4004](https://github.com/influxdata/telegraf/issues/4004): Allow grok pattern to contain newlines.
|
||||
|
||||
## v1.5.3 [2018-03-14]
|
||||
|
||||
|
||||
@@ -30,9 +30,9 @@ which can be found [on our website](http://influxdb.com/community/cla.html)
|
||||
|
||||
Assuming you can already build the project, run these in the telegraf directory:
|
||||
|
||||
1. `go get github.com/sparrc/gdm`
|
||||
1. `gdm restore`
|
||||
1. `GOOS=linux gdm save`
|
||||
1. `go get -u github.com/golang/dep/cmd/dep`
|
||||
2. `dep ensure`
|
||||
3. `dep ensure -add github.com/[dependency]/[new-package]`
|
||||
|
||||
## Input Plugins
|
||||
|
||||
@@ -100,6 +100,13 @@ func init() {
|
||||
}
|
||||
```
|
||||
|
||||
### Input Plugin Development
|
||||
|
||||
* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker dev environment
|
||||
using docker-compose.
|
||||
* ***[Optional]*** When developing a plugin, add a `dev` directory with a `docker-compose.yml` and `telegraf.conf`
|
||||
as well as any other supporting files, where sensible.
|
||||
|
||||
## Adding Typed Metrics
|
||||
|
||||
In addition the the `AddFields` function, the accumulator also supports an
|
||||
|
||||
96
Godeps
96
Godeps
@@ -1,96 +0,0 @@
|
||||
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
|
||||
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
|
||||
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
|
||||
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
|
||||
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
|
||||
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
|
||||
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
|
||||
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
|
||||
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
|
||||
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
|
||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
|
||||
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
|
||||
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
|
||||
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
||||
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
|
||||
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
|
||||
github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
|
||||
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
|
||||
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
||||
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
|
||||
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
|
||||
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
|
||||
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
|
||||
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
|
||||
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
|
||||
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
|
||||
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
|
||||
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
|
||||
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
||||
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
||||
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
|
||||
github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
|
||||
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
|
||||
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
|
||||
github.com/fsnotify/fsnotify c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
|
||||
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
|
||||
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
||||
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
|
||||
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
|
||||
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
|
||||
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
|
||||
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
|
||||
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
|
||||
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
|
||||
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
||||
github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
|
||||
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
|
||||
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
|
||||
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
|
||||
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
|
||||
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
|
||||
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
|
||||
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
|
||||
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
|
||||
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
|
||||
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
|
||||
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
|
||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
|
||||
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
|
||||
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
||||
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
|
||||
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
||||
github.com/shirou/gopsutil fc04d2dd9a512906a2604242b35275179e250eda
|
||||
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
||||
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
|
||||
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
|
||||
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
|
||||
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
|
||||
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
|
||||
github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
|
||||
github.com/stretchr/testify 12b6f73e6084dad08a7c6e575284b177ecafbc71
|
||||
github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
|
||||
github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
|
||||
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
|
||||
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
|
||||
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
|
||||
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
|
||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
||||
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
|
||||
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
|
||||
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
||||
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
|
||||
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
|
||||
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
|
||||
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
|
||||
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
|
||||
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
|
||||
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
|
||||
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
|
||||
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
|
||||
973
Gopkg.lock
generated
Normal file
973
Gopkg.lock
generated
Normal file
@@ -0,0 +1,973 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "code.cloudfoundry.org/clock"
|
||||
packages = ["."]
|
||||
revision = "02e53af36e6c978af692887ed449b74026d76fec"
|
||||
|
||||
[[projects]]
|
||||
name = "collectd.org"
|
||||
packages = [
|
||||
"api",
|
||||
"cdtime",
|
||||
"network"
|
||||
]
|
||||
revision = "2ce144541b8903101fb8f1483cc0497a68798122"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Microsoft/ApplicationInsights-Go"
|
||||
packages = [
|
||||
"appinsights",
|
||||
"appinsights/contracts"
|
||||
]
|
||||
revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5"
|
||||
version = "v0.4.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Microsoft/go-winio"
|
||||
packages = ["."]
|
||||
revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
|
||||
version = "v0.4.7"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Shopify/sarama"
|
||||
packages = ["."]
|
||||
revision = "35324cf48e33d8260e1c7c18854465a904ade249"
|
||||
version = "v1.17.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/StackExchange/wmi"
|
||||
packages = ["."]
|
||||
revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
|
||||
version = "1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/aerospike/aerospike-client-go"
|
||||
packages = [
|
||||
".",
|
||||
"internal/lua",
|
||||
"internal/lua/resources",
|
||||
"logger",
|
||||
"pkg/bcrypt",
|
||||
"pkg/ripemd160",
|
||||
"types",
|
||||
"types/atomic",
|
||||
"types/particle_type",
|
||||
"types/rand",
|
||||
"utils/buffer"
|
||||
]
|
||||
revision = "c10b5393e43bd60125aca6289c7b24879edb1787"
|
||||
version = "v1.33.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/alecthomas/template"
|
||||
packages = [
|
||||
".",
|
||||
"parse"
|
||||
]
|
||||
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/alecthomas/units"
|
||||
packages = ["."]
|
||||
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/amir/raidman"
|
||||
packages = [
|
||||
".",
|
||||
"proto"
|
||||
]
|
||||
revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/apache/thrift"
|
||||
packages = ["lib/go/thrift"]
|
||||
revision = "f5f430df56871bc937950274b2c86681d3db6e59"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
"aws/awserr",
|
||||
"aws/awsutil",
|
||||
"aws/client",
|
||||
"aws/client/metadata",
|
||||
"aws/corehandlers",
|
||||
"aws/credentials",
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/csm",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
"aws/request",
|
||||
"aws/session",
|
||||
"aws/signer/v4",
|
||||
"internal/sdkio",
|
||||
"internal/sdkrand",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/json/jsonutil",
|
||||
"private/protocol/jsonrpc",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
"private/protocol/xml/xmlutil",
|
||||
"service/cloudwatch",
|
||||
"service/kinesis",
|
||||
"service/sts"
|
||||
]
|
||||
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
|
||||
version = "v1.14.8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/bsm/sarama-cluster"
|
||||
packages = ["."]
|
||||
revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3"
|
||||
version = "v2.1.13"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/cenkalti/backoff"
|
||||
packages = ["."]
|
||||
revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e"
|
||||
version = "v2.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/couchbase/go-couchbase"
|
||||
packages = ["."]
|
||||
revision = "16db1f1fe037412f12738fa4d8448c549c4edd77"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/couchbase/gomemcached"
|
||||
packages = [
|
||||
".",
|
||||
"client"
|
||||
]
|
||||
revision = "0da75df145308b9a4e6704d762ca9d9b77752efc"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/couchbase/goutils"
|
||||
packages = [
|
||||
"logging",
|
||||
"scramsha"
|
||||
]
|
||||
revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/distribution"
|
||||
packages = [
|
||||
"digest",
|
||||
"reference"
|
||||
]
|
||||
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
|
||||
version = "v2.6.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/docker"
|
||||
packages = [
|
||||
"api/types",
|
||||
"api/types/blkiodev",
|
||||
"api/types/container",
|
||||
"api/types/events",
|
||||
"api/types/filters",
|
||||
"api/types/mount",
|
||||
"api/types/network",
|
||||
"api/types/reference",
|
||||
"api/types/registry",
|
||||
"api/types/strslice",
|
||||
"api/types/swarm",
|
||||
"api/types/time",
|
||||
"api/types/versions",
|
||||
"api/types/volume",
|
||||
"client",
|
||||
"pkg/tlsconfig"
|
||||
]
|
||||
revision = "eef6495eddab52828327aade186443681ed71a4e"
|
||||
version = "v17.03.2-ce-rc1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/go-connections"
|
||||
packages = [
|
||||
"nat",
|
||||
"sockets",
|
||||
"tlsconfig"
|
||||
]
|
||||
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/go-units"
|
||||
packages = ["."]
|
||||
revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
|
||||
version = "v0.3.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/eapache/go-resiliency"
|
||||
packages = ["breaker"]
|
||||
revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/eapache/go-xerial-snappy"
|
||||
packages = ["."]
|
||||
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/eapache/queue"
|
||||
packages = ["."]
|
||||
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/eclipse/paho.mqtt.golang"
|
||||
packages = [
|
||||
".",
|
||||
"packets"
|
||||
]
|
||||
revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
|
||||
version = "v1.37.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-logfmt/logfmt"
|
||||
packages = ["."]
|
||||
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ole/go-ole"
|
||||
packages = [
|
||||
".",
|
||||
"oleutil"
|
||||
]
|
||||
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-redis/redis"
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
"internal/consistenthash",
|
||||
"internal/hashtag",
|
||||
"internal/pool",
|
||||
"internal/proto",
|
||||
"internal/singleflight",
|
||||
"internal/util"
|
||||
]
|
||||
revision = "83fb42932f6145ce52df09860384a4653d2d332a"
|
||||
version = "v6.12.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-sql-driver/mysql"
|
||||
packages = ["."]
|
||||
revision = "d523deb1b23d913de5bdada721a6071e71283618"
|
||||
version = "v1.4.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gobwas/glob"
|
||||
packages = [
|
||||
".",
|
||||
"compiler",
|
||||
"match",
|
||||
"syntax",
|
||||
"syntax/ast",
|
||||
"syntax/lexer",
|
||||
"util/runes",
|
||||
"util/strings"
|
||||
]
|
||||
revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
|
||||
version = "v0.2.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp"
|
||||
]
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/snappy"
|
||||
packages = ["."]
|
||||
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/google/go-cmp"
|
||||
packages = [
|
||||
"cmp",
|
||||
"cmp/internal/diff",
|
||||
"cmp/internal/function",
|
||||
"cmp/internal/value"
|
||||
]
|
||||
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/context"
|
||||
packages = ["."]
|
||||
revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/mux"
|
||||
packages = ["."]
|
||||
revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
|
||||
version = "v1.6.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hailocab/go-hostpool"
|
||||
packages = ["."]
|
||||
revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/hashicorp/consul"
|
||||
packages = ["api"]
|
||||
revision = "5174058f0d2bda63fa5198ab96c33d9a909c58ed"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-cleanhttp"
|
||||
packages = ["."]
|
||||
revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-rootcerts"
|
||||
packages = ["."]
|
||||
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/hashicorp/serf"
|
||||
packages = ["coordinate"]
|
||||
revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
|
||||
version = "v0.8.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/influxdata/go-syslog"
|
||||
packages = [
|
||||
"rfc5424",
|
||||
"rfc5425"
|
||||
]
|
||||
revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/influxdata/tail"
|
||||
packages = [
|
||||
".",
|
||||
"ratelimiter",
|
||||
"util",
|
||||
"watch",
|
||||
"winfile"
|
||||
]
|
||||
revision = "c43482518d410361b6c383d7aebce33d0471d7bc"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/influxdata/toml"
|
||||
packages = [
|
||||
".",
|
||||
"ast"
|
||||
]
|
||||
revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/influxdata/wlog"
|
||||
packages = ["."]
|
||||
revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jackc/pgx"
|
||||
packages = [
|
||||
".",
|
||||
"chunkreader",
|
||||
"internal/sanitize",
|
||||
"pgio",
|
||||
"pgproto3",
|
||||
"pgtype",
|
||||
"stdlib"
|
||||
]
|
||||
revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27"
|
||||
version = "v3.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kardianos/osext"
|
||||
packages = ["."]
|
||||
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kardianos/service"
|
||||
packages = ["."]
|
||||
revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kballard/go-shellquote"
|
||||
packages = ["."]
|
||||
revision = "95032a82bc518f77982ea72343cc1ade730072f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/logfmt"
|
||||
packages = ["."]
|
||||
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mailru/easyjson"
|
||||
packages = [
|
||||
".",
|
||||
"buffer",
|
||||
"jlexer",
|
||||
"jwriter"
|
||||
]
|
||||
revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/miekg/dns"
|
||||
packages = ["."]
|
||||
revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
|
||||
version = "v1.0.8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/go-homedir"
|
||||
packages = ["."]
|
||||
revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/multiplay/go-ts3"
|
||||
packages = ["."]
|
||||
revision = "d0d44555495c8776880a17e439399e715a4ef319"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/naoina/go-stringutil"
|
||||
packages = ["."]
|
||||
revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nats-io/gnatsd"
|
||||
packages = [
|
||||
"conf",
|
||||
"logger",
|
||||
"server",
|
||||
"server/pse",
|
||||
"util"
|
||||
]
|
||||
revision = "add6d7930ae6d4bff8823b28999ea87bf1bfd23d"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nats-io/go-nats"
|
||||
packages = [
|
||||
".",
|
||||
"encoders/builtin",
|
||||
"util"
|
||||
]
|
||||
revision = "062418ea1c2181f52dc0f954f6204370519a868b"
|
||||
version = "v1.5.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nats-io/nuid"
|
||||
packages = ["."]
|
||||
revision = "289cccf02c178dc782430d534e3c1f5b72af807f"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/nsqio/go-nsq"
|
||||
packages = ["."]
|
||||
revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f"
|
||||
version = "v1.0.7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/opentracing-contrib/go-observer"
|
||||
packages = ["."]
|
||||
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
packages = [
|
||||
".",
|
||||
"ext",
|
||||
"log"
|
||||
]
|
||||
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/openzipkin/zipkin-go-opentracing"
|
||||
packages = [
|
||||
".",
|
||||
"flag",
|
||||
"thrift/gen-go/scribe",
|
||||
"thrift/gen-go/zipkincore",
|
||||
"types",
|
||||
"wire"
|
||||
]
|
||||
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
|
||||
version = "v0.3.4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pierrec/lz4"
|
||||
packages = [
|
||||
".",
|
||||
"internal/xxh32"
|
||||
]
|
||||
revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
|
||||
version = "v2.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp"
|
||||
]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"log",
|
||||
"model"
|
||||
]
|
||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
]
|
||||
revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/rcrowley/go-metrics"
|
||||
packages = ["."]
|
||||
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/samuel/go-zookeeper"
|
||||
packages = ["zk"]
|
||||
revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/shirou/gopsutil"
|
||||
packages = [
|
||||
"cpu",
|
||||
"disk",
|
||||
"host",
|
||||
"internal/common",
|
||||
"load",
|
||||
"mem",
|
||||
"net",
|
||||
"process"
|
||||
]
|
||||
revision = "eeb1d38d69593f121e060d24d17f7b1f0936b203"
|
||||
version = "v2.18.05"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/shirou/w32"
|
||||
packages = ["."]
|
||||
revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
|
||||
version = "v1.0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/soniah/gosnmp"
|
||||
packages = ["."]
|
||||
revision = "bcf840db66be7d64bf96c3c0e075c92e3d98f793"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/streadway/amqp"
|
||||
packages = ["."]
|
||||
revision = "e5adc2ada8b8efff032bf61173a233d143e9318e"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/objx"
|
||||
packages = ["."]
|
||||
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
|
||||
version = "v0.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"mock",
|
||||
"require"
|
||||
]
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tidwall/gjson"
|
||||
packages = ["."]
|
||||
revision = "afaeb9562041a8018c74e006551143666aed08bf"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/tidwall/match"
|
||||
packages = ["."]
|
||||
revision = "1731857f09b1f38450e2c12409748407822dc6be"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/vjeantet/grok"
|
||||
packages = ["."]
|
||||
revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/wvanbergen/kafka"
|
||||
packages = ["consumergroup"]
|
||||
revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/wvanbergen/kazoo-go"
|
||||
packages = ["."]
|
||||
revision = "f72d8611297a7cf105da904c04198ad701a60101"
|
||||
[[projects]]
branch = "master"
name = "github.com/yuin/gopher-lua"
packages = [".", "ast", "parse", "pm"]
revision = "ca850f594eaafa5468da2bd53b865e4ee55be18b"

[[projects]]
branch = "master"
name = "github.com/zensqlmonitor/go-mssqldb"
packages = ["."]
revision = "e8fbf836e44e86764eba398361d1825651709547"

[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["bcrypt", "blowfish", "ed25519", "ed25519/internal/edwards25519", "md4", "pbkdf2", "ssh/terminal"]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"

[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["bpf", "context", "context/ctxhttp", "html", "html/atom", "html/charset", "http/httpguts", "http2", "http2/hpack", "idna", "internal/iana", "internal/socket", "internal/socks", "internal/timeseries", "ipv4", "ipv6", "proxy", "trace", "websocket"]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"

[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix", "windows", "windows/registry", "windows/svc", "windows/svc/debug", "windows/svc/eventlog", "windows/svc/mgr"]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"

[[projects]]
name = "golang.org/x/text"
packages = ["collate", "collate/build", "encoding", "encoding/charmap", "encoding/htmlindex", "encoding/internal", "encoding/internal/identifier", "encoding/japanese", "encoding/korean", "encoding/simplifiedchinese", "encoding/traditionalchinese", "encoding/unicode", "internal/colltab", "internal/gen", "internal/tag", "internal/triegen", "internal/ucd", "internal/utf8internal", "language", "runes", "secure/bidirule", "transform", "unicode/bidi", "unicode/cldr", "unicode/norm", "unicode/rangetable"]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"

[[projects]]
name = "google.golang.org/appengine"
packages = ["cloudsql"]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"

[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "32ee49c4dd805befd833990acba36cb75042378c"

[[projects]]
name = "google.golang.org/grpc"
packages = [".", "balancer", "balancer/base", "balancer/roundrobin", "channelz", "codes", "connectivity", "credentials", "encoding", "encoding/proto", "grpclb/grpc_lb_v1/messages", "grpclog", "internal", "keepalive", "metadata", "naming", "peer", "resolver", "resolver/dns", "resolver/passthrough", "stats", "status", "tap", "transport"]
revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b"
version = "v1.12.2"

[[projects]]
name = "gopkg.in/alecthomas/kingpin.v2"
packages = ["."]
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
version = "v2.2.6"

[[projects]]
name = "gopkg.in/asn1-ber.v1"
packages = ["."]
revision = "379148ca0225df7a432012b8df0355c2a2063ac0"
version = "v1.2"

[[projects]]
name = "gopkg.in/fatih/pool.v2"
packages = ["."]
revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f"
version = "v2.0.0"

[[projects]]
name = "gopkg.in/fsnotify.v1"
packages = ["."]
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
version = "v1.4.7"

[[projects]]
name = "gopkg.in/gorethink/gorethink.v3"
packages = [".", "encoding", "ql2", "types"]
revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b"
version = "v3.0.5"

[[projects]]
name = "gopkg.in/ldap.v2"
packages = ["."]
revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9"
version = "v2.5.1"

[[projects]]
branch = "v2"
name = "gopkg.in/mgo.v2"
packages = [".", "bson", "internal/json", "internal/sasl", "internal/scram"]
revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655"

[[projects]]
name = "gopkg.in/olivere/elastic.v5"
packages = [".", "config", "uritemplates"]
revision = "b708306d715bea9b983685e94ab4602cdc9f988b"
version = "v5.0.69"

[[projects]]
branch = "v1"
name = "gopkg.in/tomb.v1"
packages = ["."]
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"

[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"

[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370"
solver-name = "gps-cdcl"
solver-version = 1
243 Gopkg.toml Normal file
@@ -0,0 +1,243 @@
[[constraint]]
name = "collectd.org"
version = "0.3.0"

[[constraint]]
name = "github.com/aerospike/aerospike-client-go"
version = "^1.33.0"

[[constraint]]
name = "github.com/amir/raidman"
branch = "master"

[[constraint]]
name = "github.com/apache/thrift"
branch = "master"

[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.14.8"
# version = "1.8.39"

[[constraint]]
name = "github.com/bsm/sarama-cluster"
version = "2.1.13"
# version = "2.1.10"

[[constraint]]
name = "github.com/couchbase/go-couchbase"
branch = "master"

[[constraint]]
name = "github.com/dgrijalva/jwt-go"
version = "3.2.0"
# version = "3.1.0"

[[constraint]]
name = "github.com/docker/docker"
version = "~17.03.2-ce"

[[constraint]]
name = "github.com/docker/go-connections"
version = "0.3.0"
# version = "0.2.1"

[[constraint]]
name = "github.com/eclipse/paho.mqtt.golang"
version = "~1.1.1"
# version = "1.1.0"

[[constraint]]
name = "github.com/go-sql-driver/mysql"
version = "1.4.0"
# version = "1.3.0"

[[constraint]]
name = "github.com/gobwas/glob"
version = "0.2.3"
# version = "0.2.2"

[[constraint]]
name = "github.com/golang/protobuf"
version = "1.1.0"
# version = "1.0.0"

[[constraint]]
name = "github.com/google/go-cmp"
version = "0.2.0"
# version = "0.1.0"

[[constraint]]
name = "github.com/gorilla/mux"
version = "1.6.2"
# version = "1.6.1"

[[constraint]]
name = "github.com/go-redis/redis"
version = "6.12.0"

[[constraint]]
name = "github.com/hashicorp/consul"
version = "1.1.0"

[[constraint]]
name = "github.com/influxdata/go-syslog"
version = "1.0.1"

[[constraint]]
name = "github.com/influxdata/tail"
branch = "master"

[[constraint]]
name = "github.com/influxdata/toml"
branch = "master"

[[constraint]]
name = "github.com/influxdata/wlog"
branch = "master"

[[constraint]]
name = "github.com/jackc/pgx"
version = "3.1.0"

[[constraint]]
name = "github.com/kardianos/service"
branch = "master"

[[constraint]]
name = "github.com/kballard/go-shellquote"
branch = "master"

[[constraint]]
name = "github.com/matttproud/golang_protobuf_extensions"
version = "1.0.1"

[[constraint]]
name = "github.com/Microsoft/ApplicationInsights-Go"
branch = "master"

[[constraint]]
name = "github.com/miekg/dns"
version = "1.0.8"
# version = "1.0.0"

[[constraint]]
name = "github.com/multiplay/go-ts3"
version = "1.0.0"

[[constraint]]
name = "github.com/nats-io/gnatsd"
version = "1.1.0"
# version = "1.0.4"

[[constraint]]
name = "github.com/nats-io/go-nats"
version = "1.5.0"
# version = "1.3.0"

[[constraint]]
name = "github.com/nsqio/go-nsq"
version = "1.0.7"

[[constraint]]
name = "github.com/openzipkin/zipkin-go-opentracing"
version = "0.3.4"
# version = "0.3.0"

[[constraint]]
name = "github.com/prometheus/client_golang"
version = "0.8.0"

[[constraint]]
name = "github.com/prometheus/client_model"
branch = "master"

[[constraint]]
name = "github.com/prometheus/common"
branch = "master"

[[constraint]]
name = "github.com/satori/go.uuid"
version = "1.2.0"

[[constraint]]
name = "github.com/shirou/gopsutil"
version = "2.18.05"
# version = "2.18.04"

[[constraint]]
name = "github.com/Shopify/sarama"
version = "1.17.0"
# version = "1.15.0"

[[constraint]]
name = "github.com/soniah/gosnmp"
branch = "master"

[[constraint]]
name = "github.com/StackExchange/wmi"
version = "1.0.0"

[[constraint]]
name = "github.com/streadway/amqp"
branch = "master"

[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.2"
# version = "1.2.1"

[[constraint]]
name = "github.com/tidwall/gjson"
version = "1.1.1"
# version = "1.0.0"

[[constraint]]
name = "github.com/vjeantet/grok"
version = "1.0.0"

[[constraint]]
name = "github.com/wvanbergen/kafka"
branch = "master"

[[constraint]]
name = "github.com/zensqlmonitor/go-mssqldb"
branch = "master"

[[constraint]]
name = "golang.org/x/net"
branch = "master"

[[constraint]]
name = "golang.org/x/sys"
branch = "master"

[[constraint]]
name = "google.golang.org/grpc"
version = "1.12.2"
# version = "1.8.0"

[[constraint]]
name = "gopkg.in/gorethink/gorethink.v3"
version = "3.0.5"

[[constraint]]
name = "gopkg.in/ldap.v2"
version = "2.5.1"

[[constraint]]
name = "gopkg.in/mgo.v2"
branch = "v2"

[[constraint]]
name = "gopkg.in/olivere/elastic.v5"
version = "^5.0.69"
# version = "^6.1.23"

[[constraint]]
name = "gopkg.in/yaml.v2"
version = "^2.2.1"

[[override]]
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
name = "gopkg.in/fsnotify.v1"
40 Makefile
@@ -4,7 +4,7 @@ BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
BUILDFLAGS ?=
BUILDFLAGS ?=

ifdef GOBIN
PATH := $(GOBIN):$(PATH)
@@ -12,8 +12,6 @@ else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)

LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
@@ -25,18 +23,18 @@ all:

deps:
	go get -u github.com/golang/lint/golint
	go get github.com/sparrc/gdm
	gdm restore
	go get -u github.com/golang/dep/cmd/dep
	dep ensure

telegraf:
	go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
	go build -ldflags "$(LDFLAGS)" ./cmd/telegraf

go-install:
	go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf

install: telegraf
	mkdir -p $(DESTDIR)$(PREFIX)/bin/
	cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
	cp telegraf $(DESTDIR)$(PREFIX)/bin/

test:
	go test -short ./...
@@ -46,7 +44,7 @@ fmt:

fmtcheck:
	@echo '[INFO] running gofmt to identify incorrectly formatted code...'
	@if [ ! -z $(GOFMT) ]; then \
	@if [ ! -z "$(GOFMT)" ]; then \
		echo "[ERROR] gofmt has found errors in the following files:" ; \
		echo "$(GOFMT)" ; \
		echo "" ;\
@@ -56,16 +54,17 @@ fmtcheck:
	@echo '[INFO] done.'

test-windows:
	go test ./plugins/inputs/ping/...
	go test ./plugins/inputs/win_perf_counters/...
	go test ./plugins/inputs/win_services/...
	go test ./plugins/inputs/procstat/...
	go test -short ./plugins/inputs/ping/...
	go test -short ./plugins/inputs/win_perf_counters/...
	go test -short ./plugins/inputs/win_services/...
	go test -short ./plugins/inputs/procstat/...
	go test -short ./plugins/inputs/ntpq/...

# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
	@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
	@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -eq 1 ]; then \
	@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
		echo ""; \
		echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
		echo "to fix them before submitting code for review."; \
@@ -73,7 +72,7 @@ vet:
	fi

test-ci: fmtcheck vet
	go test -short./...
	go test -short ./...

test-all: fmtcheck vet
	go test ./...
@@ -93,4 +92,15 @@ docker-image:
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
	ragel -Z -G2 $^ -o $@

.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64
static:
	@echo "Building static linux binary..."
	@CGO_ENABLED=0 \
	GOOS=linux \
	GOARCH=amd64 \
	go build -ldflags "$(LDFLAGS)" ./cmd/telegraf

plugin-%:
	@echo "Starting dev environment for $${$(@)} input plugin..."
	@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up

.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64 static
26 README.md
@@ -5,7 +5,7 @@ and writing metrics.

Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from local or remote services.
from local or remote services. For an example configuration reference.

Telegraf is plugin-driven and has the concept of 4 distinct plugins:

@@ -40,9 +40,9 @@ Ansible role: https://github.com/rossmcdonald/telegraf

### From Source:

Telegraf requires golang version 1.8+, the Makefile requires GNU make.
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.

Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
Dependencies are managed with [dep](https://github.com/golang/dep),
which is installed by the Makefile if you don't have it already.

1. [Install Go](https://golang.org/doc/install)
@@ -127,10 +127,12 @@ configuration options.
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aurora](./plugins/inputs/aurora)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [burrow](./plugins/inputs/burrow)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
@@ -147,6 +149,7 @@ configuration options.
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [fibaro](./plugins/inputs/fibaro)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
@@ -162,12 +165,14 @@ configuration options.
* [iptables](./plugins/inputs/iptables)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [mcrouter](./plugins/inputs/mcrouter)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
@@ -180,6 +185,7 @@ configuration options.
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
@@ -205,7 +211,9 @@ configuration options.
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [syslog](./plugins/inputs/syslog)
* [teamspeak](./plugins/inputs/teamspeak)
* [tengine](./plugins/inputs/tengine)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
@@ -263,20 +271,25 @@ formats may be used with input plugins supporting the `data_format` option:

## Processor Plugins

* [printer](./plugins/processors/printer)
* [converter](./plugins/processors/converter)
* [override](./plugins/processors/override)
* [printer](./plugins/processors/printer)
* [regex](./plugins/processors/regex)
* [topk](./plugins/processors/topk)

## Aggregator Plugins

* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
* [valuecounter](./plugins/aggregators/valuecounter)

## Output Plugins

* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [application_insights](./plugins/outputs/application_insights)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
@@ -286,6 +299,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [http](./plugins/outputs/http)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)

@@ -203,11 +203,6 @@ func (a *Agent) Test() error {
		input.SetTrace(true)
		input.SetDefaultTags(a.Config.Tags)

		fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
		if input.Config.Interval != 0 {
			fmt.Printf("* Internal: %s\n", input.Config.Interval)
		}

		if err := input.Input.Gather(acc); err != nil {
			return err
		}
@@ -217,7 +212,6 @@ func (a *Agent) Test() error {
		switch input.Name() {
		case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
			time.Sleep(500 * time.Millisecond)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
			if err := input.Input.Gather(acc); err != nil {
				return err
			}
@@ -368,24 +362,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	metricC := make(chan telegraf.Metric, 100)
	aggC := make(chan telegraf.Metric, 100)

	// Start all ServicePlugins
	for _, input := range a.Config.Inputs {
		input.SetDefaultTags(a.Config.Tags)
		switch p := input.Input.(type) {
		case telegraf.ServiceInput:
			acc := NewAccumulator(input, metricC)
			// Service input plugins should set their own precision of their
			// metrics.
			acc.SetPrecision(time.Nanosecond, 0)
			if err := p.Start(acc); err != nil {
				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
					input.Name(), err.Error())
				return err
			}
			defer p.Stop()
		}
	}

	// Round collection to nearest interval by sleeping
	if a.Config.Agent.RoundInterval {
		i := int64(a.Config.Agent.Interval.Duration)
@@ -425,6 +401,25 @@ func (a *Agent) Run(shutdown chan struct{}) error {
		}(input, interval)
	}

	// Start all ServicePlugins inputs after all other
	// plugins are loaded so that no metrics get dropped
	for _, input := range a.Config.Inputs {
		input.SetDefaultTags(a.Config.Tags)
		switch p := input.Input.(type) {
		case telegraf.ServiceInput:
			acc := NewAccumulator(input, metricC)
			// Service input plugins should set their own precision of their
			// metrics.
			acc.SetPrecision(time.Nanosecond, 0)
			if err := p.Start(acc); err != nil {
				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
					input.Name(), err.Error())
				return err
			}
			defer p.Stop()
		}
	}

	wg.Wait()
	a.Close()
	return nil

@@ -1,4 +1,3 @@
image: Previous Visual Studio 2015
version: "{build}"

cache:
@@ -13,15 +12,16 @@ platform: x64

install:
  - IF NOT EXIST "C:\Cache" mkdir C:\Cache
  - IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
  - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
  - IF EXIST "C:\Go" rmdir /S /Q C:\Go
  - msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
  - msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet
  - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
  - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
  - go version
  - go env
  - git config --system core.longpaths true

build_script:
  - cmd: C:\GnuWin32\bin\make deps

@@ -13,6 +13,7 @@ import (
	"syscall"

	"github.com/influxdata/telegraf/agent"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/logger"
	_ "github.com/influxdata/telegraf/plugins/aggregators/all"
@@ -57,7 +58,7 @@ var fService = flag.String("service", "",
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")

var (
	nextVersion = "1.6.0"
	nextVersion = "1.8.0"
	version     string
	commit      string
	branch      string
@@ -73,48 +74,6 @@ func init() {
	}
}

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf [commands|flags]

The commands & flags are:

  config              print out full sample configuration to stdout
  version             print the version to stdout

  --config <file>     configuration file to load
  --test              gather metrics once, print them to stdout, and exit
  --config-directory  directory containing additional *.conf files
  --input-filter      filter the input plugins to enable, separator is :
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
  --quiet             run in quiet mode

Examples:

  # generate a telegraf config file:
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputing metrics to stdout
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}

func reloadLoop(
@@ -188,11 +147,11 @@ func reloadLoop(

		shutdown := make(chan struct{})
		signals := make(chan os.Signal)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM)
		go func() {
			select {
			case sig := <-signals:
				if sig == os.Interrupt {
				if sig == os.Interrupt || sig == syscall.SIGTERM {
					close(shutdown)
				}
				if sig == syscall.SIGHUP {
@@ -207,8 +166,10 @@ func reloadLoop(
		}()

		log.Printf("I! Starting Telegraf %s\n", displayVersion())
		log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
		log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
		log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " "))
		log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " "))
		log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
		log.Printf("I! Tags enabled: %s", c.ListTags())

		if *fPidfile != "" {
@@ -234,7 +195,7 @@ func reloadLoop(
}

func usageExit(rc int) {
	fmt.Println(usage)
	fmt.Println(internal.Usage)
	os.Exit(rc)
}

@@ -365,7 +326,7 @@ func main() {
			DisplayName: "Telegraf Data Collector Service",
			Description: "Collects data using a series of plugins and publishes it to" +
				"another series of plugins.",
			Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
			Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
		}

		prg := &program{
@@ -378,14 +339,14 @@ func main() {
		if err != nil {
			log.Fatal("E! " + err.Error())
		}
		// Handle the -service flag here to prevent any issues with tooling that
		// Handle the --service flag here to prevent any issues with tooling that
		// may not have an interactive session, e.g. installing from Ansible.
		if *fService != "" {
			if *fConfig != "" {
				(*svcConfig).Arguments = []string{"-config", *fConfig}
				(*svcConfig).Arguments = []string{"--config", *fConfig}
			}
			if *fConfigDirectory != "" {
				(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
				(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
			}
			err := service.Control(s, *fService)
			if err != nil {

@@ -79,15 +79,15 @@ services:
      - "389:389"
      - "636:636"
  crate:
    image: crate/crate
    ports:
      - "4200:4200"
      - "4230:4230"
    command:
      - crate
      - -Cnetwork.host=0.0.0.0
      - -Ctransport.host=localhost
      - -Clicense.enterprise=false
    environment:
      - CRATE_HEAP_SIZE=128m
      - JAVA_OPTS='-Xms256m -Xmx256m'
    image: crate/crate
    ports:
      - "4200:4200"
      - "4230:4230"
      - "5432:5432"
    command:
      - crate
      - -Cnetwork.host=0.0.0.0
      - -Ctransport.host=localhost
      - -Clicense.enterprise=false
    environment:
      - CRATE_HEAP_SIZE=128m

@@ -153,11 +153,11 @@ The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
  An array of glob pattern strings. Only fields whose field key matches a
  pattern in this list are emitted. Not available for outputs.
  pattern in this list are emitted.
* **fielddrop**:
  The inverse of `fieldpass`. Fields with a field key matching one of the
  patterns will be discarded from the point. This is tested on points after
  they have passed the `fieldpass` test. Not available for outputs.
  they have passed the `fieldpass` test.
* **tagpass**:
  A table mapping tag keys to arrays of glob pattern strings. Only points
  that contain a tag key in the table and a tag value matching one of its

@@ -1,35 +1,15 @@
# Telegraf Output Data Formats
# Output Data Formats

Telegraf is able to serialize metrics into the following output data formats:
In addition to output specific data formats, Telegraf supports a set of
standard data formats that may be selected from when configuring many output
plugins.

1. [InfluxDB Line Protocol](#influx)
1. [JSON](#json)
1. [Graphite](#graphite)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/latest/concepts/glossary/#point),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

In InfluxDB line protocol, these 4 parts are easily defined in textual form:

```
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
```

For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
InfluxDB line protocol was originally the only available output format. But now
we are normalizing telegraf metric "serializers" into a
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
across all output plugins that can support it.
You will be able to identify a plugin that supports different data formats
by the presence of a `data_format`
config option, for example, in the `file` output plugin:

You will be able to identify the plugins with support by the presence of a
`data_format` config option, for example, in the `file` output plugin:
```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
@@ -40,22 +20,16 @@ config option, for example, in the `file` output plugin:
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.
## Influx

# Influx:

The `influx` format outputs data as
The `influx` data format outputs metrics using
[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
This is the recommended format to use unless another format is required for
This is the recommended format unless another format is required for
interoperability.

### Influx Configuration:

### Influx Configuration
```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
@@ -82,11 +56,17 @@ interoperability.
  # influx_uint_support = false
```

# Graphite:
## Graphite

The Graphite data format translates Telegraf metrics into _dot_ buckets. A
template can be specified for the output of Telegraf metrics into Graphite
buckets. The default template is:
The Graphite data format is translated from Telegraf Metrics using either the
template pattern or tag support method. You can select between the two
methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support
method is used, otherwise the [`template` pattern](#template-pattern) is used.

#### Template Pattern

The `template` option describes how Telegraf translates metrics into _dot_
buckets. The default template is:

```
template = "host.tags.measurement.field"
@@ -103,7 +83,7 @@ tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.

Which means the following influx metric -> graphite conversion would happen:
**Example Conversion**:

```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
@@ -115,7 +95,25 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).

### Graphite Configuration:
#### Graphite Tag Support

When the `graphite_tag_support` option is enabled, the template pattern is not
used. Instead, tags are encoded using
[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html)
added in Graphite 1.1. The `metric_path` is a combination of the optional
`prefix` option, measurement name, and field name.

The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.

**Example Conversion**:
```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
```

### Graphite Configuration

```toml
[[outputs.file]]
@@ -128,33 +126,72 @@ to 1 (true) or 0 (false).
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"

  # prefix each graphite bucket
  ## Prefix added to each graphite bucket
  prefix = "telegraf"
  # graphite template
  ## Graphite template pattern
  template = "host.tags.measurement.field"

  ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
  # graphite_tag_support = false
```

# JSON:

The JSON data format serialized Telegraf metrics in json format. The format is:
## JSON

The JSON output data format, for a single metric, is in the form:
```json
{
    "fields":{
        "field_1":30,
        "field_2":4,
        "field_N":59,
        "n_images":660
    },
    "name":"docker",
    "tags":{
        "host":"raynor"
    },
    "timestamp":1458229140
    "fields": {
        "field_1": 30,
        "field_2": 4,
        "field_N": 59,
        "n_images": 660
    },
    "name": "docker",
    "tags": {
        "host": "raynor"
    },
    "timestamp": 1458229140
}
```

### JSON Configuration:
When an output plugin needs to emit multiple metrics at one time, it may use
the batch format. The use of batch format is determined by the plugin,
reference the documentation for the specific plugin.
```json
{
    "metrics": [
        {
            "fields": {
                "field_1": 30,
                "field_2": 4,
                "field_N": 59,
                "n_images": 660
            },
            "name": "docker",
            "tags": {
                "host": "raynor"
            },
            "timestamp": 1458229140
        },
        {
            "fields": {
                "field_1": 30,
                "field_2": 4,
                "field_N": 59,
                "n_images": 660
            },
            "name": "docker",
            "tags": {
                "host": "raynor"
            },
            "timestamp": 1458229140
        }
    ]
}
```

### JSON Configuration

```toml
[[outputs.file]]
@@ -166,14 +203,9 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
  json_timestamp_units = "1ns"
```

By default, the timestamp that is output in JSON data format serialized Telegraf
metrics is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter will be truncated to the nearest power of 10 that, so if the `json_timestamp_units`
are set to `15ms` the timestamps for the JSON format serialized Telegraf metrics will be
output in hundredths of a second (`10ms`).
  ## The resolution to use for the metric timestamp. Must be a duration string
  ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to
  ## the power of 10 less than the specified units.
  json_timestamp_units = "1s"
```
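To make the truncation rule above concrete, here is a small, self-contained Go sketch (an illustration only, not the serializer's actual implementation) that rounds a duration down to the nearest power of ten, matching the documented behaviour where `15ms` becomes `10ms`:

```go
package main

import (
	"fmt"
	"time"
)

// truncateToPowerOf10 rounds a duration down to the nearest power of ten
// nanoseconds, mirroring the documented json_timestamp_units behaviour.
// Overflow for extremely large durations is ignored in this sketch.
func truncateToPowerOf10(d time.Duration) time.Duration {
	p := time.Duration(1) // start at 1ns
	for p*10 <= d {
		p *= 10
	}
	return p
}

func main() {
	fmt.Println(truncateToPowerOf10(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOf10(time.Second))           // 1s
}
```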
@@ -3,6 +3,7 @@
When distributed in a binary form, Telegraf may contain portions of the
following works:

- code.cloudfoundry.org/clock [APACHE](https://github.com/cloudfoundry/clock/blob/master/LICENSE)
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
@@ -43,6 +44,7 @@ following works:
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/go-syslog [MIT](https://github.com/influxdata/go-syslog/blob/develop/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
@@ -51,6 +53,7 @@ following works:
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/ApplicationInsights-Go [APACHE](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
@@ -97,6 +100,8 @@ following works:
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- google.golang.org/grpc [APACHE](https://github.com/google/grpc-go/blob/master/LICENSE)
- google.golang.org/genproto [APACHE](https://github.com/google/go-genproto/blob/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)

@@ -5,7 +5,7 @@ the general steps to set it up.

1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
   location simply specify the `-config` parameter with the desired location)
   location simply specify the `--config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):

@@ -26,6 +26,15 @@ the general steps to set it up.
> net start telegraf
```

## Config Directory

You can also specify a `--config-directory` for the service to use:
1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d`
2. Include the `--config-directory` option when registering the service:
```
> C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d
```

## Other supported operations

Telegraf can manage its own service through the --service flag:
@@ -37,7 +46,6 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |


Troubleshooting common error #1067

When installing Telegraf as a Windows service, always double check that you specify the full path to the config file; otherwise the Windows service will fail to start.
File diff suppressed because it is too large
@@ -242,7 +242,7 @@
#
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs"]
# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]


# # Read metrics about disk IO by device

@@ -156,6 +156,24 @@ func (c *Config) InputNames() []string {
	return name
}

// AggregatorNames returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
	var name []string
	for _, aggregator := range c.Aggregators {
		name = append(name, aggregator.Name())
	}
	return name
}

// ProcessorNames returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
	var name []string
	for _, processor := range c.Processors {
		name = append(name, processor.Name)
	}
	return name
}

// Outputs returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
	var name []string
@@ -519,7 +537,13 @@ func (c *Config) LoadDirectory(path string) error {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}

		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				// skip Kubernetes mounts, preventing loading the same config twice
				return filepath.SkipDir
			}

			return nil
		}
		name := info.Name()
@@ -1403,6 +1427,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
		}
	}

	if node, ok := tbl.Fields["graphite_tag_support"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Boolean); ok {
				var err error
				c.GraphiteTagSupport, err = b.Boolean()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if node, ok := tbl.Fields["json_timestamp_units"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
@@ -1422,6 +1458,7 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
	delete(tbl.Fields, "influx_max_line_bytes")
	delete(tbl.Fields, "influx_sort_fields")
	delete(tbl.Fields, "influx_uint_support")
	delete(tbl.Fields, "graphite_tag_support")
	delete(tbl.Fields, "data_format")
	delete(tbl.Fields, "prefix")
	delete(tbl.Fields, "template")
4 internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf vendored Normal file
@@ -0,0 +1,4 @@
# This invalid config file should be skipped during testing
# as it is an ..data folder

[[outputs.influxdb

@@ -4,17 +4,14 @@ import (
	"bufio"
	"bytes"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/big"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"syscall"
	"time"
	"unicode"
)
@@ -112,49 +109,6 @@ func RandomString(n int) string {
	return string(bytes)
}

// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// you must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
func GetTLSConfig(
	SSLCert, SSLKey, SSLCA string,
	InsecureSkipVerify bool,
) (*tls.Config, error) {
	if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
		return nil, nil
	}

	t := &tls.Config{
		InsecureSkipVerify: InsecureSkipVerify,
	}

	if SSLCA != "" {
		caCert, err := ioutil.ReadFile(SSLCA)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
				err))
		}

		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		t.RootCAs = caCertPool
	}

	if SSLCert != "" && SSLKey != "" {
		cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
		if err != nil {
			return nil, errors.New(fmt.Sprintf(
				"Could not load TLS client key/certificate from %s:%s: %s",
				SSLKey, SSLCert, err))
		}

		t.Certificates = []tls.Certificate{cert}
		t.BuildNameToCertificate()
	}

	// will be nil by default if nothing is provided
	return t, nil
}

// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {
@@ -240,3 +194,15 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
		return
	}
}

// ExitStatus takes the error from exec.Command and returns the exit status
// and true. If the error is not an exit status, it returns 0 and false.
func ExitStatus(err error) (int, bool) {
	if exiterr, ok := err.(*exec.ExitError); ok {
		if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
			return status.ExitStatus(), true
		}
	}
	return 0, false
}
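A brief usage sketch for the new helper; the `sh -c 'exit 3'` command is only an illustration, and because `ExitStatus` lives in Telegraf's `internal` package this only compiles from within the telegraf source tree:

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Run a command that exits non-zero; ExitStatus recovers the code.
	err := exec.Command("sh", "-c", "exit 3").Run()
	if code, ok := internal.ExitStatus(err); ok {
		fmt.Println("exit status:", code) // exit status: 3
	}
}
```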
@@ -1,54 +0,0 @@
package limiter

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestRateLimiter(t *testing.T) {
	r := NewRateLimiter(5, time.Second)
	ticker := time.NewTicker(time.Millisecond * 75)

	// test that we can only get 5 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-r.C:
			counter++
		case <-ticker.C:
			break outer
		}
	}

	assert.Equal(t, 5, counter)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

func TestRateLimiterMultipleIterations(t *testing.T) {
	r := NewRateLimiter(5, time.Millisecond*50)
	ticker := time.NewTicker(time.Millisecond * 250)

	// test that we can get 15 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-ticker.C:
			break outer
		case <-r.C:
			counter++
		}
	}

	assert.True(t, counter > 10)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

@@ -1,6 +1,7 @@
package models

import (
	"log"
	"time"

	"github.com/influxdata/telegraf"
@@ -153,6 +154,7 @@ func (r *RunningAggregator) Run(
			m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
			// the metric is outside the current aggregation period, so
			// skip it.
			log.Printf("D! aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name())
			continue
		}
		r.add(m)

@@ -77,6 +77,7 @@ func (r *RunningInput) MakeMetric(

	if r.trace && m != nil {
		s := influx.NewSerializer()
		s.SetFieldSortOrder(influx.SortFields)
		octets, err := s.Serialize(m)
		if err == nil {
			fmt.Print("> " + string(octets))
130 internal/tls/config.go Normal file
@@ -0,0 +1,130 @@
package tls

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

// ClientConfig represents the standard client TLS config.
type ClientConfig struct {
	TLSCA              string `toml:"tls_ca"`
	TLSCert            string `toml:"tls_cert"`
	TLSKey             string `toml:"tls_key"`
	InsecureSkipVerify bool   `toml:"insecure_skip_verify"`

	// Deprecated in 1.7; use TLS variables above
	SSLCA   string `toml:"ssl_ca"`
	SSLCert string `toml:"ssl_cert"`
	SSLKey  string `toml:"ssl_key"`
}

// ServerConfig represents the standard server TLS config.
type ServerConfig struct {
	TLSCert           string   `toml:"tls_cert"`
	TLSKey            string   `toml:"tls_key"`
	TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"`
}

// TLSConfig returns a tls.Config, may be nil without error if TLS is not
// configured.
func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
	// Support deprecated variable names
	if c.TLSCA == "" && c.SSLCA != "" {
		c.TLSCA = c.SSLCA
	}
	if c.TLSCert == "" && c.SSLCert != "" {
		c.TLSCert = c.SSLCert
	}
	if c.TLSKey == "" && c.SSLKey != "" {
		c.TLSKey = c.SSLKey
	}

	// TODO: return default tls.Config; plugins should not call if they don't
	// want TLS, this will require using another option to determine. In the
	// case of an HTTP plugin, you could use `https`. Other plugins may need
	// the dedicated option `TLSEnable`.
	if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify {
		return nil, nil
	}

	tlsConfig := &tls.Config{
		InsecureSkipVerify: c.InsecureSkipVerify,
		Renegotiation:      tls.RenegotiateNever,
	}

	if c.TLSCA != "" {
		pool, err := makeCertPool([]string{c.TLSCA})
		if err != nil {
			return nil, err
		}
		tlsConfig.RootCAs = pool
	}

	if c.TLSCert != "" && c.TLSKey != "" {
		err := loadCertificate(tlsConfig, c.TLSCert, c.TLSKey)
		if err != nil {
			return nil, err
		}
	}

	return tlsConfig, nil
}

// TLSConfig returns a tls.Config, may be nil without error if TLS is not
// configured.
func (c *ServerConfig) TLSConfig() (*tls.Config, error) {
	if c.TLSCert == "" && c.TLSKey == "" && len(c.TLSAllowedCACerts) == 0 {
		return nil, nil
	}

	tlsConfig := &tls.Config{}

	if len(c.TLSAllowedCACerts) != 0 {
		pool, err := makeCertPool(c.TLSAllowedCACerts)
		if err != nil {
			return nil, err
		}
		tlsConfig.ClientCAs = pool
		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
	}

	if c.TLSCert != "" && c.TLSKey != "" {
		err := loadCertificate(tlsConfig, c.TLSCert, c.TLSKey)
		if err != nil {
			return nil, err
		}
	}

	return tlsConfig, nil
}

func makeCertPool(certFiles []string) (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	for _, certFile := range certFiles {
		pem, err := ioutil.ReadFile(certFile)
		if err != nil {
			return nil, fmt.Errorf(
				"could not read certificate %q: %v", certFile, err)
		}
		ok := pool.AppendCertsFromPEM(pem)
		if !ok {
			return nil, fmt.Errorf(
				"could not parse any PEM certificates %q: %v", certFile, err)
		}
	}
	return pool, nil
}

func loadCertificate(config *tls.Config, certFile, keyFile string) error {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return fmt.Errorf(
			"could not load keypair %s:%s: %v", certFile, keyFile, err)
	}

	config.Certificates = []tls.Certificate{cert}
	config.BuildNameToCertificate()
	return nil
}
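For context, plugins consume this package by embedding the config structs. The following is a hedged sketch (the `ExampleInput` plugin name is hypothetical) of how a client-side plugin would obtain a `*tls.Config` from its TOML options:

```go
package example

import (
	"net/http"

	"github.com/influxdata/telegraf/internal/tls"
)

// ExampleInput is a hypothetical plugin that embeds the shared client TLS
// settings so that tls_ca, tls_cert, tls_key and insecure_skip_verify are
// parsed from its TOML table automatically.
type ExampleInput struct {
	URL string `toml:"url"`
	tls.ClientConfig
}

// newClient builds an HTTP client from the embedded TLS settings.
func (e *ExampleInput) newClient() (*http.Client, error) {
	tlsCfg, err := e.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}
	// tlsCfg may be nil when no TLS options are set; http.Transport accepts that.
	return &http.Client{Transport: &http.Transport{TLSClientConfig: tlsCfg}}, nil
}
```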
226
internal/tls/config_test.go
Normal file
226
internal/tls/config_test.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package tls_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var pki = testutil.NewPKI("../../testutil/pki")
|
||||
|
||||
func TestClientConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
client tls.ClientConfig
|
||||
expNil bool
|
||||
expErr bool
|
||||
}{
|
||||
{
|
||||
name: "unset",
|
||||
client: tls.ClientConfig{},
|
||||
expNil: true,
|
||||
},
|
||||
{
|
||||
name: "success",
|
||||
client: tls.ClientConfig{
|
||||
TLSCA: pki.CACertPath(),
|
||||
TLSCert: pki.ClientCertPath(),
|
||||
TLSKey: pki.ClientKeyPath(),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid ca",
|
||||
client: tls.ClientConfig{
|
||||
TLSCA: pki.ClientKeyPath(),
|
||||
TLSCert: pki.ClientCertPath(),
|
||||
TLSKey: pki.ClientKeyPath(),
|
||||
},
|
||||
expNil: true,
|
||||
expErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing ca is okay",
|
||||
client: tls.ClientConfig{
|
||||
TLSCert: pki.ClientCertPath(),
|
||||
TLSKey: pki.ClientKeyPath(),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid cert",
|
||||
client: tls.ClientConfig{
|
||||
TLSCA: pki.CACertPath(),
|
||||
TLSCert: pki.ClientKeyPath(),
|
||||
TLSKey: pki.ClientKeyPath(),
|
||||
},
|
||||
expNil: true,
|
||||
expErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing cert skips client keypair",
|
||||
client: tls.ClientConfig{
|
||||
TLSCA: pki.CACertPath(),
|
||||
TLSKey: pki.ClientKeyPath(),
|
||||
},
|
||||
expNil: false,
|
||||
expErr: false,
|
||||
},
|
||||
{
|
||||
name: "missing key skips client keypair",
|
||||
client: tls.ClientConfig{
|
||||
TLSCA: pki.CACertPath(),
|
||||
TLSCert: pki.ClientCertPath(),
|
||||
},
|
||||
expNil: false,
|
||||
expErr: false,
|
||||
},
|
||||
{
|
||||
name: "support deprecated ssl field names",
|
||||
client: tls.ClientConfig{
|
||||
SSLCA: pki.CACertPath(),
|
||||
SSLCert: pki.ClientCertPath(),
|
||||
SSLKey: pki.ClientKeyPath(),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tlsConfig, err := tt.client.TLSConfig()
|
||||
if !tt.expNil {
|
||||
require.NotNil(t, tlsConfig)
|
||||
} else {
|
||||
require.Nil(t, tlsConfig)
|
||||
}
|
||||
|
||||
if !tt.expErr {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
server tls.ServerConfig
|
||||
expNil bool
|
||||
expErr bool
|
||||
}{
|
||||
{
|
||||
name: "unset",
|
||||
server: tls.ServerConfig{},
|
||||
expNil: true,
|
||||
},
|
||||
{
|
||||
name: "success",
|
||||
server: tls.ServerConfig{
|
||||
TLSCert: pki.ServerCertPath(),
|
||||
TLSKey: pki.ServerKeyPath(),
|
||||
TLSAllowedCACerts: []string{pki.CACertPath()},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid ca",
|
||||
server: tls.ServerConfig{
|
||||
TLSCert: pki.ServerCertPath(),
|
||||
TLSKey: pki.ServerKeyPath(),
|
||||
TLSAllowedCACerts: []string{pki.ServerKeyPath()},
|
||||
},
|
||||
expNil: true,
|
||||
expErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing allowed ca is okay",
|
||||
server: tls.ServerConfig{
|
||||
TLSCert: pki.ServerCertPath(),
|
||||
TLSKey: pki.ServerKeyPath(),
|
||||
},
|
||||
expNil: true,
|
||||
expErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid cert",
|
||||
server: tls.ServerConfig{
|
||||
TLSCert: pki.ServerKeyPath(),
|
||||
TLSKey: pki.ServerKeyPath(),
|
||||
TLSAllowedCACerts: []string{pki.CACertPath()},
|
||||
},
|
||||
expNil: true,
|
||||
expErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing cert",
|
||||
server: tls.ServerConfig{
|
||||
TLSKey: pki.ServerKeyPath(),
|
||||
TLSAllowedCACerts: []string{pki.CACertPath()},
|
||||
},
|
||||
expNil: true,
|
||||
expErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing key",
|
||||
server: tls.ServerConfig{
|
||||
TLSCert: pki.ServerCertPath(),
|
||||
TLSAllowedCACerts: []string{pki.CACertPath()},
|
||||
},
|
||||
expNil: true,
|
||||
expErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tlsConfig, err := tt.server.TLSConfig()
|
||||
if !tt.expNil {
|
||||
require.NotNil(t, tlsConfig)
|
||||
}
|
||||
if !tt.expErr {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnect(t *testing.T) {
|
||||
clientConfig := tls.ClientConfig{
|
||||
TLSCA: pki.CACertPath(),
|
||||
TLSCert: pki.ClientCertPath(),
|
||||
TLSKey: pki.ClientKeyPath(),
|
||||
}
|
||||
|
||||
serverConfig := tls.ServerConfig{
|
||||
TLSCert: pki.ServerCertPath(),
|
||||
TLSKey: pki.ServerKeyPath(),
|
||||
TLSAllowedCACerts: []string{pki.CACertPath()},
|
||||
}
|
||||
|
||||
serverTLSConfig, err := serverConfig.TLSConfig()
|
||||
require.NoError(t, err)
|
||||
|
||||
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
ts.TLS = serverTLSConfig
|
||||
|
||||
ts.StartTLS()
|
||||
defer ts.Close()
|
||||
|
||||
clientTLSConfig, err := clientConfig.TLSConfig()
|
||||
require.NoError(t, err)
|
||||
|
||||
client := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: clientTLSConfig,
|
||||
},
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
resp, err := client.Get(ts.URL)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
}
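For orientation, here is a minimal sketch of how a plugin can consume the `internal/tls` helpers exercised by the tests above. The `ExamplePlugin` type and `initClient` name are hypothetical; the `ClientConfig` embedding and `TLSConfig()` call mirror the apache and aurora changes later in this diff.

```go
// Illustrative sketch only; not part of this changeset.
package example

import (
	"net/http"
	"time"

	"github.com/influxdata/telegraf/internal/tls"
)

type ExamplePlugin struct {
	tls.ClientConfig // supplies tls_ca, tls_cert, tls_key, insecure_skip_verify

	client *http.Client
}

func (p *ExamplePlugin) initClient() error {
	// TLSConfig() returns a nil config when no TLS options are set, so the
	// transport falls back to plain HTTP defaults.
	tlsCfg, err := p.ClientConfig.TLSConfig()
	if err != nil {
		return err
	}
	p.client = &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
		Timeout:   5 * time.Second,
	}
	return nil
}
```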
|
||||
internal/usage.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
// +build !windows
|
||||
|
||||
package internal
|
||||
|
||||
const Usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||
|
||||
Usage:
|
||||
|
||||
telegraf [commands|flags]
|
||||
|
||||
The commands & flags are:
|
||||
|
||||
config print out full sample configuration to stdout
|
||||
version print the version to stdout
|
||||
|
||||
--config <file> configuration file to load
|
||||
--test gather metrics once, print them to stdout, and exit
|
||||
--config-directory directory containing additional *.conf files
|
||||
--input-filter filter the input plugins to enable, separator is :
|
||||
--output-filter filter the output plugins to enable, separator is :
|
||||
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
|
||||
--debug print metrics as they're generated to stdout
|
||||
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
|
||||
--quiet run in quiet mode
|
||||
|
||||
Examples:
|
||||
|
||||
# generate a telegraf config file:
|
||||
telegraf config > telegraf.conf
|
||||
|
||||
# generate config with only cpu input & influxdb output plugins defined
|
||||
telegraf --input-filter cpu --output-filter influxdb config
|
||||
|
||||
# run a single telegraf collection, outputting metrics to stdout
|
||||
telegraf --config telegraf.conf --test
|
||||
|
||||
# run telegraf with all plugins defined in config file
|
||||
telegraf --config telegraf.conf
|
||||
|
||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
|
||||
|
||||
# run telegraf with pprof
|
||||
telegraf --config telegraf.conf --pprof-addr localhost:6060
|
||||
`
|
||||
internal/usage_windows.go (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
// +build windows
|
||||
|
||||
package internal
|
||||
|
||||
const Usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||
|
||||
Usage:
|
||||
|
||||
telegraf [commands|flags]
|
||||
|
||||
The commands & flags are:
|
||||
|
||||
config print out full sample configuration to stdout
|
||||
version print the version to stdout
|
||||
|
||||
--config <file> configuration file to load
|
||||
--test gather metrics once, print them to stdout, and exit
|
||||
--config-directory directory containing additional *.conf files
|
||||
--input-filter filter the input plugins to enable, separator is :
|
||||
--output-filter filter the output plugins to enable, separator is :
|
||||
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
|
||||
--debug print metrics as they're generated to stdout
|
||||
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
|
||||
--quiet run in quiet mode
|
||||
|
||||
--console run as console application
|
||||
--service operate on service, one of: install, uninstall, start, stop
|
||||
|
||||
Examples:
|
||||
|
||||
# generate a telegraf config file:
|
||||
telegraf config > telegraf.conf
|
||||
|
||||
# generate config with only cpu input & influxdb output plugins defined
|
||||
telegraf --input-filter cpu --output-filter influxdb config
|
||||
|
||||
# run a single telegraf collection, outputting metrics to stdout
|
||||
telegraf --config telegraf.conf --test
|
||||
|
||||
# run telegraf with all plugins defined in config file
|
||||
telegraf --config telegraf.conf
|
||||
|
||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
|
||||
|
||||
# run telegraf with pprof
|
||||
telegraf --config telegraf.conf --pprof-addr localhost:6060
|
||||
|
||||
# run telegraf without service controller
|
||||
telegraf --console install --config "C:\Program Files\Telegraf\telegraf.conf"
|
||||
|
||||
# install telegraf service
|
||||
telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf"
|
||||
`
|
||||
@@ -54,6 +54,8 @@ type Metric interface {
|
||||
AddField(key string, value interface{})
|
||||
RemoveField(key string)
|
||||
|
||||
SetTime(t time.Time)
|
||||
|
||||
// HashID returns a unique identifier for the series.
|
||||
HashID() uint64
|
||||
|
||||
|
||||
@@ -123,6 +123,7 @@ func (m *metric) AddTag(key, value string) {
|
||||
|
||||
if key == tag.Key {
|
||||
tag.Value = value
|
||||
return
|
||||
}
|
||||
|
||||
m.tags = append(m.tags, nil)
|
||||
@@ -201,6 +202,10 @@ func (m *metric) RemoveField(key string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metric) SetTime(t time.Time) {
|
||||
m.tm = t
|
||||
}
|
||||
|
||||
func (m *metric) Copy() telegraf.Metric {
|
||||
m2 := &metric{
|
||||
name: m.name,
|
||||
@@ -232,9 +237,12 @@ func (m *metric) IsAggregate() bool {
|
||||
func (m *metric) HashID() uint64 {
|
||||
h := fnv.New64a()
|
||||
h.Write([]byte(m.name))
|
||||
h.Write([]byte("\n"))
|
||||
for _, tag := range m.tags {
|
||||
h.Write([]byte(tag.Key))
|
||||
h.Write([]byte("\n"))
|
||||
h.Write([]byte(tag.Value))
|
||||
h.Write([]byte("\n"))
|
||||
}
|
||||
return h.Sum64()
|
||||
}
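The added `"\n"` writes delimit the measurement name and each tag key and value, so different tag sets can no longer serialize to the same byte stream; the new `TestHashID_Delimiting` test below covers the same case. A small self-contained illustration of the collision the delimiter prevents (example values only, not Telegraf code):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// sum hashes the parts joined with the given delimiter, mimicking the way
// HashID feeds the name, tag keys, and tag values into FNV-1a.
func sum(parts []string, delim string) uint64 {
	h := fnv.New64a()
	for _, p := range parts {
		h.Write([]byte(p + delim))
	}
	return h.Sum64()
}

func main() {
	a := []string{"cpu", "a", "x", "b", "y", "c", "z"} // tags a=x, b=y, c=z
	b := []string{"cpu", "a", "xbycz"}                 // tag a=xbycz
	fmt.Println(sum(a, "") == sum(b, ""))     // true: undelimited inputs collide
	fmt.Println(sum(a, "\n") == sum(b, "\n")) // false: delimited inputs differ
}
```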
|
||||
|
||||
@@ -63,6 +63,7 @@ func TestAddTagOverwrites(t *testing.T) {
|
||||
value, ok := m.GetTag("host")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "example.org", value)
|
||||
require.Equal(t, 1, len(m.TagList()))
|
||||
}
|
||||
|
||||
func TestRemoveTagNoEffectOnMissingTags(t *testing.T) {
|
||||
@@ -267,6 +268,32 @@ func TestHashID_Consistency(t *testing.T) {
|
||||
assert.Equal(t, m2.HashID(), m3.HashID())
|
||||
}
|
||||
|
||||
func TestHashID_Delimiting(t *testing.T) {
|
||||
m1, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"a": "x",
|
||||
"b": "y",
|
||||
"c": "z",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
m2, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"a": "xbycz",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
assert.NotEqual(t, m1.HashID(), m2.HashID())
|
||||
}
|
||||
|
||||
func TestSetName(t *testing.T) {
|
||||
m := baseMetric()
|
||||
m.SetName("foo")
|
||||
|
||||
@@ -4,4 +4,5 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
|
||||
)
|
||||
|
||||
@@ -246,6 +246,8 @@ func convert(in interface{}) (float64, bool) {
|
||||
return v, true
|
||||
case int64:
|
||||
return float64(v), true
|
||||
case uint64:
|
||||
return float64(v), true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ var m2, _ = metric.New("m1",
|
||||
"c": float64(4),
|
||||
"d": float64(6),
|
||||
"e": float64(200),
|
||||
"f": uint64(200),
|
||||
"ignoreme": "string",
|
||||
"andme": true,
|
||||
},
|
||||
@@ -81,6 +82,10 @@ func TestBasicStatsWithPeriod(t *testing.T) {
|
||||
"e_max": float64(200),
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
"f_count": float64(1), //f
|
||||
"f_max": float64(200),
|
||||
"f_min": float64(200),
|
||||
"f_mean": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -144,6 +149,10 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
|
||||
"e_max": float64(200),
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
"f_count": float64(1), //f
|
||||
"f_max": float64(200),
|
||||
"f_min": float64(200),
|
||||
"f_mean": float64(200),
|
||||
}
|
||||
expectedTags = map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -169,6 +178,7 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {
|
||||
"c_count": float64(2),
|
||||
"d_count": float64(2),
|
||||
"e_count": float64(1),
|
||||
"f_count": float64(1),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -194,6 +204,7 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {
|
||||
"c_min": float64(2),
|
||||
"d_min": float64(2),
|
||||
"e_min": float64(200),
|
||||
"f_min": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -219,6 +230,7 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {
|
||||
"c_max": float64(4),
|
||||
"d_max": float64(6),
|
||||
"e_max": float64(200),
|
||||
"f_max": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -244,6 +256,7 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
|
||||
"c_mean": float64(3),
|
||||
"d_mean": float64(4),
|
||||
"e_mean": float64(200),
|
||||
"f_mean": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -269,6 +282,7 @@ func TestBasicStatsWithOnlySum(t *testing.T) {
|
||||
"c_sum": float64(6),
|
||||
"d_sum": float64(8),
|
||||
"e_sum": float64(200),
|
||||
"f_sum": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -399,6 +413,8 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
|
||||
"d_min": float64(2),
|
||||
"e_max": float64(200), //e
|
||||
"e_min": float64(200),
|
||||
"f_max": float64(200), //f
|
||||
"f_min": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -450,6 +466,11 @@ func TestBasicStatsWithAllStats(t *testing.T) {
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
"e_sum": float64(200),
|
||||
"f_count": float64(1), //f
|
||||
"f_max": float64(200),
|
||||
"f_min": float64(200),
|
||||
"f_mean": float64(200),
|
||||
"f_sum": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
|
||||
@@ -107,6 +107,8 @@ func convert(in interface{}) (float64, bool) {
|
||||
return v, true
|
||||
case int64:
|
||||
return float64(v), true
|
||||
case uint64:
|
||||
return float64(v), true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ var m2, _ = metric.New("m1",
|
||||
"i": float64(1),
|
||||
"j": float64(1),
|
||||
"k": float64(200),
|
||||
"l": uint64(200),
|
||||
"ignoreme": "string",
|
||||
"andme": true,
|
||||
},
|
||||
@@ -85,6 +86,8 @@ func TestMinMaxWithPeriod(t *testing.T) {
|
||||
"j_min": float64(1),
|
||||
"k_max": float64(200),
|
||||
"k_min": float64(200),
|
||||
"l_max": float64(200),
|
||||
"l_min": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -154,6 +157,8 @@ func TestMinMaxDifferentPeriods(t *testing.T) {
|
||||
"j_min": float64(1),
|
||||
"k_max": float64(200),
|
||||
"k_min": float64(200),
|
||||
"l_max": float64(200),
|
||||
"l_min": float64(200),
|
||||
}
|
||||
expectedTags = map[string]string{
|
||||
"foo": "bar",
|
||||
|
||||
plugins/aggregators/valuecounter/README.md (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
# ValueCounter Aggregator Plugin
|
||||
|
||||
The valuecounter plugin counts the occurrence of values in fields and emits the
|
||||
counter once every 'period' seconds.
|
||||
|
||||
A typical use case for the valuecounter plugin is processing an HTTP access
log (with the logparser input) and counting the HTTP status codes.
|
||||
|
||||
The fields to be counted must be configured with the `fields` configuration
directive. When no `fields` are provided the plugin will not count any fields.
The results are emitted as fields in the format:
`originalfieldname_fieldvalue = count`.
|
||||
|
||||
Valuecounter only works on fields of type int, bool, or string. Float fields
are dropped to prevent the creation of too many fields.
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
[[aggregators.valuecounter]]
|
||||
## General Aggregator Arguments:
|
||||
## The period on which to flush & clear the aggregator.
|
||||
period = "30s"
|
||||
## If true, the original metric will be dropped by the
|
||||
## aggregator and will not get sent to the output plugins.
|
||||
drop_original = false
|
||||
## The fields for which the values will be counted
|
||||
fields = ["status"]
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
- measurement1
|
||||
- field_value1
|
||||
- field_value2
|
||||
|
||||
### Tags:
|
||||
|
||||
No tags are applied by this aggregator.
|
||||
|
||||
### Example Output:
|
||||
|
||||
Example for parsing a HTTP access log.
|
||||
|
||||
telegraf.conf:
|
||||
```
|
||||
[[inputs.logparser]]
|
||||
files = ["/tmp/tst.log"]
|
||||
[inputs.logparser.grok]
|
||||
patterns = ['%{DATA:url:tag} %{NUMBER:response:string}']
|
||||
measurement = "access"
|
||||
|
||||
[[aggregators.valuecounter]]
|
||||
namepass = ["access"]
|
||||
fields = ["response"]
|
||||
```
|
||||
|
||||
/tmp/tst.log
|
||||
```
|
||||
/some/path 200
|
||||
/some/path 401
|
||||
/some/path 200
|
||||
```
|
||||
|
||||
```
|
||||
$ telegraf --config telegraf.conf --quiet
|
||||
|
||||
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011
|
||||
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="401" 1511948755991522282
|
||||
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991531697
|
||||
|
||||
access,path=/tmp/tst.log,host=localhost.localdomain,url=/some/path response_200=2i,response_401=1i 1511948761000000000
|
||||
```
|
||||
plugins/aggregators/valuecounter/valuecounter.go (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
package valuecounter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/aggregators"
|
||||
)
|
||||
|
||||
type aggregate struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
fieldCount map[string]int
|
||||
}
|
||||
|
||||
// ValueCounter is an aggregation plugin
|
||||
type ValueCounter struct {
|
||||
cache map[uint64]aggregate
|
||||
Fields []string
|
||||
}
|
||||
|
||||
// NewValueCounter creates a new aggregation plugin which counts the occurrences
// of field values and emits the counts.
|
||||
func NewValueCounter() telegraf.Aggregator {
|
||||
vc := &ValueCounter{}
|
||||
vc.Reset()
|
||||
return vc
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## General Aggregator Arguments:
|
||||
## The period on which to flush & clear the aggregator.
|
||||
period = "30s"
|
||||
## If true, the original metric will be dropped by the
|
||||
## aggregator and will not get sent to the output plugins.
|
||||
drop_original = false
|
||||
## The fields for which the values will be counted
|
||||
fields = []
|
||||
`
|
||||
|
||||
// SampleConfig generates a sample config for the ValueCounter plugin
|
||||
func (vc *ValueCounter) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Description returns the description of the ValueCounter plugin
|
||||
func (vc *ValueCounter) Description() string {
|
||||
return "Count the occurance of values in fields."
|
||||
}
|
||||
|
||||
// Add is run on every metric which passes the plugin
|
||||
func (vc *ValueCounter) Add(in telegraf.Metric) {
|
||||
id := in.HashID()
|
||||
|
||||
// Check if the cache already has an entry for this metric, if not create it
|
||||
if _, ok := vc.cache[id]; !ok {
|
||||
a := aggregate{
|
||||
name: in.Name(),
|
||||
tags: in.Tags(),
|
||||
fieldCount: make(map[string]int),
|
||||
}
|
||||
vc.cache[id] = a
|
||||
}
|
||||
|
||||
// Check if this metric has fields which we need to count, if so increment
|
||||
// the count.
|
||||
for fk, fv := range in.Fields() {
|
||||
for _, cf := range vc.Fields {
|
||||
if fk == cf {
|
||||
// Do not process float types to prevent memory from blowing up
|
||||
switch fv.(type) {
|
||||
default:
|
||||
log.Printf("I! Valuecounter: Unsupported field type. " +
|
||||
"Must be an int, string or bool. Ignoring.")
|
||||
continue
|
||||
case uint64, int64, string, bool:
|
||||
}
|
||||
fn := fmt.Sprintf("%v_%v", fk, fv)
|
||||
vc.cache[id].fieldCount[fn]++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Push emits the counters
|
||||
func (vc *ValueCounter) Push(acc telegraf.Accumulator) {
|
||||
for _, agg := range vc.cache {
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
for field, count := range agg.fieldCount {
|
||||
fields[field] = count
|
||||
}
|
||||
|
||||
acc.AddFields(agg.name, fields, agg.tags)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the cache, executed after each push
|
||||
func (vc *ValueCounter) Reset() {
|
||||
vc.cache = make(map[uint64]aggregate)
|
||||
}
|
||||
|
||||
func init() {
|
||||
aggregators.Add("valuecounter", func() telegraf.Aggregator {
|
||||
return NewValueCounter()
|
||||
})
|
||||
}
|
||||
plugins/aggregators/valuecounter/valuecounter_test.go (new file, 126 lines)
@@ -0,0 +1,126 @@
|
||||
package valuecounter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
// Create a valuecounter with config
|
||||
func NewTestValueCounter(fields []string) telegraf.Aggregator {
|
||||
vc := &ValueCounter{
|
||||
Fields: fields,
|
||||
}
|
||||
vc.Reset()
|
||||
|
||||
return vc
|
||||
}
|
||||
|
||||
var m1, _ = metric.New("m1",
|
||||
map[string]string{"foo": "bar"},
|
||||
map[string]interface{}{
|
||||
"status": 200,
|
||||
"somefield": 20.1,
|
||||
"foobar": "bar",
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
var m2, _ = metric.New("m1",
|
||||
map[string]string{"foo": "bar"},
|
||||
map[string]interface{}{
|
||||
"status": "OK",
|
||||
"ignoreme": "string",
|
||||
"andme": true,
|
||||
"boolfield": false,
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
func BenchmarkApply(b *testing.B) {
|
||||
vc := NewTestValueCounter([]string{"status"})
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
}
|
||||
}
|
||||
|
||||
// Test basic functionality
|
||||
func TestBasic(t *testing.T) {
|
||||
vc := NewTestValueCounter([]string{"status"})
|
||||
acc := testutil.Accumulator{}
|
||||
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
vc.Add(m1)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"status_200": 2,
|
||||
"status_OK": 1,
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test with multiple fields to count
|
||||
func TestMultipleFields(t *testing.T) {
|
||||
vc := NewTestValueCounter([]string{"status", "somefield", "boolfield"})
|
||||
acc := testutil.Accumulator{}
|
||||
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
vc.Add(m2)
|
||||
vc.Add(m1)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"status_200": 2,
|
||||
"status_OK": 2,
|
||||
"boolfield_false": 2,
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test with a reset between two runs
|
||||
func TestWithReset(t *testing.T) {
|
||||
vc := NewTestValueCounter([]string{"status"})
|
||||
acc := testutil.Accumulator{}
|
||||
|
||||
vc.Add(m1)
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"status_200": 2,
|
||||
"status_OK": 1,
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
|
||||
acc.ClearMetrics()
|
||||
vc.Reset()
|
||||
|
||||
vc.Add(m2)
|
||||
vc.Add(m2)
|
||||
vc.Add(m1)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields = map[string]interface{}{
|
||||
"status_200": 1,
|
||||
"status_OK": 2,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
@@ -9,6 +9,27 @@ The metric names, to make it less complicated in querying, have replaced all `-`
|
||||
|
||||
All metrics are attempted to be cast to integers, then booleans, then strings.
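A rough sketch of that cast order, assuming a hypothetical `parseValue` helper (the plugin's actual parsing code may differ):

```go
package example

import "strconv"

// parseValue tries integer first, then boolean, and falls back to the raw
// string, matching the order described above. Illustrative only.
func parseValue(s string) interface{} {
	if i, err := strconv.ParseInt(s, 10, 64); err == nil {
		return i
	}
	if b, err := strconv.ParseBool(s); err == nil {
		return b
	}
	return s
}
```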
|
||||
|
||||
### Configuration:
|
||||
```toml
|
||||
# Read stats from aerospike server(s)
|
||||
[[inputs.aerospike]]
|
||||
## Aerospike servers to connect to (with port)
|
||||
## This plugin will query all namespaces the aerospike
|
||||
## server has configured and get stats for them.
|
||||
servers = ["localhost:3000"]
|
||||
|
||||
# username = "telegraf"
|
||||
# password = "pa$$word"
|
||||
|
||||
## Optional TLS Config
|
||||
# enable_tls = false
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
```
|
||||
|
||||
### Measurements:
|
||||
|
||||
The aerospike metrics are under two measurement names:
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package aerospike
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"log"
|
||||
"net"
|
||||
@@ -10,13 +11,24 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
tlsint "github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
|
||||
as "github.com/aerospike/aerospike-client-go"
|
||||
)
|
||||
|
||||
type Aerospike struct {
|
||||
Servers []string
|
||||
Servers []string `toml:"servers"`
|
||||
|
||||
Username string `toml:"username"`
|
||||
Password string `toml:"password"`
|
||||
|
||||
EnableTLS bool `toml:"enable_tls"`
|
||||
EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls
|
||||
tlsint.ClientConfig
|
||||
|
||||
initialized bool
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
@@ -24,6 +36,17 @@ var sampleConfig = `
|
||||
## This plugin will query all namespaces the aerospike
|
||||
## server has configured and get stats for them.
|
||||
servers = ["localhost:3000"]
|
||||
|
||||
# username = "telegraf"
|
||||
# password = "pa$$word"
|
||||
|
||||
## Optional TLS Config
|
||||
# enable_tls = false
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
`
|
||||
|
||||
func (a *Aerospike) SampleConfig() string {
|
||||
@@ -35,6 +58,18 @@ func (a *Aerospike) Description() string {
|
||||
}
|
||||
|
||||
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
|
||||
if !a.initialized {
|
||||
tlsConfig, err := a.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tlsConfig == nil && (a.EnableTLS || a.EnableSSL) {
|
||||
tlsConfig = &tls.Config{}
|
||||
}
|
||||
a.tlsConfig = tlsConfig
|
||||
a.initialized = true
|
||||
}
|
||||
|
||||
if len(a.Servers) == 0 {
|
||||
return a.gatherServer("127.0.0.1:3000", acc)
|
||||
}
|
||||
@@ -63,7 +98,11 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
|
||||
iport = 3000
|
||||
}
|
||||
|
||||
c, err := as.NewClient(host, iport)
|
||||
policy := as.NewClientPolicy()
|
||||
policy.User = a.Username
|
||||
policy.Password = a.Password
|
||||
policy.TlsConfig = a.tlsConfig
|
||||
c, err := as.NewClientWithPolicy(policy, host, iport)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -4,8 +4,10 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/aurora"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/bond"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/burrow"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
|
||||
@@ -24,6 +26,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fibaro"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
|
||||
@@ -41,6 +44,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
|
||||
@@ -49,6 +53,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
|
||||
@@ -64,6 +69,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
|
||||
@@ -91,11 +97,13 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/solr"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/syslog"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/system"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tengine"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
||||
|
||||
@@ -15,28 +15,53 @@ The following defaults are known to work with RabbitMQ:
|
||||
```toml
|
||||
# AMQP consumer plugin
|
||||
[[inputs.amqp_consumer]]
|
||||
## AMQP url
|
||||
url = "amqp://localhost:5672/influxdb"
|
||||
## AMQP exchange
|
||||
## Broker to consume from.
|
||||
## deprecated in 1.7; use the brokers option
|
||||
# url = "amqp://localhost:5672/influxdb"
|
||||
|
||||
## Brokers to consume from. If multiple brokers are specified a random broker
|
||||
## will be selected anytime a connection is established. This can be
|
||||
## helpful for load balancing when not using a dedicated load balancer.
|
||||
brokers = ["amqp://localhost:5672/influxdb"]
|
||||
|
||||
## Authentication credentials for the PLAIN auth_method.
|
||||
# username = ""
|
||||
# password = ""
|
||||
|
||||
## Exchange to declare and consume from.
|
||||
exchange = "telegraf"
|
||||
|
||||
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
|
||||
# exchange_type = "topic"
|
||||
|
||||
## If true, exchange will be passively declared.
|
||||
# exchange_passive = false
|
||||
|
||||
## Exchange durability can be either "transient" or "durable".
|
||||
# exchange_durability = "durable"
|
||||
|
||||
## Additional exchange arguments.
|
||||
# exchange_arguments = { }
|
||||
# exchange_arguments = {"hash_propery" = "timestamp"}
|
||||
|
||||
## AMQP queue name
|
||||
queue = "telegraf"
|
||||
## Binding Key
|
||||
binding_key = "#"
|
||||
|
||||
## Controls how many messages the server will try to keep on the network
|
||||
## for consumers before receiving delivery acks.
|
||||
#prefetch_count = 50
|
||||
## Maximum number of messages the server should give to the worker.
|
||||
# prefetch_count = 50
|
||||
|
||||
## Auth method. PLAIN and EXTERNAL are supported.
|
||||
## Auth method. PLAIN and EXTERNAL are supported
|
||||
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
|
||||
## described here: https://www.rabbitmq.com/plugins.html
|
||||
# auth_method = "PLAIN"
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Data format to consume.
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package amqp_consumer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -10,16 +12,23 @@ import (
|
||||
"github.com/streadway/amqp"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
)
|
||||
|
||||
// AMQPConsumer is the top level struct for this plugin
|
||||
type AMQPConsumer struct {
|
||||
URL string
|
||||
// AMQP exchange
|
||||
Exchange string
|
||||
URL string `toml:"url"` // deprecated in 1.7; use brokers
|
||||
Brokers []string `toml:"brokers"`
|
||||
Username string `toml:"username"`
|
||||
Password string `toml:"password"`
|
||||
Exchange string `toml:"exchange"`
|
||||
ExchangeType string `toml:"exchange_type"`
|
||||
ExchangeDurability string `toml:"exchange_durability"`
|
||||
ExchangePassive bool `toml:"exchange_passive"`
|
||||
ExchangeArguments map[string]string `toml:"exchange_arguments"`
|
||||
|
||||
// Queue Name
|
||||
Queue string
|
||||
// Binding Key
|
||||
@@ -31,14 +40,7 @@ type AMQPConsumer struct {
|
||||
|
||||
// AMQP Auth method
|
||||
AuthMethod string
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
tls.ClientConfig
|
||||
|
||||
parser parsers.Parser
|
||||
conn *amqp.Connection
|
||||
@@ -55,34 +57,66 @@ func (a *externalAuth) Response() string {
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultAuthMethod = "PLAIN"
|
||||
DefaultAuthMethod = "PLAIN"
|
||||
|
||||
DefaultBroker = "amqp://localhost:5672/influxdb"
|
||||
|
||||
DefaultExchangeType = "topic"
|
||||
DefaultExchangeDurability = "durable"
|
||||
|
||||
DefaultPrefetchCount = 50
|
||||
)
|
||||
|
||||
func (a *AMQPConsumer) SampleConfig() string {
|
||||
return `
|
||||
## AMQP url
|
||||
url = "amqp://localhost:5672/influxdb"
|
||||
## AMQP exchange
|
||||
## Broker to consume from.
|
||||
## deprecated in 1.7; use the brokers option
|
||||
# url = "amqp://localhost:5672/influxdb"
|
||||
|
||||
## Brokers to consume from. If multiple brokers are specified a random broker
|
||||
## will be selected anytime a connection is established. This can be
|
||||
## helpful for load balancing when not using a dedicated load balancer.
|
||||
brokers = ["amqp://localhost:5672/influxdb"]
|
||||
|
||||
## Authentication credentials for the PLAIN auth_method.
|
||||
# username = ""
|
||||
# password = ""
|
||||
|
||||
## Exchange to declare and consume from.
|
||||
exchange = "telegraf"
|
||||
|
||||
## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
|
||||
# exchange_type = "topic"
|
||||
|
||||
## If true, exchange will be passively declared.
|
||||
# exchange_passive = false
|
||||
|
||||
## Exchange durability can be either "transient" or "durable".
|
||||
# exchange_durability = "durable"
|
||||
|
||||
## Additional exchange arguments.
|
||||
# exchange_arguments = { }
|
||||
# exchange_arguments = {"hash_propery" = "timestamp"}
|
||||
|
||||
## AMQP queue name
|
||||
queue = "telegraf"
|
||||
|
||||
## Binding Key
|
||||
binding_key = "#"
|
||||
|
||||
## Maximum number of messages the server should give to the worker.
|
||||
prefetch_count = 50
|
||||
# prefetch_count = 50
|
||||
|
||||
## Auth method. PLAIN and EXTERNAL are supported
|
||||
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
|
||||
## described here: https://www.rabbitmq.com/plugins.html
|
||||
# auth_method = "PLAIN"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Data format to consume.
|
||||
@@ -108,22 +142,26 @@ func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
|
||||
|
||||
func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
|
||||
// make new tls config
|
||||
tls, err := internal.GetTLSConfig(
|
||||
a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
|
||||
tls, err := a.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// parse auth method
|
||||
var sasl []amqp.Authentication // nil by default
|
||||
|
||||
var auth []amqp.Authentication
|
||||
if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
|
||||
sasl = []amqp.Authentication{&externalAuth{}}
|
||||
auth = []amqp.Authentication{&externalAuth{}}
|
||||
} else if a.Username != "" || a.Password != "" {
|
||||
auth = []amqp.Authentication{
|
||||
&amqp.PlainAuth{
|
||||
Username: a.Username,
|
||||
Password: a.Password,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
config := amqp.Config{
|
||||
TLSClientConfig: tls,
|
||||
SASL: sasl, // if nil, it will be PLAIN
|
||||
SASL: auth, // if nil, it will be PLAIN
|
||||
}
|
||||
return &config, nil
|
||||
}
|
||||
@@ -171,28 +209,55 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
|
||||
conn, err := amqp.DialConfig(a.URL, *amqpConf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
brokers := a.Brokers
|
||||
if len(brokers) == 0 {
|
||||
brokers = []string{a.URL}
|
||||
}
|
||||
a.conn = conn
|
||||
|
||||
ch, err := conn.Channel()
|
||||
p := rand.Perm(len(brokers))
|
||||
for _, n := range p {
|
||||
broker := brokers[n]
|
||||
log.Printf("D! [amqp_consumer] connecting to %q", broker)
|
||||
conn, err := amqp.DialConfig(broker, *amqpConf)
|
||||
if err == nil {
|
||||
a.conn = conn
|
||||
log.Printf("D! [amqp_consumer] connected to %q", broker)
|
||||
break
|
||||
}
|
||||
log.Printf("D! [amqp_consumer] error connecting to %q", broker)
|
||||
}
|
||||
|
||||
if a.conn == nil {
|
||||
return nil, errors.New("could not connect to any broker")
|
||||
}
|
||||
|
||||
ch, err := a.conn.Channel()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to open a channel: %s", err)
|
||||
}
|
||||
|
||||
err = ch.ExchangeDeclare(
|
||||
a.Exchange, // name
|
||||
"topic", // type
|
||||
true, // durable
|
||||
false, // auto-deleted
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
nil, // arguments
|
||||
)
|
||||
var exchangeDurable = true
|
||||
switch a.ExchangeDurability {
|
||||
case "transient":
|
||||
exchangeDurable = false
|
||||
default:
|
||||
exchangeDurable = true
|
||||
}
|
||||
|
||||
exchangeArgs := make(amqp.Table, len(a.ExchangeArguments))
|
||||
for k, v := range a.ExchangeArguments {
|
||||
exchangeArgs[k] = v
|
||||
}
|
||||
|
||||
err = declareExchange(
|
||||
ch,
|
||||
a.Exchange,
|
||||
a.ExchangeType,
|
||||
a.ExchangePassive,
|
||||
exchangeDurable,
|
||||
exchangeArgs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q, err := ch.QueueDeclare(
|
||||
@@ -244,6 +309,42 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err
|
||||
return msgs, err
|
||||
}
|
||||
|
||||
func declareExchange(
|
||||
channel *amqp.Channel,
|
||||
exchangeName string,
|
||||
exchangeType string,
|
||||
exchangePassive bool,
|
||||
exchangeDurable bool,
|
||||
exchangeArguments amqp.Table,
|
||||
) error {
|
||||
var err error
|
||||
if exchangePassive {
|
||||
err = channel.ExchangeDeclarePassive(
|
||||
exchangeName,
|
||||
exchangeType,
|
||||
exchangeDurable,
|
||||
false, // delete when unused
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
exchangeArguments,
|
||||
)
|
||||
} else {
|
||||
err = channel.ExchangeDeclare(
|
||||
exchangeName,
|
||||
exchangeType,
|
||||
exchangeDurable,
|
||||
false, // delete when unused
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
exchangeArguments,
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error declaring exchange: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read messages from queue and add them to the Accumulator
|
||||
func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
|
||||
defer a.wg.Done()
|
||||
@@ -275,8 +376,11 @@ func (a *AMQPConsumer) Stop() {
|
||||
func init() {
|
||||
inputs.Add("amqp_consumer", func() telegraf.Input {
|
||||
return &AMQPConsumer{
|
||||
AuthMethod: DefaultAuthMethod,
|
||||
PrefetchCount: DefaultPrefetchCount,
|
||||
URL: DefaultBroker,
|
||||
AuthMethod: DefaultAuthMethod,
|
||||
ExchangeType: DefaultExchangeType,
|
||||
ExchangeDurability: DefaultExchangeDurability,
|
||||
PrefetchCount: DefaultPrefetchCount,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -21,11 +21,11 @@ Typically, the `mod_status` module is configured to expose a page at the `/serve
|
||||
## Maximum time to receive response.
|
||||
# response_timeout = "5s"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
@@ -21,14 +22,7 @@ type Apache struct {
|
||||
Username string
|
||||
Password string
|
||||
ResponseTimeout internal.Duration
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
tls.ClientConfig
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
@@ -46,11 +40,11 @@ var sampleConfig = `
|
||||
## Maximum time to receive response.
|
||||
# response_timeout = "5s"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
@@ -63,6 +57,8 @@ func (n *Apache) Description() string {
|
||||
}
|
||||
|
||||
func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
if len(n.Urls) == 0 {
|
||||
n.Urls = []string{"http://localhost/server-status?auto"}
|
||||
}
|
||||
@@ -78,8 +74,6 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||
n.client = client
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(n.Urls))
|
||||
for _, u := range n.Urls {
|
||||
addr, err := url.Parse(u)
|
||||
if err != nil {
|
||||
@@ -87,6 +81,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(addr *url.URL) {
|
||||
defer wg.Done()
|
||||
acc.AddError(n.gatherUrl(addr, acc))
|
||||
@@ -98,8 +93,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
func (n *Apache) createHttpClient() (*http.Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
|
||||
tlsCfg, err := n.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
plugins/inputs/aurora/README.md (new file, 63 lines)
File diff suppressed because one or more lines are too long
plugins/inputs/aurora/aurora.go (new file, 280 lines)
@@ -0,0 +1,280 @@
|
||||
package aurora
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type RoleType int
|
||||
|
||||
const (
|
||||
Unknown RoleType = iota
|
||||
Leader
|
||||
Follower
|
||||
)
|
||||
|
||||
func (r RoleType) String() string {
|
||||
switch r {
|
||||
case Leader:
|
||||
return "leader"
|
||||
case Follower:
|
||||
return "follower"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTimeout = 5 * time.Second
|
||||
defaultRoles = []string{"leader", "follower"}
|
||||
)
|
||||
|
||||
type Vars map[string]interface{}
|
||||
|
||||
type Aurora struct {
|
||||
Schedulers []string `toml:"schedulers"`
|
||||
Roles []string `toml:"roles"`
|
||||
Timeout internal.Duration `toml:"timeout"`
|
||||
Username string `toml:"username"`
|
||||
Password string `toml:"password"`
|
||||
tls.ClientConfig
|
||||
|
||||
client *http.Client
|
||||
urls []*url.URL
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## Schedulers are the base addresses of your Aurora Schedulers
|
||||
schedulers = ["http://127.0.0.1:8081"]
|
||||
|
||||
## Set of role types to collect metrics from.
|
||||
##
|
||||
## The scheduler roles are checked each interval by contacting the
|
||||
## scheduler nodes; zookeeper is not contacted.
|
||||
# roles = ["leader", "follower"]
|
||||
|
||||
## Timeout is the max time for total network operations.
|
||||
# timeout = "5s"
|
||||
|
||||
## Username and password are sent using HTTP Basic Auth.
|
||||
# username = "username"
|
||||
# password = "pa$$word"
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
func (a *Aurora) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (a *Aurora) Description() string {
|
||||
return "Gather metrics from Apache Aurora schedulers"
|
||||
}
|
||||
|
||||
func (a *Aurora) Gather(acc telegraf.Accumulator) error {
|
||||
if a.client == nil {
|
||||
err := a.initialize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration)
|
||||
defer cancel()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, u := range a.urls {
|
||||
wg.Add(1)
|
||||
go func(u *url.URL) {
|
||||
defer wg.Done()
|
||||
role, err := a.gatherRole(ctx, u)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("%s: %v", u, err))
|
||||
return
|
||||
}
|
||||
|
||||
if !a.roleEnabled(role) {
|
||||
return
|
||||
}
|
||||
|
||||
err = a.gatherScheduler(ctx, u, role, acc)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("%s: %v", u, err))
|
||||
}
|
||||
}(u)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Aurora) initialize() error {
|
||||
tlsCfg, err := a.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
}
|
||||
|
||||
urls := make([]*url.URL, 0, len(a.Schedulers))
|
||||
for _, s := range a.Schedulers {
|
||||
loc, err := url.Parse(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls = append(urls, loc)
|
||||
}
|
||||
|
||||
if a.Timeout.Duration < time.Second {
|
||||
a.Timeout.Duration = defaultTimeout
|
||||
}
|
||||
|
||||
if len(a.Roles) == 0 {
|
||||
a.Roles = defaultRoles
|
||||
}
|
||||
|
||||
a.client = client
|
||||
a.urls = urls
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Aurora) roleEnabled(role RoleType) bool {
|
||||
if len(a.Roles) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, v := range a.Roles {
|
||||
if role.String() == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *Aurora) gatherRole(ctx context.Context, origin *url.URL) (RoleType, error) {
|
||||
loc := *origin
|
||||
loc.Path = "leaderhealth"
|
||||
req, err := http.NewRequest("GET", loc.String(), nil)
|
||||
if err != nil {
|
||||
return Unknown, err
|
||||
}
|
||||
|
||||
if a.Username != "" || a.Password != "" {
|
||||
req.SetBasicAuth(a.Username, a.Password)
|
||||
}
|
||||
req.Header.Add("Accept", "text/plain")
|
||||
|
||||
resp, err := a.client.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return Unknown, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
return Leader, nil
|
||||
case http.StatusBadGateway:
|
||||
fallthrough
|
||||
case http.StatusServiceUnavailable:
|
||||
return Follower, nil
|
||||
default:
|
||||
return Unknown, fmt.Errorf("%v", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Aurora) gatherScheduler(
|
||||
ctx context.Context, origin *url.URL, role RoleType, acc telegraf.Accumulator,
|
||||
) error {
|
||||
loc := *origin
|
||||
loc.Path = "vars.json"
|
||||
req, err := http.NewRequest("GET", loc.String(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if a.Username != "" || a.Password != "" {
|
||||
req.SetBasicAuth(a.Username, a.Password)
|
||||
}
|
||||
req.Header.Add("Accept", "application/json")
|
||||
|
||||
resp, err := a.client.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("%v", resp.Status)
|
||||
}
|
||||
|
||||
var vars Vars
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
decoder.UseNumber()
|
||||
err = decoder.Decode(&vars)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding response: %v", err)
|
||||
}
|
||||
|
||||
var fields = make(map[string]interface{}, len(vars))
|
||||
for k, v := range vars {
|
||||
switch v := v.(type) {
|
||||
case json.Number:
|
||||
// Aurora encodes numbers the way you would write them as literals;
// use this to determine whether a value is a float or an int.
|
||||
if strings.ContainsAny(v.String(), ".eE") {
|
||||
fv, err := v.Float64()
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
continue
|
||||
}
|
||||
fields[k] = fv
|
||||
} else {
|
||||
fi, err := v.Int64()
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
continue
|
||||
}
|
||||
fields[k] = fi
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
acc.AddFields("aurora",
|
||||
fields,
|
||||
map[string]string{
|
||||
"scheduler": origin.String(),
|
||||
"role": role.String(),
|
||||
},
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("aurora", func() telegraf.Input {
|
||||
return &Aurora{}
|
||||
})
|
||||
}
|
||||
plugins/inputs/aurora/aurora_test.go (new file, 259 lines)
@@ -0,0 +1,259 @@
|
||||
package aurora
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type (
|
||||
TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request)
|
||||
CheckFunc func(t *testing.T, err error, acc *testutil.Accumulator)
|
||||
)
|
||||
|
||||
func TestAurora(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
defer ts.Close()
|
||||
|
||||
u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
plugin *Aurora
|
||||
schedulers []string
|
||||
roles []string
|
||||
leaderhealth TestHandlerFunc
|
||||
varsjson TestHandlerFunc
|
||||
check CheckFunc
|
||||
}{
|
||||
{
|
||||
name: "minimal",
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
body := `{
|
||||
"variable_scrape_events": 2958,
|
||||
"variable_scrape_events_per_sec": 1.0,
|
||||
"variable_scrape_micros_per_event": 1484.0,
|
||||
"variable_scrape_micros_total": 4401084,
|
||||
"variable_scrape_micros_total_per_sec": 1485.0
|
||||
}`
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(body))
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(acc.Metrics))
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
"aurora",
|
||||
map[string]interface{}{
|
||||
"variable_scrape_events": int64(2958),
|
||||
"variable_scrape_events_per_sec": 1.0,
|
||||
"variable_scrape_micros_per_event": 1484.0,
|
||||
"variable_scrape_micros_total": int64(4401084),
|
||||
"variable_scrape_micros_total_per_sec": 1485.0,
|
||||
},
|
||||
map[string]string{
|
||||
"scheduler": u.String(),
|
||||
"role": "leader",
|
||||
},
|
||||
)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "disabled role",
|
||||
roles: []string{"leader"},
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, acc.FirstError())
|
||||
require.Equal(t, 0, len(acc.Metrics))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no metrics available",
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("{}"))
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, acc.FirstError())
|
||||
require.Equal(t, 0, len(acc.Metrics))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "string metrics skipped",
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
body := `{
|
||||
"foo": "bar"
|
||||
}`
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(body))
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, acc.FirstError())
|
||||
require.Equal(t, 0, len(acc.Metrics))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "float64 unparseable",
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
// too large
|
||||
body := `{
|
||||
"foo": 1e309
|
||||
}`
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(body))
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.Error(t, acc.FirstError())
|
||||
require.Equal(t, 0, len(acc.Metrics))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "int64 unparseable",
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
// too large
|
||||
body := `{
|
||||
"foo": 9223372036854775808
|
||||
}`
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(body))
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.Error(t, acc.FirstError())
|
||||
require.Equal(t, 0, len(acc.Metrics))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bad json",
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
body := `{]`
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(body))
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.Error(t, acc.FirstError())
|
||||
require.Equal(t, 0, len(acc.Metrics))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "wrong status code",
|
||||
leaderhealth: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) {
|
||||
body := `{
|
||||
"value": 42
|
||||
}`
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
w.Write([]byte(body))
|
||||
},
|
||||
check: func(t *testing.T, err error, acc *testutil.Accumulator) {
|
||||
require.NoError(t, err)
|
||||
require.Error(t, acc.FirstError())
|
||||
require.Equal(t, 0, len(acc.Metrics))
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/leaderhealth":
|
||||
tt.leaderhealth(t, w, r)
|
||||
case "/vars.json":
|
||||
tt.varsjson(t, w, r)
|
||||
default:
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
plugin := &Aurora{}
|
||||
plugin.Schedulers = []string{u.String()}
|
||||
plugin.Roles = tt.roles
|
||||
err := plugin.Gather(&acc)
|
||||
tt.check(t, err, &acc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasicAuth(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
defer ts.Close()
|
||||
|
||||
u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String()))
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
username string
|
||||
password string
|
||||
}{
|
||||
{
|
||||
name: "no auth",
|
||||
},
|
||||
{
|
||||
name: "basic auth",
|
||||
username: "username",
|
||||
password: "pa$$word",
|
||||
},
|
||||
{
|
||||
name: "username only",
|
||||
username: "username",
|
||||
},
|
||||
{
|
||||
name: "password only",
|
||||
password: "pa$$word",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
username, password, _ := r.BasicAuth()
|
||||
require.Equal(t, tt.username, username)
|
||||
require.Equal(t, tt.password, password)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("{}"))
|
||||
})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
plugin := &Aurora{}
|
||||
plugin.Schedulers = []string{u.String()}
|
||||
plugin.Username = tt.username
|
||||
plugin.Password = tt.password
|
||||
err := plugin.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
100
plugins/inputs/burrow/README.md
Normal file
@@ -0,0 +1,100 @@
# Telegraf Plugin: Burrow

Collect Kafka topic, consumer and partition status
via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint).

Supported Burrow version: `1.x`

### Configuration

```
[[inputs.burrow]]
  ## Burrow API endpoints in format "schema://host:port".
  ## Default is "http://localhost:8000".
  servers = ["http://localhost:8000"]

  ## Override Burrow API prefix.
  ## Useful when Burrow is behind reverse-proxy.
  # api_prefix = "/v3/kafka"

  ## Maximum time to receive response.
  # response_timeout = "5s"

  ## Limit per-server concurrent connections.
  ## Useful in case of large number of topics or consumer groups.
  # concurrent_connections = 20

  ## Filter clusters, default is no filtering.
  ## Values can be specified as glob patterns.
  # clusters_include = []
  # clusters_exclude = []

  ## Filter consumer groups, default is no filtering.
  ## Values can be specified as glob patterns.
  # groups_include = []
  # groups_exclude = []

  ## Filter topics, default is no filtering.
  ## Values can be specified as glob patterns.
  # topics_include = []
  # topics_exclude = []

  ## Credentials for basic HTTP authentication.
  # username = ""
  # password = ""

  ## Optional SSL config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  # insecure_skip_verify = false
```

### Partition Status mappings

* `OK` = 1
* `NOT_FOUND` = 2
* `WARN` = 3
* `ERR` = 4
* `STOP` = 5
* `STALL` = 6

> Unknown status values are mapped to 0 (see the sketch below).
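This is the same mapping implemented by the `mapStatusToCode` helper in `burrow.go` further down in this change. A minimal, self-contained Go sketch of it for reference (the `statusToCode` name and the `main` usage are illustrative only):

```go
package main

import "fmt"

// statusToCode mirrors the plugin's mapStatusToCode helper: known Burrow
// status strings map to 1..6, anything else maps to 0.
func statusToCode(status string) int {
	switch status {
	case "OK":
		return 1
	case "NOT_FOUND":
		return 2
	case "WARN":
		return 3
	case "ERR":
		return 4
	case "STOP":
		return 5
	case "STALL":
		return 6
	default:
		return 0
	}
}

func main() {
	fmt.Println(statusToCode("WARN"))    // 3
	fmt.Println(statusToCode("UNKNOWN")) // 0
}
```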
### Fields

* `burrow_group` (one event per consumer group)
  - status (string, see Partition Status mappings)
  - status_code (int, `1..6`, see Partition Status mappings)
  - partition_count (int, `number of partitions`)
  - total_lag (int64, `totallag`)
  - lag (int64, `maxlag.current_lag || 0`)

* `burrow_partition` (one event per topic partition)
  - status (string, see Partition Status mappings)
  - status_code (int, `1..6`, see Partition Status mappings)
  - lag (int64, `current_lag || 0`)
  - offset (int64, `end.offset`)
  - timestamp (int64, `end.timestamp`)

* `burrow_topic` (one event per topic offset)
  - offset (int64)


### Tags

* `burrow_group`
  - cluster (string)
  - group (string)

* `burrow_partition`
  - cluster (string)
  - group (string)
  - topic (string)
  - partition (int)
  - owner (string)

* `burrow_topic`
  - cluster (string)
  - topic (string)
  - partition (int)
487
plugins/inputs/burrow/burrow.go
Normal file
@@ -0,0 +1,487 @@
|
||||
package burrow
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultBurrowPrefix = "/v3/kafka"
|
||||
defaultConcurrentConnections = 20
|
||||
defaultResponseTimeout = time.Second * 5
|
||||
defaultServer = "http://localhost:8000"
|
||||
)
|
||||
|
||||
const configSample = `
|
||||
## Burrow API endpoints in format "schema://host:port".
|
||||
## Default is "http://localhost:8000".
|
||||
servers = ["http://localhost:8000"]
|
||||
|
||||
## Override Burrow API prefix.
|
||||
## Useful when Burrow is behind reverse-proxy.
|
||||
# api_prefix = "/v3/kafka"
|
||||
|
||||
## Maximum time to receive response.
|
||||
# response_timeout = "5s"
|
||||
|
||||
## Limit per-server concurrent connections.
|
||||
## Useful in case of large number of topics or consumer groups.
|
||||
# concurrent_connections = 20
|
||||
|
||||
## Filter clusters, default is no filtering.
|
||||
## Values can be specified as glob patterns.
|
||||
# clusters_include = []
|
||||
# clusters_exclude = []
|
||||
|
||||
## Filter consumer groups, default is no filtering.
|
||||
## Values can be specified as glob patterns.
|
||||
# groups_include = []
|
||||
# groups_exclude = []
|
||||
|
||||
## Filter topics, default is no filtering.
|
||||
## Values can be specified as glob patterns.
|
||||
# topics_include = []
|
||||
# topics_exclude = []
|
||||
|
||||
## Credentials for basic HTTP authentication.
|
||||
# username = ""
|
||||
# password = ""
|
||||
|
||||
## Optional SSL config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
type (
|
||||
burrow struct {
|
||||
tls.ClientConfig
|
||||
|
||||
Servers []string
|
||||
Username string
|
||||
Password string
|
||||
ResponseTimeout internal.Duration
|
||||
ConcurrentConnections int
|
||||
|
||||
APIPrefix string `toml:"api_prefix"`
|
||||
ClustersExclude []string
|
||||
ClustersInclude []string
|
||||
GroupsExclude []string
|
||||
GroupsInclude []string
|
||||
TopicsExclude []string
|
||||
TopicsInclude []string
|
||||
|
||||
client *http.Client
|
||||
filterClusters filter.Filter
|
||||
filterGroups filter.Filter
|
||||
filterTopics filter.Filter
|
||||
}
|
||||
|
||||
// response
|
||||
apiResponse struct {
|
||||
Clusters []string `json:"clusters"`
|
||||
Groups []string `json:"consumers"`
|
||||
Topics []string `json:"topics"`
|
||||
Offsets []int64 `json:"offsets"`
|
||||
Status apiStatusResponse `json:"status"`
|
||||
}
|
||||
|
||||
// response: status field
|
||||
apiStatusResponse struct {
|
||||
Partitions []apiStatusResponseLag `json:"partitions"`
|
||||
Status string `json:"status"`
|
||||
PartitionCount int `json:"partition_count"`
|
||||
Maxlag *apiStatusResponseLag `json:"maxlag"`
|
||||
TotalLag int64 `json:"totallag"`
|
||||
}
|
||||
|
||||
// response: lag field
|
||||
apiStatusResponseLag struct {
|
||||
Topic string `json:"topic"`
|
||||
Partition int32 `json:"partition"`
|
||||
Status string `json:"status"`
|
||||
Start apiStatusResponseLagItem `json:"start"`
|
||||
End apiStatusResponseLagItem `json:"end"`
|
||||
CurrentLag int64 `json:"current_lag"`
|
||||
Owner string `json:"owner"`
|
||||
}
|
||||
|
||||
// response: lag field item
|
||||
apiStatusResponseLagItem struct {
|
||||
Offset int64 `json:"offset"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Lag int64 `json:"lag"`
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
inputs.Add("burrow", func() telegraf.Input {
|
||||
return &burrow{}
|
||||
})
|
||||
}
|
||||
|
||||
func (b *burrow) SampleConfig() string {
|
||||
return configSample
|
||||
}
|
||||
|
||||
func (b *burrow) Description() string {
|
||||
return "Collect Kafka topics and consumers status from Burrow HTTP API."
|
||||
}
|
||||
|
||||
func (b *burrow) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
if len(b.Servers) == 0 {
|
||||
b.Servers = []string{defaultServer}
|
||||
}
|
||||
|
||||
if b.client == nil {
|
||||
b.setDefaults()
|
||||
if err := b.compileGlobs(); err != nil {
|
||||
return err
|
||||
}
|
||||
c, err := b.createClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.client = c
|
||||
}
|
||||
|
||||
for _, addr := range b.Servers {
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("unable to parse address '%s': %s", addr, err))
|
||||
continue
|
||||
}
|
||||
if u.Path == "" {
|
||||
u.Path = b.APIPrefix
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(u *url.URL) {
|
||||
defer wg.Done()
|
||||
acc.AddError(b.gatherServer(u, acc))
|
||||
}(u)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *burrow) setDefaults() {
|
||||
if b.APIPrefix == "" {
|
||||
b.APIPrefix = defaultBurrowPrefix
|
||||
}
|
||||
if b.ConcurrentConnections < 1 {
|
||||
b.ConcurrentConnections = defaultConcurrentConnections
|
||||
}
|
||||
if b.ResponseTimeout.Duration < time.Second {
|
||||
b.ResponseTimeout = internal.Duration{
|
||||
Duration: defaultResponseTimeout,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *burrow) compileGlobs() error {
|
||||
var err error
|
||||
|
||||
// compile glob patterns
|
||||
b.filterClusters, err = filter.NewIncludeExcludeFilter(b.ClustersInclude, b.ClustersExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.filterGroups, err = filter.NewIncludeExcludeFilter(b.GroupsInclude, b.GroupsExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.filterTopics, err = filter.NewIncludeExcludeFilter(b.TopicsInclude, b.TopicsExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
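The filters built by `compileGlobs` give the behavior described in the README: an empty include list matches everything, and excludes are applied on top of that. A short, runnable sketch of how such a filter behaves (the cluster names are hypothetical, not taken from this change):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Include clusters matching "prod*", then drop "prod-test" explicitly.
	f, err := filter.NewIncludeExcludeFilter([]string{"prod*"}, []string{"prod-test"})
	if err != nil {
		panic(err)
	}

	fmt.Println(f.Match("prod-eu"))   // true: matches the include glob
	fmt.Println(f.Match("prod-test")) // false: excluded
	fmt.Println(f.Match("staging"))   // false: not in the include list
}
```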
|
||||
func (b *burrow) createClient() (*http.Client, error) {
|
||||
tlsCfg, err := b.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
Timeout: b.ResponseTimeout.Duration,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (b *burrow) getResponse(u *url.URL) (*apiResponse, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.Username != "" {
|
||||
req.SetBasicAuth(b.Username, b.Password)
|
||||
}
|
||||
|
||||
res, err := b.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("wrong response: %d", res.StatusCode)
|
||||
}
|
||||
|
||||
ares := &apiResponse{}
|
||||
dec := json.NewDecoder(res.Body)
|
||||
|
||||
return ares, dec.Decode(ares)
|
||||
}
|
||||
|
||||
func (b *burrow) gatherServer(src *url.URL, acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
r, err := b.getResponse(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
guard := make(chan struct{}, b.ConcurrentConnections)
|
||||
for _, cluster := range r.Clusters {
|
||||
if !b.filterClusters.Match(cluster) {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(cluster string) {
|
||||
defer wg.Done()
|
||||
|
||||
// fetch topic list
|
||||
// endpoint: <api_prefix>/(cluster)/topic
|
||||
ut := appendPathToURL(src, cluster, "topic")
|
||||
b.gatherTopics(guard, ut, cluster, acc)
|
||||
}(cluster)
|
||||
|
||||
wg.Add(1)
|
||||
go func(cluster string) {
|
||||
defer wg.Done()
|
||||
|
||||
// fetch consumer group list
|
||||
// endpoint: <api_prefix>/(cluster)/consumer
|
||||
uc := appendPathToURL(src, cluster, "consumer")
|
||||
b.gatherGroups(guard, uc, cluster, acc)
|
||||
}(cluster)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *burrow) gatherTopics(guard chan struct{}, src *url.URL, cluster string, acc telegraf.Accumulator) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
r, err := b.getResponse(src)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, topic := range r.Topics {
|
||||
if !b.filterTopics.Match(topic) {
|
||||
continue
|
||||
}
|
||||
|
||||
guard <- struct{}{}
|
||||
wg.Add(1)
|
||||
|
||||
go func(topic string) {
|
||||
defer func() {
|
||||
<-guard
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// fetch topic offsets
|
||||
// endpoint: <api_prefix>/<cluster>/topic/<topic>
|
||||
tu := appendPathToURL(src, topic)
|
||||
tr, err := b.getResponse(tu)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
|
||||
b.genTopicMetrics(tr, cluster, topic, acc)
|
||||
}(topic)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (b *burrow) genTopicMetrics(r *apiResponse, cluster, topic string, acc telegraf.Accumulator) {
|
||||
for i, offset := range r.Offsets {
|
||||
tags := map[string]string{
|
||||
"cluster": cluster,
|
||||
"topic": topic,
|
||||
"partition": strconv.Itoa(i),
|
||||
}
|
||||
|
||||
acc.AddFields(
|
||||
"burrow_topic",
|
||||
map[string]interface{}{
|
||||
"offset": offset,
|
||||
},
|
||||
tags,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *burrow) gatherGroups(guard chan struct{}, src *url.URL, cluster string, acc telegraf.Accumulator) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
r, err := b.getResponse(src)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, group := range r.Groups {
|
||||
if !b.filterGroups.Match(group) {
|
||||
continue
|
||||
}
|
||||
|
||||
guard <- struct{}{}
|
||||
wg.Add(1)
|
||||
|
||||
go func(group string) {
|
||||
defer func() {
|
||||
<-guard
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// fetch consumer group status
|
||||
// endpoint: <api_prefix>/<cluster>/consumer/<group>/lag
|
||||
gl := appendPathToURL(src, group, "lag")
|
||||
gr, err := b.getResponse(gl)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
|
||||
b.genGroupStatusMetrics(gr, cluster, group, acc)
|
||||
b.genGroupLagMetrics(gr, cluster, group, acc)
|
||||
}(group)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
|
||||
partitionCount := r.Status.PartitionCount
|
||||
if partitionCount == 0 {
|
||||
partitionCount = len(r.Status.Partitions)
|
||||
}
|
||||
|
||||
// get max timestamp and offset from partitions list
|
||||
offset := int64(0)
|
||||
timestamp := int64(0)
|
||||
for _, partition := range r.Status.Partitions {
|
||||
if partition.End.Offset > offset {
|
||||
offset = partition.End.Offset
|
||||
}
|
||||
if partition.End.Timestamp > timestamp {
|
||||
timestamp = partition.End.Timestamp
|
||||
}
|
||||
}
|
||||
|
||||
lag := int64(0)
|
||||
if r.Status.Maxlag != nil {
|
||||
lag = r.Status.Maxlag.CurrentLag
|
||||
}
|
||||
|
||||
acc.AddFields(
|
||||
"burrow_group",
|
||||
map[string]interface{}{
|
||||
"status": r.Status.Status,
|
||||
"status_code": mapStatusToCode(r.Status.Status),
|
||||
"partition_count": partitionCount,
|
||||
"total_lag": r.Status.TotalLag,
|
||||
"lag": lag,
|
||||
"offset": offset,
|
||||
"timestamp": timestamp,
|
||||
},
|
||||
map[string]string{
|
||||
"cluster": cluster,
|
||||
"group": group,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) {
|
||||
for _, partition := range r.Status.Partitions {
|
||||
acc.AddFields(
|
||||
"burrow_partition",
|
||||
map[string]interface{}{
|
||||
"status": partition.Status,
|
||||
"status_code": mapStatusToCode(partition.Status),
|
||||
"lag": partition.CurrentLag,
|
||||
"offset": partition.End.Offset,
|
||||
"timestamp": partition.End.Timestamp,
|
||||
},
|
||||
map[string]string{
|
||||
"cluster": cluster,
|
||||
"group": group,
|
||||
"topic": partition.Topic,
|
||||
"partition": strconv.FormatInt(int64(partition.Partition), 10),
|
||||
"owner": partition.Owner,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func appendPathToURL(src *url.URL, parts ...string) *url.URL {
|
||||
dst := new(url.URL)
|
||||
*dst = *src
|
||||
|
||||
for i, part := range parts {
|
||||
parts[i] = url.PathEscape(part)
|
||||
}
|
||||
|
||||
ext := strings.Join(parts, "/")
|
||||
dst.Path = fmt.Sprintf("%s/%s", src.Path, ext)
|
||||
return dst
|
||||
}
|
||||
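As a quick illustration of what `appendPathToURL` produces for the endpoints used above, here is a runnable sketch of the same copy-escape-join steps (host and names are illustrative):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	base, err := url.Parse("http://localhost:8000/v3/kafka")
	if err != nil {
		panic(err)
	}

	// Mirrors appendPathToURL: copy the base URL, escape every part, join with "/".
	parts := []string{"clustername1", "topic", "topicA"}
	for i, part := range parts {
		parts[i] = url.PathEscape(part)
	}

	dst := *base
	dst.Path = base.Path + "/" + strings.Join(parts, "/")

	fmt.Println(dst.String())
	// http://localhost:8000/v3/kafka/clustername1/topic/topicA
}
```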
|
||||
func mapStatusToCode(src string) int {
|
||||
switch src {
|
||||
case "OK":
|
||||
return 1
|
||||
case "NOT_FOUND":
|
||||
return 2
|
||||
case "WARN":
|
||||
return 3
|
||||
case "ERR":
|
||||
return 4
|
||||
case "STOP":
|
||||
return 5
|
||||
case "STALL":
|
||||
return 6
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
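`gatherTopics` and `gatherGroups` above cap the number of in-flight requests per server with a buffered channel used as a semaphore (`guard <- struct{}{}` before each request, `<-guard` when it finishes). A minimal standalone sketch of that pattern, with a hypothetical `fetch` function standing in for the HTTP call:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// fetch stands in for one HTTP request to Burrow; it is purely illustrative.
func fetch(topic string) {
	time.Sleep(100 * time.Millisecond)
	fmt.Println("fetched", topic)
}

func main() {
	const concurrentConnections = 2 // mirrors the plugin's concurrent_connections option

	topics := []string{"topicA", "topicB", "topicC", "topicD", "topicE"}

	var wg sync.WaitGroup
	guard := make(chan struct{}, concurrentConnections) // semaphore: at most N requests in flight

	for _, topic := range topics {
		guard <- struct{}{} // acquire a slot (blocks while N requests are in flight)
		wg.Add(1)

		go func(topic string) {
			defer func() {
				<-guard // release the slot
				wg.Done()
			}()
			fetch(topic)
		}(topic)
	}

	wg.Wait()
}
```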
285
plugins/inputs/burrow/burrow_test.go
Normal file
@@ -0,0 +1,285 @@
|
||||
package burrow
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// remap uri to json file, eg: /v3/kafka -> ./testdata/v3_kafka.json
|
||||
func getResponseJSON(requestURI string) ([]byte, int) {
|
||||
uri := strings.TrimLeft(requestURI, "/")
|
||||
mappedFile := strings.Replace(uri, "/", "_", -1)
|
||||
jsonFile := fmt.Sprintf("./testdata/%s.json", mappedFile)
|
||||
|
||||
code := 200
|
||||
_, err := os.Stat(jsonFile)
|
||||
if err != nil {
|
||||
code = 404
|
||||
jsonFile = "./testdata/error.json"
|
||||
}
|
||||
|
||||
// respond with file
|
||||
b, _ := ioutil.ReadFile(jsonFile)
|
||||
return b, code
|
||||
}
|
||||
|
||||
// return mocked HTTP server
|
||||
func getHTTPServer() *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
body, code := getResponseJSON(r.RequestURI)
|
||||
w.WriteHeader(code)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(body)
|
||||
}))
|
||||
}
|
||||
|
||||
// return mocked HTTP server with basic auth
|
||||
func getHTTPServerBasicAuth() *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
|
||||
|
||||
username, password, authOK := r.BasicAuth()
|
||||
if !authOK {
|
||||
http.Error(w, "Not authorized", 401)
|
||||
return
|
||||
}
|
||||
|
||||
if username != "test" && password != "test" {
|
||||
http.Error(w, "Not authorized", 401)
|
||||
return
|
||||
}
|
||||
|
||||
// ok, continue
|
||||
body, code := getResponseJSON(r.RequestURI)
|
||||
w.WriteHeader(code)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(body)
|
||||
}))
|
||||
}
|
||||
|
||||
// test burrow_topic measurement
|
||||
func TestBurrowTopic(t *testing.T) {
|
||||
s := getHTTPServer()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{Servers: []string{s.URL}}
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
fields := []map[string]interface{}{
|
||||
// topicA
|
||||
{"offset": int64(459178195)},
|
||||
{"offset": int64(459178022)},
|
||||
{"offset": int64(456491598)},
|
||||
}
|
||||
tags := []map[string]string{
|
||||
// topicA
|
||||
{"cluster": "clustername1", "topic": "topicA", "partition": "0"},
|
||||
{"cluster": "clustername1", "topic": "topicA", "partition": "1"},
|
||||
{"cluster": "clustername1", "topic": "topicA", "partition": "2"},
|
||||
}
|
||||
|
||||
require.Empty(t, acc.Errors)
|
||||
require.Equal(t, true, acc.HasMeasurement("burrow_topic"))
|
||||
for i := 0; i < len(fields); i++ {
|
||||
acc.AssertContainsTaggedFields(t, "burrow_topic", fields[i], tags[i])
|
||||
}
|
||||
}
|
||||
|
||||
// test burrow_partition measurement
|
||||
func TestBurrowPartition(t *testing.T) {
|
||||
s := getHTTPServer()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s.URL},
|
||||
}
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
fields := []map[string]interface{}{
|
||||
{
|
||||
"status": "OK",
|
||||
"status_code": 1,
|
||||
"lag": int64(0),
|
||||
"offset": int64(431323195),
|
||||
"timestamp": int64(1515609490008),
|
||||
},
|
||||
{
|
||||
"status": "OK",
|
||||
"status_code": 1,
|
||||
"lag": int64(0),
|
||||
"offset": int64(431322962),
|
||||
"timestamp": int64(1515609490008),
|
||||
},
|
||||
{
|
||||
"status": "OK",
|
||||
"status_code": 1,
|
||||
"lag": int64(0),
|
||||
"offset": int64(428636563),
|
||||
"timestamp": int64(1515609490008),
|
||||
},
|
||||
}
|
||||
tags := []map[string]string{
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0", "owner": "kafka1"},
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1", "owner": "kafka2"},
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2", "owner": "kafka3"},
|
||||
}
|
||||
|
||||
require.Empty(t, acc.Errors)
|
||||
require.Equal(t, true, acc.HasMeasurement("burrow_partition"))
|
||||
|
||||
for i := 0; i < len(fields); i++ {
|
||||
acc.AssertContainsTaggedFields(t, "burrow_partition", fields[i], tags[i])
|
||||
}
|
||||
}
|
||||
|
||||
// burrow_group
|
||||
func TestBurrowGroup(t *testing.T) {
|
||||
s := getHTTPServer()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s.URL},
|
||||
}
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
fields := []map[string]interface{}{
|
||||
{
|
||||
"status": "OK",
|
||||
"status_code": 1,
|
||||
"partition_count": 3,
|
||||
"total_lag": int64(0),
|
||||
"lag": int64(0),
|
||||
"offset": int64(431323195),
|
||||
"timestamp": int64(1515609490008),
|
||||
},
|
||||
}
|
||||
|
||||
tags := []map[string]string{
|
||||
{"cluster": "clustername1", "group": "group1"},
|
||||
}
|
||||
|
||||
require.Empty(t, acc.Errors)
|
||||
require.Equal(t, true, acc.HasMeasurement("burrow_group"))
|
||||
|
||||
for i := 0; i < len(fields); i++ {
|
||||
acc.AssertContainsTaggedFields(t, "burrow_group", fields[i], tags[i])
|
||||
}
|
||||
}
|
||||
|
||||
// collect from multiple servers
|
||||
func TestMultipleServers(t *testing.T) {
|
||||
s1 := getHTTPServer()
|
||||
defer s1.Close()
|
||||
|
||||
s2 := getHTTPServer()
|
||||
defer s2.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s1.URL, s2.URL},
|
||||
}
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
require.Exactly(t, 14, len(acc.Metrics))
|
||||
require.Empty(t, acc.Errors)
|
||||
}
|
||||
|
||||
// collect multiple times
|
||||
func TestMultipleRuns(t *testing.T) {
|
||||
s := getHTTPServer()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s.URL},
|
||||
}
|
||||
for i := 0; i < 4; i++ {
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
require.Exactly(t, 7, len(acc.Metrics))
|
||||
require.Empty(t, acc.Errors)
|
||||
}
|
||||
}
|
||||
|
||||
// collect from http basic auth server
|
||||
func TestBasicAuthConfig(t *testing.T) {
|
||||
s := getHTTPServerBasicAuth()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s.URL},
|
||||
Username: "test",
|
||||
Password: "test",
|
||||
}
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
require.Exactly(t, 7, len(acc.Metrics))
|
||||
require.Empty(t, acc.Errors)
|
||||
}
|
||||
|
||||
// collect from whitelisted clusters
|
||||
func TestFilterClusters(t *testing.T) {
|
||||
s := getHTTPServer()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s.URL},
|
||||
ClustersInclude: []string{"wrongname*"}, // clustername1 -> no match
|
||||
}
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
// no match by cluster
|
||||
require.Exactly(t, 0, len(acc.Metrics))
|
||||
require.Empty(t, acc.Errors)
|
||||
}
|
||||
|
||||
// collect from whitelisted groups
|
||||
func TestFilterGroups(t *testing.T) {
|
||||
s := getHTTPServer()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s.URL},
|
||||
GroupsInclude: []string{"group?"}, // group1 -> match
|
||||
TopicsExclude: []string{"*"}, // exclude all
|
||||
}
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
require.Exactly(t, 4, len(acc.Metrics))
|
||||
require.Empty(t, acc.Errors)
|
||||
}
|
||||
|
||||
// collect from whitelisted topics
|
||||
func TestFilterTopics(t *testing.T) {
|
||||
s := getHTTPServer()
|
||||
defer s.Close()
|
||||
|
||||
plugin := &burrow{
|
||||
Servers: []string{s.URL},
|
||||
TopicsInclude: []string{"topic?"}, // topicA -> match
|
||||
GroupsExclude: []string{"*"}, // exclude all
|
||||
}
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
plugin.Gather(acc)
|
||||
|
||||
require.Exactly(t, 3, len(acc.Metrics))
|
||||
require.Empty(t, acc.Errors)
|
||||
}
|
||||
11
plugins/inputs/burrow/testdata/error.json
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"error": true,
|
||||
"message": "Detailed error message",
|
||||
"request": {
|
||||
"uri": "/invalid/request",
|
||||
"host": "responding.host.example.com",
|
||||
"cluster": "",
|
||||
"group": "",
|
||||
"topic": ""
|
||||
}
|
||||
}
|
||||
11
plugins/inputs/burrow/testdata/v3_kafka.json
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"error": false,
|
||||
"message": "cluster list returned",
|
||||
"clusters": [
|
||||
"clustername1"
|
||||
],
|
||||
"request": {
|
||||
"url": "/v3/kafka",
|
||||
"host": "example.com"
|
||||
}
|
||||
}
|
||||
11
plugins/inputs/burrow/testdata/v3_kafka_clustername1_consumer.json
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"error": false,
|
||||
"message": "consumer list returned",
|
||||
"consumers": [
|
||||
"group1"
|
||||
],
|
||||
"request": {
|
||||
"url": "/v3/kafka/clustername1/consumer",
|
||||
"host": "example.com"
|
||||
}
|
||||
}
|
||||
90
plugins/inputs/burrow/testdata/v3_kafka_clustername1_consumer_group1_lag.json
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
{
|
||||
"error": false,
|
||||
"message": "consumer status returned",
|
||||
"status": {
|
||||
"cluster": "clustername1",
|
||||
"group": "group1",
|
||||
"status": "OK",
|
||||
"complete": 1,
|
||||
"partitions": [
|
||||
{
|
||||
"topic": "topicA",
|
||||
"partition": 0,
|
||||
"owner": "kafka1",
|
||||
"status": "OK",
|
||||
"start": {
|
||||
"offset": 431323195,
|
||||
"timestamp": 1515609445004,
|
||||
"lag": 0
|
||||
},
|
||||
"end": {
|
||||
"offset": 431323195,
|
||||
"timestamp": 1515609490008,
|
||||
"lag": 0
|
||||
},
|
||||
"current_lag": 0,
|
||||
"complete": 1
|
||||
},
|
||||
{
|
||||
"topic": "topicA",
|
||||
"partition": 1,
|
||||
"owner": "kafka2",
|
||||
"status": "OK",
|
||||
"start": {
|
||||
"offset": 431322962,
|
||||
"timestamp": 1515609445004,
|
||||
"lag": 0
|
||||
},
|
||||
"end": {
|
||||
"offset": 431322962,
|
||||
"timestamp": 1515609490008,
|
||||
"lag": 0
|
||||
},
|
||||
"current_lag": 0,
|
||||
"complete": 1
|
||||
},
|
||||
{
|
||||
"topic": "topicA",
|
||||
"partition": 2,
|
||||
"owner": "kafka3",
|
||||
"status": "OK",
|
||||
"start": {
|
||||
"offset": 428636563,
|
||||
"timestamp": 1515609445004,
|
||||
"lag": 0
|
||||
},
|
||||
"end": {
|
||||
"offset": 428636563,
|
||||
"timestamp": 1515609490008,
|
||||
"lag": 0
|
||||
},
|
||||
"current_lag": 0,
|
||||
"complete": 1
|
||||
}
|
||||
],
|
||||
"partition_count": 3,
|
||||
"maxlag": {
|
||||
"topic": "topicA",
|
||||
"partition": 0,
|
||||
"owner": "kafka",
|
||||
"status": "OK",
|
||||
"start": {
|
||||
"offset": 431323195,
|
||||
"timestamp": 1515609445004,
|
||||
"lag": 0
|
||||
},
|
||||
"end": {
|
||||
"offset": 431323195,
|
||||
"timestamp": 1515609490008,
|
||||
"lag": 0
|
||||
},
|
||||
"current_lag": 0,
|
||||
"complete": 1
|
||||
},
|
||||
"totallag": 0
|
||||
},
|
||||
"request": {
|
||||
"url": "/v3/kafka/clustername1/consumer/group1/lag",
|
||||
"host": "example.com"
|
||||
}
|
||||
}
|
||||
11
plugins/inputs/burrow/testdata/v3_kafka_clustername1_topic.json
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"error": false,
|
||||
"message": "topic list returned",
|
||||
"topics": [
|
||||
"topicA"
|
||||
],
|
||||
"request": {
|
||||
"url": "/v3/kafka/clustername1/topic",
|
||||
"host": "example.com"
|
||||
}
|
||||
}
|
||||
13
plugins/inputs/burrow/testdata/v3_kafka_clustername1_topic_topicA.json
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"error": false,
|
||||
"message": "topic offsets returned",
|
||||
"offsets": [
|
||||
459178195,
|
||||
459178022,
|
||||
456491598
|
||||
],
|
||||
"request": {
|
||||
"url": "/v3/kafka/clustername1/topic/topicA",
|
||||
"host": "example.com"
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,8 @@
|
||||
|
||||
# Telegraf plugin: Cassandra
|
||||
|
||||
### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.
|
||||
|
||||
#### Plugin arguments:
|
||||
- **context** string: Context root used for jolokia url
|
||||
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type JolokiaClient interface {
|
||||
@@ -60,7 +62,8 @@ func newCassandraMetric(host string, metric string,
|
||||
func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
|
||||
mname string) {
|
||||
for k, v := range values {
|
||||
if v != nil {
|
||||
switch v.(type) {
|
||||
case int64, float64, string, bool:
|
||||
fields[mname+"_"+k] = v
|
||||
}
|
||||
}
|
||||
@@ -117,7 +120,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
|
||||
switch t := values.(type) {
|
||||
case map[string]interface{}:
|
||||
addValuesAsFields(values.(map[string]interface{}), fields, attribute)
|
||||
case interface{}:
|
||||
case int64, float64, string, bool:
|
||||
fields[attribute] = t
|
||||
}
|
||||
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
|
||||
@@ -172,7 +175,11 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
|
||||
|
||||
func (j *Cassandra) SampleConfig() string {
|
||||
return `
|
||||
# This is the context root used to compose the jolokia url
|
||||
## DEPRECATED: The cassandra plugin has been deprecated. Please use the
|
||||
## jolokia2 plugin instead.
|
||||
##
|
||||
## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
|
||||
|
||||
context = "/jolokia/read"
|
||||
## List of cassandra servers exposing jolokia read service
|
||||
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
|
||||
@@ -256,6 +263,16 @@ func parseServerTokens(server string) map[string]string {
|
||||
return serverTokens
|
||||
}
|
||||
|
||||
func (c *Cassandra) Start(acc telegraf.Accumulator) error {
|
||||
log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " +
|
||||
"Please use the jolokia2 plugin instead. " +
|
||||
"https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cassandra) Stop() {
|
||||
}
|
||||
|
||||
func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
|
||||
context := c.Context
|
||||
servers := c.Servers
|
||||
|
||||
@@ -3,14 +3,15 @@
|
||||
package conntrack
|
||||
|
||||
import (
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func restoreDflts(savedFiles, savedDirs []string) {
|
||||
@@ -39,6 +40,7 @@ func TestDefaultsUsed(t *testing.T) {
|
||||
|
||||
tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count")
|
||||
assert.NoError(t, err)
|
||||
defer os.Remove(tmpFile.Name())
|
||||
|
||||
dfltDirs = []string{tmpdir}
|
||||
fname := path.Base(tmpFile.Name())
|
||||
@@ -63,6 +65,8 @@ func TestConfigsUsed(t *testing.T) {
|
||||
cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count")
|
||||
maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max")
|
||||
assert.NoError(t, err)
|
||||
defer os.Remove(cntFile.Name())
|
||||
defer os.Remove(maxFile.Name())
|
||||
|
||||
dfltDirs = []string{tmpdir}
|
||||
cntFname := path.Base(cntFile.Name())
|
||||
|
||||
@@ -27,12 +27,17 @@ report those stats already using StatsD protocol if needed.
|
||||
## Data centre to query the health checks from
|
||||
# datacentre = ""
|
||||
|
||||
## SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
|
||||
## Consul checks' tag splitting
|
||||
# When tags are formatted like "key:value" with ":" as a delimiter then
|
||||
# they will be split and reported as proper key:value in Telegraf
|
||||
# tag_delimiter = ":"
|
||||
```
|
||||
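The tag splitting added in `GatherHealthCheck` below is a `strings.SplitN(tag, delimiter, 2)` per service tag, so only the first delimiter splits and the remainder stays in the value. A small sketch using the tag values from this change's test data:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	delimiter := ":"
	serviceTags := []string{"bar", "env:sandbox", "tagkey:value:stillvalue"}

	tags := map[string]string{}
	for _, tag := range serviceTags {
		// SplitN with a limit of 2 keeps any further delimiters inside the value.
		parts := strings.SplitN(tag, delimiter, 2)
		if len(parts) == 2 {
			tags[parts[0]] = parts[1]
		} else {
			tags[tag] = tag
		}
	}

	fmt.Println(tags)
	// map[bar:bar env:sandbox tagkey:value:stillvalue]
}
```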
|
||||
### Metrics:
|
||||
|
||||
@@ -2,10 +2,11 @@ package consul
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
@@ -16,15 +17,8 @@ type Consul struct {
|
||||
Username string
|
||||
Password string
|
||||
Datacentre string
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
tls.ClientConfig
|
||||
TagDelimiter string
|
||||
|
||||
// client used to connect to the Consul agent
|
||||
client *api.Client
|
||||
@@ -47,12 +41,17 @@ var sampleConfig = `
|
||||
## Data centre to query the health checks from
|
||||
# datacentre = ""
|
||||
|
||||
## SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
|
||||
## Consul checks' tag splitting
|
||||
# When tags are formatted like "key:value" with ":" as a delimiter then
|
||||
# they will be split and reported as proper key:value in Telegraf
|
||||
# tag_delimiter = ":"
|
||||
`
|
||||
|
||||
func (c *Consul) Description() string {
|
||||
@@ -89,14 +88,12 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
|
||||
}
|
||||
}
|
||||
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
c.SSLCert, c.SSLKey, c.SSLCA, c.InsecureSkipVerify)
|
||||
|
||||
tlsCfg, err := c.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.HttpClient.Transport = &http.Transport{
|
||||
config.Transport = &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
}
|
||||
|
||||
@@ -121,6 +118,19 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt
|
||||
tags["service_name"] = check.ServiceName
|
||||
tags["check_id"] = check.CheckID
|
||||
|
||||
for _, checkTag := range check.ServiceTags {
|
||||
if c.TagDelimiter != "" {
|
||||
splittedTag := strings.SplitN(checkTag, c.TagDelimiter, 2)
|
||||
if len(splittedTag) == 1 {
|
||||
tags[checkTag] = checkTag
|
||||
} else if len(splittedTag) == 2 {
|
||||
tags[splittedTag[0]] = splittedTag[1]
|
||||
}
|
||||
} else {
|
||||
tags[checkTag] = checkTag
|
||||
}
|
||||
}
|
||||
|
||||
acc.AddFields("consul_health_checks", record, tags)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ var sampleChecks = []*api.HealthCheck{
|
||||
Output: "OK",
|
||||
ServiceID: "foo.123",
|
||||
ServiceName: "foo",
|
||||
ServiceTags: []string{"bar", "env:sandbox", "tagkey:value:stillvalue"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -31,9 +32,12 @@ func TestGatherHealthCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
expectedTags := map[string]string{
|
||||
"node": "localhost",
|
||||
"service_name": "foo",
|
||||
"check_id": "foo.health123",
|
||||
"node": "localhost",
|
||||
"service_name": "foo",
|
||||
"check_id": "foo.health123",
|
||||
"bar": "bar",
|
||||
"env:sandbox": "env:sandbox",
|
||||
"tagkey:value:stillvalue": "tagkey:value:stillvalue",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
@@ -43,3 +47,32 @@ func TestGatherHealthCheck(t *testing.T) {
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
func TestGatherHealthCheckWithDelimitedTags(t *testing.T) {
|
||||
expectedFields := map[string]interface{}{
|
||||
"check_name": "foo.health",
|
||||
"status": "passing",
|
||||
"passing": 1,
|
||||
"critical": 0,
|
||||
"warning": 0,
|
||||
"service_id": "foo.123",
|
||||
}
|
||||
|
||||
expectedTags := map[string]string{
|
||||
"node": "localhost",
|
||||
"service_name": "foo",
|
||||
"check_id": "foo.health123",
|
||||
"bar": "bar",
|
||||
"env": "sandbox",
|
||||
"tagkey": "value:stillvalue",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
consul := &Consul{
|
||||
TagDelimiter: ":",
|
||||
}
|
||||
consul.GatherHealthCheck(&acc, sampleChecks)
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
@@ -54,10 +54,10 @@ your database.
|
||||
## Maximum time to receive a response from cluster.
|
||||
# response_timeout = "20s"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
|
||||
|
||||
@@ -9,26 +9,11 @@ import (
|
||||
"testing"
|
||||
|
||||
jwt "github.com/dgrijalva/jwt-go"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
privateKey = `-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
|
||||
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
|
||||
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
|
||||
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
|
||||
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
|
||||
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
|
||||
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
|
||||
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
|
||||
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
|
||||
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
|
||||
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
|
||||
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
|
||||
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
|
||||
-----END RSA PRIVATE KEY-----`
|
||||
)
|
||||
var privateKey = testutil.NewPKI("../../../testutil/pki").ReadServerKey()
|
||||
|
||||
func TestLogin(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
@@ -56,11 +57,7 @@ type DCOS struct {
|
||||
|
||||
MaxConnections int
|
||||
ResponseTimeout internal.Duration
|
||||
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
|
||||
tls.ClientConfig
|
||||
|
||||
client Client
|
||||
creds Credentials
|
||||
@@ -107,10 +104,10 @@ var sampleConfig = `
|
||||
## Maximum time to receive a response from cluster.
|
||||
# response_timeout = "20s"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
|
||||
@@ -351,8 +348,7 @@ func (d *DCOS) init() error {
|
||||
}
|
||||
|
||||
func (d *DCOS) createClient() (Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
|
||||
tlsCfg, err := d.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
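This hunk and the Consul one above follow the same migration: the hand-rolled `ssl_*` fields are replaced by embedding `tls.ClientConfig` and calling its `TLSConfig()` method. A condensed sketch of the resulting pattern (the `myPlugin` type is illustrative, not part of this change):

```go
package myplugin

import (
	"net/http"

	"github.com/influxdata/telegraf/internal/tls"
)

// myPlugin embeds tls.ClientConfig, which supplies the tls_ca, tls_cert,
// tls_key and insecure_skip_verify options shown in the sample configs.
type myPlugin struct {
	tls.ClientConfig
}

func (p *myPlugin) createClient() (*http.Client, error) {
	tlsCfg, err := p.ClientConfig.TLSConfig() // may be nil when no TLS options are configured
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
	}, nil
}
```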
@@ -3,9 +3,8 @@
|
||||
The DNS plugin gathers DNS query times in milliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\))
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
# Sample Config:
|
||||
```toml
|
||||
# Query given DNS server and gives statistics
|
||||
[[inputs.dns_query]]
|
||||
## servers to query
|
||||
servers = ["8.8.8.8"]
|
||||
@@ -27,29 +26,20 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi
|
||||
# timeout = 2
|
||||
```
|
||||
|
||||
For querying more than one record type make:
|
||||
### Metrics:
|
||||
|
||||
- dns_query
|
||||
- tags:
|
||||
- server
|
||||
- domain
|
||||
- record_type
|
||||
- result
|
||||
- fields:
|
||||
- query_time_ms (float)
|
||||
- result_code (int, success = 0, timeout = 1, error = 2)
|
||||
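The `result` tag and `result_code` field are assigned by the new `setResult` helper in the code change below; a timeout is recognized as a `*net.OpError` whose `Timeout()` method returns true. A self-contained sketch of that classification (the `timeoutErr` type only simulates such an error):

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// timeoutErr simulates the kind of error the DNS client returns on a timeout.
type timeoutErr struct{}

func (timeoutErr) Error() string   { return "i/o timeout" }
func (timeoutErr) Timeout() bool   { return true }
func (timeoutErr) Temporary() bool { return true }

// classify mirrors the result / result_code mapping documented above.
func classify(err error) (string, uint64) {
	if err == nil {
		return "success", 0
	}
	if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
		return "timeout", 1
	}
	return "error", 2
}

func main() {
	fmt.Println(classify(nil))                                         // success 0
	fmt.Println(classify(&net.OpError{Op: "read", Err: timeoutErr{}})) // timeout 1
	fmt.Println(classify(errors.New("SERVFAIL")))                      // error 2
}
```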
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
[[inputs.dns_query]]
|
||||
domains = ["mjasion.pl"]
|
||||
servers = ["8.8.8.8", "8.8.4.4"]
|
||||
record_type = "A"
|
||||
|
||||
[[inputs.dns_query]]
|
||||
domains = ["mjasion.pl"]
|
||||
servers = ["8.8.8.8", "8.8.4.4"]
|
||||
record_type = "MX"
|
||||
```
|
||||
|
||||
### Tags:
|
||||
|
||||
- server
|
||||
- domain
|
||||
- record_type
|
||||
|
||||
### Example output:
|
||||
|
||||
```
|
||||
telegraf --input-filter dns_query --test
|
||||
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
|
||||
dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
|
||||
```
|
||||
|
||||
@@ -13,6 +13,14 @@ import (
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type ResultType uint64
|
||||
|
||||
const (
|
||||
Success ResultType = 0
|
||||
Timeout = 1
|
||||
Error = 2
|
||||
)
|
||||
|
||||
type DnsQuery struct {
|
||||
// Domains or subdomains to query
|
||||
Domains []string
|
||||
@@ -66,15 +74,24 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
for _, domain := range d.Domains {
|
||||
for _, server := range d.Servers {
|
||||
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
|
||||
acc.AddError(err)
|
||||
fields := make(map[string]interface{}, 2)
|
||||
tags := map[string]string{
|
||||
"server": server,
|
||||
"domain": domain,
|
||||
"record_type": d.RecordType,
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{"query_time_ms": dnsQueryTime}
|
||||
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
|
||||
if err == nil {
|
||||
setResult(Success, fields, tags)
|
||||
fields["query_time_ms"] = dnsQueryTime
|
||||
} else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
|
||||
setResult(Timeout, fields, tags)
|
||||
} else if err != nil {
|
||||
setResult(Error, fields, tags)
|
||||
acc.AddError(err)
|
||||
}
|
||||
|
||||
acc.AddFields("dns_query", fields, tags)
|
||||
}
|
||||
}
|
||||
@@ -165,6 +182,21 @@ func (d *DnsQuery) parseRecordType() (uint16, error) {
|
||||
return recordType, error
|
||||
}
|
||||
|
||||
func setResult(result ResultType, fields map[string]interface{}, tags map[string]string) {
|
||||
var tag string
|
||||
switch result {
|
||||
case Success:
|
||||
tag = "success"
|
||||
case Timeout:
|
||||
tag = "timeout"
|
||||
case Error:
|
||||
tag = "error"
|
||||
}
|
||||
|
||||
tags["result"] = tag
|
||||
fields["result_code"] = uint64(result)
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("dns_query", func() telegraf.Input {
|
||||
return &DnsQuery{}
|
||||
|
||||
@@ -4,12 +4,11 @@ The docker plugin uses the Docker Engine API to gather metrics on running
|
||||
docker containers.
|
||||
|
||||
The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
|
||||
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
|
||||
[Library Documentation](https://godoc.org/github.com/moby/moby/client)
|
||||
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/).
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
```toml
|
||||
# Read metrics about docker containers
|
||||
[[inputs.docker]]
|
||||
## Docker Endpoint
|
||||
@@ -54,11 +53,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
|
||||
## Which environment variables should we use as a tag
|
||||
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
@@ -76,15 +75,58 @@ may prefer to exclude them:
|
||||
```
|
||||
|
||||
|
||||
### Measurements & Fields:
|
||||
### Metrics:
|
||||
|
||||
Every effort was made to preserve the names based on the JSON response from the
|
||||
docker API.
|
||||
|
||||
Note that the docker_container_cpu metric may appear multiple times per collection,
|
||||
based on the availability of per-cpu stats on your system.
|
||||
- docker
|
||||
- tags:
|
||||
- unit
|
||||
- engine_host
|
||||
- server_version
|
||||
- fields:
|
||||
- n_used_file_descriptors
|
||||
- n_cpus
|
||||
- n_containers
|
||||
- n_containers_running
|
||||
- n_containers_stopped
|
||||
- n_containers_paused
|
||||
- n_images
|
||||
- n_goroutines
|
||||
- n_listener_events
|
||||
- memory_total
|
||||
- pool_blocksize
|
||||
|
||||
- docker_data
|
||||
- tags:
|
||||
- unit
|
||||
- engine_host
|
||||
- server_version
|
||||
- fields:
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
|
||||
- docker_metadata
|
||||
- tags:
|
||||
- unit
|
||||
- engine_host
|
||||
- server_version
|
||||
- fields:
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
|
||||
- docker_container_mem
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- fields:
|
||||
- total_pgmafault
|
||||
- cache
|
||||
- mapped_file
|
||||
@@ -119,7 +161,17 @@ based on the availability of per-cpu stats on your system.
|
||||
- failcnt
|
||||
- limit
|
||||
- container_id
|
||||
|
||||
- docker_container_cpu
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- cpu
|
||||
- fields:
|
||||
- throttling_periods
|
||||
- throttling_throttled_periods
|
||||
- throttling_throttled_time
|
||||
@@ -129,7 +181,17 @@ based on the availability of per-cpu stats on your system.
|
||||
- usage_total
|
||||
- usage_percent
|
||||
- container_id
|
||||
|
||||
- docker_container_net
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- network
|
||||
- fields:
|
||||
- rx_dropped
|
||||
- rx_bytes
|
||||
- rx_errors
|
||||
@@ -139,7 +201,17 @@ based on the availability of per-cpu stats on your system.
|
||||
- tx_errors
|
||||
- tx_bytes
|
||||
- container_id
|
||||
|
||||
- docker_container_blkio
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- device
|
||||
- fields:
|
||||
- io_service_bytes_recursive_async
|
||||
- io_service_bytes_recursive_read
|
||||
- io_service_bytes_recursive_sync
|
||||
@@ -151,118 +223,54 @@ based on the availability of per-cpu stats on your system.
|
||||
- io_serviced_recursive_total
|
||||
- io_serviced_recursive_write
|
||||
- container_id
|
||||
- docker_
|
||||
- n_used_file_descriptors
|
||||
- n_cpus
|
||||
- n_containers
|
||||
- n_containers_running
|
||||
- n_containers_stopped
|
||||
- n_containers_paused
|
||||
- n_images
|
||||
- n_goroutines
|
||||
- n_listener_events
|
||||
- memory_total
|
||||
- pool_blocksize
|
||||
- docker_data
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
- docker_metadata
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
- docker_swarm
|
||||
- tasks_desired
|
||||
- tasks_running
|
||||
|
||||
|
||||
### Tags:
|
||||
#### Docker Engine tags
|
||||
- docker (memory_total)
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker (pool_blocksize)
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker_data
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker_metadata
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
|
||||
#### Docker Container tags
|
||||
- Tags on all containers:
|
||||
- docker_container_health
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- docker_container_mem specific:
|
||||
- docker_container_cpu specific:
|
||||
- cpu
|
||||
- docker_container_net specific:
|
||||
- network
|
||||
- docker_container_blkio specific:
|
||||
- device
|
||||
- docker_container_health specific:
|
||||
- health_status
|
||||
- failing_streak
|
||||
- docker_swarm specific:
|
||||
- fields:
|
||||
- health_status (string)
|
||||
- failing_streak (integer)
|
||||
|
||||
- docker_container_status
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- fields:
|
||||
- oomkilled (boolean)
|
||||
- pid (integer)
|
||||
- exitcode (integer)
|
||||
- started_at (integer)
|
||||
- finished_at (integer)
|
||||
|
||||
- docker_swarm
|
||||
- tags:
|
||||
- service_id
|
||||
- service_name
|
||||
- service_mode
|
||||
- fields:
|
||||
- tasks_desired
|
||||
- tasks_running
|
||||
|
||||
### Example Output:

```
% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978
> docker n_containers_running=7i 1456926671065383978
> docker n_containers_stopped=3i 1456926671065383978
> docker n_containers_paused=0i 1456926671065383978
> docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978
> docker,unit=bytes memory_total=18935443456i 1456926671065383978
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
> docker_container_mem,
container_image=spotify/kafka,container_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
> docker_container_cpu,
container_image=spotify/kafka,container_name=kafka,cpu=cpu-total \
throttling_periods=0i,throttling_throttled_periods=0i,\
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
usage_total=6628208865i 1453409536840126713
> docker_container_cpu,
container_image=spotify/kafka,container_name=kafka,cpu=cpu0 \
usage_total=6628208865i 1453409536840126713
> docker_container_net,\
container_image=spotify/kafka,container_name=kafka,network=eth0 \
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
> docker_container_blkio,
container_image=spotify/kafka,container_name=kafka,device=8:0 \
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
>docker_swarm,
service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
tasks_desired=3,tasks_running=3 1508968160000000000
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
```
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
version string
|
||||
version = "1.24"
|
||||
defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
|
||||
)
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
tlsint "github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
@@ -43,10 +44,7 @@ type Docker struct {
|
||||
ContainerStateInclude []string `toml:"container_state_include"`
|
||||
ContainerStateExclude []string `toml:"container_state_exclude"`
|
||||
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
InsecureSkipVerify bool
|
||||
tlsint.ClientConfig
|
||||
|
||||
newEnvClient func() (Client, error)
|
||||
newClient func(string, *tls.Config) (Client, error)
|
||||
@@ -54,6 +52,7 @@ type Docker struct {
|
||||
client Client
|
||||
httpClient *http.Client
|
||||
engine_host string
|
||||
serverVersion string
|
||||
filtersCreated bool
|
||||
labelFilter filter.Filter
|
||||
containerFilter filter.Filter
|
||||
@@ -114,11 +113,11 @@ var sampleConfig = `
|
||||
docker_label_include = []
|
||||
docker_label_exclude = []
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
@@ -135,8 +134,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
if d.Endpoint == "ENV" {
|
||||
c, err = d.newEnvClient()
|
||||
} else {
|
||||
tlsConfig, err := internal.GetTLSConfig(
|
||||
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
|
||||
tlsConfig, err := d.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
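This hunk, and most of the plugin hunks that follow, apply the same migration: the per-plugin `ssl_ca`/`ssl_cert`/`ssl_key` options and the call to `internal.GetTLSConfig` give way to embedding `ClientConfig` from `internal/tls` and calling its `TLSConfig()` method. A rough sketch of what a migrated HTTP-based input ends up looking like; the plugin name and fields below are invented for illustration, only the `ClientConfig` embedding and the `TLSConfig()` call mirror the diff:

```go
package example

import (
	"net/http"
	"time"

	"github.com/influxdata/telegraf/internal/tls"
)

// ExamplePlugin is a hypothetical input that talks to an HTTPS endpoint.
// Embedding tls.ClientConfig gives it the tls_ca, tls_cert, tls_key and
// insecure_skip_verify options without declaring them by hand.
type ExamplePlugin struct {
	URL string `toml:"url"`

	tls.ClientConfig

	client *http.Client
}

// initClient builds the HTTP client once, using the embedded TLS settings.
func (e *ExamplePlugin) initClient() error {
	tlsCfg, err := e.ClientConfig.TLSConfig()
	if err != nil {
		return err
	}
	e.client = &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
		Timeout:   5 * time.Second,
	}
	return nil
}
```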
@@ -301,7 +299,14 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.engine_host = info.Name
|
||||
d.serverVersion = info.ServerVersion
|
||||
|
||||
tags := map[string]string{
|
||||
"engine_host": d.engine_host,
|
||||
"server_version": d.serverVersion,
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"n_cpus": info.NCPU,
|
||||
@@ -315,15 +320,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
"n_listener_events": info.NEventsListener,
|
||||
}
|
||||
// Add metrics
|
||||
acc.AddFields("docker",
|
||||
fields,
|
||||
map[string]string{"engine_host": d.engine_host},
|
||||
now)
|
||||
acc.AddFields("docker", fields, tags, now)
|
||||
acc.AddFields("docker",
|
||||
map[string]interface{}{"memory_total": info.MemTotal},
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
tags,
|
||||
now)
|
||||
// Get storage metrics
|
||||
tags["unit"] = "bytes"
|
||||
for _, rawData := range info.DriverStatus {
|
||||
// Try to convert string to int (bytes)
|
||||
value, err := parseSize(rawData[1])
|
||||
@@ -335,7 +338,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
// pool blocksize
|
||||
acc.AddFields("docker",
|
||||
map[string]interface{}{"pool_blocksize": value},
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
tags,
|
||||
now)
|
||||
} else if strings.HasPrefix(name, "data_space_") {
|
||||
// data space
|
||||
@@ -348,16 +351,10 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
}
|
||||
}
|
||||
if len(dataFields) > 0 {
|
||||
acc.AddFields("docker_data",
|
||||
dataFields,
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
now)
|
||||
acc.AddFields("docker_data", dataFields, tags, now)
|
||||
}
|
||||
if len(metadataFields) > 0 {
|
||||
acc.AddFields("docker_metadata",
|
||||
metadataFields,
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
now)
|
||||
acc.AddFields("docker_metadata", metadataFields, tags, now)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -388,6 +385,7 @@ func (d *Docker) gatherContainer(
|
||||
|
||||
tags := map[string]string{
|
||||
"engine_host": d.engine_host,
|
||||
"server_version": d.serverVersion,
|
||||
"container_name": cname,
|
||||
"container_image": imageName,
|
||||
"container_version": imageVersion,
|
||||
@@ -437,6 +435,23 @@ func (d *Docker) gatherContainer(
|
||||
}
|
||||
}
|
||||
}
|
||||
if info.State != nil {
|
||||
tags["container_status"] = info.State.Status
|
||||
statefields := map[string]interface{}{
|
||||
"oomkilled": info.State.OOMKilled,
|
||||
"pid": info.State.Pid,
|
||||
"exitcode": info.State.ExitCode,
|
||||
}
|
||||
container_time, err := time.Parse(time.RFC3339, info.State.StartedAt)
|
||||
if err == nil && !container_time.IsZero() {
|
||||
statefields["started_at"] = container_time.UnixNano()
|
||||
}
|
||||
container_time, err = time.Parse(time.RFC3339, info.State.FinishedAt)
|
||||
if err == nil && !container_time.IsZero() {
|
||||
statefields["finished_at"] = container_time.UnixNano()
|
||||
}
|
||||
acc.AddFields("docker_container_status", statefields, tags, time.Now())
|
||||
}
|
||||
|
||||
if info.State.Health != nil {
|
||||
healthfields := map[string]interface{}{
|
||||
|
||||
@@ -615,7 +615,10 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"n_images": int(199),
|
||||
"n_goroutines": int(39),
|
||||
},
|
||||
map[string]string{"engine_host": "absol"},
|
||||
map[string]string{
|
||||
"engine_host": "absol",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -626,8 +629,9 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"available": int64(36530000000),
|
||||
},
|
||||
map[string]string{
|
||||
"unit": "bytes",
|
||||
"engine_host": "absol",
|
||||
"unit": "bytes",
|
||||
"engine_host": "absol",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -648,6 +652,8 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"ENVVAR7": "ENVVAR8=ENVVAR9",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
"container_status": "running",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -670,6 +676,8 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"ENVVAR7": "ENVVAR8=ENVVAR9",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
"container_status": "running",
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -55,6 +55,7 @@ var info = types.Info{
|
||||
DockerRootDir: "/var/lib/docker",
|
||||
NoProxy: "",
|
||||
BridgeNfIP6tables: true,
|
||||
ServerVersion: "17.09.0-ce",
|
||||
}
|
||||
|
||||
var containerList = []types.Container{
|
||||
@@ -483,6 +484,12 @@ var containerInspect = types.ContainerJSON{
|
||||
FailingStreak: 1,
|
||||
Status: "Unhealthy",
|
||||
},
|
||||
Status: "running",
|
||||
OOMKilled: false,
|
||||
Pid: 1234,
|
||||
ExitCode: 0,
|
||||
StartedAt: "2018-06-14T05:48:53.266176036Z",
|
||||
FinishedAt: "0001-01-01T00:00:00Z",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -38,11 +38,11 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
|
||||
## "breaker". Per default, all stats are gathered.
|
||||
# node_stats = ["jvm", "http"]
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
|
||||
@@ -3,16 +3,18 @@ package elasticsearch
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
)
|
||||
|
||||
// mask for masking username/password from error messages
|
||||
@@ -38,17 +40,20 @@ type nodeStat struct {
|
||||
}
|
||||
|
||||
type clusterHealth struct {
|
||||
ClusterName string `json:"cluster_name"`
|
||||
Status string `json:"status"`
|
||||
TimedOut bool `json:"timed_out"`
|
||||
NumberOfNodes int `json:"number_of_nodes"`
|
||||
NumberOfDataNodes int `json:"number_of_data_nodes"`
|
||||
ActivePrimaryShards int `json:"active_primary_shards"`
|
||||
ActiveShards int `json:"active_shards"`
|
||||
RelocatingShards int `json:"relocating_shards"`
|
||||
InitializingShards int `json:"initializing_shards"`
|
||||
UnassignedShards int `json:"unassigned_shards"`
|
||||
Indices map[string]indexHealth `json:"indices"`
|
||||
ClusterName string `json:"cluster_name"`
|
||||
Status string `json:"status"`
|
||||
TimedOut bool `json:"timed_out"`
|
||||
NumberOfNodes int `json:"number_of_nodes"`
|
||||
NumberOfDataNodes int `json:"number_of_data_nodes"`
|
||||
ActivePrimaryShards int `json:"active_primary_shards"`
|
||||
ActiveShards int `json:"active_shards"`
|
||||
RelocatingShards int `json:"relocating_shards"`
|
||||
InitializingShards int `json:"initializing_shards"`
|
||||
UnassignedShards int `json:"unassigned_shards"`
|
||||
NumberOfPendingTasks int `json:"number_of_pending_tasks"`
|
||||
TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"`
|
||||
ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
|
||||
Indices map[string]indexHealth `json:"indices"`
|
||||
}
|
||||
|
||||
type indexHealth struct {
|
||||
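The hunk above adds three cluster-health keys that Elasticsearch reports (`number_of_pending_tasks`, `task_max_waiting_in_queue_millis`, `active_shards_percent_as_number`). A small stand-alone sketch of the JSON-to-struct mapping those tags express; the trimmed struct here is illustrative, not the plugin's own type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// miniClusterHealth keeps only the newly added keys to show the mapping.
type miniClusterHealth struct {
	NumberOfPendingTasks        int     `json:"number_of_pending_tasks"`
	TaskMaxWaitingInQueueMillis int     `json:"task_max_waiting_in_queue_millis"`
	ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
}

func main() {
	// Sample payload shaped like the test fixtures further down.
	payload := []byte(`{
		"number_of_pending_tasks": 0,
		"task_max_waiting_in_queue_millis": 0,
		"active_shards_percent_as_number": 100.0
	}`)

	var h miniClusterHealth
	if err := json.Unmarshal(payload, &h); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", h)
}
```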
@@ -108,28 +113,26 @@ const sampleConfig = `
|
||||
## "breaker". Per default, all stats are gathered.
|
||||
# node_stats = ["jvm", "http"]
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
|
||||
// servers.
|
||||
type Elasticsearch struct {
|
||||
Local bool
|
||||
Servers []string
|
||||
HttpTimeout internal.Duration
|
||||
ClusterHealth bool
|
||||
ClusterHealthLevel string
|
||||
ClusterStats bool
|
||||
NodeStats []string
|
||||
SSLCA string `toml:"ssl_ca"` // Path to CA file
|
||||
SSLCert string `toml:"ssl_cert"` // Path to host cert file
|
||||
SSLKey string `toml:"ssl_key"` // Path to cert key file
|
||||
InsecureSkipVerify bool // Use SSL but skip chain & host verification
|
||||
Local bool
|
||||
Servers []string
|
||||
HttpTimeout internal.Duration
|
||||
ClusterHealth bool
|
||||
ClusterHealthLevel string
|
||||
ClusterStats bool
|
||||
NodeStats []string
|
||||
tls.ClientConfig
|
||||
|
||||
client *http.Client
|
||||
catMasterResponseTokens []string
|
||||
isMaster bool
|
||||
@@ -227,7 +230,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(e.SSLCert, e.SSLKey, e.SSLCA, e.InsecureSkipVerify)
|
||||
tlsCfg, err := e.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -323,16 +326,19 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator
|
||||
}
|
||||
measurementTime := time.Now()
|
||||
clusterFields := map[string]interface{}{
|
||||
"status": healthStats.Status,
|
||||
"status_code": mapHealthStatusToCode(healthStats.Status),
|
||||
"timed_out": healthStats.TimedOut,
|
||||
"number_of_nodes": healthStats.NumberOfNodes,
|
||||
"number_of_data_nodes": healthStats.NumberOfDataNodes,
|
||||
"active_primary_shards": healthStats.ActivePrimaryShards,
|
||||
"active_shards": healthStats.ActiveShards,
|
||||
"relocating_shards": healthStats.RelocatingShards,
|
||||
"initializing_shards": healthStats.InitializingShards,
|
||||
"unassigned_shards": healthStats.UnassignedShards,
|
||||
"status": healthStats.Status,
|
||||
"status_code": mapHealthStatusToCode(healthStats.Status),
|
||||
"timed_out": healthStats.TimedOut,
|
||||
"number_of_nodes": healthStats.NumberOfNodes,
|
||||
"number_of_data_nodes": healthStats.NumberOfDataNodes,
|
||||
"active_primary_shards": healthStats.ActivePrimaryShards,
|
||||
"active_shards": healthStats.ActiveShards,
|
||||
"relocating_shards": healthStats.RelocatingShards,
|
||||
"initializing_shards": healthStats.InitializingShards,
|
||||
"unassigned_shards": healthStats.UnassignedShards,
|
||||
"number_of_pending_tasks": healthStats.NumberOfPendingTasks,
|
||||
"task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis,
|
||||
"active_shards_percent_as_number": healthStats.ActiveShardsPercentAsNumber,
|
||||
}
|
||||
acc.AddFields(
|
||||
"elasticsearch_cluster_health",
|
||||
|
||||
@@ -11,7 +11,10 @@ const clusterHealthResponse = `
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0
|
||||
"unassigned_shards": 0,
|
||||
"number_of_pending_tasks": 0,
|
||||
"task_max_waiting_in_queue_millis": 0,
|
||||
"active_shards_percent_as_number": 100.0
|
||||
}
|
||||
`
|
||||
|
||||
@@ -27,6 +30,9 @@ const clusterHealthResponseWithIndices = `
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
"number_of_pending_tasks": 0,
|
||||
"task_max_waiting_in_queue_millis": 0,
|
||||
"active_shards_percent_as_number": 100.0,
|
||||
"indices": {
|
||||
"v1": {
|
||||
"status": "green",
|
||||
@@ -53,16 +59,19 @@ const clusterHealthResponseWithIndices = `
|
||||
`
|
||||
|
||||
var clusterHealthExpected = map[string]interface{}{
|
||||
"status": "green",
|
||||
"status_code": 1,
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
"status": "green",
|
||||
"status_code": 1,
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
"number_of_pending_tasks": 0,
|
||||
"task_max_waiting_in_queue_millis": 0,
|
||||
"active_shards_percent_as_number": 100.0,
|
||||
}
|
||||
|
||||
var v1IndexExpected = map[string]interface{}{
|
||||
|
||||
55
plugins/inputs/fibaro/README.md
Normal file
@@ -0,0 +1,55 @@
# Fibaro Input Plugin

The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather values of connected devices.
Those values can be true (1) or false (0) for switches, a percentage for dimmers, a temperature, etc.
(a short normalization sketch follows the example output below).

### Configuration:

```toml
# Read devices value(s) from a Fibaro controller
[[inputs.fibaro]]
  ## Required Fibaro controller address/hostname.
  ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
  url = "http://<controller>:80"

  ## Required credentials to access the API (http://<controller>/api/<component>)
  username = "<username>"
  password = "<password>"

  ## Amount of time allowed to complete the HTTP request
  # timeout = "5s"
```

### Metrics:

- fibaro
  - tags:
    - deviceId (device id)
    - section (section name)
    - room (room name)
    - name (device name)
    - type (device type)
  - fields:
    - energy (float, when available from device)
    - power (float, when available from device)
    - value (float)
    - value2 (float, when available from device)

### Example Output:

```
fibaro,deviceId=9,host=vm1,name=Fenêtre\ haute,room=Cuisine,section=Cuisine,type=com.fibaro.FGRM222 energy=2.04,power=0.7,value=99,value2=99 1529996807000000000
fibaro,deviceId=10,host=vm1,name=Escaliers,room=Dégagement,section=Pièces\ communes,type=com.fibaro.binarySwitch value=0 1529996807000000000
fibaro,deviceId=13,host=vm1,name=Porte\ fenêtre,room=Salon,section=Pièces\ communes,type=com.fibaro.FGRM222 energy=4.33,power=0.7,value=99,value2=99 1529996807000000000
fibaro,deviceId=21,host=vm1,name=LED\ îlot\ central,room=Cuisine,section=Cuisine,type=com.fibaro.binarySwitch value=0 1529996807000000000
fibaro,deviceId=90,host=vm1,name=Détérioration,room=Entrée,section=Pièces\ communes,type=com.fibaro.heatDetector value=0 1529996807000000000
fibaro,deviceId=163,host=vm1,name=Température,room=Cave,section=Cave,type=com.fibaro.temperatureSensor value=21.62 1529996807000000000
fibaro,deviceId=191,host=vm1,name=Présence,room=Garde-manger,section=Cuisine,type=com.fibaro.FGMS001 value=1 1529996807000000000
fibaro,deviceId=193,host=vm1,name=Luminosité,room=Garde-manger,section=Cuisine,type=com.fibaro.lightSensor value=195 1529996807000000000
fibaro,deviceId=200,host=vm1,name=Etat,room=Garage,section=Extérieur,type=com.fibaro.doorSensor value=0 1529996807000000000
fibaro,deviceId=220,host=vm1,name=CO2\ (ppm),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=536 1529996807000000000
fibaro,deviceId=221,host=vm1,name=Humidité\ (%),room=Salon,section=Pièces\ communes,type=com.fibaro.humiditySensor value=61 1529996807000000000
fibaro,deviceId=222,host=vm1,name=Pression\ (mb),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=1013.7 1529996807000000000
fibaro,deviceId=223,host=vm1,name=Bruit\ (db),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=44 1529996807000000000
```
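As the plugin description above notes, the Fibaro API reports device values as strings ("true"/"false" for switches, numeric strings otherwise) and the plugin exports them as floats. A stand-alone sketch of that normalization, mirroring the conversion done in fibaro.go below; the helper name is invented:

```go
package main

import (
	"fmt"
	"strconv"
)

// normalizeValue mirrors the conversion the Fibaro input applies to the
// "value" property: booleans become 1/0, numeric strings become floats,
// anything else is skipped. The helper itself is illustrative only.
func normalizeValue(raw string) (float64, bool) {
	switch raw {
	case "true":
		raw = "1"
	case "false":
		raw = "0"
	}
	f, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		return 0, false
	}
	return f, true
}

func main() {
	for _, raw := range []string{"true", "false", "22.80", "n/a"} {
		if v, ok := normalizeValue(raw); ok {
			fmt.Printf("%q -> %v\n", raw, v)
		} else {
			fmt.Printf("%q -> skipped\n", raw)
		}
	}
}
```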
217
plugins/inputs/fibaro/fibaro.go
Normal file
@@ -0,0 +1,217 @@
|
||||
package fibaro
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
const sampleConfig = `
|
||||
## Required Fibaro controller address/hostname.
|
||||
## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
|
||||
url = "http://<controller>:80"
|
||||
|
||||
## Required credentials to access the API (http://<controller>/api/<component>)
|
||||
username = "<username>"
|
||||
password = "<password>"
|
||||
|
||||
## Amount of time allowed to complete the HTTP request
|
||||
# timeout = "5s"
|
||||
`
|
||||
|
||||
const description = "Read devices value(s) from a Fibaro controller"
|
||||
|
||||
// Fibaro contains connection information
|
||||
type Fibaro struct {
|
||||
URL string
|
||||
|
||||
// HTTP Basic Auth Credentials
|
||||
Username string
|
||||
Password string
|
||||
|
||||
Timeout internal.Duration
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// LinkRoomsSections links rooms to sections
|
||||
type LinkRoomsSections struct {
|
||||
Name string
|
||||
SectionID uint16
|
||||
}
|
||||
|
||||
// Sections contains section information
|
||||
type Sections struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// Rooms contains room information
|
||||
type Rooms struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
SectionID uint16 `json:"sectionID"`
|
||||
}
|
||||
|
||||
// Devices contains device information
|
||||
type Devices struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
RoomID uint16 `json:"roomID"`
|
||||
Type string `json:"type"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Properties struct {
|
||||
Dead interface{} `json:"dead"`
|
||||
Energy interface{} `json:"energy"`
|
||||
Power interface{} `json:"power"`
|
||||
Value interface{} `json:"value"`
|
||||
Value2 interface{} `json:"value2"`
|
||||
} `json:"properties"`
|
||||
}
|
||||
|
||||
// Description returns a string explaining the purpose of this plugin
|
||||
func (f *Fibaro) Description() string { return description }
|
||||
|
||||
// SampleConfig returns text explaining how plugin should be configured
|
||||
func (f *Fibaro) SampleConfig() string { return sampleConfig }
|
||||
|
||||
// getJSON connects, authenticates and reads JSON payload returned by Fibaro box
|
||||
func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
|
||||
var requestURL = f.URL + path
|
||||
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.SetBasicAuth(f.Username, f.Password)
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||
requestURL,
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
err = dec.Decode(&dataStruct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gather fetches all required information to output metrics
|
||||
func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
if f.client == nil {
|
||||
f.client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
Timeout: f.Timeout.Duration,
|
||||
}
|
||||
}
|
||||
|
||||
var tmpSections []Sections
|
||||
err := f.getJSON("/api/sections", &tmpSections)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sections := map[uint16]string{}
|
||||
for _, v := range tmpSections {
|
||||
sections[v.ID] = v.Name
|
||||
}
|
||||
|
||||
var tmpRooms []Rooms
|
||||
err = f.getJSON("/api/rooms", &tmpRooms)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rooms := map[uint16]LinkRoomsSections{}
|
||||
for _, v := range tmpRooms {
|
||||
rooms[v.ID] = LinkRoomsSections{Name: v.Name, SectionID: v.SectionID}
|
||||
}
|
||||
|
||||
var devices []Devices
|
||||
err = f.getJSON("/api/devices", &devices)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, device := range devices {
|
||||
// skip device in some cases
|
||||
if device.RoomID == 0 ||
|
||||
device.Enabled == false ||
|
||||
device.Properties.Dead == "true" ||
|
||||
device.Type == "com.fibaro.zwaveDevice" {
|
||||
continue
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"deviceId": strconv.FormatUint(uint64(device.ID), 10),
|
||||
"section": sections[rooms[device.RoomID].SectionID],
|
||||
"room": rooms[device.RoomID].Name,
|
||||
"name": device.Name,
|
||||
"type": device.Type,
|
||||
}
|
||||
fields := make(map[string]interface{})
|
||||
|
||||
if device.Properties.Energy != nil {
|
||||
if fValue, err := strconv.ParseFloat(device.Properties.Energy.(string), 64); err == nil {
|
||||
fields["energy"] = fValue
|
||||
}
|
||||
}
|
||||
|
||||
if device.Properties.Power != nil {
|
||||
if fValue, err := strconv.ParseFloat(device.Properties.Power.(string), 64); err == nil {
|
||||
fields["power"] = fValue
|
||||
}
|
||||
}
|
||||
|
||||
if device.Properties.Value != nil {
|
||||
value := device.Properties.Value
|
||||
switch value {
|
||||
case "true":
|
||||
value = "1"
|
||||
case "false":
|
||||
value = "0"
|
||||
}
|
||||
|
||||
if fValue, err := strconv.ParseFloat(value.(string), 64); err == nil {
|
||||
fields["value"] = fValue
|
||||
}
|
||||
}
|
||||
|
||||
if device.Properties.Value2 != nil {
|
||||
if fValue, err := strconv.ParseFloat(device.Properties.Value2.(string), 64); err == nil {
|
||||
fields["value2"] = fValue
|
||||
}
|
||||
}
|
||||
|
||||
acc.AddFields("fibaro", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("fibaro", func() telegraf.Input {
|
||||
return &Fibaro{}
|
||||
})
|
||||
}
|
||||
206
plugins/inputs/fibaro/fibaro_test.go
Normal file
@@ -0,0 +1,206 @@
|
||||
package fibaro
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const sectionsJSON = `
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Section 1",
|
||||
"sortOrder": 1
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Section 2",
|
||||
"sortOrder": 2
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Section 3",
|
||||
"sortOrder": 3
|
||||
}
|
||||
]`
|
||||
|
||||
const roomsJSON = `
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Room 1",
|
||||
"sectionID": 1,
|
||||
"icon": "room_1",
|
||||
"sortOrder": 1
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Room 2",
|
||||
"sectionID": 2,
|
||||
"icon": "room_2",
|
||||
"sortOrder": 2
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Room 3",
|
||||
"sectionID": 3,
|
||||
"icon": "room_3",
|
||||
"sortOrder": 3
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"name": "Room 4",
|
||||
"sectionID": 3,
|
||||
"icon": "room_4",
|
||||
"sortOrder": 4
|
||||
}
|
||||
]`
|
||||
|
||||
const devicesJSON = `
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Device 1",
|
||||
"roomID": 1,
|
||||
"type": "com.fibaro.binarySwitch",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "false"
|
||||
},
|
||||
"sortOrder": 1
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Device 2",
|
||||
"roomID": 2,
|
||||
"type": "com.fibaro.binarySwitch",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "true"
|
||||
},
|
||||
"sortOrder": 2
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Device 3",
|
||||
"roomID": 3,
|
||||
"type": "com.fibaro.multilevelSwitch",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "67"
|
||||
},
|
||||
"sortOrder": 3
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"name": "Device 4",
|
||||
"roomID": 4,
|
||||
"type": "com.fibaro.temperatureSensor",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "22.80"
|
||||
},
|
||||
"sortOrder": 4
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"name": "Device 5",
|
||||
"roomID": 4,
|
||||
"type": "com.fibaro.FGRM222",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"energy": "4.33",
|
||||
"power": "0.7",
|
||||
"dead": "false",
|
||||
"value": "50",
|
||||
"value2": "75"
|
||||
},
|
||||
"sortOrder": 5
|
||||
}
|
||||
]`
|
||||
|
||||
// TestUnauthorized validates that 401 (wrong credentials) is managed properly
|
||||
func TestUnauthorized(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := Fibaro{
|
||||
URL: ts.URL,
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
client: &http.Client{},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(a.Gather)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// TestJSONSuccess validates that module works OK with valid JSON payloads
|
||||
func TestJSONSuccess(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
payload := ""
|
||||
switch r.URL.Path {
|
||||
case "/api/sections":
|
||||
payload = sectionsJSON
|
||||
case "/api/rooms":
|
||||
payload = roomsJSON
|
||||
case "/api/devices":
|
||||
payload = devicesJSON
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, payload)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := Fibaro{
|
||||
URL: ts.URL,
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
client: &http.Client{},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(a.Gather)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Gather should add 5 metrics
|
||||
assert.Equal(t, uint64(5), acc.NMetrics())
|
||||
|
||||
// Ensure fields / values are correct - Device 1
|
||||
tags := map[string]string{"deviceId": "1", "section": "Section 1", "room": "Room 1", "name": "Device 1", "type": "com.fibaro.binarySwitch"}
|
||||
fields := map[string]interface{}{"value": float64(0)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 2
|
||||
tags = map[string]string{"deviceId": "2", "section": "Section 2", "room": "Room 2", "name": "Device 2", "type": "com.fibaro.binarySwitch"}
|
||||
fields = map[string]interface{}{"value": float64(1)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 3
|
||||
tags = map[string]string{"deviceId": "3", "section": "Section 3", "room": "Room 3", "name": "Device 3", "type": "com.fibaro.multilevelSwitch"}
|
||||
fields = map[string]interface{}{"value": float64(67)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 4
|
||||
tags = map[string]string{"deviceId": "4", "section": "Section 3", "room": "Room 4", "name": "Device 4", "type": "com.fibaro.temperatureSensor"}
|
||||
fields = map[string]interface{}{"value": float64(22.8)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 5
|
||||
tags = map[string]string{"deviceId": "5", "section": "Section 3", "room": "Room 4", "name": "Device 5", "type": "com.fibaro.FGRM222"}
|
||||
fields = map[string]interface{}{"energy": float64(4.33), "power": float64(0.7), "value": float64(50), "value2": float64(75)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
}
|
||||
@@ -44,11 +44,11 @@ Note: if namespace end point specified metrics array will be ignored for that ca
|
||||
username = ""
|
||||
password = ""
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
@@ -35,15 +35,7 @@ type GrayLog struct {
|
||||
Metrics []string
|
||||
Username string
|
||||
Password string
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
tls.ClientConfig
|
||||
|
||||
client HTTPClient
|
||||
}
|
||||
@@ -111,11 +103,11 @@ var sampleConfig = `
|
||||
username = ""
|
||||
password = ""
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
@@ -132,8 +124,7 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
if h.client.HTTPClient() == nil {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
|
||||
tlsCfg, err := h.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -28,11 +28,11 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management.
|
||||
## field names.
|
||||
# keep_field_names = false
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
|
||||
@@ -14,27 +14,18 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
|
||||
|
||||
type haproxy struct {
|
||||
Servers []string
|
||||
Servers []string
|
||||
KeepFieldNames bool
|
||||
tls.ClientConfig
|
||||
|
||||
client *http.Client
|
||||
|
||||
KeepFieldNames bool
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
@@ -56,11 +47,11 @@ var sampleConfig = `
|
||||
## field names.
|
||||
# keep_field_names = false
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
@@ -144,8 +135,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
if g.client == nil {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
g.SSLCert, g.SSLKey, g.SSLCA, g.InsecureSkipVerify)
|
||||
tlsCfg, err := g.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -23,11 +23,11 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The
|
||||
# username = "username"
|
||||
# password = "pa$$word"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Amount of time allowed to complete the HTTP request
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
)
|
||||
@@ -24,15 +25,7 @@ type HTTP struct {
|
||||
// HTTP Basic Auth Credentials
|
||||
Username string
|
||||
Password string
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
tls.ClientConfig
|
||||
|
||||
Timeout internal.Duration
|
||||
|
||||
@@ -62,11 +55,11 @@ var sampleConfig = `
|
||||
## Tag all metrics with the url
|
||||
# tag_url = true
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Amount of time allowed to complete the HTTP request
|
||||
@@ -97,8 +90,7 @@ func (h *HTTP) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
if h.client == nil {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
|
||||
tlsCfg, err := h.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -5,9 +5,7 @@ import (
|
||||
"compress/gzip"
|
||||
"crypto/subtle"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -16,6 +14,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
tlsint "github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||
"github.com/influxdata/telegraf/selfstat"
|
||||
@@ -43,9 +42,7 @@ type HTTPListener struct {
|
||||
MaxLineSize int
|
||||
Port int
|
||||
|
||||
TlsAllowedCacerts []string
|
||||
TlsCert string
|
||||
TlsKey string
|
||||
tlsint.ServerConfig
|
||||
|
||||
BasicUsername string
|
||||
BasicPassword string
|
||||
@@ -158,7 +155,10 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
|
||||
h.acc = acc
|
||||
h.pool = NewPool(200, h.MaxLineSize)
|
||||
|
||||
tlsConf := h.getTLSConfig()
|
||||
tlsConf, err := h.ServerConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
server := &http.Server{
|
||||
Addr: h.ServiceAddress,
|
||||
@@ -168,7 +168,6 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
|
||||
TLSConfig: tlsConf,
|
||||
}
|
||||
|
||||
var err error
|
||||
var listener net.Listener
|
||||
if tlsConf != nil {
|
||||
listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
|
||||
@@ -372,38 +371,6 @@ func badRequest(res http.ResponseWriter) {
|
||||
res.Write([]byte(`{"error":"http: bad request"}`))
|
||||
}
|
||||
|
||||
func (h *HTTPListener) getTLSConfig() *tls.Config {
|
||||
tlsConf := &tls.Config{
|
||||
InsecureSkipVerify: false,
|
||||
Renegotiation: tls.RenegotiateNever,
|
||||
}
|
||||
|
||||
if len(h.TlsCert) == 0 || len(h.TlsKey) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(h.TlsCert, h.TlsKey)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
tlsConf.Certificates = []tls.Certificate{cert}
|
||||
|
||||
if h.TlsAllowedCacerts != nil {
|
||||
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
clientPool := x509.NewCertPool()
|
||||
for _, ca := range h.TlsAllowedCacerts {
|
||||
c, err := ioutil.ReadFile(ca)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
clientPool.AppendCertsFromPEM(c)
|
||||
}
|
||||
tlsConf.ClientCAs = clientPool
|
||||
}
|
||||
|
||||
return tlsConf
|
||||
}
|
||||
|
||||
func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
|
||||
if h.BasicUsername != "" && h.BasicPassword != "" {
|
||||
reqUsername, reqPassword, ok := req.BasicAuth()
|
||||
|
||||
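With the hand-rolled `getTLSConfig` removed above, the listener now takes its TLS settings from the embedded `ServerConfig` in `internal/tls`. A condensed sketch of the resulting start-up path; the listener type below is invented, while the `ServerConfig` embedding, the `TLSConfig()` call and the `tls.Listen` usage follow the diff (the plain `net.Listen` fallback is assumed):

```go
package example

import (
	"crypto/tls"
	"net"
	"net/http"

	tlsint "github.com/influxdata/telegraf/internal/tls"
)

// ExampleListener is a hypothetical service input that accepts HTTP(S).
type ExampleListener struct {
	ServiceAddress string

	tlsint.ServerConfig
}

// listen opens either a TLS or a plain TCP listener, depending on whether
// tls_cert/tls_key were configured (TLSConfig returns nil when they were not).
func (h *ExampleListener) listen() (net.Listener, *http.Server, error) {
	tlsConf, err := h.ServerConfig.TLSConfig()
	if err != nil {
		return nil, nil, err
	}

	server := &http.Server{
		Addr:      h.ServiceAddress,
		TLSConfig: tlsConf,
	}

	var listener net.Listener
	if tlsConf != nil {
		listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
	} else {
		listener, err = net.Listen("tcp", h.ServiceAddress)
	}
	if err != nil {
		return nil, nil, err
	}
	return listener, server, nil
}
```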
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -34,86 +33,12 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
|
||||
|
||||
emptyMsg = ""
|
||||
|
||||
serviceRootPEM = `-----BEGIN CERTIFICATE-----
|
||||
MIIBxzCCATCgAwIBAgIJAJb7HqN2BzWWMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV
|
||||
BAMMC1RlbGVncmFmIENBMB4XDTE3MTEwNDA0MzEwN1oXDTI3MTEwMjA0MzEwN1ow
|
||||
FjEUMBIGA1UEAwwLVGVsZWdyYWYgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ
|
||||
AoGBANbkUkK6JQC3rbLcXhLJTS9SX6uXyFwl7bUfpAN5Hm5EqfvG3PnLrogfTGLr
|
||||
Tq5CRAu/gbbdcMoL9TLv/aaDVnrpV0FslKhqYmkOgT28bdmA7Qtr539aQpMKCfcW
|
||||
WCnoMcBD5u5h9MsRqpdq+0Mjlsf1H2hSf07jHk5R1T4l8RMXAgMBAAGjHTAbMAwG
|
||||
A1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4GBANSrwvpU
|
||||
t8ihIhpHqgJZ34DM92CZZ3ZHmH/KyqlnuGzjjpnVZiXVrLDTOzrA0ziVhmefY29w
|
||||
roHjENbFm54HW97ogxeURuO8HRHIVh2U0rkyVxOfGZiUdINHqsZdSnDY07bzCtSr
|
||||
Z/KsfWXM5llD1Ig1FyBHpKjyUvfzr73sjm/4
|
||||
-----END CERTIFICATE-----`
|
||||
serviceCertPEM = `-----BEGIN CERTIFICATE-----
|
||||
MIIBzzCCATigAwIBAgIBATANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
|
||||
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBQxEjAQBgNV
|
||||
BAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsJRss1af
|
||||
XKrcIjQoAp2kdJIpT2Ya+MRQXJ18b0PP7szh2lisY11kd/HCkd4D4efuIkpszHaN
|
||||
xwyTOZLOoplxp6fizzgOYjXsJ6SzbO1MQNmq8Ch/+uKiGgFwLX+YxOOsGSDIHNhF
|
||||
vcBi93cQtCWPBFz6QRQf9yfIAA5KKxUfJcMCAwEAAaMvMC0wCQYDVR0TBAIwADAL
|
||||
BgNVHQ8EBAMCBSAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQAD
|
||||
gYEAiC3WI4y9vfYz53gw7FKnNK7BBdwRc43x7Pd+5J/cclWyUZPdmcj1UNmv/3rj
|
||||
2qcMmX06UdgPoHppzNAJePvMVk0vjMBUe9MmYlafMz0h4ma/it5iuldXwmejFcdL
|
||||
6wWQp7gVTileCEmq9sNvfQN1FmT3EWf4IMdO2MNat/1If0g=
|
||||
-----END CERTIFICATE-----`
|
||||
serviceKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
|
||||
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
|
||||
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
|
||||
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
|
||||
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
|
||||
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
|
||||
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
|
||||
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
|
||||
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
|
||||
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
|
||||
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
|
||||
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
|
||||
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
|
||||
-----END RSA PRIVATE KEY-----`
|
||||
clientRootPEM = serviceRootPEM
|
||||
clientCertPEM = `-----BEGIN CERTIFICATE-----
|
||||
MIIBzjCCATegAwIBAgIBAjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
|
||||
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBMxETAPBgNV
|
||||
BAMMCHRlbGVncmFmMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDP2IMqyOqI
|
||||
sJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqMpBUTj3vLlOzsHfVVot1WRqc6
|
||||
3esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4UkJBWim8ArSbFqnZjcR19G3tG
|
||||
LUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQABoy8wLTAJBgNVHRMEAjAAMAsG
|
||||
A1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOB
|
||||
gQCHxMk38XNxL9nPFBYo3JqITJCFswu6/NLHwDBXCuZKl53rUuFWduiO+1OuScKQ
|
||||
sQ79W0jHsWRKGOUFrF5/Gdnh8AlkVaITVlcmhdAOFCEbeGpeEvLuuK6grckPitxy
|
||||
bRF5oM4TCLKKAha60Ir41rk2bomZM9+NZu+Bm+csDqCoxQ==
|
||||
-----END CERTIFICATE-----`
|
||||
clientKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXAIBAAKBgQDP2IMqyOqIsJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqM
|
||||
pBUTj3vLlOzsHfVVot1WRqc63esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4U
|
||||
kJBWim8ArSbFqnZjcR19G3tGLUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQAB
|
||||
AoGAFzb/r4+xYoMXEfgq5ZvXXTCY5cVNpR6+jCsqqYODPnn9XRLeCsdo8z5bfWms
|
||||
7NKLzHzca/6IPzL6Rf3vOxFq1YyIZfYVHH+d63/9blAm3Iajjp1W2yW5aj9BJjTb
|
||||
nm6F0RfuW/SjrZ9IXxTZhSpCklPmUzVZpzvwV3KGeVTVCEECQQDoavCeOwLuqDpt
|
||||
0aM9GMFUpOU7kLPDuicSwCDaTae4kN2rS17Zki41YXe8A8+509IEN7mK09Vq9HxY
|
||||
SX6EmV1FAkEA5O9QcCHEa8P12EmUC8oqD2bjq6o7JjUIRlKinwZTlooMJYZw98gA
|
||||
FVSngTUvLVCVIvSdjldXPOGgfYiccTZrFwJAfHS3gKOtAEuJbkEyHodhD4h1UB4+
|
||||
hPLr9Xh4ny2yQH0ilpV3px5GLEOTMFUCKUoqTiPg8VxaDjn5U/WXED5n2QJAR4J1
|
||||
NsFlcGACj+/TvacFYlA6N2nyFeokzoqLX28Ddxdh2erXqJ4hYIhT1ik9tkLggs2z
|
||||
1T1084BquCuO6lIcOwJBALX4xChoMUF9k0IxSQzlz//seQYDkQNsE7y9IgAOXkzp
|
||||
RaR4pzgPbnKj7atG+2dBnffWfE+1Mcy0INDAO6WxPg0=
|
||||
-----END RSA PRIVATE KEY-----`
|
||||
|
||||
basicUsername = "test-username-please-ignore"
|
||||
basicPassword = "super-secure-password!"
|
||||
)
|
||||
|
||||
var (
|
||||
initClient sync.Once
|
||||
client *http.Client
|
||||
initServiceCertFiles sync.Once
|
||||
allowedCAFiles []string
|
||||
serviceCAFiles []string
|
||||
serviceCertFile string
|
||||
serviceKeyFile string
|
||||
pki = testutil.NewPKI("../../../testutil/pki")
|
||||
)
|
||||
|
||||
func newTestHTTPListener() *HTTPListener {
|
||||
@@ -132,74 +57,25 @@ func newTestHTTPAuthListener() *HTTPListener {
|
||||
}
|
||||
|
||||
func newTestHTTPSListener() *HTTPListener {
|
||||
initServiceCertFiles.Do(func() {
|
||||
acaf, err := ioutil.TempFile("", "allowedCAFile.crt")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer acaf.Close()
|
||||
_, err = io.Copy(acaf, bytes.NewReader([]byte(clientRootPEM)))
|
||||
allowedCAFiles = []string{acaf.Name()}
|
||||
|
||||
scaf, err := ioutil.TempFile("", "serviceCAFile.crt")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer scaf.Close()
|
||||
_, err = io.Copy(scaf, bytes.NewReader([]byte(serviceRootPEM)))
|
||||
serviceCAFiles = []string{scaf.Name()}
|
||||
|
||||
scf, err := ioutil.TempFile("", "serviceCertFile.crt")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer scf.Close()
|
||||
_, err = io.Copy(scf, bytes.NewReader([]byte(serviceCertPEM)))
|
||||
serviceCertFile = scf.Name()
|
||||
|
||||
skf, err := ioutil.TempFile("", "serviceKeyFile.crt")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer skf.Close()
|
||||
_, err = io.Copy(skf, bytes.NewReader([]byte(serviceKeyPEM)))
|
||||
serviceKeyFile = skf.Name()
|
||||
})
|
||||
|
||||
listener := &HTTPListener{
|
||||
ServiceAddress: "localhost:0",
|
||||
TlsAllowedCacerts: allowedCAFiles,
|
||||
TlsCert: serviceCertFile,
|
||||
TlsKey: serviceKeyFile,
|
||||
TimeFunc: time.Now,
|
||||
ServiceAddress: "localhost:0",
|
||||
ServerConfig: *pki.TLSServerConfig(),
|
||||
TimeFunc: time.Now,
|
||||
}
|
||||
|
||||
return listener
|
||||
}
|
||||
|
||||
func getHTTPSClient() *http.Client {
|
||||
initClient.Do(func() {
|
||||
cas := x509.NewCertPool()
|
||||
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
|
||||
clientCert, err := tls.X509KeyPair([]byte(clientCertPEM), []byte(clientKeyPEM))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: cas,
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
MinVersion: tls.VersionTLS12,
|
||||
MaxVersion: tls.VersionTLS12,
|
||||
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
|
||||
Renegotiation: tls.RenegotiateNever,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
return client
|
||||
tlsConfig, err := pki.TLSClientConfig().TLSConfig()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string {
|
||||
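The rewritten test helpers above replace the inline PEM blobs with the shared `testutil.NewPKI` fixture. A compressed sketch of the client half of that pattern; only `NewPKI` and `TLSClientConfig().TLSConfig()` are taken from the diff, the wrapper function is invented:

```go
package example

import (
	"net/http"

	"github.com/influxdata/telegraf/testutil"
)

// pki points at the repository-wide test certificates; the relative path
// depends on where the test file lives.
var pki = testutil.NewPKI("../../../testutil/pki")

// newTLSClient builds an HTTPS client trusting the test CA, the same way
// the rewritten getHTTPSClient above does. The server side uses
// *pki.TLSServerConfig() for its embedded ServerConfig.
func newTLSClient() (*http.Client, error) {
	tlsConfig, err := pki.TLSClientConfig().TLSConfig()
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
	}, nil
}
```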
@@ -214,14 +90,14 @@ func createURL(listener *HTTPListener, scheme string, path string, rawquery stri
|
||||
|
||||
func TestWriteHTTPSNoClientAuth(t *testing.T) {
|
||||
listener := newTestHTTPSListener()
|
||||
listener.TlsAllowedCacerts = nil
|
||||
listener.TLSAllowedCACerts = nil
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
require.NoError(t, listener.Start(acc))
|
||||
defer listener.Stop()
|
||||
|
||||
cas := x509.NewCertPool()
|
||||
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
|
||||
cas.AppendCertsFromPEM([]byte(pki.ReadServerCert()))
|
||||
noClientAuthClient := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
|
||||
@@ -32,11 +32,11 @@ This input plugin checks HTTP/HTTPS connections.
|
||||
# response_string_match = "ok"
|
||||
# response_string_match = "\".*_status\".?:.?\"up\""
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## HTTP Request Headers (all values must be strings)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -29,15 +30,7 @@ type HTTPResponse struct {
Headers map[string]string
FollowRedirects bool
ResponseStringMatch string

// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig

compiledStringMatch *regexp.Regexp
client *http.Client
@@ -74,11 +67,11 @@ var sampleConfig = `
# response_string_match = "ok"
# response_string_match = "\".*_status\".?:.?\"up\""

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## HTTP Request Headers (all values must be strings)
@@ -113,8 +106,7 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) {
// CreateHttpClient creates an http client which will timeout at the specified
// timeout period and can follow redirects if specified
func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
tlsCfg, err := internal.GetTLSConfig(
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}

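This hunk, and the matching ones in the plugins below, swaps internal.GetTLSConfig(...) plus four ssl_* fields for an embedded tls.ClientConfig and its TLSConfig() method. A minimal sketch of the resulting client-side pattern; the fetcher type, field layout and timeout handling here are illustrative assumptions, only the embedded ClientConfig and the TLSConfig() call are taken from these hunks:

```go
package example

import (
	"net/http"
	"time"

	"github.com/influxdata/telegraf/internal/tls"
)

// Hypothetical plugin-style struct: the embedded tls.ClientConfig is what
// the tls_ca / tls_cert / tls_key / insecure_skip_verify options are
// assumed to decode into.
type fetcher struct {
	URL string
	tls.ClientConfig
}

// newHTTPClient mirrors the createHttpClient/Gather changes in this diff:
// build the TLS configuration once from the embedded settings, then hang
// it on an http.Transport.
func (f *fetcher) newHTTPClient(timeout time.Duration) (*http.Client, error) {
	tlsCfg, err := f.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
		Timeout:   timeout,
	}, nil
}
```

The same three-line replacement shows up almost verbatim in the httpjson, influxdb and jolokia2 hunks further down.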
@@ -34,11 +34,11 @@ Deprecated (1.6): use the [http](../http) input.
# "my_tag_2"
# ]

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## HTTP Request Parameters (all values must be strings). For "GET" requests, data

@@ -12,6 +12,7 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -29,15 +30,7 @@ type HttpJson struct {
ResponseTimeout internal.Duration
Parameters map[string]string
Headers map[string]string

// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
tls.ClientConfig

client HTTPClient
}
@@ -100,11 +93,11 @@ var sampleConfig = `
# "my_tag_2"
# ]

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## HTTP parameters (all values must be strings). For "GET" requests, data
@@ -133,8 +126,7 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup

if h.client.HTTPClient() == nil {
tlsCfg, err := internal.GetTLSConfig(
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
tlsCfg, err := h.ClientConfig.TLSConfig()
if err != nil {
return err
}

@@ -20,11 +20,11 @@ InfluxDB-formatted endpoints. See below for more information.
"http://localhost:8086/debug/vars"
]

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## http request & header timeout
@@ -33,6 +33,9 @@ InfluxDB-formatted endpoints. See below for more information.

### Measurements & Fields

**Note:** The measurements and fields are dynamically built from the InfluxDB source,
and may vary between versions.

- influxdb
- n_shards
- influxdb_database

@@ -10,21 +10,14 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)

type InfluxDB struct {
URLs []string `toml:"urls"`
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool

URLs []string `toml:"urls"`
Timeout internal.Duration
tls.ClientConfig

client *http.Client
}
@@ -45,11 +38,11 @@ func (*InfluxDB) SampleConfig() string {
"http://localhost:8086/debug/vars"
]

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

## http request & header timeout
@@ -63,8 +56,7 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
}

if i.client == nil {
tlsCfg, err := internal.GetTLSConfig(
i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify)
tlsCfg, err := i.ClientConfig.TLSConfig()
if err != nil {
return err
}

@@ -2,7 +2,9 @@

The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).

## Jolokia Agent Configuration
### Configuration:

#### Jolokia Agent Configuration

The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints.

@@ -16,14 +18,14 @@ The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia ag
paths = ["Uptime"]
```

Optionally, specify SSL options for communicating with agents:
Optionally, specify TLS options for communicating with agents:

```toml
[[inputs.jolokia2_agent]]
urls = ["https://agent:8080/jolokia"]
ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
tls_ca = "/var/private/ca.pem"
tls_cert = "/var/private/client.pem"
tls_key = "/var/private/client-key.pem"
#insecure_skip_verify = false

[[inputs.jolokia2_agent.metric]]
@@ -32,7 +34,7 @@ Optionally, specify SSL options for communicating with agents:
paths = ["Uptime"]
```

## Jolokia Proxy Configuration
#### Jolokia Proxy Configuration

The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint.

@@ -53,15 +55,15 @@ The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ b
paths = ["Uptime"]
```

Optionally, specify SSL options for communicating with proxies:
Optionally, specify TLS options for communicating with proxies:

```toml
[[inputs.jolokia2_proxy]]
url = "https://proxy:8080/jolokia"

ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
tls_ca = "/var/private/ca.pem"
tls_cert = "/var/private/client.pem"
tls_key = "/var/private/client-key.pem"
#insecure_skip_verify = false

#default_target_username = ""
@@ -77,7 +79,7 @@ Optionally, specify SSL options for communicating with proxies:
paths = ["Uptime"]
```

## Jolokia Metric Configuration
#### Jolokia Metric Configuration

Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean.

@@ -167,3 +169,17 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration
| `default_field_separator` | `.` | A character to use to join Mbean attributes when creating fields. |
| `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. |
| `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. |

### Example Configurations:

- [ActiveMQ](/plugins/inputs/jolokia2/examples/activemq.conf)
- [BitBucket](/plugins/inputs/jolokia2/examples/bitbucket.conf)
- [Cassandra](/plugins/inputs/jolokia2/examples/cassandra.conf)
- [Hadoop-HDFS](/plugins/inputs/jolokia2/examples/hadoop-hdfs.conf)
- [Java JVM](/plugins/inputs/jolokia2/examples/java.conf)
- [JBoss](/plugins/inputs/jolokia2/examples/jboss.conf)
- [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf)
- [Tomcat](/plugins/inputs/jolokia2/examples/tomcat.conf)
- [Weblogic](/plugins/inputs/jolokia2/examples/weblogic.conf)

Please help improve this list and contribute new configuration files by opening an issue or pull request.

@@ -10,7 +10,7 @@ import (
"path"
"time"

"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/tls"
)

type Client struct {
@@ -20,15 +20,11 @@ type Client struct {
}

type ClientConfig struct {
ResponseTimeout time.Duration
Username string
Password string
SSLCA string
SSLCert string
SSLKey string
InsecureSkipVerify bool

ProxyConfig *ProxyConfig
ResponseTimeout time.Duration
Username string
Password string
ProxyConfig *ProxyConfig
tls.ClientConfig
}

type ProxyConfig struct {
@@ -100,8 +96,7 @@ type jolokiaResponse struct {
}

func NewClient(url string, config *ClientConfig) (*Client, error) {
tlsConfig, err := internal.GetTLSConfig(
config.SSLCert, config.SSLKey, config.SSLCA, config.InsecureSkipVerify)
tlsConfig, err := config.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
