Compare commits
219 Commits
0.12.1
...
1.0.0-beta
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2beef21231 | ||
|
|
cb3c54a1ae | ||
|
|
d50a1e83ac | ||
|
|
1f10639222 | ||
|
|
af0979cce5 | ||
|
|
5b43901bd8 | ||
|
|
d7efb7a71d | ||
|
|
4d242836ee | ||
|
|
06cb5a041e | ||
|
|
ea2521bf27 | ||
|
|
4cd1f7a104 | ||
|
|
137843b2f6 | ||
|
|
008ed17a79 | ||
|
|
75e6cb9064 | ||
|
|
ad88a9421a | ||
|
|
346deb30a3 | ||
|
|
8c3d7cd145 | ||
|
|
821b30eb92 | ||
|
|
a362352587 | ||
|
|
94f952787f | ||
|
|
3ff184c061 | ||
|
|
80368e3936 | ||
|
|
2c448e22e1 | ||
|
|
1aabd38eb2 | ||
|
|
675457873a | ||
|
|
8173338f8a | ||
|
|
c4841843a9 | ||
|
|
f08a27be5d | ||
|
|
a4b36d12dd | ||
|
|
c842724b61 | ||
|
|
fb5f40319e | ||
|
|
52b9fc837c | ||
|
|
6f991ec78a | ||
|
|
7921d87a45 | ||
|
|
9f7a758bf9 | ||
|
|
0aff7a0bc1 | ||
|
|
c4cfdb8a25 | ||
|
|
342cfc4087 | ||
|
|
bd1282eddf | ||
|
|
892abec025 | ||
|
|
e809c4e445 | ||
|
|
9ff536d94d | ||
|
|
4f27315720 | ||
|
|
958ef2f872 | ||
|
|
069764f05e | ||
|
|
eeeab5192b | ||
|
|
a7dfbce3d3 | ||
|
|
ed2d1d9bb7 | ||
|
|
0fb2d2ffae | ||
|
|
3af65e7abb | ||
|
|
984b6cb0fb | ||
|
|
ca504a19ec | ||
|
|
c2797c85d1 | ||
|
|
d5add07c0b | ||
|
|
0ebf1c1ad7 | ||
|
|
42d7fc5e16 | ||
|
|
6828fc48e1 | ||
|
|
98d91b1c89 | ||
|
|
9bbdb2d562 | ||
|
|
a8334c3261 | ||
|
|
9144f9630b | ||
|
|
3e4a19539a | ||
|
|
5fe7e6e40e | ||
|
|
58f2ba1247 | ||
|
|
5f3a91bffd | ||
|
|
6351aa5167 | ||
|
|
9966099d1a | ||
|
|
1ef5599361 | ||
|
|
c78b6cdb4e | ||
|
|
d736c7235a | ||
|
|
475252d873 | ||
|
|
e103923430 | ||
|
|
cb59517ceb | ||
|
|
1248934f3e | ||
|
|
204ebf6bf6 | ||
|
|
52d5b19219 | ||
|
|
8e92d3a4a0 | ||
|
|
c44ecf54a5 | ||
|
|
c6699c36d3 | ||
|
|
d6ceae7005 | ||
|
|
4dcb82bf08 | ||
|
|
4f5d5926d9 | ||
|
|
3c5c3b98df | ||
|
|
56aee1ceee | ||
|
|
f176c28a56 | ||
|
|
2e68bd1412 | ||
|
|
35eb65460d | ||
|
|
ab54064689 | ||
|
|
debf7bf149 | ||
|
|
1dbe3b8231 | ||
|
|
b065573e23 | ||
|
|
e94e50181c | ||
|
|
69dfe63809 | ||
|
|
f32916a5bd | ||
|
|
be7ca56872 | ||
|
|
33cacc71b8 | ||
|
|
c292e3931a | ||
|
|
a87d6f0545 | ||
|
|
3a01b6d5b7 | ||
|
|
39df2635bd | ||
|
|
08ecfb8a67 | ||
|
|
a59bf7246a | ||
|
|
281296cd3f | ||
|
|
61d190b1ae | ||
|
|
dc89f029ad | ||
|
|
7557056a31 | ||
|
|
20c45a150c | ||
|
|
46bf0ef271 | ||
|
|
a7b632eb5e | ||
|
|
90a98c76a0 | ||
|
|
12357ee8c5 | ||
|
|
bb254fc2b9 | ||
|
|
aeadc2c43a | ||
|
|
ed492fe950 | ||
|
|
775daba8f5 | ||
|
|
677dd7ad53 | ||
|
|
85dee02a3b | ||
|
|
afdebbc3a2 | ||
|
|
5deb22a539 | ||
|
|
36b9e2e077 | ||
|
|
5348937c3d | ||
|
|
72fcacbbc7 | ||
|
|
4c28f15b35 | ||
|
|
095ef04c04 | ||
|
|
7d49979658 | ||
|
|
7a36695a21 | ||
|
|
5865587bd0 | ||
|
|
219bf93566 | ||
|
|
8371546a66 | ||
|
|
0b9b7bddd7 | ||
|
|
4c8449f4bc | ||
|
|
36d7b5c9ab | ||
|
|
f2b0ea6722 | ||
|
|
46f4be88a6 | ||
|
|
6381efa7ce | ||
|
|
85ee66efb9 | ||
|
|
40dccf5b29 | ||
|
|
c114849a31 | ||
|
|
4e9798d0e6 | ||
|
|
a30b1a394f | ||
|
|
91460436cf | ||
|
|
3f807a9432 | ||
|
|
cbe32c7482 | ||
|
|
5d3c582ecf | ||
|
|
3ed006d216 | ||
|
|
3e1026286b | ||
|
|
b59266249d | ||
|
|
015261a524 | ||
|
|
024e1088eb | ||
|
|
08f4b1ae8a | ||
|
|
1390c22004 | ||
|
|
8742ead585 | ||
|
|
59a297abe6 | ||
|
|
18636ea628 | ||
|
|
cf5980ace2 | ||
|
|
a7b0861436 | ||
|
|
89f2c0b0a4 | ||
|
|
ee4f4d7800 | ||
|
|
4de75ce621 | ||
|
|
1c4043ab39 | ||
|
|
44c945b9f5 | ||
|
|
c7719ac365 | ||
|
|
b9c24189e4 | ||
|
|
411d8d7439 | ||
|
|
671b40df2a | ||
|
|
249a860c6f | ||
|
|
0367a39e1f | ||
|
|
1a7340bb02 | ||
|
|
ce7d852d22 | ||
|
|
01b01c5969 | ||
|
|
c159460b2c | ||
|
|
07728d7425 | ||
|
|
d3a25e4dc1 | ||
|
|
1751c35f69 | ||
|
|
93f5b8cc4a | ||
|
|
5b1e59a48c | ||
|
|
7b27cad1ba | ||
|
|
1b083d63ab | ||
|
|
23f2b47531 | ||
|
|
194288c00e | ||
|
|
f9c8ed0dc3 | ||
|
|
88def9b71b | ||
|
|
f818f44693 | ||
|
|
8a395fdb4a | ||
|
|
c0588926b8 | ||
|
|
f1b7ecb2a2 | ||
|
|
4bcf157d88 | ||
|
|
2f7da03cce | ||
|
|
f1c995dcb8 | ||
|
|
9aec58c6b8 | ||
|
|
46aaaa9b70 | ||
|
|
46543d6323 | ||
|
|
a585119a67 | ||
|
|
8cc72368ca | ||
|
|
92e57ee06c | ||
|
|
c737a19d9f | ||
|
|
708a97d773 | ||
|
|
b95a90dbd6 | ||
|
|
a2d1ee08d4 | ||
|
|
7e64dc380f | ||
|
|
046cb6a564 | ||
|
|
644ce9edab | ||
|
|
059b601b13 | ||
|
|
d59999f510 | ||
|
|
c5d31e7527 | ||
|
|
c121e38da6 | ||
|
|
b16bc3d2e3 | ||
|
|
c732abbda2 | ||
|
|
61d681a7c8 | ||
|
|
7828bc09cf | ||
|
|
36d330fea0 | ||
|
|
4d46589d39 | ||
|
|
93f57edd3a | ||
|
|
8ec8ae0587 | ||
|
|
ce94e636bb | ||
|
|
21c7378b61 | ||
|
|
75a9845d20 | ||
|
|
d638f6e411 | ||
|
|
81d0a64d46 |
4
.gitattributes
vendored
Normal file
4
.gitattributes
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
CHANGELOG.md merge=union
|
||||||
|
README.md merge=union
|
||||||
|
plugins/inputs/all/all.go merge=union
|
||||||
|
plugins/outputs/all/all.go merge=union
|
||||||
44
.github/ISSUE_TEMPLATE.md
vendored
Normal file
44
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
## Directions
|
||||||
|
|
||||||
|
GitHub Issues are reserved for actionable bug reports and feature requests.
|
||||||
|
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).
|
||||||
|
|
||||||
|
Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
|
||||||
|
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
|
||||||
|
Erase the other section and everything on and above this line.
|
||||||
|
|
||||||
|
*Please note, the quickest way to fix a bug is to open a Pull Request.*
|
||||||
|
|
||||||
|
## Bug report
|
||||||
|
|
||||||
|
### Relevant telegraf.conf:
|
||||||
|
|
||||||
|
### System info:
|
||||||
|
|
||||||
|
[Include Telegraf version, operating system name, and other relevant details]
|
||||||
|
|
||||||
|
### Steps to reproduce:
|
||||||
|
|
||||||
|
1. ...
|
||||||
|
2. ...
|
||||||
|
|
||||||
|
### Expected behavior:
|
||||||
|
|
||||||
|
### Actual behavior:
|
||||||
|
|
||||||
|
### Additional info:
|
||||||
|
|
||||||
|
[Include gist of relevant config, logs, etc.]
|
||||||
|
|
||||||
|
|
||||||
|
## Feature Request
|
||||||
|
|
||||||
|
Opening a feature request kicks off a discussion.
|
||||||
|
|
||||||
|
### Proposal:
|
||||||
|
|
||||||
|
### Current behavior:
|
||||||
|
|
||||||
|
### Desired behavior:
|
||||||
|
|
||||||
|
### Use case: [Why is this important (helps with prioritizing requests)]
|
||||||
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
### Required for all PRs:
|
||||||
|
|
||||||
|
- [ ] CHANGELOG.md updated
|
||||||
|
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
|
||||||
|
- [ ] README.md updated (if adding a new plugin)
|
||||||
218
CHANGELOG.md
218
CHANGELOG.md
@@ -1,4 +1,220 @@
|
|||||||
## v0.13 [unreleased]
|
## v1.0
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
## v1.0 beta 2 [2016-06-21]
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
|
||||||
|
- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
|
||||||
|
- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
|
||||||
|
- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.
|
||||||
|
- [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection.
|
||||||
|
- [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string.
|
||||||
|
- [#1335](https://github.com/influxdata/telegraf/issues/1335): Fix overall ping timeout to be calculated based on per-ping timeout.
|
||||||
|
- [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
|
||||||
|
- [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
|
||||||
|
|
||||||
|
## v1.0 beta 1 [2016-06-07]
|
||||||
|
|
||||||
|
### Release Notes
|
||||||
|
|
||||||
|
- `flush_jitter` behavior has been changed. The random jitter will now be
|
||||||
|
evaluated at every flush interval, rather than once at startup. This makes it
|
||||||
|
consistent with the behavior of `collection_jitter`.
|
||||||
|
|
||||||
|
- All AWS plugins now utilize a standard mechanism for evaluating credentials.
|
||||||
|
This allows all AWS plugins to support environment variables, shared credential
|
||||||
|
files & profiles, and role assumptions. See the specific plugin README for
|
||||||
|
details.
|
||||||
|
|
||||||
|
- The AWS CloudWatch input plugin can now declare a wildcard value for a metric
|
||||||
|
dimension. This causes the plugin to read all metrics that contain the specified
|
||||||
|
dimension key regardless of value. This is used to export collections of metrics
|
||||||
|
without having to know the dimension values ahead of time.
|
||||||
|
|
||||||
|
- The AWS CloudWatch input plugin can now be configured with the `cache_ttl`
|
||||||
|
attribute. This configures the TTL of the internal metric cache. This is useful
|
||||||
|
in conjunction with wildcard dimension values as it will control the amount of
|
||||||
|
time before a new metric is included by the plugin.
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input pluging.
|
||||||
|
- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
|
||||||
|
- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
|
||||||
|
- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
|
||||||
|
- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar input plugin. Thanks @francois2metz and @cduez!
|
||||||
|
- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
|
||||||
|
- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
|
||||||
|
- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
|
||||||
|
- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren!
|
||||||
|
- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
|
||||||
|
- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
|
||||||
|
- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified
|
||||||
|
- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
|
||||||
|
- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified
|
||||||
|
- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
|
||||||
|
- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#1252](https://github.com/influxdata/telegraf/pull/1252) & [#1279](https://github.com/influxdata/telegraf/pull/1279): Fix systemd service. Thanks @zbindenren & @PierreF!
|
||||||
|
- [#1221](https://github.com/influxdata/telegraf/pull/1221): Fix influxdb n_shards counter.
|
||||||
|
- [#1258](https://github.com/influxdata/telegraf/pull/1258): Fix potential kernel plugin integer parse error.
|
||||||
|
- [#1268](https://github.com/influxdata/telegraf/pull/1268): Fix potential influxdb input type assertion panic.
|
||||||
|
- [#1283](https://github.com/influxdata/telegraf/pull/1283): Still send processes metrics if a process exited during metric collection.
|
||||||
|
- [#1297](https://github.com/influxdata/telegraf/issues/1297): disk plugin panic when usage grab fails.
|
||||||
|
- [#1316](https://github.com/influxdata/telegraf/pull/1316): Removed leaked "database" tag on redis metrics. Thanks @PierreF!
|
||||||
|
- [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory.
|
||||||
|
- [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.
|
||||||
|
|
||||||
|
## v0.13.1 [2016-05-24]
|
||||||
|
|
||||||
|
### Release Notes
|
||||||
|
|
||||||
|
- net_response and http_response plugins timeouts will now accept duration
|
||||||
|
strings, ie, "2s" or "500ms".
|
||||||
|
- Input plugin Gathers will no longer be logged by default, but a Gather for
|
||||||
|
_each_ plugin will be logged in Debug mode.
|
||||||
|
- Debug mode will no longer print every point added to the accumulator. This
|
||||||
|
functionality can be duplicated using the `file` output plugin and printing
|
||||||
|
to "stdout".
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- [#1173](https://github.com/influxdata/telegraf/pull/1173): varnish input plugin. Thanks @sfox-xmatters!
|
||||||
|
- [#1138](https://github.com/influxdata/telegraf/pull/1138): nstat input plugin. Thanks @Maksadbek!
|
||||||
|
- [#1139](https://github.com/influxdata/telegraf/pull/1139): instrumental output plugin. Thanks @jasonroelofs!
|
||||||
|
- [#1172](https://github.com/influxdata/telegraf/pull/1172): Ceph storage stats. Thanks @robinpercy!
|
||||||
|
- [#1233](https://github.com/influxdata/telegraf/pull/1233): Updated golint gopsutil dependency.
|
||||||
|
- [#1238](https://github.com/influxdata/telegraf/pull/1238): chrony input plugin. Thanks @zbindenren!
|
||||||
|
- [#479](https://github.com/influxdata/telegraf/issues/479): per-plugin execution time added to debug output.
|
||||||
|
- [#1249](https://github.com/influxdata/telegraf/issues/1249): influxdb output: added write_consistency argument.
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#1195](https://github.com/influxdata/telegraf/pull/1195): Docker panic on timeout. Thanks @zstyblik!
|
||||||
|
- [#1211](https://github.com/influxdata/telegraf/pull/1211): mongodb input. Fix possible panic. Thanks @kols!
|
||||||
|
- [#1215](https://github.com/influxdata/telegraf/pull/1215): Fix for possible gopsutil-dependent plugin hangs.
|
||||||
|
- [#1228](https://github.com/influxdata/telegraf/pull/1228): Fix service plugin host tag overwrite.
|
||||||
|
- [#1198](https://github.com/influxdata/telegraf/pull/1198): http_response: override request Host header properly
|
||||||
|
- [#1230](https://github.com/influxdata/telegraf/issues/1230): Fix Telegraf process hangup due to a single plugin hanging.
|
||||||
|
- [#1214](https://github.com/influxdata/telegraf/issues/1214): Use TCP timeout argument in net_response plugin.
|
||||||
|
- [#1243](https://github.com/influxdata/telegraf/pull/1243): Logfile not created on systemd.
|
||||||
|
|
||||||
|
## v0.13 [2016-05-11]
|
||||||
|
|
||||||
|
### Release Notes
|
||||||
|
|
||||||
|
- **Breaking change** in jolokia plugin. See
|
||||||
|
https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md
|
||||||
|
for updated configuration. The plugin will now support proxy mode and will make
|
||||||
|
POST requests.
|
||||||
|
|
||||||
|
- New [agent] configuration option: `metric_batch_size`. This option tells
|
||||||
|
telegraf the maximum batch size to allow to accumulate before sending a flush
|
||||||
|
to the configured outputs. `metric_buffer_limit` now refers to the absolute
|
||||||
|
maximum number of metrics that will accumulate before metrics are dropped.
|
||||||
|
|
||||||
|
- There is no longer an option to
|
||||||
|
`flush_buffer_when_full`, this is now the default and only behavior of telegraf.
|
||||||
|
|
||||||
|
- **Breaking Change**: docker plugin tags. The cont_id tag no longer exists, it
|
||||||
|
will now be a field, and be called container_id. Additionally, cont_image and
|
||||||
|
cont_name are being renamed to container_image and container_name.
|
||||||
|
|
||||||
|
- **Breaking Change**: docker plugin measurements. The `docker_cpu`, `docker_mem`,
|
||||||
|
`docker_blkio` and `docker_net` measurements are being renamed to
|
||||||
|
`docker_container_cpu`, `docker_container_mem`, `docker_container_blkio` and
|
||||||
|
`docker_container_net`. Why? Because these metrics are
|
||||||
|
specifically tracking per-container stats. The problem with per-container stats,
|
||||||
|
in some use-cases, is that if containers are short-lived AND names are not
|
||||||
|
kept consistent, then the series cardinality will balloon very quickly.
|
||||||
|
So adding "container" to each metric will:
|
||||||
|
(1) make it more clear that these metrics are per-container, and
|
||||||
|
(2) allow users to easily drop per-container metrics if cardinality is an
|
||||||
|
issue (`namedrop = ["docker_container_*"]`)
|
||||||
|
|
||||||
|
- `tagexclude` and `taginclude` are now available, which can be used to remove
|
||||||
|
tags from measurements on inputs and outputs. See
|
||||||
|
[the configuration doc](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md)
|
||||||
|
for more details.
|
||||||
|
|
||||||
|
- **Measurement filtering:** All measurement filters now match based on glob
|
||||||
|
only. Previously there was an undocumented behavior where filters would match
|
||||||
|
based on _prefix_ in addition to globs. This means that a filter like
|
||||||
|
`fielddrop = ["time_"]` will need to be changed to `fielddrop = ["time_*"]`
|
||||||
|
|
||||||
|
- **datadog**: measurement and field names will no longer have `_` replaced by `.`
|
||||||
|
|
||||||
|
- The following plugins have changed their tags to _not_ overwrite the host tag:
|
||||||
|
- cassandra: `host -> cassandra_host`
|
||||||
|
- disque: `host -> disque_host`
|
||||||
|
- rethinkdb: `host -> rethinkdb_host`
|
||||||
|
|
||||||
|
- **Breaking Change**: The `win_perf_counters` input has been changed to
|
||||||
|
sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as
|
||||||
|
spaces with underscores. This is needed because Graphite doesn't like slashes
|
||||||
|
and spaces, and was failing to accept metrics that had them.
|
||||||
|
The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.
|
||||||
|
|
||||||
|
- **Breaking Change**: snmp plugin. The `host` tag of the snmp plugin has been
|
||||||
|
changed to the `snmp_host` tag.
|
||||||
|
|
||||||
|
- The `disk` input plugin can now be configured with the `HOST_MOUNT_PREFIX` environment variable.
|
||||||
|
This value is prepended to any mountpaths discovered before retrieving stats.
|
||||||
|
It is not included on the report path. This is necessary for reporting host disk stats when running from within a container.
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- [#1031](https://github.com/influxdata/telegraf/pull/1031): Jolokia plugin proxy mode. Thanks @saiello!
|
||||||
|
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
|
||||||
|
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
|
||||||
|
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
|
||||||
|
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin
|
||||||
|
- [#1056](https://github.com/influxdata/telegraf/pull/1056): Don't allow inputs to overwrite host tags.
|
||||||
|
- [#1035](https://github.com/influxdata/telegraf/issues/1035): Add `user`, `exe`, `pidfile` tags to procstat plugin.
|
||||||
|
- [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin.
|
||||||
|
- [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat.
|
||||||
|
- [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin
|
||||||
|
- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman!
|
||||||
|
- [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers.
|
||||||
|
- [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements.
|
||||||
|
- [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja!
|
||||||
|
- [#1122](https://github.com/influxdata/telegraf/pull/1122): Support setting config path through env variable and default paths.
|
||||||
|
- [#1128](https://github.com/influxdata/telegraf/pull/1128): MongoDB jumbo chunks metric for MongoDB input plugin
|
||||||
|
- [#1146](https://github.com/influxdata/telegraf/pull/1146): HAProxy socket support. Thanks weshmashian!
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#1050](https://github.com/influxdata/telegraf/issues/1050): jolokia plugin - do not overwrite host tag. Thanks @saiello!
|
||||||
|
- [#921](https://github.com/influxdata/telegraf/pull/921): mqtt_consumer stops gathering metrics. Thanks @chaton78!
|
||||||
|
- [#1013](https://github.com/influxdata/telegraf/pull/1013): Close dead riemann output connections. Thanks @echupriyanov!
|
||||||
|
- [#1012](https://github.com/influxdata/telegraf/pull/1012): Set default tags in test accumulator.
|
||||||
|
- [#1024](https://github.com/influxdata/telegraf/issues/1024): Don't replace `.` with `_` in datadog output.
|
||||||
|
- [#1058](https://github.com/influxdata/telegraf/issues/1058): Fix possible leaky TCP connections in influxdb output.
|
||||||
|
- [#1044](https://github.com/influxdata/telegraf/pull/1044): Fix SNMP OID possible collisions. Thanks @relip
|
||||||
|
- [#1022](https://github.com/influxdata/telegraf/issues/1022): Dont error deb/rpm install on systemd errors.
|
||||||
|
- [#1078](https://github.com/influxdata/telegraf/issues/1078): Use default AWS credential chain.
|
||||||
|
- [#1070](https://github.com/influxdata/telegraf/issues/1070): SQL Server input. Fix datatype conversion.
|
||||||
|
- [#1089](https://github.com/influxdata/telegraf/issues/1089): Fix leaky TCP connections in phpfpm plugin.
|
||||||
|
- [#914](https://github.com/influxdata/telegraf/issues/914): Telegraf can drop metrics on full buffers.
|
||||||
|
- [#1098](https://github.com/influxdata/telegraf/issues/1098): Sanitize invalid OpenTSDB characters.
|
||||||
|
- [#1110](https://github.com/influxdata/telegraf/pull/1110): Sanitize * to - in graphite serializer. Thanks @goodeggs!
|
||||||
|
- [#1118](https://github.com/influxdata/telegraf/pull/1118): Sanitize Counter names for `win_perf_counters` input.
|
||||||
|
- [#1125](https://github.com/influxdata/telegraf/pull/1125): Wrap all exec command runners with a timeout, so hung os processes don't halt Telegraf.
|
||||||
|
- [#1113](https://github.com/influxdata/telegraf/pull/1113): Set MaxRetry and RequiredAcks defaults in Kafka output.
|
||||||
|
- [#1090](https://github.com/influxdata/telegraf/issues/1090): [agent] and [global_tags] config sometimes not getting applied.
|
||||||
|
- [#1133](https://github.com/influxdata/telegraf/issues/1133): Use a timeout for docker list & stat cmds.
|
||||||
|
- [#1052](https://github.com/influxdata/telegraf/issues/1052): Docker panic fix when decode fails.
|
||||||
|
- [#1136](https://github.com/influxdata/telegraf/pull/1136): "DELAYED" Inserts were deprecated in MySQL 5.6.6. Thanks @PierreF
|
||||||
|
|
||||||
## v0.12.1 [2016-04-14]
|
## v0.12.1 [2016-04-14]
|
||||||
|
|
||||||
|
|||||||
@@ -212,8 +212,8 @@ func (s *Simple) Close() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Simple) Write(metrics []telegraf.Metric) error {
|
func (s *Simple) Write(metrics []telegraf.Metric) error {
|
||||||
for _, pt := range points {
|
for _, metric := range metrics {
|
||||||
// write `pt` to the output sink here
|
// write `metric` to the output sink here
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
8
Godeps
8
Godeps
@@ -16,14 +16,17 @@ github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
|||||||
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
|
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
|
||||||
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
|
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
|
||||||
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
|
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
|
||||||
|
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
|
||||||
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
|
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
|
||||||
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
|
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
|
||||||
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
|
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
|
||||||
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
|
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
|
||||||
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
|
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
|
||||||
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
||||||
|
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
|
||||||
|
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
|
||||||
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
|
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
|
||||||
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
|
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
|
||||||
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
|
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
|
||||||
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
|
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
|
||||||
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
|
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
|
||||||
@@ -40,10 +43,11 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
|||||||
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
|
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
|
||||||
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
|
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
|
||||||
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
|
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
|
||||||
github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
|
github.com/shirou/gopsutil 586bb697f3ec9f8ec08ffefe18f521a64534037c
|
||||||
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
|
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
|
||||||
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
||||||
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
|
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
|
||||||
|
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
|
||||||
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
|
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
|
||||||
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
||||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
||||||
|
|||||||
14
Makefile
14
Makefile
@@ -14,21 +14,21 @@ windows: prepare-windows build-windows
|
|||||||
|
|
||||||
# Only run the build (no dependency grabbing)
|
# Only run the build (no dependency grabbing)
|
||||||
build:
|
build:
|
||||||
go install -ldflags "-X main.Version=$(VERSION)" ./...
|
go install -ldflags "-X main.version=$(VERSION)" ./...
|
||||||
|
|
||||||
build-windows:
|
build-windows:
|
||||||
go build -o telegraf.exe -ldflags \
|
go build -o telegraf.exe -ldflags \
|
||||||
"-X main.Version=$(VERSION)" \
|
"-X main.version=$(VERSION)" \
|
||||||
./cmd/telegraf/telegraf.go
|
./cmd/telegraf/telegraf.go
|
||||||
|
|
||||||
build-for-docker:
|
build-for-docker:
|
||||||
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
|
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
|
||||||
"-s -X main.Version=$(VERSION)" \
|
"-s -X main.version=$(VERSION)" \
|
||||||
./cmd/telegraf/telegraf.go
|
./cmd/telegraf/telegraf.go
|
||||||
|
|
||||||
# Build with race detector
|
# Build with race detector
|
||||||
dev: prepare
|
dev: prepare
|
||||||
go build -race -ldflags "-X main.Version=$(VERSION)" ./...
|
go build -race -ldflags "-X main.version=$(VERSION)" ./...
|
||||||
|
|
||||||
# run package script
|
# run package script
|
||||||
package:
|
package:
|
||||||
@@ -64,7 +64,6 @@ endif
|
|||||||
docker run --name memcached -p "11211:11211" -d memcached
|
docker run --name memcached -p "11211:11211" -d memcached
|
||||||
docker run --name postgres -p "5432:5432" -d postgres
|
docker run --name postgres -p "5432:5432" -d postgres
|
||||||
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
|
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
|
||||||
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
|
||||||
docker run --name redis -p "6379:6379" -d redis
|
docker run --name redis -p "6379:6379" -d redis
|
||||||
docker run --name aerospike -p "3000:3000" -d aerospike
|
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||||
@@ -79,7 +78,6 @@ docker-run-circle:
|
|||||||
-e ADVERTISED_PORT=9092 \
|
-e ADVERTISED_PORT=9092 \
|
||||||
-p "2181:2181" -p "9092:9092" \
|
-p "2181:2181" -p "9092:9092" \
|
||||||
-d spotify/kafka
|
-d spotify/kafka
|
||||||
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
|
||||||
docker run --name aerospike -p "3000:3000" -d aerospike
|
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||||
@@ -88,8 +86,8 @@ docker-run-circle:
|
|||||||
|
|
||||||
# Kill all docker containers, ignore errors
|
# Kill all docker containers, ignore errors
|
||||||
docker-kill:
|
docker-kill:
|
||||||
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
|
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp
|
||||||
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
|
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp
|
||||||
|
|
||||||
# Run full unit tests using docker containers (includes setup and teardown)
|
# Run full unit tests using docker containers (includes setup and teardown)
|
||||||
test: vet docker-kill docker-run
|
test: vet docker-kill docker-run
|
||||||
|
|||||||
53
README.md
53
README.md
@@ -1,4 +1,4 @@
|
|||||||
# Telegraf [](https://circleci.com/gh/influxdata/telegraf)
|
# Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)
|
||||||
|
|
||||||
Telegraf is an agent written in Go for collecting metrics from the system it's
|
Telegraf is an agent written in Go for collecting metrics from the system it's
|
||||||
running on, or from other services, and writing them into InfluxDB or other
|
running on, or from other services, and writing them into InfluxDB or other
|
||||||
@@ -20,12 +20,12 @@ new plugins.
|
|||||||
### Linux deb and rpm Packages:
|
### Linux deb and rpm Packages:
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* http://get.influxdb.org/telegraf/telegraf_0.12.1-1_amd64.deb
|
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta2_amd64.deb
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1.x86_64.rpm
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta2.x86_64.rpm
|
||||||
|
|
||||||
Latest (arm):
|
Latest (arm):
|
||||||
* http://get.influxdb.org/telegraf/telegraf_0.12.1-1_armhf.deb
|
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta2_armhf.deb
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1.armhf.rpm
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta2.armhf.rpm
|
||||||
|
|
||||||
##### Package Instructions:
|
##### Package Instructions:
|
||||||
|
|
||||||
@@ -46,32 +46,14 @@ to use this repo to install & update telegraf.
|
|||||||
### Linux tarballs:
|
### Linux tarballs:
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_linux_amd64.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_amd64.tar.gz
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_linux_i386.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_i386.tar.gz
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_linux_armhf.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_armhf.tar.gz
|
||||||
|
|
||||||
##### tarball Instructions:
|
|
||||||
|
|
||||||
To install the full directory structure with config file, run:
|
|
||||||
|
|
||||||
```
|
|
||||||
sudo tar -C / -zxvf ./telegraf-0.12.1-1_linux_amd64.tar.gz
|
|
||||||
```
|
|
||||||
|
|
||||||
To extract only the binary, run:
|
|
||||||
|
|
||||||
```
|
|
||||||
tar -zxvf telegraf-0.12.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
|
|
||||||
```
|
|
||||||
|
|
||||||
### FreeBSD tarball:
|
### FreeBSD tarball:
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_freebsd_amd64.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_freebsd_amd64.tar.gz
|
||||||
|
|
||||||
##### tarball Instructions:
|
|
||||||
|
|
||||||
See linux instructions above.
|
|
||||||
|
|
||||||
### Ansible Role:
|
### Ansible Role:
|
||||||
|
|
||||||
@@ -87,8 +69,7 @@ brew install telegraf
|
|||||||
### Windows Binaries (EXPERIMENTAL)
|
### Windows Binaries (EXPERIMENTAL)
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_windows_amd64.zip
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_windows_amd64.zip
|
||||||
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_windows_i386.zip
|
|
||||||
|
|
||||||
### From Source:
|
### From Source:
|
||||||
|
|
||||||
@@ -161,6 +142,10 @@ Currently implemented sources:
|
|||||||
* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
|
* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
|
||||||
* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
|
* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
|
||||||
* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
|
* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
|
||||||
|
* [ceph](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ceph)
|
||||||
|
* [chrony](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/chrony)
|
||||||
|
* [consul](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul)
|
||||||
|
* [conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack)
|
||||||
* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
|
* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
|
||||||
* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
|
* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
|
||||||
* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
|
* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
|
||||||
@@ -168,7 +153,8 @@ Currently implemented sources:
|
|||||||
* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
|
* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
|
||||||
* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
|
* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
|
||||||
* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
|
* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
|
||||||
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec ) (generic executable plugin, support JSON, influx, graphite and nagios)
|
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
|
||||||
|
* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
|
||||||
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
|
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
|
||||||
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
|
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
|
||||||
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
|
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
|
||||||
@@ -185,6 +171,7 @@ Currently implemented sources:
|
|||||||
* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
|
* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
|
||||||
* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
|
* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
|
||||||
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
|
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
|
||||||
|
* [nstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nstat)
|
||||||
* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
|
* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
|
||||||
* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
|
* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
|
||||||
* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
|
* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
|
||||||
@@ -204,6 +191,7 @@ Currently implemented sources:
|
|||||||
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
|
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
|
||||||
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
|
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
|
||||||
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
|
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
|
||||||
|
* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
|
||||||
* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
|
* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
|
||||||
* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
|
* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
|
||||||
* [win_perf_counters ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
|
* [win_perf_counters ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
|
||||||
@@ -218,16 +206,19 @@ Currently implemented sources:
|
|||||||
* swap
|
* swap
|
||||||
* processes
|
* processes
|
||||||
* kernel (/proc/stat)
|
* kernel (/proc/stat)
|
||||||
|
* kernel (/proc/vmstat)
|
||||||
|
|
||||||
Telegraf can also collect metrics via the following service plugins:
|
Telegraf can also collect metrics via the following service plugins:
|
||||||
|
|
||||||
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
|
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
|
||||||
|
* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
|
||||||
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
|
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
|
||||||
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
|
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
|
||||||
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
|
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
|
||||||
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
|
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
|
||||||
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
|
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
|
||||||
* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)
|
* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)
|
||||||
|
* [rollbar_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rollbar_webhooks)
|
||||||
|
|
||||||
We'll be adding support for many more over the coming months. Read on if you
|
We'll be adding support for many more over the coming months. Read on if you
|
||||||
want to add support for another service or third-party API.
|
want to add support for another service or third-party API.
|
||||||
@@ -242,6 +233,8 @@ want to add support for another service or third-party API.
|
|||||||
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
|
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
|
||||||
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
|
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
|
||||||
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
|
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
|
||||||
|
* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
|
||||||
|
* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
|
||||||
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
|
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
|
||||||
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
|
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
|
||||||
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
|
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
|
||||||
|
|||||||
@@ -18,4 +18,8 @@ type Accumulator interface {
|
|||||||
|
|
||||||
Debug() bool
|
Debug() bool
|
||||||
SetDebug(enabled bool)
|
SetDebug(enabled bool)
|
||||||
|
|
||||||
|
SetPrecision(precision, interval time.Duration)
|
||||||
|
|
||||||
|
DisablePrecision()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"math"
|
"math"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
@@ -18,21 +17,24 @@ func NewAccumulator(
|
|||||||
acc := accumulator{}
|
acc := accumulator{}
|
||||||
acc.metrics = metrics
|
acc.metrics = metrics
|
||||||
acc.inputConfig = inputConfig
|
acc.inputConfig = inputConfig
|
||||||
|
acc.precision = time.Nanosecond
|
||||||
return &acc
|
return &acc
|
||||||
}
|
}
|
||||||
|
|
||||||
type accumulator struct {
|
type accumulator struct {
|
||||||
sync.Mutex
|
|
||||||
|
|
||||||
metrics chan telegraf.Metric
|
metrics chan telegraf.Metric
|
||||||
|
|
||||||
defaultTags map[string]string
|
defaultTags map[string]string
|
||||||
|
|
||||||
debug bool
|
debug bool
|
||||||
|
// print every point added to the accumulator
|
||||||
|
trace bool
|
||||||
|
|
||||||
inputConfig *internal_models.InputConfig
|
inputConfig *internal_models.InputConfig
|
||||||
|
|
||||||
prefix string
|
prefix string
|
||||||
|
|
||||||
|
precision time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) Add(
|
func (ac *accumulator) Add(
|
||||||
@@ -96,6 +98,7 @@ func (ac *accumulator) AddFields(
|
|||||||
tags[k] = v
|
tags[k] = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ac.inputConfig.Filter.FilterTags(tags)
|
||||||
|
|
||||||
result := make(map[string]interface{})
|
result := make(map[string]interface{})
|
||||||
for k, v := range fields {
|
for k, v := range fields {
|
||||||
@@ -141,6 +144,7 @@ func (ac *accumulator) AddFields(
|
|||||||
} else {
|
} else {
|
||||||
timestamp = time.Now()
|
timestamp = time.Now()
|
||||||
}
|
}
|
||||||
|
timestamp = timestamp.Round(ac.precision)
|
||||||
|
|
||||||
if ac.prefix != "" {
|
if ac.prefix != "" {
|
||||||
measurement = ac.prefix + measurement
|
measurement = ac.prefix + measurement
|
||||||
@@ -151,7 +155,7 @@ func (ac *accumulator) AddFields(
|
|||||||
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
|
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if ac.debug {
|
if ac.trace {
|
||||||
fmt.Println("> " + m.String())
|
fmt.Println("> " + m.String())
|
||||||
}
|
}
|
||||||
ac.metrics <- m
|
ac.metrics <- m
|
||||||
@@ -165,6 +169,39 @@ func (ac *accumulator) SetDebug(debug bool) {
|
|||||||
ac.debug = debug
|
ac.debug = debug
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) Trace() bool {
|
||||||
|
return ac.trace
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) SetTrace(trace bool) {
|
||||||
|
ac.trace = trace
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPrecision takes two time.Duration objects. If the first is non-zero,
|
||||||
|
// it sets that as the precision. Otherwise, it takes the second argument
|
||||||
|
// as the order of time that the metrics should be rounded to, with the
|
||||||
|
// maximum being 1s.
|
||||||
|
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
|
||||||
|
if precision > 0 {
|
||||||
|
ac.precision = precision
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case interval >= time.Second:
|
||||||
|
ac.precision = time.Second
|
||||||
|
case interval >= time.Millisecond:
|
||||||
|
ac.precision = time.Millisecond
|
||||||
|
case interval >= time.Microsecond:
|
||||||
|
ac.precision = time.Microsecond
|
||||||
|
default:
|
||||||
|
ac.precision = time.Nanosecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) DisablePrecision() {
|
||||||
|
ac.precision = time.Nanosecond
|
||||||
|
}
|
||||||
|
|
||||||
func (ac *accumulator) setDefaultTags(tags map[string]string) {
|
func (ac *accumulator) setDefaultTags(tags map[string]string) {
|
||||||
ac.defaultTags = tags
|
ac.defaultTags = tags
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -38,6 +38,128 @@ func TestAdd(t *testing.T) {
|
|||||||
actual)
|
actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAddNoPrecisionWithInterval(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Second)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddNoIntervalWithPrecision(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(time.Second, time.Millisecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddDisablePrecision(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(time.Second, time.Millisecond)
|
||||||
|
a.DisablePrecision()
|
||||||
|
a.Add("acctest", float64(101), map[string]string{})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDifferentPrecisions(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Second)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Millisecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Microsecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Nanosecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
func TestAddDefaultTags(t *testing.T) {
|
func TestAddDefaultTags(t *testing.T) {
|
||||||
a := accumulator{}
|
a := accumulator{}
|
||||||
a.addDefaultTag("default", "tag")
|
a.addDefaultTag("default", "tag")
|
||||||
@@ -300,3 +422,35 @@ func TestAddBools(t *testing.T) {
|
|||||||
fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
|
fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
|
||||||
actual)
|
actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Test that tag filters get applied to metrics.
|
||||||
|
func TestAccFilterTags(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Now()
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
filter := internal_models.Filter{
|
||||||
|
TagExclude: []string{"acc"},
|
||||||
|
}
|
||||||
|
assert.NoError(t, filter.CompileFilter())
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
a.inputConfig.Filter = filter
|
||||||
|
|
||||||
|
a.Add("acctest", float64(101), map[string]string{})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|||||||
190
agent/agent.go
190
agent/agent.go
@@ -1,17 +1,15 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
cryptorand "crypto/rand"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"math/big"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
"github.com/influxdata/telegraf/internal/config"
|
"github.com/influxdata/telegraf/internal/config"
|
||||||
"github.com/influxdata/telegraf/internal/models"
|
"github.com/influxdata/telegraf/internal/models"
|
||||||
)
|
)
|
||||||
@@ -102,93 +100,41 @@ func panicRecover(input *internal_models.RunningInput) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// gatherParallel runs the inputs that are using the same reporting interval
|
// gatherer runs the inputs that have been configured with their own
|
||||||
// as the telegraf agent.
|
|
||||||
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
counter := 0
|
|
||||||
jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
|
|
||||||
for _, input := range a.Config.Inputs {
|
|
||||||
if input.Config.Interval != 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Add(1)
|
|
||||||
counter++
|
|
||||||
go func(input *internal_models.RunningInput) {
|
|
||||||
defer panicRecover(input)
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
|
||||||
acc.SetDebug(a.Config.Agent.Debug)
|
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
|
||||||
|
|
||||||
if jitter != 0 {
|
|
||||||
nanoSleep := rand.Int63n(jitter)
|
|
||||||
d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Jittering collection interval failed for plugin %s",
|
|
||||||
input.Name)
|
|
||||||
} else {
|
|
||||||
time.Sleep(d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := input.Input.Gather(acc); err != nil {
|
|
||||||
log.Printf("Error in input [%s]: %s", input.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
}(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
if counter == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
if !a.Config.Agent.Quiet {
|
|
||||||
log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
|
|
||||||
a.Config.Agent.Interval.Duration, counter, elapsed)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// gatherSeparate runs the inputs that have been configured with their own
|
|
||||||
// reporting interval.
|
// reporting interval.
|
||||||
func (a *Agent) gatherSeparate(
|
func (a *Agent) gatherer(
|
||||||
shutdown chan struct{},
|
shutdown chan struct{},
|
||||||
input *internal_models.RunningInput,
|
input *internal_models.RunningInput,
|
||||||
|
interval time.Duration,
|
||||||
metricC chan telegraf.Metric,
|
metricC chan telegraf.Metric,
|
||||||
) error {
|
) error {
|
||||||
defer panicRecover(input)
|
defer panicRecover(input)
|
||||||
|
|
||||||
ticker := time.NewTicker(input.Config.Interval)
|
ticker := time.NewTicker(interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
var outerr error
|
var outerr error
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
acc := NewAccumulator(input.Config, metricC)
|
||||||
acc.SetDebug(a.Config.Agent.Debug)
|
acc.SetDebug(a.Config.Agent.Debug)
|
||||||
|
acc.SetPrecision(a.Config.Agent.Precision.Duration,
|
||||||
|
a.Config.Agent.Interval.Duration)
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
acc.setDefaultTags(a.Config.Tags)
|
||||||
|
|
||||||
if err := input.Input.Gather(acc); err != nil {
|
internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
|
||||||
log.Printf("Error in input [%s]: %s", input.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
gatherWithTimeout(shutdown, input, acc, interval)
|
||||||
elapsed := time.Since(start)
|
elapsed := time.Since(start)
|
||||||
if !a.Config.Agent.Quiet {
|
|
||||||
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
|
|
||||||
input.Config.Interval, input.Name, elapsed)
|
|
||||||
}
|
|
||||||
|
|
||||||
if outerr != nil {
|
if outerr != nil {
|
||||||
return outerr
|
return outerr
|
||||||
}
|
}
|
||||||
|
if a.Config.Agent.Debug {
|
||||||
|
log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n",
|
||||||
|
input.Name, interval, elapsed)
|
||||||
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-shutdown:
|
case <-shutdown:
|
||||||
@@ -199,6 +145,42 @@ func (a *Agent) gatherSeparate(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// gatherWithTimeout gathers from the given input, with the given timeout.
|
||||||
|
// when the given timeout is reached, gatherWithTimeout logs an error message
|
||||||
|
// but continues waiting for it to return. This is to avoid leaving behind
|
||||||
|
// hung processes, and to prevent re-calling the same hung process over and
|
||||||
|
// over.
|
||||||
|
func gatherWithTimeout(
|
||||||
|
shutdown chan struct{},
|
||||||
|
input *internal_models.RunningInput,
|
||||||
|
acc *accumulator,
|
||||||
|
timeout time.Duration,
|
||||||
|
) {
|
||||||
|
ticker := time.NewTicker(timeout)
|
||||||
|
defer ticker.Stop()
|
||||||
|
done := make(chan error)
|
||||||
|
go func() {
|
||||||
|
done <- input.Input.Gather(acc)
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case err := <-done:
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR in input [%s]: %s", input.Name, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
log.Printf("ERROR: input [%s] took longer to collect than "+
|
||||||
|
"collection interval (%s)",
|
||||||
|
input.Name, timeout)
|
||||||
|
continue
|
||||||
|
case <-shutdown:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Test verifies that we can 'Gather' from all inputs with their configured
|
// Test verifies that we can 'Gather' from all inputs with their configured
|
||||||
// Config struct
|
// Config struct
|
||||||
func (a *Agent) Test() error {
|
func (a *Agent) Test() error {
|
||||||
@@ -220,7 +202,10 @@ func (a *Agent) Test() error {
|
|||||||
|
|
||||||
for _, input := range a.Config.Inputs {
|
for _, input := range a.Config.Inputs {
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
acc := NewAccumulator(input.Config, metricC)
|
||||||
acc.SetDebug(true)
|
acc.SetTrace(true)
|
||||||
|
acc.SetPrecision(a.Config.Agent.Precision.Duration,
|
||||||
|
a.Config.Agent.Interval.Duration)
|
||||||
|
acc.setDefaultTags(a.Config.Tags)
|
||||||
|
|
||||||
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
||||||
if input.Config.Interval != 0 {
|
if input.Config.Interval != 0 {
|
||||||
@@ -280,6 +265,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
|
|||||||
a.flush()
|
a.flush()
|
||||||
return nil
|
return nil
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
|
internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
|
||||||
a.flush()
|
a.flush()
|
||||||
case m := <-metricC:
|
case m := <-metricC:
|
||||||
for _, o := range a.Config.Outputs {
|
for _, o := range a.Config.Outputs {
|
||||||
@@ -289,35 +275,10 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// jitterInterval applies the the interval jitter to the flush interval using
|
|
||||||
// crypto/rand number generator
|
|
||||||
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
|
|
||||||
var jitter int64
|
|
||||||
outinterval := ininterval
|
|
||||||
if injitter.Nanoseconds() != 0 {
|
|
||||||
maxjitter := big.NewInt(injitter.Nanoseconds())
|
|
||||||
if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
|
|
||||||
jitter = j.Int64()
|
|
||||||
}
|
|
||||||
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
|
|
||||||
}
|
|
||||||
|
|
||||||
if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
|
|
||||||
log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
|
|
||||||
outinterval = time.Duration(500 * time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
return outinterval
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run runs the agent daemon, gathering every Interval
|
// Run runs the agent daemon, gathering every Interval
|
||||||
func (a *Agent) Run(shutdown chan struct{}) error {
|
func (a *Agent) Run(shutdown chan struct{}) error {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
a.Config.Agent.FlushInterval.Duration = jitterInterval(
|
|
||||||
a.Config.Agent.FlushInterval.Duration,
|
|
||||||
a.Config.Agent.FlushJitter.Duration)
|
|
||||||
|
|
||||||
log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
|
log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
|
||||||
"Flush Interval:%s \n",
|
"Flush Interval:%s \n",
|
||||||
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
|
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
|
||||||
@@ -332,6 +293,9 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
|||||||
case telegraf.ServiceInput:
|
case telegraf.ServiceInput:
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
acc := NewAccumulator(input.Config, metricC)
|
||||||
acc.SetDebug(a.Config.Agent.Debug)
|
acc.SetDebug(a.Config.Agent.Debug)
|
||||||
|
// Service input plugins should set their own precision of their
|
||||||
|
// metrics.
|
||||||
|
acc.DisablePrecision()
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
acc.setDefaultTags(a.Config.Tags)
|
||||||
if err := p.Start(acc); err != nil {
|
if err := p.Start(acc); err != nil {
|
||||||
log.Printf("Service for input %s failed to start, exiting\n%s\n",
|
log.Printf("Service for input %s failed to start, exiting\n%s\n",
|
||||||
@@ -347,7 +311,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
|||||||
i := int64(a.Config.Agent.Interval.Duration)
|
i := int64(a.Config.Agent.Interval.Duration)
|
||||||
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
|
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
|
||||||
}
|
}
|
||||||
ticker := time.NewTicker(a.Config.Agent.Interval.Duration)
|
|
||||||
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
@@ -358,32 +321,21 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
wg.Add(len(a.Config.Inputs))
|
||||||
for _, input := range a.Config.Inputs {
|
for _, input := range a.Config.Inputs {
|
||||||
// Special handling for inputs that have their own collection interval
|
interval := a.Config.Agent.Interval.Duration
|
||||||
// configured. Default intervals are handled below with gatherParallel
|
// overwrite global interval if this plugin has it's own.
|
||||||
if input.Config.Interval != 0 {
|
if input.Config.Interval != 0 {
|
||||||
wg.Add(1)
|
interval = input.Config.Interval
|
||||||
go func(input *internal_models.RunningInput) {
|
|
||||||
defer wg.Done()
|
|
||||||
if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
|
|
||||||
log.Printf(err.Error())
|
|
||||||
}
|
|
||||||
}(input)
|
|
||||||
}
|
}
|
||||||
|
go func(in *internal_models.RunningInput, interv time.Duration) {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
|
||||||
|
log.Printf(err.Error())
|
||||||
|
}
|
||||||
|
}(input, interval)
|
||||||
}
|
}
|
||||||
|
|
||||||
defer wg.Wait()
|
wg.Wait()
|
||||||
|
return nil
|
||||||
for {
|
|
||||||
if err := a.gatherParallel(metricC); err != nil {
|
|
||||||
log.Printf(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-shutdown:
|
|
||||||
return nil
|
|
||||||
case <-ticker.C:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/internal/config"
|
"github.com/influxdata/telegraf/internal/config"
|
||||||
|
|
||||||
@@ -110,75 +109,3 @@ func TestAgent_LoadOutput(t *testing.T) {
|
|||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAgent_ZeroJitter(t *testing.T) {
|
|
||||||
flushinterval := jitterInterval(time.Duration(10*time.Second),
|
|
||||||
time.Duration(0*time.Second))
|
|
||||||
|
|
||||||
actual := flushinterval.Nanoseconds()
|
|
||||||
exp := time.Duration(10 * time.Second).Nanoseconds()
|
|
||||||
|
|
||||||
if actual != exp {
|
|
||||||
t.Errorf("Actual %v, expected %v", actual, exp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAgent_ZeroInterval(t *testing.T) {
|
|
||||||
min := time.Duration(500 * time.Millisecond).Nanoseconds()
|
|
||||||
max := time.Duration(5 * time.Second).Nanoseconds()
|
|
||||||
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
flushinterval := jitterInterval(time.Duration(0*time.Second),
|
|
||||||
time.Duration(5*time.Second))
|
|
||||||
actual := flushinterval.Nanoseconds()
|
|
||||||
|
|
||||||
if actual > max {
|
|
||||||
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if actual < min {
|
|
||||||
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAgent_ZeroBoth(t *testing.T) {
|
|
||||||
flushinterval := jitterInterval(time.Duration(0*time.Second),
|
|
||||||
time.Duration(0*time.Second))
|
|
||||||
|
|
||||||
actual := flushinterval
|
|
||||||
exp := time.Duration(500 * time.Millisecond)
|
|
||||||
|
|
||||||
if actual != exp {
|
|
||||||
t.Errorf("Actual %v, expected %v", actual, exp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAgent_JitterMax(t *testing.T) {
|
|
||||||
max := time.Duration(32 * time.Second).Nanoseconds()
|
|
||||||
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
flushinterval := jitterInterval(time.Duration(30*time.Second),
|
|
||||||
time.Duration(2*time.Second))
|
|
||||||
actual := flushinterval.Nanoseconds()
|
|
||||||
if actual > max {
|
|
||||||
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAgent_JitterMin(t *testing.T) {
|
|
||||||
min := time.Duration(30 * time.Second).Nanoseconds()
|
|
||||||
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
flushinterval := jitterInterval(time.Duration(30*time.Second),
|
|
||||||
time.Duration(2*time.Second))
|
|
||||||
actual := flushinterval.Nanoseconds()
|
|
||||||
if actual < min {
|
|
||||||
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -4,9 +4,9 @@ machine:
|
|||||||
post:
|
post:
|
||||||
- sudo service zookeeper stop
|
- sudo service zookeeper stop
|
||||||
- go version
|
- go version
|
||||||
- go version | grep 1.6.1 || sudo rm -rf /usr/local/go
|
- go version | grep 1.6.2 || sudo rm -rf /usr/local/go
|
||||||
- wget https://storage.googleapis.com/golang/go1.6.1.linux-amd64.tar.gz
|
- wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz
|
||||||
- sudo tar -C /usr/local -xzf go1.6.1.linux-amd64.tar.gz
|
- sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz
|
||||||
- go version
|
- go version
|
||||||
|
|
||||||
dependencies:
|
dependencies:
|
||||||
|
|||||||
@@ -46,9 +46,13 @@ var fOutputFiltersLegacy = flag.String("outputfilter", "",
|
|||||||
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
|
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
|
||||||
"directory containing additional *.conf files")
|
"directory containing additional *.conf files")
|
||||||
|
|
||||||
// Telegraf version
|
// Telegraf version, populated linker.
|
||||||
// -ldflags "-X main.Version=`git describe --always --tags`"
|
// ie, -ldflags "-X main.version=`git describe --always --tags`"
|
||||||
var Version string
|
var (
|
||||||
|
version string
|
||||||
|
commit string
|
||||||
|
branch string
|
||||||
|
)
|
||||||
|
|
||||||
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||||
|
|
||||||
@@ -71,6 +75,13 @@ The flags are:
|
|||||||
-quiet run in quiet mode
|
-quiet run in quiet mode
|
||||||
-version print the version to stdout
|
-version print the version to stdout
|
||||||
|
|
||||||
|
In addition to the -config flag, telegraf will also load the config file from
|
||||||
|
an environment variable or default location. Precedence is:
|
||||||
|
1. -config flag
|
||||||
|
2. $TELEGRAF_CONFIG_PATH environment variable
|
||||||
|
3. $HOME/.telegraf/telegraf.conf
|
||||||
|
4. /etc/telegraf/telegraf.conf
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
|
|
||||||
# generate a telegraf config file:
|
# generate a telegraf config file:
|
||||||
@@ -98,12 +109,10 @@ func main() {
|
|||||||
flag.Parse()
|
flag.Parse()
|
||||||
args := flag.Args()
|
args := flag.Args()
|
||||||
|
|
||||||
if flag.NFlag() == 0 && len(args) == 0 {
|
|
||||||
usageExit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
var inputFilters []string
|
var inputFilters []string
|
||||||
if *fInputFiltersLegacy != "" {
|
if *fInputFiltersLegacy != "" {
|
||||||
|
fmt.Printf("WARNING '--filter' flag is deprecated, please use" +
|
||||||
|
" '--input-filter'")
|
||||||
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
|
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
|
||||||
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
||||||
}
|
}
|
||||||
@@ -114,6 +123,8 @@ func main() {
|
|||||||
|
|
||||||
var outputFilters []string
|
var outputFilters []string
|
||||||
if *fOutputFiltersLegacy != "" {
|
if *fOutputFiltersLegacy != "" {
|
||||||
|
fmt.Printf("WARNING '--outputfilter' flag is deprecated, please use" +
|
||||||
|
" '--output-filter'")
|
||||||
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
|
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
|
||||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||||
}
|
}
|
||||||
@@ -125,7 +136,7 @@ func main() {
|
|||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
switch args[0] {
|
switch args[0] {
|
||||||
case "version":
|
case "version":
|
||||||
v := fmt.Sprintf("Telegraf - Version %s", Version)
|
v := fmt.Sprintf("Telegraf - version %s", version)
|
||||||
fmt.Println(v)
|
fmt.Println(v)
|
||||||
return
|
return
|
||||||
case "config":
|
case "config":
|
||||||
@@ -151,7 +162,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if *fVersion {
|
if *fVersion {
|
||||||
v := fmt.Sprintf("Telegraf - Version %s", Version)
|
v := fmt.Sprintf("Telegraf - version %s", version)
|
||||||
fmt.Println(v)
|
fmt.Println(v)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -170,25 +181,19 @@ func main() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
// If no other options are specified, load the config file and run.
|
||||||
c *config.Config
|
c := config.NewConfig()
|
||||||
err error
|
c.OutputFilters = outputFilters
|
||||||
)
|
c.InputFilters = inputFilters
|
||||||
|
err := c.LoadConfig(*fConfig)
|
||||||
if *fConfig != "" {
|
if err != nil {
|
||||||
c = config.NewConfig()
|
fmt.Println(err)
|
||||||
c.OutputFilters = outputFilters
|
|
||||||
c.InputFilters = inputFilters
|
|
||||||
err = c.LoadConfig(*fConfig)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Println("You must specify a config file. See telegraf --help")
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
if *fConfigDirectoryLegacy != "" {
|
if *fConfigDirectoryLegacy != "" {
|
||||||
|
fmt.Printf("WARNING '--configdirectory' flag is deprecated, please use" +
|
||||||
|
" '--config-directory'")
|
||||||
err = c.LoadDirectory(*fConfigDirectoryLegacy)
|
err = c.LoadDirectory(*fConfigDirectoryLegacy)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
@@ -250,7 +255,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
log.Printf("Starting Telegraf (version %s)\n", Version)
|
log.Printf("Starting Telegraf (version %s)\n", version)
|
||||||
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||||
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
||||||
log.Printf("Tags enabled: %s", c.ListTags())
|
log.Printf("Tags enabled: %s", c.ListTags())
|
||||||
|
|||||||
@@ -3,11 +3,20 @@
|
|||||||
## Generating a Configuration File
|
## Generating a Configuration File
|
||||||
|
|
||||||
A default Telegraf config file can be generated using the -sample-config flag:
|
A default Telegraf config file can be generated using the -sample-config flag:
|
||||||
`telegraf -sample-config > telegraf.conf`
|
|
||||||
|
```
|
||||||
|
telegraf -sample-config > telegraf.conf
|
||||||
|
```
|
||||||
|
|
||||||
To generate a file with specific inputs and outputs, you can use the
|
To generate a file with specific inputs and outputs, you can use the
|
||||||
-input-filter and -output-filter flags:
|
-input-filter and -output-filter flags:
|
||||||
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`
|
|
||||||
|
```
|
||||||
|
telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka
|
||||||
|
```
|
||||||
|
|
||||||
|
You can see the latest config file with all available plugins here:
|
||||||
|
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
|
||||||
|
|
||||||
## Environment Variables
|
## Environment Variables
|
||||||
|
|
||||||
@@ -17,8 +26,8 @@ for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
|
|||||||
|
|
||||||
## `[global_tags]` Configuration
|
## `[global_tags]` Configuration
|
||||||
|
|
||||||
Global tags can be specific in the `[global_tags]` section of the config file in
|
Global tags can be specified in the `[global_tags]` section of the config file
|
||||||
key="value" format. All metrics being gathered on this host will be tagged
|
in key="value" format. All metrics being gathered on this host will be tagged
|
||||||
with the tags specified here.
|
with the tags specified here.
|
||||||
|
|
||||||
## `[agent]` Configuration
|
## `[agent]` Configuration
|
||||||
@@ -29,8 +38,12 @@ config.
|
|||||||
* **interval**: Default data collection interval for all inputs
|
* **interval**: Default data collection interval for all inputs
|
||||||
* **round_interval**: Rounds collection interval to 'interval'
|
* **round_interval**: Rounds collection interval to 'interval'
|
||||||
ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||||
|
* **metric_batch_size**: Telegraf will send metrics to output in batch of at
|
||||||
|
most metric_batch_size metrics.
|
||||||
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
|
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
|
||||||
for each output, and will flush this buffer on a successful write.
|
for each output, and will flush this buffer on a successful write.
|
||||||
|
This should be a multiple of metric_batch_size and could not be less
|
||||||
|
than 2 times metric_batch_size.
|
||||||
* **collection_jitter**: Collection jitter is used to jitter
|
* **collection_jitter**: Collection jitter is used to jitter
|
||||||
the collection by a random amount.
|
the collection by a random amount.
|
||||||
Each plugin will sleep for a random time within jitter before collecting.
|
Each plugin will sleep for a random time within jitter before collecting.
|
||||||
@@ -47,9 +60,35 @@ ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s
|
|||||||
* **quiet**: Run telegraf in quiet mode.
|
* **quiet**: Run telegraf in quiet mode.
|
||||||
* **hostname**: Override default hostname, if empty use os.Hostname().
|
* **hostname**: Override default hostname, if empty use os.Hostname().
|
||||||
|
|
||||||
## `[inputs.xxx]` Configuration
|
#### Measurement Filtering
|
||||||
|
|
||||||
There are some configuration options that are configurable per input:
|
Filters can be configured per input or output, see below for examples.
|
||||||
|
|
||||||
|
* **namepass**: An array of strings that is used to filter metrics generated by the
|
||||||
|
current input. Each string in the array is tested as a glob match against
|
||||||
|
measurement names and if it matches, the field is emitted.
|
||||||
|
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
|
||||||
|
* **fieldpass**: An array of strings that is used to filter metrics generated by the
|
||||||
|
current input. Each string in the array is tested as a glob match against field names
|
||||||
|
and if it matches, the field is emitted. fieldpass is not available for outputs.
|
||||||
|
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
|
||||||
|
fielddrop is not available for outputs.
|
||||||
|
* **tagpass**: tag names and arrays of strings that are used to filter
|
||||||
|
measurements by the current input. Each string in the array is tested as a glob
|
||||||
|
match against the tag name, and if it matches the measurement is emitted.
|
||||||
|
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
|
||||||
|
emitted. This is tested on measurements that have passed the tagpass test.
|
||||||
|
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
|
||||||
|
As opposed to tagdrop, which will drop an entire measurement based on it's
|
||||||
|
tags, tagexclude simply strips the given tag keys from the measurement. This
|
||||||
|
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
|
||||||
|
as it is more efficient to filter out tags at the ingestion point.
|
||||||
|
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
|
||||||
|
the tag keys in the final measurement.
|
||||||
|
|
||||||
|
## Input Configuration
|
||||||
|
|
||||||
|
Some configuration options are configurable per input:
|
||||||
|
|
||||||
* **name_override**: Override the base name of the measurement.
|
* **name_override**: Override the base name of the measurement.
|
||||||
(Default is the name of the input).
|
(Default is the name of the input).
|
||||||
@@ -60,24 +99,6 @@ There are some configuration options that are configurable per input:
|
|||||||
global interval, but if one particular input should be run less or more often,
|
global interval, but if one particular input should be run less or more often,
|
||||||
you can configure that here.
|
you can configure that here.
|
||||||
|
|
||||||
#### Input Filters
|
|
||||||
|
|
||||||
There are also filters that can be configured per input:
|
|
||||||
|
|
||||||
* **namepass**: An array of strings that is used to filter metrics generated by the
|
|
||||||
current input. Each string in the array is tested as a glob match against
|
|
||||||
measurement names and if it matches, the field is emitted.
|
|
||||||
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
|
|
||||||
* **fieldpass**: An array of strings that is used to filter metrics generated by the
|
|
||||||
current input. Each string in the array is tested as a glob match against field names
|
|
||||||
and if it matches, the field is emitted.
|
|
||||||
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
|
|
||||||
* **tagpass**: tag names and arrays of strings that are used to filter
|
|
||||||
measurements by the current input. Each string in the array is tested as a glob
|
|
||||||
match against the tag name, and if it matches the measurement is emitted.
|
|
||||||
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
|
|
||||||
emitted. This is tested on measurements that have passed the tagpass test.
|
|
||||||
|
|
||||||
#### Input Configuration Examples
|
#### Input Configuration Examples
|
||||||
|
|
||||||
This is a full working config that will output CPU data to an InfluxDB instance
|
This is a full working config that will output CPU data to an InfluxDB instance
|
||||||
@@ -155,6 +176,20 @@ fields which begin with `time_`.
|
|||||||
namepass = ["rest_client_*"]
|
namepass = ["rest_client_*"]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Input Config: taginclude and tagexclude
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Only include the "cpu" tag in the measurements for the cpu plugin.
|
||||||
|
[[inputs.cpu]]
|
||||||
|
percpu = true
|
||||||
|
totalcpu = true
|
||||||
|
taginclude = ["cpu"]
|
||||||
|
|
||||||
|
# Exclude the "fstype" tag from the measurements for the disk plugin.
|
||||||
|
[[inputs.disk]]
|
||||||
|
tagexclude = ["fstype"]
|
||||||
|
```
|
||||||
|
|
||||||
#### Input config: prefix, suffix, and override
|
#### Input config: prefix, suffix, and override
|
||||||
|
|
||||||
This plugin will emit measurements with the name `cpu_total`
|
This plugin will emit measurements with the name `cpu_total`
|
||||||
@@ -180,6 +215,9 @@ This will emit measurements with the name `foobar`
|
|||||||
This plugin will emit measurements with two additional tags: `tag1=foo` and
|
This plugin will emit measurements with two additional tags: `tag1=foo` and
|
||||||
`tag2=bar`
|
`tag2=bar`
|
||||||
|
|
||||||
|
NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the
|
||||||
|
plugin definition.
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[[inputs.cpu]]
|
[[inputs.cpu]]
|
||||||
percpu = false
|
percpu = false
|
||||||
@@ -208,15 +246,12 @@ to avoid measurement collisions:
|
|||||||
fielddrop = ["cpu_time*"]
|
fielddrop = ["cpu_time*"]
|
||||||
```
|
```
|
||||||
|
|
||||||
## `[outputs.xxx]` Configuration
|
## Output Configuration
|
||||||
|
|
||||||
Telegraf also supports specifying multiple output sinks to send data to,
|
Telegraf also supports specifying multiple output sinks to send data to,
|
||||||
configuring each output sink is different, but examples can be
|
configuring each output sink is different, but examples can be
|
||||||
found by running `telegraf -sample-config`.
|
found by running `telegraf -sample-config`.
|
||||||
|
|
||||||
Outputs also support the same configurable options as inputs
|
|
||||||
(namepass, namedrop, tagpass, tagdrop)
|
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[[outputs.influxdb]]
|
[[outputs.influxdb]]
|
||||||
urls = [ "http://localhost:8086" ]
|
urls = [ "http://localhost:8086" ]
|
||||||
|
|||||||
@@ -75,14 +75,19 @@ metrics are parsed directly into Telegraf metrics.
|
|||||||
|
|
||||||
# JSON:
|
# JSON:
|
||||||
|
|
||||||
The JSON data format flattens JSON into metric _fields_. For example, this JSON:
|
The JSON data format flattens JSON into metric _fields_.
|
||||||
|
NOTE: Only numerical values are converted to fields, and they are converted
|
||||||
|
into a float. strings are ignored unless specified as a tag_key (see below).
|
||||||
|
|
||||||
|
So for example, this JSON:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"a": 5,
|
"a": 5,
|
||||||
"b": {
|
"b": {
|
||||||
"c": 6
|
"c": 6
|
||||||
}
|
},
|
||||||
|
"ignored": "I'm a string"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -151,7 +156,12 @@ as the parsed metric.
|
|||||||
#### Value Configuration:
|
#### Value Configuration:
|
||||||
|
|
||||||
You **must** tell Telegraf what type of metric to collect by using the
|
You **must** tell Telegraf what type of metric to collect by using the
|
||||||
`data_type` configuration option.
|
`data_type` configuration option. Available options are:
|
||||||
|
|
||||||
|
1. integer
|
||||||
|
2. float or long
|
||||||
|
3. string
|
||||||
|
4. boolean
|
||||||
|
|
||||||
**Note:** It is also recommended that you set `name_override` to a measurement
|
**Note:** It is also recommended that you set `name_override` to a measurement
|
||||||
name that makes sense for your metric, otherwise it will just be set to the
|
name that makes sense for your metric, otherwise it will just be set to the
|
||||||
@@ -176,49 +186,59 @@ name of the plugin.
|
|||||||
# Graphite:
|
# Graphite:
|
||||||
|
|
||||||
The Graphite data format translates graphite _dot_ buckets directly into
|
The Graphite data format translates graphite _dot_ buckets directly into
|
||||||
telegraf measurement names, with a single value field, and without any tags. For
|
telegraf measurement names, with a single value field, and without any tags.
|
||||||
more advanced options, Telegraf supports specifying "templates" to translate
|
By default, the separator is left as ".", but this can be changed using the
|
||||||
|
"separator" argument. For more advanced options,
|
||||||
|
Telegraf supports specifying "templates" to translate
|
||||||
graphite buckets into Telegraf metrics.
|
graphite buckets into Telegraf metrics.
|
||||||
|
|
||||||
#### Separator:
|
Templates are of the form:
|
||||||
|
|
||||||
You can specify a separator to use for the parsed metrics.
|
|
||||||
By default, it will leave the metrics with a "." separator.
|
|
||||||
Setting `separator = "_"` will translate:
|
|
||||||
|
|
||||||
```
|
```
|
||||||
cpu.usage.idle 99
|
"host.mytag.mytag.measurement.measurement.field*"
|
||||||
=> cpu_usage_idle value=99
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Measurement/Tag Templates:
|
Where the following keywords exist:
|
||||||
|
|
||||||
|
1. `measurement`: specifies that this section of the graphite bucket corresponds
|
||||||
|
to the measurement name. This can be specified multiple times.
|
||||||
|
2. `field`: specifies that this section of the graphite bucket corresponds
|
||||||
|
to the field name. This can be specified multiple times.
|
||||||
|
3. `measurement*`: specifies that all remaining elements of the graphite bucket
|
||||||
|
correspond to the measurement name.
|
||||||
|
4. `field*`: specifies that all remaining elements of the graphite bucket
|
||||||
|
correspond to the field name.
|
||||||
|
|
||||||
|
Any part of the template that is not a keyword is treated as a tag key. This
|
||||||
|
can also be specified multiple times.
|
||||||
|
|
||||||
|
NOTE: `field*` cannot be used in conjunction with `measurement*`!
|
||||||
|
|
||||||
|
#### Measurement & Tag Templates:
|
||||||
|
|
||||||
The most basic template is to specify a single transformation to apply to all
|
The most basic template is to specify a single transformation to apply to all
|
||||||
incoming metrics. _measurement_ is a special keyword that tells Telegraf which
|
incoming metrics. So the following template:
|
||||||
parts of the graphite bucket to combine into the measurement name. It can have a
|
|
||||||
trailing `*` to indicate that the remainder of the metric should be used.
|
|
||||||
Other words are considered tag keys. So the following template:
|
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
templates = [
|
templates = [
|
||||||
"region.measurement*"
|
"region.region.measurement*"
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
```
|
```
|
||||||
us-west.cpu.load 100
|
us.west.cpu.load 100
|
||||||
=> cpu.load,region=us-west value=100
|
=> cpu.load,region=us.west value=100
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Field Templates:
|
#### Field Templates:
|
||||||
|
|
||||||
There is also a _field_ keyword, which can only be specified once.
|
|
||||||
The field keyword tells Telegraf to give the metric that field name.
|
The field keyword tells Telegraf to give the metric that field name.
|
||||||
So the following template:
|
So the following template:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
|
separator = "_"
|
||||||
templates = [
|
templates = [
|
||||||
"measurement.measurement.field.field.region"
|
"measurement.measurement.field.field.region"
|
||||||
]
|
]
|
||||||
@@ -227,24 +247,26 @@ templates = [
|
|||||||
would result in the following Graphite -> Telegraf transformation.
|
would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
```
|
```
|
||||||
cpu.usage.idle.percent.us-west 100
|
cpu.usage.idle.percent.eu-east 100
|
||||||
=> cpu_usage,region=us-west idle_percent=100
|
=> cpu_usage,region=eu-east idle_percent=100
|
||||||
```
|
```
|
||||||
|
|
||||||
The field key can also be derived from the second "half" of the input metric-name by specifying ```field*```:
|
The field key can also be derived from all remaining elements of the graphite
|
||||||
|
bucket by specifying `field*`:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
|
separator = "_"
|
||||||
templates = [
|
templates = [
|
||||||
"measurement.measurement.region.field*"
|
"measurement.measurement.region.field*"
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
which would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
```
|
```
|
||||||
cpu.usage.us-west.idle.percentage 100
|
cpu.usage.eu-east.idle.percentage 100
|
||||||
=> cpu_usage,region=us-west idle_percentage=100
|
=> cpu_usage,region=eu-east idle_percentage=100
|
||||||
```
|
```
|
||||||
(This cannot be used in conjunction with "measurement*"!)
|
|
||||||
|
|
||||||
#### Filter Templates:
|
#### Filter Templates:
|
||||||
|
|
||||||
@@ -261,8 +283,8 @@ templates = [
|
|||||||
which would result in the following transformation:
|
which would result in the following transformation:
|
||||||
|
|
||||||
```
|
```
|
||||||
cpu.load.us-west 100
|
cpu.load.eu-east 100
|
||||||
=> cpu_load,region=us-west value=100
|
=> cpu_load,region=eu-east value=100
|
||||||
|
|
||||||
mem.cached.localhost 256
|
mem.cached.localhost 256
|
||||||
=> mem_cached,host=localhost value=256
|
=> mem_cached,host=localhost value=256
|
||||||
@@ -284,8 +306,8 @@ templates = [
|
|||||||
would result in the following Graphite -> Telegraf transformation.
|
would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
```
|
```
|
||||||
cpu.usage.idle.us-west 100
|
cpu.usage.idle.eu-east 100
|
||||||
=> cpu_usage,region=us-west,datacenter=1a idle=100
|
=> cpu_usage,region=eu-east,datacenter=1a idle=100
|
||||||
```
|
```
|
||||||
|
|
||||||
There are many more options available,
|
There are many more options available,
|
||||||
@@ -316,12 +338,12 @@ There are many more options available,
|
|||||||
## similar to the line protocol format. There can be only one default template.
|
## similar to the line protocol format. There can be only one default template.
|
||||||
## Templates support below format:
|
## Templates support below format:
|
||||||
## 1. filter + template
|
## 1. filter + template
|
||||||
## 2. filter + template + extra tag
|
## 2. filter + template + extra tag(s)
|
||||||
## 3. filter + template with field key
|
## 3. filter + template with field key
|
||||||
## 4. default template
|
## 4. default template
|
||||||
templates = [
|
templates = [
|
||||||
"*.app env.service.resource.measurement",
|
"*.app env.service.resource.measurement",
|
||||||
"stats.* .host.measurement* region=us-west,agent=sensu",
|
"stats.* .host.measurement* region=eu-east,agent=sensu",
|
||||||
"stats2.* .host.measurement.field",
|
"stats2.* .host.measurement.field",
|
||||||
"measurement*"
|
"measurement*"
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -28,6 +28,5 @@
|
|||||||
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
||||||
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
||||||
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
||||||
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
||||||
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)
|
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
79
filter/filter.go
Normal file
79
filter/filter.go
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
package filter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gobwas/glob"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Filter is the interface for matching a string against a compiled
// filter list (see CompileFilter). Match reports whether the given
// string is accepted by the filter.
type Filter interface {
	Match(string) bool
}
|
||||||
|
|
||||||
|
// CompileFilter takes a list of string filters and returns a Filter interface
|
||||||
|
// for matching a given string against the filter list. The filter list
|
||||||
|
// supports glob matching too, ie:
|
||||||
|
//
|
||||||
|
// f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
|
||||||
|
// f.Match("cpu") // true
|
||||||
|
// f.Match("network") // true
|
||||||
|
// f.Match("memory") // false
|
||||||
|
//
|
||||||
|
func CompileFilter(filters []string) (Filter, error) {
|
||||||
|
// return if there is nothing to compile
|
||||||
|
if len(filters) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if we can compile a non-glob filter
|
||||||
|
noGlob := true
|
||||||
|
for _, filter := range filters {
|
||||||
|
if hasMeta(filter) {
|
||||||
|
noGlob = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case noGlob:
|
||||||
|
// return non-globbing filter if not needed.
|
||||||
|
return compileFilterNoGlob(filters), nil
|
||||||
|
case len(filters) == 1:
|
||||||
|
return glob.Compile(filters[0])
|
||||||
|
default:
|
||||||
|
return glob.Compile("{" + strings.Join(filters, ",") + "}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasMeta reports whether s contains any magic glob characters
// (*, ?, or [) that would require pattern compilation.
func hasMeta(s string) bool {
	return strings.ContainsAny(s, "*?[")
}
|
||||||
|
|
||||||
|
// filter implements Filter by exact set membership: a string matches
// if and only if it is one of the compiled filter entries.
type filter struct {
	// m is the set of accepted strings (empty-struct values: set idiom).
	m map[string]struct{}
}

// Match reports whether s is exactly one of the filter entries.
func (f *filter) Match(s string) bool {
	_, ok := f.m[s]
	return ok
}
|
||||||
|
|
||||||
|
// filtersingle implements Filter for the common single-entry case,
// avoiding the map allocation and lookup of filter.
type filtersingle struct {
	// s is the single accepted string.
	s string
}

// Match reports whether s equals the single filter entry.
func (f *filtersingle) Match(s string) bool {
	return f.s == s
}
|
||||||
|
|
||||||
|
func compileFilterNoGlob(filters []string) Filter {
|
||||||
|
if len(filters) == 1 {
|
||||||
|
return &filtersingle{s: filters[0]}
|
||||||
|
}
|
||||||
|
out := filter{m: make(map[string]struct{})}
|
||||||
|
for _, filter := range filters {
|
||||||
|
out.m[filter] = struct{}{}
|
||||||
|
}
|
||||||
|
return &out
|
||||||
|
}
|
||||||
96
filter/filter_test.go
Normal file
96
filter/filter_test.go
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
package filter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCompileFilter(t *testing.T) {
|
||||||
|
f, err := CompileFilter([]string{})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Nil(t, f)
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.False(t, f.Match("cpu0"))
|
||||||
|
assert.False(t, f.Match("mem"))
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu*"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.True(t, f.Match("cpu0"))
|
||||||
|
assert.False(t, f.Match("mem"))
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu", "mem"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.False(t, f.Match("cpu0"))
|
||||||
|
assert.True(t, f.Match("mem"))
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu", "mem", "net*"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.False(t, f.Match("cpu0"))
|
||||||
|
assert.True(t, f.Match("mem"))
|
||||||
|
assert.True(t, f.Match("network"))
|
||||||
|
}
|
||||||
|
|
||||||
|
var benchbool bool
|
||||||
|
|
||||||
|
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("network")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("cpu")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilter(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("network")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilterNoGlob(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu", "mem", "net"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("net")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilter2(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
|
||||||
|
"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("network")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilter2NoGlob(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
|
||||||
|
"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("net")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
77
internal/buffer/buffer.go
Normal file
77
internal/buffer/buffer.go
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
package buffer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Buffer is an object for storing metrics in a circular buffer: when
// it is full, the oldest metrics are dropped to make room for new ones.
type Buffer struct {
	// buf holds the buffered metrics; its channel capacity is the size
	// passed to NewBuffer.
	buf chan telegraf.Metric
	// drops is the total number of dropped metrics since instantiation.
	// NOTE(review): not synchronized — assumes a single writer; confirm.
	drops int
	// total is the total number of metrics added since instantiation.
	total int
}
|
||||||
|
|
||||||
|
// NewBuffer returns a Buffer
|
||||||
|
// size is the maximum number of metrics that Buffer will cache. If Add is
|
||||||
|
// called when the buffer is full, then the oldest metric(s) will be dropped.
|
||||||
|
func NewBuffer(size int) *Buffer {
|
||||||
|
return &Buffer{
|
||||||
|
buf: make(chan telegraf.Metric, size),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmpty returns true if Buffer is empty.
func (b *Buffer) IsEmpty() bool {
	return len(b.buf) == 0
}

// Len returns the current number of metrics held in the buffer.
func (b *Buffer) Len() int {
	return len(b.buf)
}

// Drops returns the total number of dropped metrics that have occurred
// in this buffer since instantiation.
func (b *Buffer) Drops() int {
	return b.drops
}

// Total returns the total number of metrics that have been added to
// this buffer since instantiation.
func (b *Buffer) Total() int {
	return b.total
}
|
||||||
|
|
||||||
|
// Add adds metrics to the buffer.
|
||||||
|
func (b *Buffer) Add(metrics ...telegraf.Metric) {
|
||||||
|
for i, _ := range metrics {
|
||||||
|
b.total++
|
||||||
|
select {
|
||||||
|
case b.buf <- metrics[i]:
|
||||||
|
default:
|
||||||
|
b.drops++
|
||||||
|
<-b.buf
|
||||||
|
b.buf <- metrics[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batch returns a batch of metrics of size batchSize.
|
||||||
|
// the batch will be of maximum length batchSize. It can be less than batchSize,
|
||||||
|
// if the length of Buffer is less than batchSize.
|
||||||
|
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
|
||||||
|
n := min(len(b.buf), batchSize)
|
||||||
|
out := make([]telegraf.Metric, n)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
out[i] = <-b.buf
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
|
||||||
94
internal/buffer/buffer_test.go
Normal file
94
internal/buffer/buffer_test.go
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
package buffer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// metricList is a fixed set of five test metrics shared by the buffer
// tests below.
var metricList = []telegraf.Metric{
	testutil.TestMetric(2, "mymetric1"),
	testutil.TestMetric(1, "mymetric2"),
	testutil.TestMetric(11, "mymetric3"),
	testutil.TestMetric(15, "mymetric4"),
	testutil.TestMetric(8, "mymetric5"),
}

// BenchmarkAddMetrics measures the per-call cost of Add on a buffer of
// capacity 10000 (once b.N exceeds the capacity, Add also drops).
func BenchmarkAddMetrics(b *testing.B) {
	buf := NewBuffer(10000)
	m := testutil.TestMetric(1, "mymetric")
	for n := 0; n < b.N; n++ {
		buf.Add(m)
	}
}
|
||||||
|
|
||||||
|
// TestNewBufferBasicFuncs verifies the zero state of a fresh buffer
// and that IsEmpty/Len/Drops/Total track single and bulk Adds.
func TestNewBufferBasicFuncs(t *testing.T) {
	b := NewBuffer(10)

	// Fresh buffer: empty, no drops, nothing counted.
	assert.True(t, b.IsEmpty())
	assert.Zero(t, b.Len())
	assert.Zero(t, b.Drops())
	assert.Zero(t, b.Total())

	// One metric added: length and total advance, no drops.
	m := testutil.TestMetric(1, "mymetric")
	b.Add(m)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 1)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 1)

	// Five more (metricList) still fit within capacity 10.
	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 6)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 6)
}
|
||||||
|
|
||||||
|
// TestDroppingMetrics verifies that adding past capacity drops the
// oldest metrics and counts them in Drops while Total keeps growing.
func TestDroppingMetrics(t *testing.T) {
	b := NewBuffer(10)

	// Add up to the size of the buffer
	b.Add(metricList...)
	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 10)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 10)

	// Add 5 more and verify they were dropped
	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 10)
	assert.Equal(t, b.Drops(), 5)
	assert.Equal(t, b.Total(), 15)
}
|
||||||
|
|
||||||
|
// TestGettingBatches verifies Batch sizing: it returns fewer metrics
// than requested when the buffer is short, never more than requested,
// and draining via Batch does not count as drops.
func TestGettingBatches(t *testing.T) {
	b := NewBuffer(20)

	// Verify that the buffer returned is smaller than requested when there are
	// not as many items as requested.
	b.Add(metricList...)
	batch := b.Batch(10)
	assert.Len(t, batch, 5)

	// Verify that the buffer is now empty
	assert.True(t, b.IsEmpty())
	assert.Zero(t, b.Len())
	assert.Zero(t, b.Drops())
	assert.Equal(t, b.Total(), 5)

	// Verify that the buffer returned is not more than the size requested
	b.Add(metricList...)
	batch = b.Batch(3)
	assert.Len(t, batch, 3)

	// Verify that buffer is not empty
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 2)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 10)
}
|
||||||
49
internal/config/aws/credentials.go
Normal file
49
internal/config/aws/credentials.go
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/client"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CredentialConfig holds AWS credential settings. All fields are
// optional; see Credentials for how they are resolved.
type CredentialConfig struct {
	Region    string // AWS region for the session
	AccessKey string // static access key ID
	SecretKey string // static secret key paired with AccessKey
	RoleARN   string // role to assume via STS, when set
	Profile   string // shared-credentials profile name
	Filename  string // shared-credentials file path
	Token     string // session token used with static credentials
}
|
||||||
|
|
||||||
|
func (c *CredentialConfig) Credentials() client.ConfigProvider {
|
||||||
|
if c.RoleARN != "" {
|
||||||
|
return c.assumeCredentials()
|
||||||
|
} else {
|
||||||
|
return c.rootCredentials()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
|
||||||
|
config := &aws.Config{
|
||||||
|
Region: aws.String(c.Region),
|
||||||
|
}
|
||||||
|
if c.AccessKey != "" || c.SecretKey != "" {
|
||||||
|
config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
|
||||||
|
} else if c.Profile != "" || c.Filename != "" {
|
||||||
|
config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile)
|
||||||
|
}
|
||||||
|
|
||||||
|
return session.New(config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
|
||||||
|
rootCredentials := c.rootCredentials()
|
||||||
|
config := &aws.Config{
|
||||||
|
Region: aws.String(c.Region),
|
||||||
|
}
|
||||||
|
config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
|
||||||
|
return session.New(config)
|
||||||
|
}
|
||||||
@@ -58,7 +58,6 @@ func NewConfig() *Config {
|
|||||||
Interval: internal.Duration{Duration: 10 * time.Second},
|
Interval: internal.Duration{Duration: 10 * time.Second},
|
||||||
RoundInterval: true,
|
RoundInterval: true,
|
||||||
FlushInterval: internal.Duration{Duration: 10 * time.Second},
|
FlushInterval: internal.Duration{Duration: 10 * time.Second},
|
||||||
FlushJitter: internal.Duration{Duration: 5 * time.Second},
|
|
||||||
},
|
},
|
||||||
|
|
||||||
Tags: make(map[string]string),
|
Tags: make(map[string]string),
|
||||||
@@ -78,6 +77,14 @@ type AgentConfig struct {
|
|||||||
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
|
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
|
||||||
RoundInterval bool
|
RoundInterval bool
|
||||||
|
|
||||||
|
// By default, precision will be set to the same timestamp order as the
|
||||||
|
// collection interval, with the maximum being 1s.
|
||||||
|
// ie, when interval = "10s", precision will be "1s"
|
||||||
|
// when interval = "250ms", precision will be "1ms"
|
||||||
|
// Precision will NOT be used for service inputs. It is up to each individual
|
||||||
|
// service input to set the timestamp at the appropriate precision.
|
||||||
|
Precision internal.Duration
|
||||||
|
|
||||||
// CollectionJitter is used to jitter the collection by a random amount.
|
// CollectionJitter is used to jitter the collection by a random amount.
|
||||||
// Each plugin will sleep for a random time within jitter before collecting.
|
// Each plugin will sleep for a random time within jitter before collecting.
|
||||||
// This can be used to avoid many plugins querying things like sysfs at the
|
// This can be used to avoid many plugins querying things like sysfs at the
|
||||||
@@ -93,9 +100,15 @@ type AgentConfig struct {
|
|||||||
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||||
FlushJitter internal.Duration
|
FlushJitter internal.Duration
|
||||||
|
|
||||||
|
// MetricBatchSize is the maximum number of metrics that is wrote to an
|
||||||
|
// output plugin in one call.
|
||||||
|
MetricBatchSize int
|
||||||
|
|
||||||
// MetricBufferLimit is the max number of metrics that each output plugin
|
// MetricBufferLimit is the max number of metrics that each output plugin
|
||||||
// will cache. The buffer is cleared when a successful write occurs. When
|
// will cache. The buffer is cleared when a successful write occurs. When
|
||||||
// full, the oldest metrics will be overwritten.
|
// full, the oldest metrics will be overwritten. This number should be a
|
||||||
|
// multiple of MetricBatchSize. Due to current implementation, this could
|
||||||
|
// not be less than 2 times MetricBatchSize.
|
||||||
MetricBufferLimit int
|
MetricBufferLimit int
|
||||||
|
|
||||||
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
|
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
|
||||||
@@ -103,11 +116,10 @@ type AgentConfig struct {
|
|||||||
// does _not_ deactivate FlushInterval.
|
// does _not_ deactivate FlushInterval.
|
||||||
FlushBufferWhenFull bool
|
FlushBufferWhenFull bool
|
||||||
|
|
||||||
// TODO(cam): Remove UTC and Precision parameters, they are no longer
|
// TODO(cam): Remove UTC and parameter, they are no longer
|
||||||
// valid for the agent config. Leaving them here for now for backwards-
|
// valid for the agent config. Leaving them here for now for backwards-
|
||||||
// compatibility
|
// compatibility
|
||||||
UTC bool `toml:"utc"`
|
UTC bool `toml:"utc"`
|
||||||
Precision string
|
|
||||||
|
|
||||||
// Debug is the option for running in debug mode
|
// Debug is the option for running in debug mode
|
||||||
Debug bool
|
Debug bool
|
||||||
@@ -182,11 +194,13 @@ var header = `# Telegraf Configuration
|
|||||||
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||||
round_interval = true
|
round_interval = true
|
||||||
|
|
||||||
## Telegraf will cache metric_buffer_limit metrics for each output, and will
|
## Telegraf will send metrics to outputs in batches of at
|
||||||
## flush this buffer on a successful write.
|
## most metric_batch_size metrics.
|
||||||
metric_buffer_limit = 1000
|
metric_batch_size = 1000
|
||||||
## Flush the buffer whenever full, regardless of flush_interval.
|
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
|
||||||
flush_buffer_when_full = true
|
## output, and will flush this buffer on a successful write. Oldest metrics
|
||||||
|
## are dropped first when this buffer fills.
|
||||||
|
metric_buffer_limit = 10000
|
||||||
|
|
||||||
## Collection jitter is used to jitter the collection by a random amount.
|
## Collection jitter is used to jitter the collection by a random amount.
|
||||||
## Each plugin will sleep for a random time within jitter before collecting.
|
## Each plugin will sleep for a random time within jitter before collecting.
|
||||||
@@ -202,6 +216,11 @@ var header = `# Telegraf Configuration
|
|||||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||||
flush_jitter = "0s"
|
flush_jitter = "0s"
|
||||||
|
|
||||||
|
## By default, precision will be set to the same timestamp order as the
|
||||||
|
## collection interval, with the maximum being 1s.
|
||||||
|
## Precision will NOT be used for service inputs, such as logparser and statsd.
|
||||||
|
## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
|
||||||
|
precision = ""
|
||||||
## Run telegraf in debug mode
|
## Run telegraf in debug mode
|
||||||
debug = false
|
debug = false
|
||||||
## Run telegraf in quiet mode
|
## Run telegraf in quiet mode
|
||||||
@@ -349,7 +368,7 @@ func printConfig(name string, p printer, op string, commented bool) {
|
|||||||
fmt.Print("\n")
|
fmt.Print("\n")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Print(comment + line + "\n")
|
fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -404,13 +423,67 @@ func (c *Config) LoadDirectory(path string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getDefaultConfigPath tries to find a default config file at these
// locations (in order):
//   1. $TELEGRAF_CONFIG_PATH
//   2. $HOME/.telegraf/telegraf.conf
//   3. /etc/telegraf/telegraf.conf
// It returns the first path that exists, or an error if none do.
// If TELEGRAF_CONFIG_PATH is unset, envfile is "" and os.Stat("")
// fails, so the empty candidate is skipped harmlessly.
func getDefaultConfigPath() (string, error) {
	envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
	homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
	etcfile := "/etc/telegraf/telegraf.conf"
	for _, path := range []string{envfile, homefile, etcfile} {
		// Use the first candidate that exists on disk.
		if _, err := os.Stat(path); err == nil {
			log.Printf("Using config file: %s", path)
			return path, nil
		}
	}

	// if we got here, we didn't find a file in a default location
	return "", fmt.Errorf("No config file specified, and could not find one"+
		" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}
|
||||||
|
|
||||||
// LoadConfig loads the given config file and applies it to c
|
// LoadConfig loads the given config file and applies it to c
|
||||||
func (c *Config) LoadConfig(path string) error {
|
func (c *Config) LoadConfig(path string) error {
|
||||||
|
var err error
|
||||||
|
if path == "" {
|
||||||
|
if path, err = getDefaultConfigPath(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
tbl, err := parseFile(path)
|
tbl, err := parseFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error parsing %s, %s", path, err)
|
return fmt.Errorf("Error parsing %s, %s", path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Parse tags tables first:
|
||||||
|
for _, tableName := range []string{"tags", "global_tags"} {
|
||||||
|
if val, ok := tbl.Fields[tableName]; ok {
|
||||||
|
subTable, ok := val.(*ast.Table)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("%s: invalid configuration", path)
|
||||||
|
}
|
||||||
|
if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
|
||||||
|
log.Printf("Could not parse [global_tags] config\n")
|
||||||
|
return fmt.Errorf("Error parsing %s, %s", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse agent table:
|
||||||
|
if val, ok := tbl.Fields["agent"]; ok {
|
||||||
|
subTable, ok := val.(*ast.Table)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("%s: invalid configuration", path)
|
||||||
|
}
|
||||||
|
if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
|
||||||
|
log.Printf("Could not parse [agent] config\n")
|
||||||
|
return fmt.Errorf("Error parsing %s, %s", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse all the rest of the plugins:
|
||||||
for name, val := range tbl.Fields {
|
for name, val := range tbl.Fields {
|
||||||
subTable, ok := val.(*ast.Table)
|
subTable, ok := val.(*ast.Table)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -418,16 +491,7 @@ func (c *Config) LoadConfig(path string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
switch name {
|
switch name {
|
||||||
case "agent":
|
case "agent", "global_tags", "tags":
|
||||||
if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
|
|
||||||
log.Printf("Could not parse [agent] config\n")
|
|
||||||
return fmt.Errorf("Error parsing %s, %s", path, err)
|
|
||||||
}
|
|
||||||
case "global_tags", "tags":
|
|
||||||
if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
|
|
||||||
log.Printf("Could not parse [global_tags] config\n")
|
|
||||||
return fmt.Errorf("Error parsing %s, %s", path, err)
|
|
||||||
}
|
|
||||||
case "outputs":
|
case "outputs":
|
||||||
for pluginName, pluginVal := range subTable.Fields {
|
for pluginName, pluginVal := range subTable.Fields {
|
||||||
switch pluginSubTable := pluginVal.(type) {
|
switch pluginSubTable := pluginVal.(type) {
|
||||||
@@ -525,11 +589,8 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ro := internal_models.NewRunningOutput(name, output, outputConfig)
|
ro := internal_models.NewRunningOutput(name, output, outputConfig,
|
||||||
if c.Agent.MetricBufferLimit > 0 {
|
c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
|
||||||
ro.MetricBufferLimit = c.Agent.MetricBufferLimit
|
|
||||||
}
|
|
||||||
ro.FlushBufferWhenFull = c.Agent.FlushBufferWhenFull
|
|
||||||
c.Outputs = append(c.Outputs, ro)
|
c.Outputs = append(c.Outputs, ro)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -580,9 +641,9 @@ func (c *Config) addInput(name string, table *ast.Table) error {
|
|||||||
|
|
||||||
// buildFilter builds a Filter
|
// buildFilter builds a Filter
|
||||||
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
|
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
|
||||||
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig to be used for prefix
|
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig
|
||||||
// filtering on tags and measurements
|
// to be used for glob filtering on tags and measurements
|
||||||
func buildFilter(tbl *ast.Table) internal_models.Filter {
|
func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
|
||||||
f := internal_models.Filter{}
|
f := internal_models.Filter{}
|
||||||
|
|
||||||
if node, ok := tbl.Fields["namepass"]; ok {
|
if node, ok := tbl.Fields["namepass"]; ok {
|
||||||
@@ -681,6 +742,33 @@ func buildFilter(tbl *ast.Table) internal_models.Filter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if node, ok := tbl.Fields["tagexclude"]; ok {
|
||||||
|
if kv, ok := node.(*ast.KeyValue); ok {
|
||||||
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||||
|
for _, elem := range ary.Value {
|
||||||
|
if str, ok := elem.(*ast.String); ok {
|
||||||
|
f.TagExclude = append(f.TagExclude, str.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if node, ok := tbl.Fields["taginclude"]; ok {
|
||||||
|
if kv, ok := node.(*ast.KeyValue); ok {
|
||||||
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||||
|
for _, elem := range ary.Value {
|
||||||
|
if str, ok := elem.(*ast.String); ok {
|
||||||
|
f.TagInclude = append(f.TagInclude, str.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := f.CompileFilter(); err != nil {
|
||||||
|
return f, err
|
||||||
|
}
|
||||||
|
|
||||||
delete(tbl.Fields, "namedrop")
|
delete(tbl.Fields, "namedrop")
|
||||||
delete(tbl.Fields, "namepass")
|
delete(tbl.Fields, "namepass")
|
||||||
delete(tbl.Fields, "fielddrop")
|
delete(tbl.Fields, "fielddrop")
|
||||||
@@ -689,7 +777,9 @@ func buildFilter(tbl *ast.Table) internal_models.Filter {
|
|||||||
delete(tbl.Fields, "pass")
|
delete(tbl.Fields, "pass")
|
||||||
delete(tbl.Fields, "tagdrop")
|
delete(tbl.Fields, "tagdrop")
|
||||||
delete(tbl.Fields, "tagpass")
|
delete(tbl.Fields, "tagpass")
|
||||||
return f
|
delete(tbl.Fields, "tagexclude")
|
||||||
|
delete(tbl.Fields, "taginclude")
|
||||||
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildInput parses input specific items from the ast.Table,
|
// buildInput parses input specific items from the ast.Table,
|
||||||
@@ -748,7 +838,11 @@ func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, erro
|
|||||||
delete(tbl.Fields, "name_override")
|
delete(tbl.Fields, "name_override")
|
||||||
delete(tbl.Fields, "interval")
|
delete(tbl.Fields, "interval")
|
||||||
delete(tbl.Fields, "tags")
|
delete(tbl.Fields, "tags")
|
||||||
cp.Filter = buildFilter(tbl)
|
var err error
|
||||||
|
cp.Filter, err = buildFilter(tbl)
|
||||||
|
if err != nil {
|
||||||
|
return cp, err
|
||||||
|
}
|
||||||
return cp, nil
|
return cp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -864,13 +958,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
|
|||||||
return serializers.NewSerializer(c)
|
return serializers.NewSerializer(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildOutput parses output specific items from the ast.Table, builds the filter and returns an
|
// buildOutput parses output specific items from the ast.Table,
|
||||||
|
// builds the filter and returns an
|
||||||
// internal_models.OutputConfig to be inserted into internal_models.RunningInput
|
// internal_models.OutputConfig to be inserted into internal_models.RunningInput
|
||||||
// Note: error exists in the return for future calls that might require error
|
// Note: error exists in the return for future calls that might require error
|
||||||
func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
|
func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
|
||||||
|
filter, err := buildFilter(tbl)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
oc := &internal_models.OutputConfig{
|
oc := &internal_models.OutputConfig{
|
||||||
Name: name,
|
Name: name,
|
||||||
Filter: buildFilter(tbl),
|
Filter: filter,
|
||||||
}
|
}
|
||||||
// Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
|
// Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
|
||||||
if len(oc.Filter.FieldDrop) > 0 {
|
if len(oc.Filter.FieldDrop) > 0 {
|
||||||
|
|||||||
@@ -26,27 +26,29 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
|
|||||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
||||||
memcached.Servers = []string{"192.168.1.1"}
|
memcached.Servers = []string{"192.168.1.1"}
|
||||||
|
|
||||||
mConfig := &internal_models.InputConfig{
|
filter := internal_models.Filter{
|
||||||
Name: "memcached",
|
NameDrop: []string{"metricname2"},
|
||||||
Filter: internal_models.Filter{
|
NamePass: []string{"metricname1"},
|
||||||
NameDrop: []string{"metricname2"},
|
FieldDrop: []string{"other", "stuff"},
|
||||||
NamePass: []string{"metricname1"},
|
FieldPass: []string{"some", "strings"},
|
||||||
FieldDrop: []string{"other", "stuff"},
|
TagDrop: []internal_models.TagFilter{
|
||||||
FieldPass: []string{"some", "strings"},
|
internal_models.TagFilter{
|
||||||
TagDrop: []internal_models.TagFilter{
|
Name: "badtag",
|
||||||
internal_models.TagFilter{
|
Filter: []string{"othertag"},
|
||||||
Name: "badtag",
|
|
||||||
Filter: []string{"othertag"},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
TagPass: []internal_models.TagFilter{
|
|
||||||
internal_models.TagFilter{
|
|
||||||
Name: "goodtag",
|
|
||||||
Filter: []string{"mytag"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
IsActive: true,
|
|
||||||
},
|
},
|
||||||
|
TagPass: []internal_models.TagFilter{
|
||||||
|
internal_models.TagFilter{
|
||||||
|
Name: "goodtag",
|
||||||
|
Filter: []string{"mytag"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
IsActive: true,
|
||||||
|
}
|
||||||
|
assert.NoError(t, filter.CompileFilter())
|
||||||
|
mConfig := &internal_models.InputConfig{
|
||||||
|
Name: "memcached",
|
||||||
|
Filter: filter,
|
||||||
Interval: 10 * time.Second,
|
Interval: 10 * time.Second,
|
||||||
}
|
}
|
||||||
mConfig.Tags = make(map[string]string)
|
mConfig.Tags = make(map[string]string)
|
||||||
@@ -64,27 +66,29 @@ func TestConfig_LoadSingleInput(t *testing.T) {
|
|||||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
||||||
memcached.Servers = []string{"localhost"}
|
memcached.Servers = []string{"localhost"}
|
||||||
|
|
||||||
mConfig := &internal_models.InputConfig{
|
filter := internal_models.Filter{
|
||||||
Name: "memcached",
|
NameDrop: []string{"metricname2"},
|
||||||
Filter: internal_models.Filter{
|
NamePass: []string{"metricname1"},
|
||||||
NameDrop: []string{"metricname2"},
|
FieldDrop: []string{"other", "stuff"},
|
||||||
NamePass: []string{"metricname1"},
|
FieldPass: []string{"some", "strings"},
|
||||||
FieldDrop: []string{"other", "stuff"},
|
TagDrop: []internal_models.TagFilter{
|
||||||
FieldPass: []string{"some", "strings"},
|
internal_models.TagFilter{
|
||||||
TagDrop: []internal_models.TagFilter{
|
Name: "badtag",
|
||||||
internal_models.TagFilter{
|
Filter: []string{"othertag"},
|
||||||
Name: "badtag",
|
|
||||||
Filter: []string{"othertag"},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
TagPass: []internal_models.TagFilter{
|
|
||||||
internal_models.TagFilter{
|
|
||||||
Name: "goodtag",
|
|
||||||
Filter: []string{"mytag"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
IsActive: true,
|
|
||||||
},
|
},
|
||||||
|
TagPass: []internal_models.TagFilter{
|
||||||
|
internal_models.TagFilter{
|
||||||
|
Name: "goodtag",
|
||||||
|
Filter: []string{"mytag"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
IsActive: true,
|
||||||
|
}
|
||||||
|
assert.NoError(t, filter.CompileFilter())
|
||||||
|
mConfig := &internal_models.InputConfig{
|
||||||
|
Name: "memcached",
|
||||||
|
Filter: filter,
|
||||||
Interval: 5 * time.Second,
|
Interval: 5 * time.Second,
|
||||||
}
|
}
|
||||||
mConfig.Tags = make(map[string]string)
|
mConfig.Tags = make(map[string]string)
|
||||||
@@ -109,27 +113,29 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
|||||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
||||||
memcached.Servers = []string{"localhost"}
|
memcached.Servers = []string{"localhost"}
|
||||||
|
|
||||||
mConfig := &internal_models.InputConfig{
|
filter := internal_models.Filter{
|
||||||
Name: "memcached",
|
NameDrop: []string{"metricname2"},
|
||||||
Filter: internal_models.Filter{
|
NamePass: []string{"metricname1"},
|
||||||
NameDrop: []string{"metricname2"},
|
FieldDrop: []string{"other", "stuff"},
|
||||||
NamePass: []string{"metricname1"},
|
FieldPass: []string{"some", "strings"},
|
||||||
FieldDrop: []string{"other", "stuff"},
|
TagDrop: []internal_models.TagFilter{
|
||||||
FieldPass: []string{"some", "strings"},
|
internal_models.TagFilter{
|
||||||
TagDrop: []internal_models.TagFilter{
|
Name: "badtag",
|
||||||
internal_models.TagFilter{
|
Filter: []string{"othertag"},
|
||||||
Name: "badtag",
|
|
||||||
Filter: []string{"othertag"},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
TagPass: []internal_models.TagFilter{
|
|
||||||
internal_models.TagFilter{
|
|
||||||
Name: "goodtag",
|
|
||||||
Filter: []string{"mytag"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
IsActive: true,
|
|
||||||
},
|
},
|
||||||
|
TagPass: []internal_models.TagFilter{
|
||||||
|
internal_models.TagFilter{
|
||||||
|
Name: "goodtag",
|
||||||
|
Filter: []string{"mytag"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
IsActive: true,
|
||||||
|
}
|
||||||
|
assert.NoError(t, filter.CompileFilter())
|
||||||
|
mConfig := &internal_models.InputConfig{
|
||||||
|
Name: "memcached",
|
||||||
|
Filter: filter,
|
||||||
Interval: 5 * time.Second,
|
Interval: 5 * time.Second,
|
||||||
}
|
}
|
||||||
mConfig.Tags = make(map[string]string)
|
mConfig.Tags = make(map[string]string)
|
||||||
|
|||||||
37
internal/errchan/errchan.go
Normal file
37
internal/errchan/errchan.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package errchan
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ErrChan struct {
|
||||||
|
C chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns an error channel of max length 'n'
|
||||||
|
// errors can be sent to the ErrChan.C channel, and will be returned when
|
||||||
|
// ErrChan.Error() is called.
|
||||||
|
func New(n int) *ErrChan {
|
||||||
|
return &ErrChan{
|
||||||
|
C: make(chan error, n),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error closes the ErrChan.C channel and returns an error if there are any
|
||||||
|
// non-nil errors, otherwise returns nil.
|
||||||
|
func (e *ErrChan) Error() error {
|
||||||
|
close(e.C)
|
||||||
|
|
||||||
|
var out string
|
||||||
|
for err := range e.C {
|
||||||
|
if err != nil {
|
||||||
|
out += "[" + err.Error() + "], "
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if out != "" {
|
||||||
|
return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
98
internal/globpath/globpath.go
Normal file
98
internal/globpath/globpath.go
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
package globpath
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gobwas/glob"
|
||||||
|
)
|
||||||
|
|
||||||
|
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
|
||||||
|
|
||||||
|
type GlobPath struct {
|
||||||
|
path string
|
||||||
|
hasMeta bool
|
||||||
|
g glob.Glob
|
||||||
|
root string
|
||||||
|
}
|
||||||
|
|
||||||
|
func Compile(path string) (*GlobPath, error) {
|
||||||
|
out := GlobPath{
|
||||||
|
hasMeta: hasMeta(path),
|
||||||
|
path: path,
|
||||||
|
}
|
||||||
|
|
||||||
|
// if there are no glob meta characters in the path, don't bother compiling
|
||||||
|
// a glob object or finding the root directory. (see short-circuit in Match)
|
||||||
|
if !out.hasMeta {
|
||||||
|
return &out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Get the root directory for this filepath
|
||||||
|
out.root = findRootDir(path)
|
||||||
|
return &out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GlobPath) Match() map[string]os.FileInfo {
|
||||||
|
if !g.hasMeta {
|
||||||
|
out := make(map[string]os.FileInfo)
|
||||||
|
info, err := os.Stat(g.path)
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
out[g.path] = info
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
return walkFilePath(g.root, g.g)
|
||||||
|
}
|
||||||
|
|
||||||
|
// walk the filepath from the given root and return a list of files that match
|
||||||
|
// the given glob.
|
||||||
|
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
|
||||||
|
matchedFiles := make(map[string]os.FileInfo)
|
||||||
|
walkfn := func(path string, info os.FileInfo, _ error) error {
|
||||||
|
if g.Match(path) {
|
||||||
|
matchedFiles[path] = info
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
filepath.Walk(root, walkfn)
|
||||||
|
return matchedFiles
|
||||||
|
}
|
||||||
|
|
||||||
|
// find the root dir of the given path (could include globs).
|
||||||
|
// ie:
|
||||||
|
// /var/log/telegraf.conf -> /var/log
|
||||||
|
// /home/** -> /home
|
||||||
|
// /home/*/** -> /home
|
||||||
|
// /lib/share/*/*/**.txt -> /lib/share
|
||||||
|
func findRootDir(path string) string {
|
||||||
|
pathItems := strings.Split(path, sepStr)
|
||||||
|
out := sepStr
|
||||||
|
for i, item := range pathItems {
|
||||||
|
if i == len(pathItems)-1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if item == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if hasMeta(item) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
out += item + sepStr
|
||||||
|
}
|
||||||
|
if out != "/" {
|
||||||
|
out = strings.TrimSuffix(out, "/")
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasMeta reports whether path contains any magic glob characters.
|
||||||
|
func hasMeta(path string) bool {
|
||||||
|
return strings.IndexAny(path, "*?[") >= 0
|
||||||
|
}
|
||||||
62
internal/globpath/globpath_test.go
Normal file
62
internal/globpath/globpath_test.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package globpath
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCompileAndMatch(t *testing.T) {
|
||||||
|
dir := getTestdataDir()
|
||||||
|
// test super asterisk
|
||||||
|
g1, err := Compile(dir + "/**")
|
||||||
|
require.NoError(t, err)
|
||||||
|
// test single asterisk
|
||||||
|
g2, err := Compile(dir + "/*.log")
|
||||||
|
require.NoError(t, err)
|
||||||
|
// test no meta characters (file exists)
|
||||||
|
g3, err := Compile(dir + "/log1.log")
|
||||||
|
require.NoError(t, err)
|
||||||
|
// test file that doesn't exist
|
||||||
|
g4, err := Compile(dir + "/i_dont_exist.log")
|
||||||
|
require.NoError(t, err)
|
||||||
|
// test super asterisk that doesn't exist
|
||||||
|
g5, err := Compile(dir + "/dir_doesnt_exist/**")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
matches := g1.Match()
|
||||||
|
assert.Len(t, matches, 3)
|
||||||
|
matches = g2.Match()
|
||||||
|
assert.Len(t, matches, 2)
|
||||||
|
matches = g3.Match()
|
||||||
|
assert.Len(t, matches, 1)
|
||||||
|
matches = g4.Match()
|
||||||
|
assert.Len(t, matches, 0)
|
||||||
|
matches = g5.Match()
|
||||||
|
assert.Len(t, matches, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindRootDir(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
output string
|
||||||
|
}{
|
||||||
|
{"/var/log/telegraf.conf", "/var/log"},
|
||||||
|
{"/home/**", "/home"},
|
||||||
|
{"/home/*/**", "/home"},
|
||||||
|
{"/lib/share/*/*/**.txt", "/lib/share"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
actual := findRootDir(test.input)
|
||||||
|
assert.Equal(t, test.output, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTestdataDir() string {
|
||||||
|
_, filename, _, _ := runtime.Caller(1)
|
||||||
|
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
|
||||||
|
}
|
||||||
0
internal/globpath/testdata/log1.log
vendored
Normal file
0
internal/globpath/testdata/log1.log
vendored
Normal file
0
internal/globpath/testdata/log2.log
vendored
Normal file
0
internal/globpath/testdata/log2.log
vendored
Normal file
5
internal/globpath/testdata/test.conf
vendored
Normal file
5
internal/globpath/testdata/test.conf
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# this is a fake testing config file
|
||||||
|
# for testing the filestat plugin
|
||||||
|
|
||||||
|
option1 = "foo"
|
||||||
|
option2 = "bar"
|
||||||
@@ -2,13 +2,18 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"bytes"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
"unicode"
|
||||||
@@ -16,6 +21,12 @@ import (
|
|||||||
|
|
||||||
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||||
|
|
||||||
|
var (
|
||||||
|
TimeoutErr = errors.New("Command timed out.")
|
||||||
|
|
||||||
|
NotImplementedError = errors.New("not implemented yet")
|
||||||
|
)
|
||||||
|
|
||||||
// Duration just wraps time.Duration
|
// Duration just wraps time.Duration
|
||||||
type Duration struct {
|
type Duration struct {
|
||||||
Duration time.Duration
|
Duration time.Duration
|
||||||
@@ -23,18 +34,29 @@ type Duration struct {
|
|||||||
|
|
||||||
// UnmarshalTOML parses the duration from the TOML config file
|
// UnmarshalTOML parses the duration from the TOML config file
|
||||||
func (d *Duration) UnmarshalTOML(b []byte) error {
|
func (d *Duration) UnmarshalTOML(b []byte) error {
|
||||||
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
|
var err error
|
||||||
if err != nil {
|
// Parse string duration, ie, "1s"
|
||||||
return err
|
d.Duration, err = time.ParseDuration(string(b[1 : len(b)-1]))
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Duration = dur
|
// First try parsing as integer seconds
|
||||||
|
sI, err := strconv.ParseInt(string(b), 10, 64)
|
||||||
|
if err == nil {
|
||||||
|
d.Duration = time.Second * time.Duration(sI)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Second try parsing as float seconds
|
||||||
|
sF, err := strconv.ParseFloat(string(b), 64)
|
||||||
|
if err == nil {
|
||||||
|
d.Duration = time.Second * time.Duration(sF)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var NotImplementedError = errors.New("not implemented yet")
|
|
||||||
|
|
||||||
// ReadLines reads contents from a file and splits them by new lines.
|
// ReadLines reads contents from a file and splits them by new lines.
|
||||||
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
||||||
func ReadLines(filename string) ([]string, error) {
|
func ReadLines(filename string) ([]string, error) {
|
||||||
@@ -140,58 +162,71 @@ func SnakeCase(in string) string {
|
|||||||
return string(out)
|
return string(out)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Glob will test a string pattern, potentially containing globs, against a
|
// CombinedOutputTimeout runs the given command with the given timeout and
|
||||||
// subject string. The result is a simple true/false, determining whether or
|
// returns the combined output of stdout and stderr.
|
||||||
// not the glob pattern matched the subject text.
|
// If the command times out, it attempts to kill the process.
|
||||||
//
|
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
|
||||||
// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
|
var b bytes.Buffer
|
||||||
// thanks Ryan Uber!
|
c.Stdout = &b
|
||||||
func Glob(pattern, measurement string) bool {
|
c.Stderr = &b
|
||||||
// Empty pattern can only match empty subject
|
if err := c.Start(); err != nil {
|
||||||
if pattern == "" {
|
return nil, err
|
||||||
return measurement == pattern
|
}
|
||||||
|
err := WaitTimeout(c, timeout)
|
||||||
|
return b.Bytes(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunTimeout runs the given command with the given timeout.
|
||||||
|
// If the command times out, it attempts to kill the process.
|
||||||
|
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
|
||||||
|
if err := c.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return WaitTimeout(c, timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitTimeout waits for the given command to finish with a timeout.
|
||||||
|
// It assumes the command has already been started.
|
||||||
|
// If the command times out, it attempts to kill the process.
|
||||||
|
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
|
||||||
|
timer := time.NewTimer(timeout)
|
||||||
|
done := make(chan error)
|
||||||
|
go func() { done <- c.Wait() }()
|
||||||
|
select {
|
||||||
|
case err := <-done:
|
||||||
|
timer.Stop()
|
||||||
|
return err
|
||||||
|
case <-timer.C:
|
||||||
|
if err := c.Process.Kill(); err != nil {
|
||||||
|
log.Printf("FATAL error killing process: %s", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// wait for the command to return after killing it
|
||||||
|
<-done
|
||||||
|
return TimeoutErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RandomSleep will sleep for a random amount of time up to max.
|
||||||
|
// If the shutdown channel is closed, it will return before it has finished
|
||||||
|
// sleeping.
|
||||||
|
func RandomSleep(max time.Duration, shutdown chan struct{}) {
|
||||||
|
if max == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
maxSleep := big.NewInt(max.Nanoseconds())
|
||||||
|
|
||||||
|
var sleepns int64
|
||||||
|
if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
|
||||||
|
sleepns = j.Int64()
|
||||||
|
}
|
||||||
|
|
||||||
|
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
|
||||||
|
select {
|
||||||
|
case <-t.C:
|
||||||
|
return
|
||||||
|
case <-shutdown:
|
||||||
|
t.Stop()
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the pattern _is_ a glob, it matches everything
|
|
||||||
if pattern == "*" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(pattern, "*")
|
|
||||||
|
|
||||||
if len(parts) == 1 {
|
|
||||||
// No globs in pattern, so test for match
|
|
||||||
return pattern == measurement
|
|
||||||
}
|
|
||||||
|
|
||||||
leadingGlob := strings.HasPrefix(pattern, "*")
|
|
||||||
trailingGlob := strings.HasSuffix(pattern, "*")
|
|
||||||
end := len(parts) - 1
|
|
||||||
|
|
||||||
for i, part := range parts {
|
|
||||||
switch i {
|
|
||||||
case 0:
|
|
||||||
if leadingGlob {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !strings.HasPrefix(measurement, part) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
case end:
|
|
||||||
if len(measurement) > 0 {
|
|
||||||
return trailingGlob || strings.HasSuffix(measurement, part)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
if !strings.Contains(measurement, part) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trim evaluated text from measurement as we loop over the pattern.
|
|
||||||
idx := strings.Index(measurement, part) + len(part)
|
|
||||||
measurement = measurement[idx:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// All parts of the pattern matched
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,47 +1,12 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import "testing"
|
import (
|
||||||
|
"os/exec"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
func testGlobMatch(t *testing.T, pattern, subj string) {
|
"github.com/stretchr/testify/assert"
|
||||||
if !Glob(pattern, subj) {
|
)
|
||||||
t.Errorf("%s should match %s", pattern, subj)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testGlobNoMatch(t *testing.T, pattern, subj string) {
|
|
||||||
if Glob(pattern, subj) {
|
|
||||||
t.Errorf("%s should not match %s", pattern, subj)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEmptyPattern(t *testing.T) {
|
|
||||||
testGlobMatch(t, "", "")
|
|
||||||
testGlobNoMatch(t, "", "test")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPatternWithoutGlobs(t *testing.T) {
|
|
||||||
testGlobMatch(t, "test", "test")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGlob(t *testing.T) {
|
|
||||||
for _, pattern := range []string{
|
|
||||||
"*test", // Leading glob
|
|
||||||
"this*", // Trailing glob
|
|
||||||
"*is*a*", // Lots of globs
|
|
||||||
"**test**", // Double glob characters
|
|
||||||
"**is**a***test*", // Varying number of globs
|
|
||||||
} {
|
|
||||||
testGlobMatch(t, pattern, "this_is_a_test")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, pattern := range []string{
|
|
||||||
"test*", // Implicit substring match should fail
|
|
||||||
"*is", // Partial match should fail
|
|
||||||
"*no*", // Globs without a match between them should fail
|
|
||||||
} {
|
|
||||||
testGlobNoMatch(t, pattern, "this_is_a_test")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type SnakeTest struct {
|
type SnakeTest struct {
|
||||||
input string
|
input string
|
||||||
@@ -71,3 +36,98 @@ func TestSnakeCase(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
sleepbin, _ = exec.LookPath("sleep")
|
||||||
|
echobin, _ = exec.LookPath("echo")
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRunTimeout(t *testing.T) {
|
||||||
|
if sleepbin == "" {
|
||||||
|
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||||
|
}
|
||||||
|
cmd := exec.Command(sleepbin, "10")
|
||||||
|
start := time.Now()
|
||||||
|
err := RunTimeout(cmd, time.Millisecond*20)
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
|
||||||
|
assert.Equal(t, TimeoutErr, err)
|
||||||
|
// Verify that command gets killed in 20ms, with some breathing room
|
||||||
|
assert.True(t, elapsed < time.Millisecond*75)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCombinedOutputTimeout(t *testing.T) {
|
||||||
|
if sleepbin == "" {
|
||||||
|
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||||
|
}
|
||||||
|
cmd := exec.Command(sleepbin, "10")
|
||||||
|
start := time.Now()
|
||||||
|
_, err := CombinedOutputTimeout(cmd, time.Millisecond*20)
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
|
||||||
|
assert.Equal(t, TimeoutErr, err)
|
||||||
|
// Verify that command gets killed in 20ms, with some breathing room
|
||||||
|
assert.True(t, elapsed < time.Millisecond*75)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCombinedOutput(t *testing.T) {
|
||||||
|
if echobin == "" {
|
||||||
|
t.Skip("'echo' binary not available on OS, skipping.")
|
||||||
|
}
|
||||||
|
cmd := exec.Command(echobin, "foo")
|
||||||
|
out, err := CombinedOutputTimeout(cmd, time.Second)
|
||||||
|
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "foo\n", string(out))
|
||||||
|
}
|
||||||
|
|
||||||
|
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
|
||||||
|
// the same output from a failed command.
|
||||||
|
func TestCombinedOutputError(t *testing.T) {
|
||||||
|
if sleepbin == "" {
|
||||||
|
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||||
|
}
|
||||||
|
cmd := exec.Command(sleepbin, "foo")
|
||||||
|
expected, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
cmd2 := exec.Command(sleepbin, "foo")
|
||||||
|
actual, err := CombinedOutputTimeout(cmd2, time.Second)
|
||||||
|
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Equal(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunError(t *testing.T) {
|
||||||
|
if sleepbin == "" {
|
||||||
|
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||||
|
}
|
||||||
|
cmd := exec.Command(sleepbin, "foo")
|
||||||
|
err := RunTimeout(cmd, time.Second)
|
||||||
|
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRandomSleep(t *testing.T) {
|
||||||
|
// test that zero max returns immediately
|
||||||
|
s := time.Now()
|
||||||
|
RandomSleep(time.Duration(0), make(chan struct{}))
|
||||||
|
elapsed := time.Since(s)
|
||||||
|
assert.True(t, elapsed < time.Millisecond)
|
||||||
|
|
||||||
|
// test that max sleep is respected
|
||||||
|
s = time.Now()
|
||||||
|
RandomSleep(time.Millisecond*50, make(chan struct{}))
|
||||||
|
elapsed = time.Since(s)
|
||||||
|
assert.True(t, elapsed < time.Millisecond*50)
|
||||||
|
|
||||||
|
// test that shutdown is respected
|
||||||
|
s = time.Now()
|
||||||
|
shutdown := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
close(shutdown)
|
||||||
|
}()
|
||||||
|
RandomSleep(time.Second, shutdown)
|
||||||
|
elapsed = time.Since(s)
|
||||||
|
assert.True(t, elapsed < time.Millisecond*150)
|
||||||
|
}
|
||||||
|
|||||||
59
internal/limiter/limiter.go
Normal file
59
internal/limiter/limiter.go
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
package limiter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewRateLimiter returns a rate limiter that will will emit from the C
|
||||||
|
// channel only 'n' times every 'rate' seconds.
|
||||||
|
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
|
||||||
|
r := &rateLimiter{
|
||||||
|
C: make(chan bool),
|
||||||
|
rate: rate,
|
||||||
|
n: n,
|
||||||
|
shutdown: make(chan bool),
|
||||||
|
}
|
||||||
|
r.wg.Add(1)
|
||||||
|
go r.limiter()
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
type rateLimiter struct {
|
||||||
|
C chan bool
|
||||||
|
rate time.Duration
|
||||||
|
n int
|
||||||
|
|
||||||
|
shutdown chan bool
|
||||||
|
wg sync.WaitGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *rateLimiter) Stop() {
|
||||||
|
close(r.shutdown)
|
||||||
|
r.wg.Wait()
|
||||||
|
close(r.C)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *rateLimiter) limiter() {
|
||||||
|
defer r.wg.Done()
|
||||||
|
ticker := time.NewTicker(r.rate)
|
||||||
|
defer ticker.Stop()
|
||||||
|
counter := 0
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-r.shutdown:
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
counter = 0
|
||||||
|
default:
|
||||||
|
if counter < r.n {
|
||||||
|
select {
|
||||||
|
case r.C <- true:
|
||||||
|
counter++
|
||||||
|
case <-r.shutdown:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
54
internal/limiter/limiter_test.go
Normal file
54
internal/limiter/limiter_test.go
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
package limiter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRateLimiter(t *testing.T) {
|
||||||
|
r := NewRateLimiter(5, time.Second)
|
||||||
|
ticker := time.NewTicker(time.Millisecond * 75)
|
||||||
|
|
||||||
|
// test that we can only get 5 receives from the rate limiter
|
||||||
|
counter := 0
|
||||||
|
outer:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-r.C:
|
||||||
|
counter++
|
||||||
|
case <-ticker.C:
|
||||||
|
break outer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, 5, counter)
|
||||||
|
r.Stop()
|
||||||
|
// verify that the Stop function closes the channel.
|
||||||
|
_, ok := <-r.C
|
||||||
|
assert.False(t, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRateLimiterMultipleIterations(t *testing.T) {
|
||||||
|
r := NewRateLimiter(5, time.Millisecond*50)
|
||||||
|
ticker := time.NewTicker(time.Millisecond * 250)
|
||||||
|
|
||||||
|
// test that we can get 15 receives from the rate limiter
|
||||||
|
counter := 0
|
||||||
|
outer:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
break outer
|
||||||
|
case <-r.C:
|
||||||
|
counter++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.True(t, counter > 10)
|
||||||
|
r.Stop()
|
||||||
|
// verify that the Stop function closes the channel.
|
||||||
|
_, ok := <-r.C
|
||||||
|
assert.False(t, ok)
|
||||||
|
}
|
||||||
@@ -1,33 +1,88 @@
|
|||||||
package internal_models
|
package internal_models
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
"fmt"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdata/telegraf/filter"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TagFilter is the name of a tag, and the values on which to filter
|
// TagFilter is the name of a tag, and the values on which to filter
|
||||||
type TagFilter struct {
|
type TagFilter struct {
|
||||||
Name string
|
Name string
|
||||||
Filter []string
|
Filter []string
|
||||||
|
filter filter.Filter
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
// Filter containing drop/pass and tagdrop/tagpass rules
|
||||||
type Filter struct {
|
type Filter struct {
|
||||||
NameDrop []string
|
NameDrop []string
|
||||||
|
nameDrop filter.Filter
|
||||||
NamePass []string
|
NamePass []string
|
||||||
|
namePass filter.Filter
|
||||||
|
|
||||||
FieldDrop []string
|
FieldDrop []string
|
||||||
|
fieldDrop filter.Filter
|
||||||
FieldPass []string
|
FieldPass []string
|
||||||
|
fieldPass filter.Filter
|
||||||
|
|
||||||
TagDrop []TagFilter
|
TagDrop []TagFilter
|
||||||
TagPass []TagFilter
|
TagPass []TagFilter
|
||||||
|
|
||||||
|
TagExclude []string
|
||||||
|
tagExclude filter.Filter
|
||||||
|
TagInclude []string
|
||||||
|
tagInclude filter.Filter
|
||||||
|
|
||||||
IsActive bool
|
IsActive bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
|
// Compile all Filter lists into filter.Filter objects.
|
||||||
|
func (f *Filter) CompileFilter() error {
|
||||||
|
var err error
|
||||||
|
f.nameDrop, err = filter.CompileFilter(f.NameDrop)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'namedrop', %s", err)
|
||||||
|
}
|
||||||
|
f.namePass, err = filter.CompileFilter(f.NamePass)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'namepass', %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
f.fieldDrop, err = filter.CompileFilter(f.FieldDrop)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
|
||||||
|
}
|
||||||
|
f.fieldPass, err = filter.CompileFilter(f.FieldPass)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
f.tagExclude, err = filter.CompileFilter(f.TagExclude)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
|
||||||
|
}
|
||||||
|
f.tagInclude, err = filter.CompileFilter(f.TagInclude)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'taginclude', %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, _ := range f.TagDrop {
|
||||||
|
f.TagDrop[i].filter, err = filter.CompileFilter(f.TagDrop[i].Filter)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i, _ := range f.TagPass {
|
||||||
|
f.TagPass[i].filter, err = filter.CompileFilter(f.TagPass[i].Filter)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error compiling 'tagpass', %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool {
|
||||||
if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
|
if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -36,70 +91,51 @@ func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
|
|||||||
|
|
||||||
// ShouldFieldsPass returns true if the metric should pass, false if should drop
|
// ShouldFieldsPass returns true if the metric should pass, false if should drop
|
||||||
// based on the drop/pass filter parameters
|
// based on the drop/pass filter parameters
|
||||||
func (f Filter) ShouldNamePass(key string) bool {
|
func (f *Filter) ShouldNamePass(key string) bool {
|
||||||
if f.NamePass != nil {
|
if f.namePass != nil {
|
||||||
for _, pat := range f.NamePass {
|
if f.namePass.Match(key) {
|
||||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
return true
|
||||||
// Cam, 2015-12-07
|
|
||||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.NameDrop != nil {
|
if f.nameDrop != nil {
|
||||||
for _, pat := range f.NameDrop {
|
if f.nameDrop.Match(key) {
|
||||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
return false
|
||||||
// Cam, 2015-12-07
|
|
||||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShouldFieldsPass returns true if the metric should pass, false if should drop
|
// ShouldFieldsPass returns true if the metric should pass, false if should drop
|
||||||
// based on the drop/pass filter parameters
|
// based on the drop/pass filter parameters
|
||||||
func (f Filter) ShouldFieldsPass(key string) bool {
|
func (f *Filter) ShouldFieldsPass(key string) bool {
|
||||||
if f.FieldPass != nil {
|
if f.fieldPass != nil {
|
||||||
for _, pat := range f.FieldPass {
|
if f.fieldPass.Match(key) {
|
||||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
return true
|
||||||
// Cam, 2015-12-07
|
|
||||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.FieldDrop != nil {
|
if f.fieldDrop != nil {
|
||||||
for _, pat := range f.FieldDrop {
|
if f.fieldDrop.Match(key) {
|
||||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
return false
|
||||||
// Cam, 2015-12-07
|
|
||||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShouldTagsPass returns true if the metric should pass, false if should drop
|
// ShouldTagsPass returns true if the metric should pass, false if should drop
|
||||||
// based on the tagdrop/tagpass filter parameters
|
// based on the tagdrop/tagpass filter parameters
|
||||||
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
|
func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
|
||||||
if f.TagPass != nil {
|
if f.TagPass != nil {
|
||||||
for _, pat := range f.TagPass {
|
for _, pat := range f.TagPass {
|
||||||
|
if pat.filter == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if tagval, ok := tags[pat.Name]; ok {
|
if tagval, ok := tags[pat.Name]; ok {
|
||||||
for _, filter := range pat.Filter {
|
if pat.filter.Match(tagval) {
|
||||||
if internal.Glob(filter, tagval) {
|
return true
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -108,11 +144,12 @@ func (f Filter) ShouldTagsPass(tags map[string]string) bool {
|
|||||||
|
|
||||||
if f.TagDrop != nil {
|
if f.TagDrop != nil {
|
||||||
for _, pat := range f.TagDrop {
|
for _, pat := range f.TagDrop {
|
||||||
|
if pat.filter == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if tagval, ok := tags[pat.Name]; ok {
|
if tagval, ok := tags[pat.Name]; ok {
|
||||||
for _, filter := range pat.Filter {
|
if pat.filter.Match(tagval) {
|
||||||
if internal.Glob(filter, tagval) {
|
return false
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -121,3 +158,23 @@ func (f Filter) ShouldTagsPass(tags map[string]string) bool {
|
|||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Apply TagInclude and TagExclude filters.
|
||||||
|
// modifies the tags map in-place.
|
||||||
|
func (f *Filter) FilterTags(tags map[string]string) {
|
||||||
|
if f.tagInclude != nil {
|
||||||
|
for k, _ := range tags {
|
||||||
|
if !f.tagInclude.Match(k) {
|
||||||
|
delete(tags, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.tagExclude != nil {
|
||||||
|
for k, _ := range tags {
|
||||||
|
if f.tagExclude.Match(k) {
|
||||||
|
delete(tags, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,6 +2,11 @@ package internal_models
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFilter_Empty(t *testing.T) {
|
func TestFilter_Empty(t *testing.T) {
|
||||||
@@ -28,6 +33,7 @@ func TestFilter_NamePass(t *testing.T) {
|
|||||||
f := Filter{
|
f := Filter{
|
||||||
NamePass: []string{"foo*", "cpu_usage_idle"},
|
NamePass: []string{"foo*", "cpu_usage_idle"},
|
||||||
}
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
passes := []string{
|
passes := []string{
|
||||||
"foo",
|
"foo",
|
||||||
@@ -61,6 +67,7 @@ func TestFilter_NameDrop(t *testing.T) {
|
|||||||
f := Filter{
|
f := Filter{
|
||||||
NameDrop: []string{"foo*", "cpu_usage_idle"},
|
NameDrop: []string{"foo*", "cpu_usage_idle"},
|
||||||
}
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
drops := []string{
|
drops := []string{
|
||||||
"foo",
|
"foo",
|
||||||
@@ -94,6 +101,7 @@ func TestFilter_FieldPass(t *testing.T) {
|
|||||||
f := Filter{
|
f := Filter{
|
||||||
FieldPass: []string{"foo*", "cpu_usage_idle"},
|
FieldPass: []string{"foo*", "cpu_usage_idle"},
|
||||||
}
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
passes := []string{
|
passes := []string{
|
||||||
"foo",
|
"foo",
|
||||||
@@ -127,6 +135,7 @@ func TestFilter_FieldDrop(t *testing.T) {
|
|||||||
f := Filter{
|
f := Filter{
|
||||||
FieldDrop: []string{"foo*", "cpu_usage_idle"},
|
FieldDrop: []string{"foo*", "cpu_usage_idle"},
|
||||||
}
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
drops := []string{
|
drops := []string{
|
||||||
"foo",
|
"foo",
|
||||||
@@ -169,6 +178,7 @@ func TestFilter_TagPass(t *testing.T) {
|
|||||||
f := Filter{
|
f := Filter{
|
||||||
TagPass: filters,
|
TagPass: filters,
|
||||||
}
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
passes := []map[string]string{
|
passes := []map[string]string{
|
||||||
{"cpu": "cpu-total"},
|
{"cpu": "cpu-total"},
|
||||||
@@ -212,6 +222,7 @@ func TestFilter_TagDrop(t *testing.T) {
|
|||||||
f := Filter{
|
f := Filter{
|
||||||
TagDrop: filters,
|
TagDrop: filters,
|
||||||
}
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
drops := []map[string]string{
|
drops := []map[string]string{
|
||||||
{"cpu": "cpu-total"},
|
{"cpu": "cpu-total"},
|
||||||
@@ -241,3 +252,70 @@ func TestFilter_TagDrop(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFilter_ShouldMetricsPass(t *testing.T) {
|
||||||
|
m := testutil.TestMetric(1, "testmetric")
|
||||||
|
f := Filter{
|
||||||
|
NameDrop: []string{"foobar"},
|
||||||
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
require.True(t, f.ShouldMetricPass(m))
|
||||||
|
|
||||||
|
m = testutil.TestMetric(1, "foobar")
|
||||||
|
require.False(t, f.ShouldMetricPass(m))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilter_FilterTagsNoMatches(t *testing.T) {
|
||||||
|
pretags := map[string]string{
|
||||||
|
"host": "localhost",
|
||||||
|
"mytag": "foobar",
|
||||||
|
}
|
||||||
|
f := Filter{
|
||||||
|
TagExclude: []string{"nomatch"},
|
||||||
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
|
f.FilterTags(pretags)
|
||||||
|
assert.Equal(t, map[string]string{
|
||||||
|
"host": "localhost",
|
||||||
|
"mytag": "foobar",
|
||||||
|
}, pretags)
|
||||||
|
|
||||||
|
f = Filter{
|
||||||
|
TagInclude: []string{"nomatch"},
|
||||||
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
|
f.FilterTags(pretags)
|
||||||
|
assert.Equal(t, map[string]string{}, pretags)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilter_FilterTagsMatches(t *testing.T) {
|
||||||
|
pretags := map[string]string{
|
||||||
|
"host": "localhost",
|
||||||
|
"mytag": "foobar",
|
||||||
|
}
|
||||||
|
f := Filter{
|
||||||
|
TagExclude: []string{"ho*"},
|
||||||
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
|
f.FilterTags(pretags)
|
||||||
|
assert.Equal(t, map[string]string{
|
||||||
|
"mytag": "foobar",
|
||||||
|
}, pretags)
|
||||||
|
|
||||||
|
pretags = map[string]string{
|
||||||
|
"host": "localhost",
|
||||||
|
"mytag": "foobar",
|
||||||
|
}
|
||||||
|
f = Filter{
|
||||||
|
TagInclude: []string{"my*"},
|
||||||
|
}
|
||||||
|
require.NoError(t, f.CompileFilter())
|
||||||
|
|
||||||
|
f.FilterTags(pretags)
|
||||||
|
assert.Equal(t, map[string]string{
|
||||||
|
"mytag": "foobar",
|
||||||
|
}, pretags)
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,48 +2,54 @@ package internal_models
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/buffer"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Default number of metrics kept between flushes.
|
// Default size of metrics batch size.
|
||||||
DEFAULT_METRIC_BUFFER_LIMIT = 1000
|
DEFAULT_METRIC_BATCH_SIZE = 1000
|
||||||
|
|
||||||
// Limit how many full metric buffers are kept due to failed writes.
|
// Default number of metrics kept. It should be a multiple of batch size.
|
||||||
FULL_METRIC_BUFFERS_LIMIT = 100
|
DEFAULT_METRIC_BUFFER_LIMIT = 10000
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// RunningOutput contains the output configuration
|
||||||
type RunningOutput struct {
|
type RunningOutput struct {
|
||||||
Name string
|
Name string
|
||||||
Output telegraf.Output
|
Output telegraf.Output
|
||||||
Config *OutputConfig
|
Config *OutputConfig
|
||||||
Quiet bool
|
Quiet bool
|
||||||
MetricBufferLimit int
|
MetricBufferLimit int
|
||||||
FlushBufferWhenFull bool
|
MetricBatchSize int
|
||||||
|
|
||||||
metrics []telegraf.Metric
|
metrics *buffer.Buffer
|
||||||
tmpmetrics map[int][]telegraf.Metric
|
failMetrics *buffer.Buffer
|
||||||
overwriteI int
|
|
||||||
mapI int
|
|
||||||
|
|
||||||
sync.Mutex
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewRunningOutput(
|
func NewRunningOutput(
|
||||||
name string,
|
name string,
|
||||||
output telegraf.Output,
|
output telegraf.Output,
|
||||||
conf *OutputConfig,
|
conf *OutputConfig,
|
||||||
|
batchSize int,
|
||||||
|
bufferLimit int,
|
||||||
) *RunningOutput {
|
) *RunningOutput {
|
||||||
|
if bufferLimit == 0 {
|
||||||
|
bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
|
||||||
|
}
|
||||||
|
if batchSize == 0 {
|
||||||
|
batchSize = DEFAULT_METRIC_BATCH_SIZE
|
||||||
|
}
|
||||||
ro := &RunningOutput{
|
ro := &RunningOutput{
|
||||||
Name: name,
|
Name: name,
|
||||||
metrics: make([]telegraf.Metric, 0),
|
metrics: buffer.NewBuffer(batchSize),
|
||||||
tmpmetrics: make(map[int][]telegraf.Metric),
|
failMetrics: buffer.NewBuffer(bufferLimit),
|
||||||
Output: output,
|
Output: output,
|
||||||
Config: conf,
|
Config: conf,
|
||||||
MetricBufferLimit: DEFAULT_METRIC_BUFFER_LIMIT,
|
MetricBufferLimit: bufferLimit,
|
||||||
|
MetricBatchSize: batchSize,
|
||||||
}
|
}
|
||||||
return ro
|
return ro
|
||||||
}
|
}
|
||||||
@@ -56,67 +62,78 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ro.Lock()
|
|
||||||
defer ro.Unlock()
|
|
||||||
|
|
||||||
if len(ro.metrics) < ro.MetricBufferLimit {
|
// Filter any tagexclude/taginclude parameters before adding metric
|
||||||
ro.metrics = append(ro.metrics, metric)
|
if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
|
||||||
} else {
|
// In order to filter out tags, we need to create a new metric, since
|
||||||
if ro.FlushBufferWhenFull {
|
// metrics are immutable once created.
|
||||||
ro.metrics = append(ro.metrics, metric)
|
tags := metric.Tags()
|
||||||
tmpmetrics := make([]telegraf.Metric, len(ro.metrics))
|
fields := metric.Fields()
|
||||||
copy(tmpmetrics, ro.metrics)
|
t := metric.Time()
|
||||||
ro.metrics = make([]telegraf.Metric, 0)
|
name := metric.Name()
|
||||||
err := ro.write(tmpmetrics)
|
ro.Config.Filter.FilterTags(tags)
|
||||||
if err != nil {
|
// error is not possible if creating from another metric, so ignore.
|
||||||
log.Printf("ERROR writing full metric buffer to output %s, %s",
|
metric, _ = telegraf.NewMetric(name, tags, fields, t)
|
||||||
ro.Name, err)
|
}
|
||||||
if len(ro.tmpmetrics) == FULL_METRIC_BUFFERS_LIMIT {
|
|
||||||
ro.mapI = 0
|
ro.metrics.Add(metric)
|
||||||
// overwrite one
|
if ro.metrics.Len() == ro.MetricBatchSize {
|
||||||
ro.tmpmetrics[ro.mapI] = tmpmetrics
|
batch := ro.metrics.Batch(ro.MetricBatchSize)
|
||||||
ro.mapI++
|
err := ro.write(batch)
|
||||||
} else {
|
if err != nil {
|
||||||
ro.tmpmetrics[ro.mapI] = tmpmetrics
|
ro.failMetrics.Add(batch...)
|
||||||
ro.mapI++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if ro.overwriteI == 0 {
|
|
||||||
log.Printf("WARNING: overwriting cached metrics, you may want to " +
|
|
||||||
"increase the metric_buffer_limit setting in your [agent] " +
|
|
||||||
"config if you do not wish to overwrite metrics.\n")
|
|
||||||
}
|
|
||||||
if ro.overwriteI == len(ro.metrics) {
|
|
||||||
ro.overwriteI = 0
|
|
||||||
}
|
|
||||||
ro.metrics[ro.overwriteI] = metric
|
|
||||||
ro.overwriteI++
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write writes all cached points to this output.
|
// Write writes all cached points to this output.
|
||||||
func (ro *RunningOutput) Write() error {
|
func (ro *RunningOutput) Write() error {
|
||||||
ro.Lock()
|
if !ro.Quiet {
|
||||||
defer ro.Unlock()
|
log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+
|
||||||
err := ro.write(ro.metrics)
|
"Total gathered metrics: %d. Total dropped metrics: %d.",
|
||||||
if err != nil {
|
ro.Name,
|
||||||
return err
|
ro.failMetrics.Len()+ro.metrics.Len(),
|
||||||
} else {
|
ro.MetricBufferLimit,
|
||||||
ro.metrics = make([]telegraf.Metric, 0)
|
ro.metrics.Total(),
|
||||||
ro.overwriteI = 0
|
ro.metrics.Drops()+ro.failMetrics.Drops())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write any cached metric buffers that failed previously
|
var err error
|
||||||
for i, tmpmetrics := range ro.tmpmetrics {
|
if !ro.failMetrics.IsEmpty() {
|
||||||
if err := ro.write(tmpmetrics); err != nil {
|
bufLen := ro.failMetrics.Len()
|
||||||
return err
|
// how many batches of failed writes we need to write.
|
||||||
} else {
|
nBatches := bufLen/ro.MetricBatchSize + 1
|
||||||
delete(ro.tmpmetrics, i)
|
batchSize := ro.MetricBatchSize
|
||||||
|
|
||||||
|
for i := 0; i < nBatches; i++ {
|
||||||
|
// If it's the last batch, only grab the metrics that have not had
|
||||||
|
// a write attempt already (this is primarily to preserve order).
|
||||||
|
if i == nBatches-1 {
|
||||||
|
batchSize = bufLen % ro.MetricBatchSize
|
||||||
|
}
|
||||||
|
batch := ro.failMetrics.Batch(batchSize)
|
||||||
|
// If we've already failed previous writes, don't bother trying to
|
||||||
|
// write to this output again. We are not exiting the loop just so
|
||||||
|
// that we can rotate the metrics to preserve order.
|
||||||
|
if err == nil {
|
||||||
|
err = ro.write(batch)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
ro.failMetrics.Add(batch...)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
batch := ro.metrics.Batch(ro.MetricBatchSize)
|
||||||
|
// see comment above about not trying to write to an already failed output.
|
||||||
|
// if ro.failMetrics is empty then err will always be nil at this point.
|
||||||
|
if err == nil {
|
||||||
|
err = ro.write(batch)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
ro.failMetrics.Add(batch...)
|
||||||
|
return err
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -129,8 +146,8 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
|
|||||||
elapsed := time.Since(start)
|
elapsed := time.Since(start)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if !ro.Quiet {
|
if !ro.Quiet {
|
||||||
log.Printf("Wrote %d metrics to output %s in %s\n",
|
log.Printf("Output [%s] wrote batch of %d metrics in %s\n",
|
||||||
len(metrics), ro.Name, elapsed)
|
ro.Name, len(metrics), elapsed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package internal_models
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -29,16 +28,100 @@ var next5 = []telegraf.Metric{
|
|||||||
testutil.TestMetric(101, "metric10"),
|
testutil.TestMetric(101, "metric10"),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that we can write metrics with simple default setup.
|
// Benchmark adding metrics.
|
||||||
func TestRunningOutputDefault(t *testing.T) {
|
func BenchmarkRunningOutputAddWrite(b *testing.B) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
Filter: Filter{
|
Filter: Filter{
|
||||||
IsActive: false,
|
IsActive: false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
m := &perfOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
ro.Quiet = true
|
||||||
|
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
ro.AddMetric(first5[0])
|
||||||
|
ro.Write()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark adding metrics.
|
||||||
|
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &perfOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
ro.Quiet = true
|
||||||
|
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
ro.AddMetric(first5[0])
|
||||||
|
if n%100 == 0 {
|
||||||
|
ro.Write()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark adding metrics.
|
||||||
|
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &perfOutput{}
|
||||||
|
m.failWrite = true
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
ro.Quiet = true
|
||||||
|
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
ro.AddMetric(first5[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that NameDrop filters ger properly applied.
|
||||||
|
func TestRunningOutput_DropFilter(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: true,
|
||||||
|
NameDrop: []string{"metric1", "metric2"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NoError(t, conf.Filter.CompileFilter())
|
||||||
|
|
||||||
m := &mockOutput{}
|
m := &mockOutput{}
|
||||||
ro := NewRunningOutput("test", m, conf)
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
|
||||||
|
for _, metric := range first5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
for _, metric := range next5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
err := ro.Write()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, m.Metrics(), 8)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that NameDrop filters without a match do nothing.
|
||||||
|
func TestRunningOutput_PassFilter(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: true,
|
||||||
|
NameDrop: []string{"metric1000", "foo*"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NoError(t, conf.Filter.CompileFilter())
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
|
||||||
for _, metric := range first5 {
|
for _, metric := range first5 {
|
||||||
ro.AddMetric(metric)
|
ro.AddMetric(metric)
|
||||||
@@ -53,41 +136,96 @@ func TestRunningOutputDefault(t *testing.T) {
|
|||||||
assert.Len(t, m.Metrics(), 10)
|
assert.Len(t, m.Metrics(), 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that the first metric gets overwritten if there is a buffer overflow.
|
// Test that tags are properly included
|
||||||
func TestRunningOutputOverwrite(t *testing.T) {
|
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
Filter: Filter{
|
Filter: Filter{
|
||||||
IsActive: false,
|
IsActive: true,
|
||||||
|
TagInclude: []string{"nothing*"},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
assert.NoError(t, conf.Filter.CompileFilter())
|
||||||
|
|
||||||
m := &mockOutput{}
|
m := &mockOutput{}
|
||||||
ro := NewRunningOutput("test", m, conf)
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
ro.MetricBufferLimit = 4
|
|
||||||
|
|
||||||
for _, metric := range first5 {
|
ro.AddMetric(first5[0])
|
||||||
ro.AddMetric(metric)
|
assert.Len(t, m.Metrics(), 0)
|
||||||
}
|
|
||||||
require.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
err := ro.Write()
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
require.Len(t, m.Metrics(), 4)
|
assert.Len(t, m.Metrics(), 1)
|
||||||
|
assert.Empty(t, m.Metrics()[0].Tags())
|
||||||
var expected, actual []string
|
|
||||||
for i, exp := range first5[1:] {
|
|
||||||
expected = append(expected, exp.String())
|
|
||||||
actual = append(actual, m.Metrics()[i].String())
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Strings(expected)
|
|
||||||
sort.Strings(actual)
|
|
||||||
|
|
||||||
assert.Equal(t, expected, actual)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that multiple buffer overflows are handled properly.
|
// Test that tags are properly excluded
|
||||||
func TestRunningOutputMultiOverwrite(t *testing.T) {
|
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: true,
|
||||||
|
TagExclude: []string{"tag*"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NoError(t, conf.Filter.CompileFilter())
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
|
||||||
|
ro.AddMetric(first5[0])
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
err := ro.Write()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, m.Metrics(), 1)
|
||||||
|
assert.Len(t, m.Metrics()[0].Tags(), 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that tags are properly Excluded
|
||||||
|
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: true,
|
||||||
|
TagExclude: []string{"nothing*"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NoError(t, conf.Filter.CompileFilter())
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
|
||||||
|
ro.AddMetric(first5[0])
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
err := ro.Write()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, m.Metrics(), 1)
|
||||||
|
assert.Len(t, m.Metrics()[0].Tags(), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that tags are properly included
|
||||||
|
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: true,
|
||||||
|
TagInclude: []string{"tag*"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
assert.NoError(t, conf.Filter.CompileFilter())
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
|
||||||
|
ro.AddMetric(first5[0])
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
err := ro.Write()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, m.Metrics(), 1)
|
||||||
|
assert.Len(t, m.Metrics()[0].Tags(), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that we can write metrics with simple default setup.
|
||||||
|
func TestRunningOutputDefault(t *testing.T) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
Filter: Filter{
|
Filter: Filter{
|
||||||
IsActive: false,
|
IsActive: false,
|
||||||
@@ -95,8 +233,7 @@ func TestRunningOutputMultiOverwrite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
m := &mockOutput{}
|
m := &mockOutput{}
|
||||||
ro := NewRunningOutput("test", m, conf)
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
ro.MetricBufferLimit = 3
|
|
||||||
|
|
||||||
for _, metric := range first5 {
|
for _, metric := range first5 {
|
||||||
ro.AddMetric(metric)
|
ro.AddMetric(metric)
|
||||||
@@ -104,22 +241,11 @@ func TestRunningOutputMultiOverwrite(t *testing.T) {
|
|||||||
for _, metric := range next5 {
|
for _, metric := range next5 {
|
||||||
ro.AddMetric(metric)
|
ro.AddMetric(metric)
|
||||||
}
|
}
|
||||||
require.Len(t, m.Metrics(), 0)
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
err := ro.Write()
|
err := ro.Write()
|
||||||
require.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
require.Len(t, m.Metrics(), 3)
|
assert.Len(t, m.Metrics(), 10)
|
||||||
|
|
||||||
var expected, actual []string
|
|
||||||
for i, exp := range next5[2:] {
|
|
||||||
expected = append(expected, exp.String())
|
|
||||||
actual = append(actual, m.Metrics()[i].String())
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Strings(expected)
|
|
||||||
sort.Strings(actual)
|
|
||||||
|
|
||||||
assert.Equal(t, expected, actual)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that running output doesn't flush until it's full when
|
// Test that running output doesn't flush until it's full when
|
||||||
@@ -132,11 +258,9 @@ func TestRunningOutputFlushWhenFull(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
m := &mockOutput{}
|
m := &mockOutput{}
|
||||||
ro := NewRunningOutput("test", m, conf)
|
ro := NewRunningOutput("test", m, conf, 6, 10)
|
||||||
ro.FlushBufferWhenFull = true
|
|
||||||
ro.MetricBufferLimit = 5
|
|
||||||
|
|
||||||
// Fill buffer to limit
|
// Fill buffer to 1 under limit
|
||||||
for _, metric := range first5 {
|
for _, metric := range first5 {
|
||||||
ro.AddMetric(metric)
|
ro.AddMetric(metric)
|
||||||
}
|
}
|
||||||
@@ -165,9 +289,7 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
m := &mockOutput{}
|
m := &mockOutput{}
|
||||||
ro := NewRunningOutput("test", m, conf)
|
ro := NewRunningOutput("test", m, conf, 4, 12)
|
||||||
ro.FlushBufferWhenFull = true
|
|
||||||
ro.MetricBufferLimit = 4
|
|
||||||
|
|
||||||
// Fill buffer past limit twive
|
// Fill buffer past limit twive
|
||||||
for _, metric := range first5 {
|
for _, metric := range first5 {
|
||||||
@@ -177,7 +299,7 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
|
|||||||
ro.AddMetric(metric)
|
ro.AddMetric(metric)
|
||||||
}
|
}
|
||||||
// flushed twice
|
// flushed twice
|
||||||
assert.Len(t, m.Metrics(), 10)
|
assert.Len(t, m.Metrics(), 8)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunningOutputWriteFail(t *testing.T) {
|
func TestRunningOutputWriteFail(t *testing.T) {
|
||||||
@@ -189,11 +311,9 @@ func TestRunningOutputWriteFail(t *testing.T) {
|
|||||||
|
|
||||||
m := &mockOutput{}
|
m := &mockOutput{}
|
||||||
m.failWrite = true
|
m.failWrite = true
|
||||||
ro := NewRunningOutput("test", m, conf)
|
ro := NewRunningOutput("test", m, conf, 4, 12)
|
||||||
ro.FlushBufferWhenFull = true
|
|
||||||
ro.MetricBufferLimit = 4
|
|
||||||
|
|
||||||
// Fill buffer past limit twice
|
// Fill buffer to limit twice
|
||||||
for _, metric := range first5 {
|
for _, metric := range first5 {
|
||||||
ro.AddMetric(metric)
|
ro.AddMetric(metric)
|
||||||
}
|
}
|
||||||
@@ -216,6 +336,161 @@ func TestRunningOutputWriteFail(t *testing.T) {
|
|||||||
assert.Len(t, m.Metrics(), 10)
|
assert.Len(t, m.Metrics(), 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Verify that the order of points is preserved during a write failure.
|
||||||
|
func TestRunningOutputWriteFailOrder(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
m.failWrite = true
|
||||||
|
ro := NewRunningOutput("test", m, conf, 100, 1000)
|
||||||
|
|
||||||
|
// add 5 metrics
|
||||||
|
for _, metric := range first5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
// Write fails
|
||||||
|
err := ro.Write()
|
||||||
|
require.Error(t, err)
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
m.failWrite = false
|
||||||
|
// add 5 more metrics
|
||||||
|
for _, metric := range next5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
err = ro.Write()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify that 10 metrics were written
|
||||||
|
assert.Len(t, m.Metrics(), 10)
|
||||||
|
// Verify that they are in order
|
||||||
|
expected := append(first5, next5...)
|
||||||
|
assert.Equal(t, expected, m.Metrics())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that the order of points is preserved during many write failures.
|
||||||
|
func TestRunningOutputWriteFailOrder2(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
m.failWrite = true
|
||||||
|
ro := NewRunningOutput("test", m, conf, 5, 100)
|
||||||
|
|
||||||
|
// add 5 metrics
|
||||||
|
for _, metric := range first5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// Write fails
|
||||||
|
err := ro.Write()
|
||||||
|
require.Error(t, err)
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
// add 5 metrics
|
||||||
|
for _, metric := range next5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// Write fails
|
||||||
|
err = ro.Write()
|
||||||
|
require.Error(t, err)
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
// add 5 metrics
|
||||||
|
for _, metric := range first5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// Write fails
|
||||||
|
err = ro.Write()
|
||||||
|
require.Error(t, err)
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
// add 5 metrics
|
||||||
|
for _, metric := range next5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// Write fails
|
||||||
|
err = ro.Write()
|
||||||
|
require.Error(t, err)
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
m.failWrite = false
|
||||||
|
err = ro.Write()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify that 10 metrics were written
|
||||||
|
assert.Len(t, m.Metrics(), 20)
|
||||||
|
// Verify that they are in order
|
||||||
|
expected := append(first5, next5...)
|
||||||
|
expected = append(expected, first5...)
|
||||||
|
expected = append(expected, next5...)
|
||||||
|
assert.Equal(t, expected, m.Metrics())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that the order of points is preserved when there is a remainder
|
||||||
|
// of points for the batch.
|
||||||
|
//
|
||||||
|
// ie, with a batch size of 5:
|
||||||
|
//
|
||||||
|
// 1 2 3 4 5 6 <-- order, failed points
|
||||||
|
// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
|
||||||
|
// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
|
||||||
|
//
|
||||||
|
func TestRunningOutputWriteFailOrder3(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{
|
||||||
|
IsActive: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
m.failWrite = true
|
||||||
|
ro := NewRunningOutput("test", m, conf, 5, 1000)
|
||||||
|
|
||||||
|
// add 5 metrics
|
||||||
|
for _, metric := range first5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
// Write fails
|
||||||
|
err := ro.Write()
|
||||||
|
require.Error(t, err)
|
||||||
|
// no successful flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
// add and attempt to write a single metric:
|
||||||
|
ro.AddMetric(next5[0])
|
||||||
|
err = ro.Write()
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
// unset fail and write metrics
|
||||||
|
m.failWrite = false
|
||||||
|
err = ro.Write()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify that 6 metrics were written
|
||||||
|
assert.Len(t, m.Metrics(), 6)
|
||||||
|
// Verify that they are in order
|
||||||
|
expected := append(first5, next5[0])
|
||||||
|
assert.Equal(t, expected, m.Metrics())
|
||||||
|
}
|
||||||
|
|
||||||
type mockOutput struct {
|
type mockOutput struct {
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
|
|
||||||
@@ -263,3 +538,31 @@ func (m *mockOutput) Metrics() []telegraf.Metric {
|
|||||||
defer m.Unlock()
|
defer m.Unlock()
|
||||||
return m.metrics
|
return m.metrics
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type perfOutput struct {
|
||||||
|
// if true, mock a write failure
|
||||||
|
failWrite bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *perfOutput) Connect() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *perfOutput) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *perfOutput) Description() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *perfOutput) SampleConfig() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *perfOutput) Write(metrics []telegraf.Metric) error {
|
||||||
|
if m.failWrite {
|
||||||
|
return fmt.Errorf("Failed Write!")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -45,14 +45,9 @@ func NewMetric(
|
|||||||
name string,
|
name string,
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
fields map[string]interface{},
|
fields map[string]interface{},
|
||||||
t ...time.Time,
|
t time.Time,
|
||||||
) (Metric, error) {
|
) (Metric, error) {
|
||||||
var T time.Time
|
pt, err := client.NewPoint(name, tags, fields, t)
|
||||||
if len(t) > 0 {
|
|
||||||
T = t[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
pt, err := client.NewPoint(name, tags, fields, T)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -51,23 +51,6 @@ func TestNewMetricString(t *testing.T) {
|
|||||||
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewMetricStringNoTime(t *testing.T) {
|
|
||||||
tags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
}
|
|
||||||
fields := map[string]interface{}{
|
|
||||||
"usage_idle": float64(99),
|
|
||||||
}
|
|
||||||
m, err := NewMetric("cpu", tags, fields)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
|
|
||||||
assert.Equal(t, lineProto, m.String())
|
|
||||||
|
|
||||||
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
|
|
||||||
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewMetricFailNaN(t *testing.T) {
|
func TestNewMetricFailNaN(t *testing.T) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,11 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
|
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
|
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
|
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/chrony"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
|
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/consul"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
|
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
|
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
|
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
|
||||||
@@ -14,7 +18,9 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
|
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
|
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
|
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
|
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
|
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
|
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
|
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
|
||||||
@@ -23,6 +29,7 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||||
@@ -34,6 +41,7 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
|
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
|
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
|
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
|
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
|
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
|
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
|
||||||
@@ -49,16 +57,19 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
|
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
|
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
|
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/rollbar_webhooks"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
|
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
|
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
|
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
|
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
|
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/system"
|
_ "github.com/influxdata/telegraf/plugins/inputs/system"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
|
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
|
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
|
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
|
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# Telegraf plugin: Apache
|
# Telegraf plugin: Apache
|
||||||
|
|
||||||
#### Plugin arguments:
|
#### Plugin arguments:
|
||||||
- **urls** []string: List of apache-status URLs to collect from.
|
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
|
||||||
|
|
||||||
#### Description
|
#### Description
|
||||||
|
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ type Apache struct {
|
|||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## An array of Apache status URI to gather stats.
|
## An array of Apache status URI to gather stats.
|
||||||
|
## Default is "http://localhost/server-status?auto".
|
||||||
urls = ["http://localhost/server-status?auto"]
|
urls = ["http://localhost/server-status?auto"]
|
||||||
`
|
`
|
||||||
|
|
||||||
@@ -33,6 +34,10 @@ func (n *Apache) Description() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if len(n.Urls) == 0 {
|
||||||
|
n.Urls = []string{"http://localhost/server-status?auto"}
|
||||||
|
}
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
var outerr error
|
var outerr error
|
||||||
|
|
||||||
|
|||||||
@@ -7,19 +7,12 @@ import (
|
|||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
//"reflect"
|
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
/*type Server struct {
|
|
||||||
Host string
|
|
||||||
Username string
|
|
||||||
Password string
|
|
||||||
Port string
|
|
||||||
}*/
|
|
||||||
|
|
||||||
type JolokiaClient interface {
|
type JolokiaClient interface {
|
||||||
MakeRequest(req *http.Request) (*http.Response, error)
|
MakeRequest(req *http.Request) (*http.Response, error)
|
||||||
}
|
}
|
||||||
@@ -55,12 +48,6 @@ type jmxMetric interface {
|
|||||||
addTagsFields(out map[string]interface{})
|
addTagsFields(out map[string]interface{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func addServerTags(host string, tags map[string]string) {
|
|
||||||
if host != "" && host != "localhost" && host != "127.0.0.1" {
|
|
||||||
tags["host"] = host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newJavaMetric(host string, metric string,
|
func newJavaMetric(host string, metric string,
|
||||||
acc telegraf.Accumulator) *javaMetric {
|
acc telegraf.Accumulator) *javaMetric {
|
||||||
return &javaMetric{host: host, metric: metric, acc: acc}
|
return &javaMetric{host: host, metric: metric, acc: acc}
|
||||||
@@ -120,7 +107,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
|
|||||||
|
|
||||||
tokens := parseJmxMetricRequest(mbean)
|
tokens := parseJmxMetricRequest(mbean)
|
||||||
addTokensToTags(tokens, tags)
|
addTokensToTags(tokens, tags)
|
||||||
addServerTags(j.host, tags)
|
tags["cassandra_host"] = j.host
|
||||||
|
|
||||||
if _, ok := tags["mname"]; !ok {
|
if _, ok := tags["mname"]; !ok {
|
||||||
//Queries for a single value will not return a "name" tag in the response.
|
//Queries for a single value will not return a "name" tag in the response.
|
||||||
@@ -148,7 +135,7 @@ func addCassandraMetric(mbean string, c cassandraMetric,
|
|||||||
fields := make(map[string]interface{})
|
fields := make(map[string]interface{})
|
||||||
tokens := parseJmxMetricRequest(mbean)
|
tokens := parseJmxMetricRequest(mbean)
|
||||||
addTokensToTags(tokens, tags)
|
addTokensToTags(tokens, tags)
|
||||||
addServerTags(c.host, tags)
|
tags["cassandra_host"] = c.host
|
||||||
addValuesAsFields(values, fields, tags["mname"])
|
addValuesAsFields(values, fields, tags["mname"])
|
||||||
c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
|
c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
|
||||||
|
|
||||||
@@ -192,7 +179,7 @@ func (j *Cassandra) SampleConfig() string {
|
|||||||
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
|
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
|
||||||
## List of metrics collected on above servers
|
## List of metrics collected on above servers
|
||||||
## Each metric consists of a jmx path.
|
## Each metric consists of a jmx path.
|
||||||
## This will collect all heap memory usage metrics from the jvm and
|
## This will collect all heap memory usage metrics from the jvm and
|
||||||
## ReadLatency metrics for all keyspaces and tables.
|
## ReadLatency metrics for all keyspaces and tables.
|
||||||
## "type=Table" in the query works with Cassandra3.0. Older versions might
|
## "type=Table" in the query works with Cassandra3.0. Older versions might
|
||||||
## need to use "type=ColumnFamily"
|
## need to use "type=ColumnFamily"
|
||||||
@@ -277,15 +264,19 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
|
|||||||
|
|
||||||
for _, server := range servers {
|
for _, server := range servers {
|
||||||
for _, metric := range metrics {
|
for _, metric := range metrics {
|
||||||
var m jmxMetric
|
|
||||||
|
|
||||||
serverTokens := parseServerTokens(server)
|
serverTokens := parseServerTokens(server)
|
||||||
|
|
||||||
|
var m jmxMetric
|
||||||
if strings.HasPrefix(metric, "/java.lang:") {
|
if strings.HasPrefix(metric, "/java.lang:") {
|
||||||
m = newJavaMetric(serverTokens["host"], metric, acc)
|
m = newJavaMetric(serverTokens["host"], metric, acc)
|
||||||
} else if strings.HasPrefix(metric,
|
} else if strings.HasPrefix(metric,
|
||||||
"/org.apache.cassandra.metrics:") {
|
"/org.apache.cassandra.metrics:") {
|
||||||
m = newCassandraMetric(serverTokens["host"], metric, acc)
|
m = newCassandraMetric(serverTokens["host"], metric, acc)
|
||||||
|
} else {
|
||||||
|
// unsupported metric type
|
||||||
|
log.Printf("Unsupported Cassandra metric [%s], skipping",
|
||||||
|
metric)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prepare URL
|
// Prepare URL
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ const validCassandraNestedMultiValueJSON = `
|
|||||||
"status": 200,
|
"status": 200,
|
||||||
"timestamp": 1458089184,
|
"timestamp": 1458089184,
|
||||||
"value": {
|
"value": {
|
||||||
"org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table1,type=Table":
|
"org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table1,type=Table":
|
||||||
{ "999thPercentile": 1.0,
|
{ "999thPercentile": 1.0,
|
||||||
"Count": 100,
|
"Count": 100,
|
||||||
"DurationUnit": "microseconds",
|
"DurationUnit": "microseconds",
|
||||||
@@ -66,7 +66,7 @@ const validCassandraNestedMultiValueJSON = `
|
|||||||
"RateUnit": "events/second",
|
"RateUnit": "events/second",
|
||||||
"StdDev": null
|
"StdDev": null
|
||||||
},
|
},
|
||||||
"org.apache.cassandra.metrics:keyspace=test_keyspace2,name=ReadLatency,scope=test_table2,type=Table":
|
"org.apache.cassandra.metrics:keyspace=test_keyspace2,name=ReadLatency,scope=test_table2,type=Table":
|
||||||
{ "999thPercentile": 2.0,
|
{ "999thPercentile": 2.0,
|
||||||
"Count": 200,
|
"Count": 200,
|
||||||
"DurationUnit": "microseconds",
|
"DurationUnit": "microseconds",
|
||||||
@@ -163,13 +163,13 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {
|
|||||||
"HeapMemoryUsage_used": 203288528.0,
|
"HeapMemoryUsage_used": 203288528.0,
|
||||||
}
|
}
|
||||||
tags1 := map[string]string{
|
tags1 := map[string]string{
|
||||||
"host": "10.10.10.10",
|
"cassandra_host": "10.10.10.10",
|
||||||
"mname": "HeapMemoryUsage",
|
"mname": "HeapMemoryUsage",
|
||||||
}
|
}
|
||||||
|
|
||||||
tags2 := map[string]string{
|
tags2 := map[string]string{
|
||||||
"host": "10.10.10.11",
|
"cassandra_host": "10.10.10.11",
|
||||||
"mname": "HeapMemoryUsage",
|
"mname": "HeapMemoryUsage",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags1)
|
acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags1)
|
||||||
acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags2)
|
acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags2)
|
||||||
@@ -190,8 +190,8 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
"host": "10.10.10.10",
|
"cassandra_host": "10.10.10.10",
|
||||||
"mname": "ConcurrentMarkSweep",
|
"mname": "ConcurrentMarkSweep",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "javaGarbageCollector", fields, tags)
|
acc.AssertContainsTaggedFields(t, "javaGarbageCollector", fields, tags)
|
||||||
}
|
}
|
||||||
@@ -231,10 +231,10 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
"host": "10.10.10.10",
|
"cassandra_host": "10.10.10.10",
|
||||||
"mname": "ReadLatency",
|
"mname": "ReadLatency",
|
||||||
"keyspace": "test_keyspace1",
|
"keyspace": "test_keyspace1",
|
||||||
"scope": "test_table",
|
"scope": "test_table",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "cassandraTable", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cassandraTable", fields, tags)
|
||||||
}
|
}
|
||||||
@@ -268,17 +268,17 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tags1 := map[string]string{
|
tags1 := map[string]string{
|
||||||
"host": "10.10.10.10",
|
"cassandra_host": "10.10.10.10",
|
||||||
"mname": "ReadLatency",
|
"mname": "ReadLatency",
|
||||||
"keyspace": "test_keyspace1",
|
"keyspace": "test_keyspace1",
|
||||||
"scope": "test_table1",
|
"scope": "test_table1",
|
||||||
}
|
}
|
||||||
|
|
||||||
tags2 := map[string]string{
|
tags2 := map[string]string{
|
||||||
"host": "10.10.10.10",
|
"cassandra_host": "10.10.10.10",
|
||||||
"mname": "ReadLatency",
|
"mname": "ReadLatency",
|
||||||
"keyspace": "test_keyspace2",
|
"keyspace": "test_keyspace2",
|
||||||
"scope": "test_table2",
|
"scope": "test_table2",
|
||||||
}
|
}
|
||||||
|
|
||||||
acc.AssertContainsTaggedFields(t, "cassandraTable", fields1, tags1)
|
acc.AssertContainsTaggedFields(t, "cassandraTable", fields1, tags1)
|
||||||
|
|||||||
109
plugins/inputs/ceph/README.md
Normal file
109
plugins/inputs/ceph/README.md
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
# Ceph Storage Input Plugin
|
||||||
|
|
||||||
|
Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
|
||||||
|
|
||||||
|
The plugin works by scanning the configured SocketDir for OSD and MON socket files. When it finds
|
||||||
|
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**
|
||||||
|
|
||||||
|
The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are
|
||||||
|
used as collection tags, and all sub-keys are flattened. For example:
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"paxos": {
|
||||||
|
"refresh": 9363435,
|
||||||
|
"refresh_latency": {
|
||||||
|
"avgcount": 9363435,
|
||||||
|
"sum": 5378.794002000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Would be parsed into the following metrics, all of which would be tagged with collection=paxos:
|
||||||
|
|
||||||
|
- refresh = 9363435
|
||||||
|
- refresh_latency.avgcount: 9363435
|
||||||
|
- refresh_latency.sum: 5378.794002000
|
||||||
|
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```
|
||||||
|
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
|
||||||
|
[[inputs.ceph]]
|
||||||
|
## All configuration values are optional, defaults are shown below
|
||||||
|
|
||||||
|
## location of ceph binary
|
||||||
|
ceph_binary = "/usr/bin/ceph"
|
||||||
|
|
||||||
|
## directory in which to look for socket files
|
||||||
|
socket_dir = "/var/run/ceph"
|
||||||
|
|
||||||
|
## prefix of MON and OSD socket files, used to determine socket type
|
||||||
|
mon_prefix = "ceph-mon"
|
||||||
|
osd_prefix = "ceph-osd"
|
||||||
|
|
||||||
|
## suffix used to identify socket files
|
||||||
|
socket_suffix = "asok"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Measurements & Fields:
|
||||||
|
|
||||||
|
All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.
|
||||||
|
|
||||||
|
|
||||||
|
### Tags:
|
||||||
|
|
||||||
|
All measurements will have the following tags:
|
||||||
|
|
||||||
|
- type: either 'osd' or 'mon' to indicate which type of node was queried
|
||||||
|
- id: a unique string identifier, parsed from the socket file name for the node
|
||||||
|
- collection: the top-level key under which these fields were reported. Possible values are:
|
||||||
|
- for MON nodes:
|
||||||
|
- cluster
|
||||||
|
- leveldb
|
||||||
|
- mon
|
||||||
|
- paxos
|
||||||
|
- throttle-mon_client_bytes
|
||||||
|
- throttle-mon_daemon_bytes
|
||||||
|
- throttle-msgr_dispatch_throttler-mon
|
||||||
|
- for OSD nodes:
|
||||||
|
- WBThrottle
|
||||||
|
- filestore
|
||||||
|
- leveldb
|
||||||
|
- mutex-FileJournal::completions_lock
|
||||||
|
- mutex-FileJournal::finisher_lock
|
||||||
|
- mutex-FileJournal::write_lock
|
||||||
|
- mutex-FileJournal::writeq_lock
|
||||||
|
- mutex-JOS::ApplyManager::apply_lock
|
||||||
|
- mutex-JOS::ApplyManager::com_lock
|
||||||
|
- mutex-JOS::SubmitManager::lock
|
||||||
|
- mutex-WBThrottle::lock
|
||||||
|
- objecter
|
||||||
|
- osd
|
||||||
|
- recoverystate_perf
|
||||||
|
- throttle-filestore_bytes
|
||||||
|
- throttle-filestore_ops
|
||||||
|
- throttle-msgr_dispatch_throttler-client
|
||||||
|
- throttle-msgr_dispatch_throttler-cluster
|
||||||
|
- throttle-msgr_dispatch_throttler-hb_back_server
|
||||||
|
- throttle-msgr_dispatch_throttler-hb_front_serve
|
||||||
|
- throttle-msgr_dispatch_throttler-hbclient
|
||||||
|
- throttle-msgr_dispatch_throttler-ms_objecter
|
||||||
|
- throttle-objecter_bytes
|
||||||
|
- throttle-objecter_ops
|
||||||
|
- throttle-osd_client_bytes
|
||||||
|
- throttle-osd_client_messages
|
||||||
|
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
|
||||||
|
<pre>
|
||||||
|
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
|
||||||
|
* Plugin: ceph, Collection 1
|
||||||
|
> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
|
||||||
|
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
|
||||||
|
> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
|
||||||
|
> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
|
||||||
|
</pre>
|
||||||
249
plugins/inputs/ceph/ceph.go
Normal file
249
plugins/inputs/ceph/ceph.go
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
package ceph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
measurement = "ceph"
|
||||||
|
typeMon = "monitor"
|
||||||
|
typeOsd = "osd"
|
||||||
|
osdPrefix = "ceph-osd"
|
||||||
|
monPrefix = "ceph-mon"
|
||||||
|
sockSuffix = "asok"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Ceph struct {
|
||||||
|
CephBinary string
|
||||||
|
OsdPrefix string
|
||||||
|
MonPrefix string
|
||||||
|
SocketDir string
|
||||||
|
SocketSuffix string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Ceph) setDefaults() {
|
||||||
|
if c.CephBinary == "" {
|
||||||
|
c.CephBinary = "/usr/bin/ceph"
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.OsdPrefix == "" {
|
||||||
|
c.OsdPrefix = osdPrefix
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.MonPrefix == "" {
|
||||||
|
c.MonPrefix = monPrefix
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.SocketDir == "" {
|
||||||
|
c.SocketDir = "/var/run/ceph"
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.SocketSuffix == "" {
|
||||||
|
c.SocketSuffix = sockSuffix
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Ceph) Description() string {
|
||||||
|
return "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster."
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
## All configuration values are optional, defaults are shown below
|
||||||
|
|
||||||
|
## location of ceph binary
|
||||||
|
ceph_binary = "/usr/bin/ceph"
|
||||||
|
|
||||||
|
## directory in which to look for socket files
|
||||||
|
socket_dir = "/var/run/ceph"
|
||||||
|
|
||||||
|
## prefix of MON and OSD socket files, used to determine socket type
|
||||||
|
mon_prefix = "ceph-mon"
|
||||||
|
osd_prefix = "ceph-osd"
|
||||||
|
|
||||||
|
## suffix used to identify socket files
|
||||||
|
socket_suffix = "asok"
|
||||||
|
`
|
||||||
|
|
||||||
|
func (c *Ceph) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Ceph) Gather(acc telegraf.Accumulator) error {
|
||||||
|
c.setDefaults()
|
||||||
|
sockets, err := findSockets(c)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range sockets {
|
||||||
|
dump, err := perfDump(c.CephBinary, s)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("error reading from socket '%s': %v", s.socket, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
data, err := parseDump(dump)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("error parsing dump from socket '%s': %v", s.socket, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for tag, metrics := range *data {
|
||||||
|
acc.AddFields(measurement,
|
||||||
|
map[string]interface{}(metrics),
|
||||||
|
map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add(measurement, func() telegraf.Input { return &Ceph{} })
|
||||||
|
}
|
||||||
|
|
||||||
|
var perfDump = func(binary string, socket *socket) (string, error) {
|
||||||
|
cmdArgs := []string{"--admin-daemon", socket.socket}
|
||||||
|
if socket.sockType == typeOsd {
|
||||||
|
cmdArgs = append(cmdArgs, "perf", "dump")
|
||||||
|
} else if socket.sockType == typeMon {
|
||||||
|
cmdArgs = append(cmdArgs, "perfcounters_dump")
|
||||||
|
} else {
|
||||||
|
return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := exec.Command(binary, cmdArgs...)
|
||||||
|
var out bytes.Buffer
|
||||||
|
cmd.Stdout = &out
|
||||||
|
err := cmd.Run()
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("error running ceph dump: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return out.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var findSockets = func(c *Ceph) ([]*socket, error) {
|
||||||
|
listing, err := ioutil.ReadDir(c.SocketDir)
|
||||||
|
if err != nil {
|
||||||
|
return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err)
|
||||||
|
}
|
||||||
|
sockets := make([]*socket, 0, len(listing))
|
||||||
|
for _, info := range listing {
|
||||||
|
f := info.Name()
|
||||||
|
var sockType string
|
||||||
|
var sockPrefix string
|
||||||
|
if strings.HasPrefix(f, c.MonPrefix) {
|
||||||
|
sockType = typeMon
|
||||||
|
sockPrefix = monPrefix
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(f, c.OsdPrefix) {
|
||||||
|
sockType = typeOsd
|
||||||
|
sockPrefix = osdPrefix
|
||||||
|
|
||||||
|
}
|
||||||
|
if sockType == typeOsd || sockType == typeMon {
|
||||||
|
path := filepath.Join(c.SocketDir, f)
|
||||||
|
sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sockets, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseSockId extracts the daemon id from a socket file name by
// stripping the given prefix and suffix, then trimming any leftover
// separator characters ('.', '-', '_').
func parseSockId(fname, prefix, suffix string) string {
	id := strings.TrimSuffix(strings.TrimPrefix(fname, prefix), suffix)
	return strings.Trim(id, ".-_")
}
|
||||||
|
|
||||||
|
// socket describes a single ceph admin socket found on disk.
type socket struct {
	sockId   string // daemon id parsed from the file name
	sockType string // set to typeMon or typeOsd by findSockets
	socket   string // full filesystem path to the socket
}
|
||||||
|
|
||||||
|
// metric is a single flattened perf counter. Name components are pushed
// onto pathStack leaf-first as the JSON tree is unwound, so the stack
// must be read in reverse to produce the dotted name.
type metric struct {
	pathStack []string // lifo stack of name components
	value     float64
}

// name builds the flattened, dot-separated metric name by reading
// pathStack from top (last pushed) to bottom.
func (m *metric) name() string {
	parts := make([]string, 0, len(m.pathStack))
	for i := len(m.pathStack) - 1; i >= 0; i-- {
		parts = append(parts, m.pathStack[i])
	}
	return strings.Join(parts, ".")
}
|
||||||
|
|
||||||
|
// metricMap maps a flattened metric name to its value.
type metricMap map[string]interface{}

// taggedMetricMap groups metricMaps by their top-level collection tag.
type taggedMetricMap map[string]metricMap
|
||||||
|
|
||||||
|
// Parses a raw JSON string into a taggedMetricMap
|
||||||
|
// Delegates the actual parsing to newTaggedMetricMap(..)
|
||||||
|
func parseDump(dump string) (*taggedMetricMap, error) {
|
||||||
|
data := make(map[string]interface{})
|
||||||
|
err := json.Unmarshal([]byte(dump), &data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tmm := newTaggedMetricMap(data)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to tag dataset: '%v': %v", tmm, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tmm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Builds a TaggedMetricMap out of a generic string map.
|
||||||
|
// The top-level key is used as a tag and all sub-keys are flattened into metrics
|
||||||
|
func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
|
||||||
|
tmm := make(taggedMetricMap)
|
||||||
|
for tag, datapoints := range data {
|
||||||
|
mm := make(metricMap)
|
||||||
|
for _, m := range flatten(datapoints) {
|
||||||
|
mm[m.name()] = m.value
|
||||||
|
}
|
||||||
|
tmm[tag] = mm
|
||||||
|
}
|
||||||
|
return &tmm
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively flattens any k-v hierarchy present in data.
|
||||||
|
// Nested keys are flattened into ordered slices associated with a metric value.
|
||||||
|
// The key slices are treated as stacks, and are expected to be reversed and concatenated
|
||||||
|
// when passed as metrics to the accumulator. (see (*metric).name())
|
||||||
|
func flatten(data interface{}) []*metric {
|
||||||
|
var metrics []*metric
|
||||||
|
|
||||||
|
switch val := data.(type) {
|
||||||
|
case float64:
|
||||||
|
metrics = []*metric{&metric{make([]string, 0, 1), val}}
|
||||||
|
case map[string]interface{}:
|
||||||
|
metrics = make([]*metric, 0, len(val))
|
||||||
|
for k, v := range val {
|
||||||
|
for _, m := range flatten(v) {
|
||||||
|
m.pathStack = append(m.pathStack, k)
|
||||||
|
metrics = append(metrics, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Printf("Ignoring unexpected type '%T' for value %v", val, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
return metrics
|
||||||
|
}
|
||||||
682
plugins/inputs/ceph/ceph_test.go
Normal file
682
plugins/inputs/ceph/ceph_test.go
Normal file
@@ -0,0 +1,682 @@
|
|||||||
|
package ceph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// epsilon is the relative tolerance for floating-point comparisons
	// in these tests.
	epsilon = float64(0.00000001)
)
|
||||||
|
|
||||||
|
func TestParseSockId(t *testing.T) {
|
||||||
|
s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix)
|
||||||
|
assert.Equal(t, s, "1")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseMonDump(t *testing.T) {
|
||||||
|
dump, err := parseDump(monPerfDump)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
|
||||||
|
assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseOsdDump(t *testing.T) {
|
||||||
|
dump, err := parseDump(osdPerfDump)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
|
||||||
|
assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGather(t *testing.T) {
|
||||||
|
saveFind := findSockets
|
||||||
|
saveDump := perfDump
|
||||||
|
defer func() {
|
||||||
|
findSockets = saveFind
|
||||||
|
perfDump = saveDump
|
||||||
|
}()
|
||||||
|
|
||||||
|
findSockets = func(c *Ceph) ([]*socket, error) {
|
||||||
|
return []*socket{&socket{"osd.1", typeOsd, ""}}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
perfDump = func(binary string, s *socket) (string, error) {
|
||||||
|
return osdPerfDump, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
acc := &testutil.Accumulator{}
|
||||||
|
c := &Ceph{}
|
||||||
|
c.Gather(acc)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindSockets(t *testing.T) {
|
||||||
|
tmpdir, err := ioutil.TempDir("", "socktest")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
defer func() {
|
||||||
|
err := os.Remove(tmpdir)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}()
|
||||||
|
c := &Ceph{
|
||||||
|
CephBinary: "foo",
|
||||||
|
SocketDir: tmpdir,
|
||||||
|
}
|
||||||
|
|
||||||
|
c.setDefaults()
|
||||||
|
|
||||||
|
for _, st := range sockTestParams {
|
||||||
|
createTestFiles(tmpdir, st)
|
||||||
|
|
||||||
|
sockets, err := findSockets(c)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
for i := 1; i <= st.osds; i++ {
|
||||||
|
assertFoundSocket(t, tmpdir, typeOsd, i, sockets)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 1; i <= st.mons; i++ {
|
||||||
|
assertFoundSocket(t, tmpdir, typeMon, i, sockets)
|
||||||
|
}
|
||||||
|
cleanupTestFiles(tmpdir, st)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*socket) {
|
||||||
|
var prefix string
|
||||||
|
if sockType == typeOsd {
|
||||||
|
prefix = osdPrefix
|
||||||
|
} else {
|
||||||
|
prefix = monPrefix
|
||||||
|
}
|
||||||
|
expected := path.Join(dir, sockFile(prefix, i))
|
||||||
|
found := false
|
||||||
|
for _, s := range sockets {
|
||||||
|
fmt.Printf("Checking %s\n", s.socket)
|
||||||
|
if s.socket == expected {
|
||||||
|
found = true
|
||||||
|
assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s)
|
||||||
|
assert.Equal(t, s.sockId, strconv.Itoa(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.True(t, found, "Did not find socket: %s", expected)
|
||||||
|
}
|
||||||
|
|
||||||
|
func sockFile(prefix string, i int) string {
|
||||||
|
return strings.Join([]string{prefix, strconv.Itoa(i), sockSuffix}, ".")
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTestFiles(dir string, st *SockTest) {
|
||||||
|
writeFile := func(prefix string, i int) {
|
||||||
|
f := sockFile(prefix, i)
|
||||||
|
fpath := path.Join(dir, f)
|
||||||
|
ioutil.WriteFile(fpath, []byte(""), 0777)
|
||||||
|
}
|
||||||
|
tstFileApply(st, writeFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanupTestFiles(dir string, st *SockTest) {
|
||||||
|
rmFile := func(prefix string, i int) {
|
||||||
|
f := sockFile(prefix, i)
|
||||||
|
fpath := path.Join(dir, f)
|
||||||
|
err := os.Remove(fpath)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error removing test file %s: %v\n", fpath, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tstFileApply(st, rmFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func tstFileApply(st *SockTest, fn func(prefix string, i int)) {
|
||||||
|
for i := 1; i <= st.osds; i++ {
|
||||||
|
fn(osdPrefix, i)
|
||||||
|
}
|
||||||
|
for i := 1; i <= st.mons; i++ {
|
||||||
|
fn(monPrefix, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type SockTest struct {
|
||||||
|
osds int
|
||||||
|
mons int
|
||||||
|
}
|
||||||
|
|
||||||
|
var sockTestParams = []*SockTest{
|
||||||
|
&SockTest{
|
||||||
|
osds: 2,
|
||||||
|
mons: 2,
|
||||||
|
},
|
||||||
|
&SockTest{
|
||||||
|
mons: 1,
|
||||||
|
},
|
||||||
|
&SockTest{
|
||||||
|
osds: 1,
|
||||||
|
},
|
||||||
|
&SockTest{},
|
||||||
|
}
|
||||||
|
|
||||||
|
var monPerfDump = `
|
||||||
|
{ "cluster": { "num_mon": 2,
|
||||||
|
"num_mon_quorum": 2,
|
||||||
|
"num_osd": 26,
|
||||||
|
"num_osd_up": 26,
|
||||||
|
"num_osd_in": 26,
|
||||||
|
"osd_epoch": 3306,
|
||||||
|
"osd_kb": 11487846448,
|
||||||
|
"osd_kb_used": 5678670180,
|
||||||
|
"osd_kb_avail": 5809176268,
|
||||||
|
"num_pool": 12,
|
||||||
|
"num_pg": 768,
|
||||||
|
"num_pg_active_clean": 768,
|
||||||
|
"num_pg_active": 768,
|
||||||
|
"num_pg_peering": 0,
|
||||||
|
"num_object": 397616,
|
||||||
|
"num_object_degraded": 0,
|
||||||
|
"num_object_unfound": 0,
|
||||||
|
"num_bytes": 2917848227467,
|
||||||
|
"num_mds_up": 0,
|
||||||
|
"num_mds_in": 0,
|
||||||
|
"num_mds_failed": 0,
|
||||||
|
"mds_epoch": 1},
|
||||||
|
"leveldb": { "leveldb_get": 321950312,
|
||||||
|
"leveldb_transaction": 18729922,
|
||||||
|
"leveldb_compact": 0,
|
||||||
|
"leveldb_compact_range": 74141,
|
||||||
|
"leveldb_compact_queue_merge": 0,
|
||||||
|
"leveldb_compact_queue_len": 0},
|
||||||
|
"mon": {},
|
||||||
|
"paxos": { "start_leader": 0,
|
||||||
|
"start_peon": 1,
|
||||||
|
"restart": 4,
|
||||||
|
"refresh": 9363435,
|
||||||
|
"refresh_latency": { "avgcount": 9363435,
|
||||||
|
"sum": 5378.794002000},
|
||||||
|
"begin": 9363435,
|
||||||
|
"begin_keys": { "avgcount": 0,
|
||||||
|
"sum": 0},
|
||||||
|
"begin_bytes": { "avgcount": 9363435,
|
||||||
|
"sum": 110468605489},
|
||||||
|
"begin_latency": { "avgcount": 9363435,
|
||||||
|
"sum": 5850.060682000},
|
||||||
|
"commit": 9363435,
|
||||||
|
"commit_keys": { "avgcount": 0,
|
||||||
|
"sum": 0},
|
||||||
|
"commit_bytes": { "avgcount": 0,
|
||||||
|
"sum": 0},
|
||||||
|
"commit_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"collect": 1,
|
||||||
|
"collect_keys": { "avgcount": 1,
|
||||||
|
"sum": 1},
|
||||||
|
"collect_bytes": { "avgcount": 1,
|
||||||
|
"sum": 24},
|
||||||
|
"collect_latency": { "avgcount": 1,
|
||||||
|
"sum": 0.000280000},
|
||||||
|
"collect_uncommitted": 0,
|
||||||
|
"collect_timeout": 0,
|
||||||
|
"accept_timeout": 0,
|
||||||
|
"lease_ack_timeout": 0,
|
||||||
|
"lease_timeout": 0,
|
||||||
|
"store_state": 9363435,
|
||||||
|
"store_state_keys": { "avgcount": 9363435,
|
||||||
|
"sum": 176572789},
|
||||||
|
"store_state_bytes": { "avgcount": 9363435,
|
||||||
|
"sum": 216355887217},
|
||||||
|
"store_state_latency": { "avgcount": 9363435,
|
||||||
|
"sum": 6866.540527000},
|
||||||
|
"share_state": 0,
|
||||||
|
"share_state_keys": { "avgcount": 0,
|
||||||
|
"sum": 0},
|
||||||
|
"share_state_bytes": { "avgcount": 0,
|
||||||
|
"sum": 0},
|
||||||
|
"new_pn": 0,
|
||||||
|
"new_pn_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-mon_client_bytes": { "val": 246,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 896030,
|
||||||
|
"get_sum": 45854374,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 896026,
|
||||||
|
"put_sum": 45854128,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-mon_daemon_bytes": { "val": 0,
|
||||||
|
"max": 419430400,
|
||||||
|
"get": 2773768,
|
||||||
|
"get_sum": 3627676976,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 2773768,
|
||||||
|
"put_sum": 3627676976,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-msgr_dispatch_throttler-mon": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 34504949,
|
||||||
|
"get_sum": 226860281124,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 34504949,
|
||||||
|
"put_sum": 226860281124,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}}}
|
||||||
|
`
|
||||||
|
|
||||||
|
var osdPerfDump = `
|
||||||
|
{ "WBThrottle": { "bytes_dirtied": 28405539,
|
||||||
|
"bytes_wb": 0,
|
||||||
|
"ios_dirtied": 93,
|
||||||
|
"ios_wb": 0,
|
||||||
|
"inodes_dirtied": 86,
|
||||||
|
"inodes_wb": 0},
|
||||||
|
"filestore": { "journal_queue_max_ops": 0,
|
||||||
|
"journal_queue_ops": 0,
|
||||||
|
"journal_ops": 1108008,
|
||||||
|
"journal_queue_max_bytes": 0,
|
||||||
|
"journal_queue_bytes": 0,
|
||||||
|
"journal_bytes": 73233416196,
|
||||||
|
"journal_latency": { "avgcount": 1108008,
|
||||||
|
"sum": 290.981036000},
|
||||||
|
"journal_wr": 1091866,
|
||||||
|
"journal_wr_bytes": { "avgcount": 1091866,
|
||||||
|
"sum": 74925682688},
|
||||||
|
"journal_full": 0,
|
||||||
|
"committing": 0,
|
||||||
|
"commitcycle": 110389,
|
||||||
|
"commitcycle_interval": { "avgcount": 110389,
|
||||||
|
"sum": 552132.109360000},
|
||||||
|
"commitcycle_latency": { "avgcount": 110389,
|
||||||
|
"sum": 178.657804000},
|
||||||
|
"op_queue_max_ops": 50,
|
||||||
|
"op_queue_ops": 0,
|
||||||
|
"ops": 1108008,
|
||||||
|
"op_queue_max_bytes": 104857600,
|
||||||
|
"op_queue_bytes": 0,
|
||||||
|
"bytes": 73226768148,
|
||||||
|
"apply_latency": { "avgcount": 1108008,
|
||||||
|
"sum": 947.742722000},
|
||||||
|
"queue_transaction_latency_avg": { "avgcount": 1108008,
|
||||||
|
"sum": 0.511327000}},
|
||||||
|
"leveldb": { "leveldb_get": 4361221,
|
||||||
|
"leveldb_transaction": 4351276,
|
||||||
|
"leveldb_compact": 0,
|
||||||
|
"leveldb_compact_range": 0,
|
||||||
|
"leveldb_compact_queue_merge": 0,
|
||||||
|
"leveldb_compact_queue_len": 0},
|
||||||
|
"mutex-FileJournal::completions_lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"mutex-FileJournal::finisher_lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"mutex-FileJournal::write_lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"mutex-FileJournal::writeq_lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"mutex-JOS::ApplyManager::apply_lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"mutex-JOS::ApplyManager::com_lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"mutex-JOS::SubmitManager::lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"mutex-WBThrottle::lock": { "wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"objecter": { "op_active": 0,
|
||||||
|
"op_laggy": 0,
|
||||||
|
"op_send": 0,
|
||||||
|
"op_send_bytes": 0,
|
||||||
|
"op_resend": 0,
|
||||||
|
"op_ack": 0,
|
||||||
|
"op_commit": 0,
|
||||||
|
"op": 0,
|
||||||
|
"op_r": 0,
|
||||||
|
"op_w": 0,
|
||||||
|
"op_rmw": 0,
|
||||||
|
"op_pg": 0,
|
||||||
|
"osdop_stat": 0,
|
||||||
|
"osdop_create": 0,
|
||||||
|
"osdop_read": 0,
|
||||||
|
"osdop_write": 0,
|
||||||
|
"osdop_writefull": 0,
|
||||||
|
"osdop_append": 0,
|
||||||
|
"osdop_zero": 0,
|
||||||
|
"osdop_truncate": 0,
|
||||||
|
"osdop_delete": 0,
|
||||||
|
"osdop_mapext": 0,
|
||||||
|
"osdop_sparse_read": 0,
|
||||||
|
"osdop_clonerange": 0,
|
||||||
|
"osdop_getxattr": 0,
|
||||||
|
"osdop_setxattr": 0,
|
||||||
|
"osdop_cmpxattr": 0,
|
||||||
|
"osdop_rmxattr": 0,
|
||||||
|
"osdop_resetxattrs": 0,
|
||||||
|
"osdop_tmap_up": 0,
|
||||||
|
"osdop_tmap_put": 0,
|
||||||
|
"osdop_tmap_get": 0,
|
||||||
|
"osdop_call": 0,
|
||||||
|
"osdop_watch": 0,
|
||||||
|
"osdop_notify": 0,
|
||||||
|
"osdop_src_cmpxattr": 0,
|
||||||
|
"osdop_pgls": 0,
|
||||||
|
"osdop_pgls_filter": 0,
|
||||||
|
"osdop_other": 0,
|
||||||
|
"linger_active": 0,
|
||||||
|
"linger_send": 0,
|
||||||
|
"linger_resend": 0,
|
||||||
|
"poolop_active": 0,
|
||||||
|
"poolop_send": 0,
|
||||||
|
"poolop_resend": 0,
|
||||||
|
"poolstat_active": 0,
|
||||||
|
"poolstat_send": 0,
|
||||||
|
"poolstat_resend": 0,
|
||||||
|
"statfs_active": 0,
|
||||||
|
"statfs_send": 0,
|
||||||
|
"statfs_resend": 0,
|
||||||
|
"command_active": 0,
|
||||||
|
"command_send": 0,
|
||||||
|
"command_resend": 0,
|
||||||
|
"map_epoch": 3300,
|
||||||
|
"map_full": 0,
|
||||||
|
"map_inc": 3293,
|
||||||
|
"osd_sessions": 0,
|
||||||
|
"osd_session_open": 0,
|
||||||
|
"osd_session_close": 0,
|
||||||
|
"osd_laggy": 0},
|
||||||
|
"osd": { "opq": 0,
|
||||||
|
"op_wip": 0,
|
||||||
|
"op": 23939,
|
||||||
|
"op_in_bytes": 1245903961,
|
||||||
|
"op_out_bytes": 29103083856,
|
||||||
|
"op_latency": { "avgcount": 23939,
|
||||||
|
"sum": 440.192015000},
|
||||||
|
"op_process_latency": { "avgcount": 23939,
|
||||||
|
"sum": 30.170685000},
|
||||||
|
"op_r": 23112,
|
||||||
|
"op_r_out_bytes": 29103056146,
|
||||||
|
"op_r_latency": { "avgcount": 23112,
|
||||||
|
"sum": 19.373526000},
|
||||||
|
"op_r_process_latency": { "avgcount": 23112,
|
||||||
|
"sum": 14.625928000},
|
||||||
|
"op_w": 549,
|
||||||
|
"op_w_in_bytes": 1245804358,
|
||||||
|
"op_w_rlat": { "avgcount": 549,
|
||||||
|
"sum": 17.022299000},
|
||||||
|
"op_w_latency": { "avgcount": 549,
|
||||||
|
"sum": 418.494610000},
|
||||||
|
"op_w_process_latency": { "avgcount": 549,
|
||||||
|
"sum": 13.316555000},
|
||||||
|
"op_rw": 278,
|
||||||
|
"op_rw_in_bytes": 99603,
|
||||||
|
"op_rw_out_bytes": 27710,
|
||||||
|
"op_rw_rlat": { "avgcount": 278,
|
||||||
|
"sum": 2.213785000},
|
||||||
|
"op_rw_latency": { "avgcount": 278,
|
||||||
|
"sum": 2.323879000},
|
||||||
|
"op_rw_process_latency": { "avgcount": 278,
|
||||||
|
"sum": 2.228202000},
|
||||||
|
"subop": 1074774,
|
||||||
|
"subop_in_bytes": 26841811636,
|
||||||
|
"subop_latency": { "avgcount": 1074774,
|
||||||
|
"sum": 745.509160000},
|
||||||
|
"subop_w": 0,
|
||||||
|
"subop_w_in_bytes": 26841811636,
|
||||||
|
"subop_w_latency": { "avgcount": 1074774,
|
||||||
|
"sum": 745.509160000},
|
||||||
|
"subop_pull": 0,
|
||||||
|
"subop_pull_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"subop_push": 0,
|
||||||
|
"subop_push_in_bytes": 0,
|
||||||
|
"subop_push_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"pull": 0,
|
||||||
|
"push": 28,
|
||||||
|
"push_out_bytes": 103483392,
|
||||||
|
"push_in": 0,
|
||||||
|
"push_in_bytes": 0,
|
||||||
|
"recovery_ops": 15,
|
||||||
|
"loadavg": 202,
|
||||||
|
"buffer_bytes": 0,
|
||||||
|
"numpg": 18,
|
||||||
|
"numpg_primary": 8,
|
||||||
|
"numpg_replica": 10,
|
||||||
|
"numpg_stray": 0,
|
||||||
|
"heartbeat_to_peers": 10,
|
||||||
|
"heartbeat_from_peers": 0,
|
||||||
|
"map_messages": 7413,
|
||||||
|
"map_message_epochs": 9792,
|
||||||
|
"map_message_epoch_dups": 10105,
|
||||||
|
"messages_delayed_for_map": 83,
|
||||||
|
"stat_bytes": 102123175936,
|
||||||
|
"stat_bytes_used": 49961820160,
|
||||||
|
"stat_bytes_avail": 52161355776,
|
||||||
|
"copyfrom": 0,
|
||||||
|
"tier_promote": 0,
|
||||||
|
"tier_flush": 0,
|
||||||
|
"tier_flush_fail": 0,
|
||||||
|
"tier_try_flush": 0,
|
||||||
|
"tier_try_flush_fail": 0,
|
||||||
|
"tier_evict": 0,
|
||||||
|
"tier_whiteout": 0,
|
||||||
|
"tier_dirty": 230,
|
||||||
|
"tier_clean": 0,
|
||||||
|
"tier_delay": 0,
|
||||||
|
"agent_wake": 0,
|
||||||
|
"agent_skip": 0,
|
||||||
|
"agent_flush": 0,
|
||||||
|
"agent_evict": 0},
|
||||||
|
"recoverystate_perf": { "initial_latency": { "avgcount": 473,
|
||||||
|
"sum": 0.027207000},
|
||||||
|
"started_latency": { "avgcount": 1480,
|
||||||
|
"sum": 9854902.397648000},
|
||||||
|
"reset_latency": { "avgcount": 1953,
|
||||||
|
"sum": 0.096206000},
|
||||||
|
"start_latency": { "avgcount": 1953,
|
||||||
|
"sum": 0.059947000},
|
||||||
|
"primary_latency": { "avgcount": 765,
|
||||||
|
"sum": 4688922.186935000},
|
||||||
|
"peering_latency": { "avgcount": 704,
|
||||||
|
"sum": 1668.652135000},
|
||||||
|
"backfilling_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"waitremotebackfillreserved_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"waitlocalbackfillreserved_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"notbackfilling_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"repnotrecovering_latency": { "avgcount": 462,
|
||||||
|
"sum": 5158922.114600000},
|
||||||
|
"repwaitrecoveryreserved_latency": { "avgcount": 15,
|
||||||
|
"sum": 0.008275000},
|
||||||
|
"repwaitbackfillreserved_latency": { "avgcount": 1,
|
||||||
|
"sum": 0.000095000},
|
||||||
|
"RepRecovering_latency": { "avgcount": 16,
|
||||||
|
"sum": 2274.944727000},
|
||||||
|
"activating_latency": { "avgcount": 514,
|
||||||
|
"sum": 261.008520000},
|
||||||
|
"waitlocalrecoveryreserved_latency": { "avgcount": 20,
|
||||||
|
"sum": 0.175422000},
|
||||||
|
"waitremoterecoveryreserved_latency": { "avgcount": 20,
|
||||||
|
"sum": 0.682778000},
|
||||||
|
"recovering_latency": { "avgcount": 20,
|
||||||
|
"sum": 0.697551000},
|
||||||
|
"recovered_latency": { "avgcount": 511,
|
||||||
|
"sum": 0.011038000},
|
||||||
|
"clean_latency": { "avgcount": 503,
|
||||||
|
"sum": 4686961.154278000},
|
||||||
|
"active_latency": { "avgcount": 506,
|
||||||
|
"sum": 4687223.640464000},
|
||||||
|
"replicaactive_latency": { "avgcount": 446,
|
||||||
|
"sum": 5161197.078966000},
|
||||||
|
"stray_latency": { "avgcount": 794,
|
||||||
|
"sum": 4805.105128000},
|
||||||
|
"getinfo_latency": { "avgcount": 704,
|
||||||
|
"sum": 1138.477937000},
|
||||||
|
"getlog_latency": { "avgcount": 678,
|
||||||
|
"sum": 0.036393000},
|
||||||
|
"waitactingchange_latency": { "avgcount": 69,
|
||||||
|
"sum": 59.172893000},
|
||||||
|
"incomplete_latency": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000},
|
||||||
|
"getmissing_latency": { "avgcount": 609,
|
||||||
|
"sum": 0.012288000},
|
||||||
|
"waitupthru_latency": { "avgcount": 576,
|
||||||
|
"sum": 530.106999000}},
|
||||||
|
"throttle-filestore_bytes": { "val": 0,
|
||||||
|
"max": 0,
|
||||||
|
"get": 0,
|
||||||
|
"get_sum": 0,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 0,
|
||||||
|
"put_sum": 0,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-filestore_ops": { "val": 0,
|
||||||
|
"max": 0,
|
||||||
|
"get": 0,
|
||||||
|
"get_sum": 0,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 0,
|
||||||
|
"put_sum": 0,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-msgr_dispatch_throttler-client": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 130730,
|
||||||
|
"get_sum": 1246039872,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 130730,
|
||||||
|
"put_sum": 1246039872,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-msgr_dispatch_throttler-cluster": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 1108033,
|
||||||
|
"get_sum": 71277949992,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 1108033,
|
||||||
|
"put_sum": 71277949992,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-msgr_dispatch_throttler-hb_back_server": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 18320575,
|
||||||
|
"get_sum": 861067025,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 18320575,
|
||||||
|
"put_sum": 861067025,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-msgr_dispatch_throttler-hb_front_server": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 18320575,
|
||||||
|
"get_sum": 861067025,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 18320575,
|
||||||
|
"put_sum": 861067025,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-msgr_dispatch_throttler-hbclient": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 40479394,
|
||||||
|
"get_sum": 1902531518,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 40479394,
|
||||||
|
"put_sum": 1902531518,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-msgr_dispatch_throttler-ms_objecter": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 0,
|
||||||
|
"get_sum": 0,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 0,
|
||||||
|
"put_sum": 0,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-objecter_bytes": { "val": 0,
|
||||||
|
"max": 104857600,
|
||||||
|
"get": 0,
|
||||||
|
"get_sum": 0,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 0,
|
||||||
|
"put_sum": 0,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-objecter_ops": { "val": 0,
|
||||||
|
"max": 1024,
|
||||||
|
"get": 0,
|
||||||
|
"get_sum": 0,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 0,
|
||||||
|
"put_sum": 0,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-osd_client_bytes": { "val": 0,
|
||||||
|
"max": 524288000,
|
||||||
|
"get": 24241,
|
||||||
|
"get_sum": 1241992581,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 25958,
|
||||||
|
"put_sum": 1241992581,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}},
|
||||||
|
"throttle-osd_client_messages": { "val": 0,
|
||||||
|
"max": 100,
|
||||||
|
"get": 49214,
|
||||||
|
"get_sum": 49214,
|
||||||
|
"get_or_fail_fail": 0,
|
||||||
|
"get_or_fail_success": 0,
|
||||||
|
"take": 0,
|
||||||
|
"take_sum": 0,
|
||||||
|
"put": 49214,
|
||||||
|
"put_sum": 49214,
|
||||||
|
"wait": { "avgcount": 0,
|
||||||
|
"sum": 0.000000000}}}
|
||||||
|
`
|
||||||
92
plugins/inputs/chrony/README.md
Normal file
92
plugins/inputs/chrony/README.md
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
# chrony Input Plugin
|
||||||
|
|
||||||
|
Get standard chrony metrics; requires the chronyc executable.
|
||||||
|
|
||||||
|
Below is the documentation of the various headers returned by `chronyc tracking`.
|
||||||
|
|
||||||
|
- Reference ID - This is the refid and name (or IP address) if available, of the
|
||||||
|
server to which the computer is currently synchronised. If this is 127.127.1.1
|
||||||
|
it means the computer is not synchronised to any external source and that you
|
||||||
|
have the ‘local’ mode operating (via the local command in chronyc (see section local),
|
||||||
|
or the local directive in the ‘/etc/chrony.conf’ file (see section local)).
|
||||||
|
- Stratum - The stratum indicates how many hops away from a computer with an attached
|
||||||
|
reference clock we are. Such a computer is a stratum-1 computer, so the computer in the
|
||||||
|
example is two hops away (i.e. a.b.c is a stratum-2 and is synchronised from a stratum-1).
|
||||||
|
- Ref time - This is the time (UTC) at which the last measurement from the reference
|
||||||
|
source was processed.
|
||||||
|
- System time - In normal operation, chronyd never steps the system clock, because any
|
||||||
|
jump in the timescale can have adverse consequences for certain application programs.
|
||||||
|
Instead, any error in the system clock is corrected by slightly speeding up or slowing
|
||||||
|
down the system clock until the error has been removed, and then returning to the system
|
||||||
|
clock’s normal speed. A consequence of this is that there will be a period when the
|
||||||
|
system clock (as read by other programs using the gettimeofday() system call, or by the
|
||||||
|
date command in the shell) will be different from chronyd's estimate of the current true
|
||||||
|
time (which it reports to NTP clients when it is operating in server mode). The value
|
||||||
|
reported on this line is the difference due to this effect.
|
||||||
|
- Last offset - This is the estimated local offset on the last clock update.
|
||||||
|
- RMS offset - This is a long-term average of the offset value.
|
||||||
|
- Frequency - The ‘frequency’ is the rate by which the system’s clock would be
|
||||||
|
wrong if chronyd was not correcting it. It is expressed in ppm (parts per million).
|
||||||
|
For example, a value of 1ppm would mean that when the system’s clock thinks it has
|
||||||
|
advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.
|
||||||
|
- Residual freq - This shows the ‘residual frequency’ for the currently selected
|
||||||
|
reference source. This reflects any difference between what the measurements from the
|
||||||
|
reference source indicate the frequency should be and the frequency currently being used.
|
||||||
|
The reason this is not always zero is that a smoothing procedure is applied to the
|
||||||
|
frequency. Each time a measurement from the reference source is obtained and a new
|
||||||
|
residual frequency computed, the estimated accuracy of this residual is compared with the
|
||||||
|
estimated accuracy (see ‘skew’ next) of the existing frequency value. A weighted average
|
||||||
|
is computed for the new frequency, with weights depending on these accuracies. If the
|
||||||
|
measurements from the reference source follow a consistent trend, the residual will be
|
||||||
|
driven to zero over time.
|
||||||
|
- Skew - This is the estimated error bound on the frequency.
|
||||||
|
- Root delay - This is the total of the network path delays to the stratum-1 computer
|
||||||
|
from which the computer is ultimately synchronised. In certain extreme situations, this
|
||||||
|
value can be negative. (This can arise in a symmetric peer arrangement where the computers’
|
||||||
|
frequencies are not tracking each other and the network delay is very short relative to the
|
||||||
|
turn-around time at each computer.)
|
||||||
|
- Root dispersion - This is the total dispersion accumulated through all the computers
|
||||||
|
back to the stratum-1 computer from which the computer is ultimately synchronised.
|
||||||
|
Dispersion is due to system clock resolution, statistical measurement variations etc.
|
||||||
|
- Leap status - This is the leap status, which can be Normal, Insert second,
|
||||||
|
Delete second or Not synchronised.
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Get standard chrony metrics, requires chronyc executable.
|
||||||
|
[[inputs.chrony]]
|
||||||
|
## If true, chronyc tries to perform a DNS lookup for the time server.
|
||||||
|
# dns_lookup = false
|
||||||
|
```
|
||||||
|
|
||||||
|
### Measurements & Fields:
|
||||||
|
|
||||||
|
- chrony
|
||||||
|
- last_offset (float, seconds)
|
||||||
|
- rms_offset (float, seconds)
|
||||||
|
- frequency (float, ppm)
|
||||||
|
- residual_freq (float, ppm)
|
||||||
|
- skew (float, ppm)
|
||||||
|
- root_delay (float, seconds)
|
||||||
|
- root_dispersion (float, seconds)
|
||||||
|
- update_interval (float, seconds)
|
||||||
|
|
||||||
|
### Tags:
|
||||||
|
|
||||||
|
- All measurements have the following tags:
|
||||||
|
- reference_id
|
||||||
|
- stratum
|
||||||
|
- leap_status
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ telegraf -config telegraf.conf -input-filter chrony -test
|
||||||
|
* Plugin: chrony, Collection 1
|
||||||
|
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
129
plugins/inputs/chrony/chrony.go
Normal file
129
plugins/inputs/chrony/chrony.go
Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
// +build linux
|
||||||
|
|
||||||
|
package chrony
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// execCommand is swapped out in tests to mock external command execution.
	execCommand = exec.Command
)

// Chrony collects clock statistics by invoking the chronyc executable.
type Chrony struct {
	// DNSLookup controls whether chronyc resolves the time server address
	// to a host name; when false, "-n" is passed to suppress the lookup.
	DNSLookup bool `toml:"dns_lookup"`
	// path is the resolved location of the chronyc binary (set in init()).
	path string
}
|
||||||
|
|
||||||
|
func (*Chrony) Description() string {
|
||||||
|
return "Get standard chrony metrics, requires chronyc executable."
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Chrony) SampleConfig() string {
|
||||||
|
return `
|
||||||
|
## If true, chronyc tries to perform a DNS lookup for the time server.
|
||||||
|
# dns_lookup = false
|
||||||
|
`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Chrony) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if len(c.path) == 0 {
|
||||||
|
return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH")
|
||||||
|
}
|
||||||
|
|
||||||
|
flags := []string{}
|
||||||
|
if !c.DNSLookup {
|
||||||
|
flags = append(flags, "-n")
|
||||||
|
}
|
||||||
|
flags = append(flags, "tracking")
|
||||||
|
|
||||||
|
cmd := execCommand(c.path, flags...)
|
||||||
|
out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
|
||||||
|
}
|
||||||
|
fields, tags, err := processChronycOutput(string(out))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
acc.AddFields("chrony", fields, tags)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// processChronycOutput parses the output of `chronyc tracking`, which looks
// like:
//
//	Reference ID    : 192.168.1.22 (ntp.example.com)
//	Stratum         : 3
//	Ref time (UTC)  : Thu May 12 14:27:07 2016
//	System time     : 0.000020390 seconds fast of NTP time
//	Last offset     : +0.000012651 seconds
//	RMS offset      : 0.000025577 seconds
//	Frequency       : 16.001 ppm slow
//	Residual freq   : -0.000 ppm
//	Skew            : 0.006 ppm
//	Root delay      : 0.001655 seconds
//	Root dispersion : 0.003307 seconds
//	Update interval : 507.2 seconds
//	Leap status     : Normal
//
// The value on the left side of the colon is used as the field name if the
// first token on the right side parses as a float; otherwise it becomes a
// tag name (with its value lowercased). Lines whose name contains "time"
// ("Ref time", "System time") are ignored, and all names are converted to
// snake case.
//
// It returns (<fields>, <tags>, <error>).
func processChronycOutput(out string) (map[string]interface{}, map[string]string, error) {
	tags := map[string]string{}
	fields := map[string]interface{}{}
	lines := strings.Split(strings.TrimSpace(out), "\n")
	for _, line := range lines {
		// Split only on the first ':' so values that themselves contain
		// colons (e.g. timestamps) are not truncated.
		stats := strings.SplitN(line, ":", 2)
		if len(stats) < 2 {
			return nil, nil, fmt.Errorf("unexpected output from chronyc, expected ':' in %s", out)
		}
		name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
		// Ignore the reference/system time lines.
		if strings.Contains(name, "time") {
			continue
		}
		valueFields := strings.Fields(stats[1])
		if len(valueFields) == 0 {
			return nil, nil, fmt.Errorf("unexpected output from chronyc: %s", out)
		}
		// Stratum is always recorded as a tag.
		if strings.Contains(name, "stratum") {
			tags["stratum"] = valueFields[0]
			continue
		}
		value, err := strconv.ParseFloat(valueFields[0], 64)
		if err != nil {
			// Non-numeric values (reference ID, leap status) become tags.
			tags[name] = strings.ToLower(valueFields[0])
			continue
		}
		// chronyc reports the frequency magnitude with a "fast"/"slow"
		// suffix; negate the value when the clock is slow.
		if strings.Contains(stats[1], "slow") {
			value = -value
		}
		fields[name] = value
	}

	return fields, tags, nil
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
c := Chrony{}
|
||||||
|
path, _ := exec.LookPath("chronyc")
|
||||||
|
if len(path) > 0 {
|
||||||
|
c.path = path
|
||||||
|
}
|
||||||
|
inputs.Add("chrony", func() telegraf.Input {
|
||||||
|
return &c
|
||||||
|
})
|
||||||
|
}
|
||||||
3
plugins/inputs/chrony/chrony_notlinux.go
Normal file
3
plugins/inputs/chrony/chrony_notlinux.go
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package chrony
|
||||||
109
plugins/inputs/chrony/chrony_test.go
Normal file
109
plugins/inputs/chrony/chrony_test.go
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
// +build linux
|
||||||
|
|
||||||
|
package chrony
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGather(t *testing.T) {
|
||||||
|
c := Chrony{
|
||||||
|
path: "chronyc",
|
||||||
|
}
|
||||||
|
// overwriting exec commands with mock commands
|
||||||
|
execCommand = fakeExecCommand
|
||||||
|
defer func() { execCommand = exec.Command }()
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
|
err := c.Gather(&acc)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := map[string]string{
|
||||||
|
"reference_id": "192.168.1.22",
|
||||||
|
"leap_status": "normal",
|
||||||
|
"stratum": "3",
|
||||||
|
}
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"last_offset": 0.000012651,
|
||||||
|
"rms_offset": 0.000025577,
|
||||||
|
"frequency": -16.001,
|
||||||
|
"residual_freq": 0.0,
|
||||||
|
"skew": 0.006,
|
||||||
|
"root_delay": 0.001655,
|
||||||
|
"root_dispersion": 0.003307,
|
||||||
|
"update_interval": 507.2,
|
||||||
|
}
|
||||||
|
|
||||||
|
acc.AssertContainsTaggedFields(t, "chrony", fields, tags)
|
||||||
|
|
||||||
|
// test with dns lookup
|
||||||
|
c.DNSLookup = true
|
||||||
|
err = c.Gather(&acc)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "chrony", fields, tags)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// fakeExecCommand is a helper that mocks exec.Command: instead of running
// the requested command it re-invokes the test binary so TestHelperProcess
// executes in its place.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	argv := append([]string{"-test.run=TestHelperProcess", "--", command}, args...)
	cmd := exec.Command(os.Args[0], argv...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}
|
||||||
|
|
||||||
|
// TestHelperProcess isn't a real test: it is the process fakeExecCommand
// re-invokes. When GO_WANT_HELPER_PROCESS=1 is set it prints mock
// `chronyc tracking` output to stdout and exits, standing in for the real
// chronyc binary; otherwise it returns immediately.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}

	lookup := "Reference ID : 192.168.1.22 (ntp.example.com)\n"
	noLookup := "Reference ID : 192.168.1.22 (192.168.1.22)\n"
	mockData := `Stratum : 3
Ref time (UTC) : Thu May 12 14:27:07 2016
System time : 0.000020390 seconds fast of NTP time
Last offset : +0.000012651 seconds
RMS offset : 0.000025577 seconds
Frequency : 16.001 ppm slow
Residual freq : -0.000 ppm
Skew : 0.006 ppm
Root delay : 0.001655 seconds
Root dispersion : 0.003307 seconds
Update interval : 507.2 seconds
Leap status : Normal
`

	// os.Args looks like:
	//   <test binary> -test.run=TestHelperProcess -- <cmd> <args...>
	cmd, args := os.Args[3], os.Args[4:]

	if cmd != "chronyc" {
		fmt.Fprint(os.Stdout, "command not found")
		os.Exit(1)
	}
	if args[0] == "tracking" {
		// No "-n" flag: DNS lookup enabled, show the resolved host name.
		fmt.Fprint(os.Stdout, lookup+mockData)
	} else {
		fmt.Fprint(os.Stdout, noLookup+mockData)
	}
	os.Exit(0)
}
|
||||||
@@ -6,9 +6,12 @@ This plugin will pull Metric Statistics from Amazon CloudWatch.
|
|||||||
|
|
||||||
This plugin uses a credential chain for Authentication with the CloudWatch
|
This plugin uses a credential chain for Authentication with the CloudWatch
|
||||||
API endpoint. In the following order the plugin will attempt to authenticate.
|
API endpoint. In the following order the plugin will attempt to authenticate.
|
||||||
1. [IAMS Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
|
1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
|
||||||
2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
|
2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
|
||||||
3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
|
3. Shared profile from `profile` attribute
|
||||||
|
4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
|
||||||
|
5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
|
||||||
|
6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
|
||||||
|
|
||||||
### Configuration:
|
### Configuration:
|
||||||
|
|
||||||
@@ -24,7 +27,7 @@ API endpoint. In the following order the plugin will attempt to authenticate.
|
|||||||
delay = '1m'
|
delay = '1m'
|
||||||
|
|
||||||
## Override global run interval (optional - defaults to global interval)
|
## Override global run interval (optional - defaults to global interval)
|
||||||
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
|
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
|
||||||
## gaps or overlap in pulled data
|
## gaps or overlap in pulled data
|
||||||
interval = '1m'
|
interval = '1m'
|
||||||
|
|
||||||
@@ -36,11 +39,15 @@ API endpoint. In the following order the plugin will attempt to authenticate.
|
|||||||
## Refreshes Namespace available metrics every 1h
|
## Refreshes Namespace available metrics every 1h
|
||||||
[[inputs.cloudwatch.metrics]]
|
[[inputs.cloudwatch.metrics]]
|
||||||
names = ['Latency', 'RequestCount']
|
names = ['Latency', 'RequestCount']
|
||||||
|
|
||||||
## Dimension filters for Metric (optional)
|
## Dimension filters for Metric (optional)
|
||||||
[[inputs.cloudwatch.metrics.dimensions]]
|
[[inputs.cloudwatch.metrics.dimensions]]
|
||||||
name = 'LoadBalancerName'
|
name = 'LoadBalancerName'
|
||||||
value = 'p-example'
|
value = 'p-example'
|
||||||
|
|
||||||
|
[[inputs.cloudwatch.metrics.dimensions]]
|
||||||
|
name = 'AvailabilityZone'
|
||||||
|
value = '*'
|
||||||
```
|
```
|
||||||
#### Requirements and Terminology
|
#### Requirements and Terminology
|
||||||
|
|
||||||
@@ -52,6 +59,39 @@ Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/A
|
|||||||
- `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names
|
- `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names
|
||||||
- `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs
|
- `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs
|
||||||
|
|
||||||
|
Omitting or specifying a value of `'*'` for a dimension value configures all available metrics that contain a dimension with the specified name
|
||||||
|
to be retrieved. If specifying >1 dimension, then the metric must contain *all* the configured dimensions where the value of the
|
||||||
|
wildcard dimension is ignored.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
[[inputs.cloudwatch.metrics]]
|
||||||
|
names = ['Latency']
|
||||||
|
|
||||||
|
## Dimension filters for Metric (optional)
|
||||||
|
[[inputs.cloudwatch.metrics.dimensions]]
|
||||||
|
name = 'LoadBalancerName'
|
||||||
|
value = 'p-example'
|
||||||
|
|
||||||
|
[[inputs.cloudwatch.metrics.dimensions]]
|
||||||
|
name = 'AvailabilityZone'
|
||||||
|
value = '*'
|
||||||
|
```
|
||||||
|
|
||||||
|
If the following ELBs are available:
|
||||||
|
- name: `p-example`, availabilityZone: `us-east-1a`
|
||||||
|
- name: `p-example`, availabilityZone: `us-east-1b`
|
||||||
|
- name: `q-example`, availabilityZone: `us-east-1a`
|
||||||
|
- name: `q-example`, availabilityZone: `us-east-1b`
|
||||||
|
|
||||||
|
|
||||||
|
Then 2 metrics will be output:
|
||||||
|
- name: `p-example`, availabilityZone: `us-east-1a`
|
||||||
|
- name: `p-example`, availabilityZone: `us-east-1b`
|
||||||
|
|
||||||
|
If the `AvailabilityZone` wildcard dimension was omitted, then a single metric (name: `p-example`)
|
||||||
|
would be exported containing the aggregate values of the ELB across availability zones.
|
||||||
|
|
||||||
#### Restrictions and Limitations
|
#### Restrictions and Limitations
|
||||||
- CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html)
|
- CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html)
|
||||||
- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/)
|
- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/)
|
||||||
|
|||||||
@@ -3,28 +3,36 @@ package cloudwatch
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
internalaws "github.com/influxdata/telegraf/internal/config/aws"
|
||||||
|
"github.com/influxdata/telegraf/internal/errchan"
|
||||||
|
"github.com/influxdata/telegraf/internal/limiter"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
CloudWatch struct {
|
CloudWatch struct {
|
||||||
Region string `toml:"region"`
|
Region string `toml:"region"`
|
||||||
|
AccessKey string `toml:"access_key"`
|
||||||
|
SecretKey string `toml:"secret_key"`
|
||||||
|
RoleARN string `toml:"role_arn"`
|
||||||
|
Profile string `toml:"profile"`
|
||||||
|
Filename string `toml:"shared_credential_file"`
|
||||||
|
Token string `toml:"token"`
|
||||||
|
|
||||||
Period internal.Duration `toml:"period"`
|
Period internal.Duration `toml:"period"`
|
||||||
Delay internal.Duration `toml:"delay"`
|
Delay internal.Duration `toml:"delay"`
|
||||||
Namespace string `toml:"namespace"`
|
Namespace string `toml:"namespace"`
|
||||||
Metrics []*Metric `toml:"metrics"`
|
Metrics []*Metric `toml:"metrics"`
|
||||||
|
CacheTTL internal.Duration `toml:"cache_ttl"`
|
||||||
client cloudwatchClient
|
client cloudwatchClient
|
||||||
metricCache *MetricCache
|
metricCache *MetricCache
|
||||||
}
|
}
|
||||||
@@ -56,16 +64,35 @@ func (c *CloudWatch) SampleConfig() string {
|
|||||||
## Amazon Region
|
## Amazon Region
|
||||||
region = 'us-east-1'
|
region = 'us-east-1'
|
||||||
|
|
||||||
|
## Amazon Credentials
|
||||||
|
## Credentials are loaded in the following order
|
||||||
|
## 1) Assumed credentials via STS if role_arn is specified
|
||||||
|
## 2) explicit credentials from 'access_key' and 'secret_key'
|
||||||
|
## 3) shared profile from 'profile'
|
||||||
|
## 4) environment variables
|
||||||
|
## 5) shared credentials file
|
||||||
|
## 6) EC2 Instance Profile
|
||||||
|
#access_key = ""
|
||||||
|
#secret_key = ""
|
||||||
|
#token = ""
|
||||||
|
#role_arn = ""
|
||||||
|
#profile = ""
|
||||||
|
#shared_credential_file = ""
|
||||||
|
|
||||||
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
|
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
|
||||||
period = '1m'
|
period = '1m'
|
||||||
|
|
||||||
## Collection Delay (required - must account for metrics availability via CloudWatch API)
|
## Collection Delay (required - must account for metrics availability via CloudWatch API)
|
||||||
delay = '1m'
|
delay = '1m'
|
||||||
|
|
||||||
## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
|
## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
|
||||||
## gaps or overlap in pulled data
|
## gaps or overlap in pulled data
|
||||||
interval = '1m'
|
interval = '1m'
|
||||||
|
|
||||||
|
## Configure the TTL for the internal cache of metrics.
|
||||||
|
## Defaults to 1 hr if not specified
|
||||||
|
#cache_ttl = '10m'
|
||||||
|
|
||||||
## Metric Statistic Namespace (required)
|
## Metric Statistic Namespace (required)
|
||||||
namespace = 'AWS/ELB'
|
namespace = 'AWS/ELB'
|
||||||
|
|
||||||
@@ -74,7 +101,7 @@ func (c *CloudWatch) SampleConfig() string {
|
|||||||
## Refreshes Namespace available metrics every 1h
|
## Refreshes Namespace available metrics every 1h
|
||||||
#[[inputs.cloudwatch.metrics]]
|
#[[inputs.cloudwatch.metrics]]
|
||||||
# names = ['Latency', 'RequestCount']
|
# names = ['Latency', 'RequestCount']
|
||||||
#
|
#
|
||||||
# ## Dimension filters for Metric (optional)
|
# ## Dimension filters for Metric (optional)
|
||||||
# [[inputs.cloudwatch.metrics.dimensions]]
|
# [[inputs.cloudwatch.metrics.dimensions]]
|
||||||
# name = 'LoadBalancerName'
|
# name = 'LoadBalancerName'
|
||||||
@@ -97,20 +124,40 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
|
|||||||
if c.Metrics != nil {
|
if c.Metrics != nil {
|
||||||
metrics = []*cloudwatch.Metric{}
|
metrics = []*cloudwatch.Metric{}
|
||||||
for _, m := range c.Metrics {
|
for _, m := range c.Metrics {
|
||||||
dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
|
if !hasWilcard(m.Dimensions) {
|
||||||
for k, d := range m.Dimensions {
|
dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
|
||||||
dimensions[k] = &cloudwatch.Dimension{
|
for k, d := range m.Dimensions {
|
||||||
Name: aws.String(d.Name),
|
fmt.Printf("Dimension [%s]:[%s]\n", d.Name, d.Value)
|
||||||
Value: aws.String(d.Value),
|
dimensions[k] = &cloudwatch.Dimension{
|
||||||
|
Name: aws.String(d.Name),
|
||||||
|
Value: aws.String(d.Value),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, name := range m.MetricNames {
|
||||||
|
metrics = append(metrics, &cloudwatch.Metric{
|
||||||
|
Namespace: aws.String(c.Namespace),
|
||||||
|
MetricName: aws.String(name),
|
||||||
|
Dimensions: dimensions,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
allMetrics, err := c.fetchNamespaceMetrics()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, name := range m.MetricNames {
|
||||||
|
for _, metric := range allMetrics {
|
||||||
|
if isSelected(metric, m.Dimensions) {
|
||||||
|
metrics = append(metrics, &cloudwatch.Metric{
|
||||||
|
Namespace: aws.String(c.Namespace),
|
||||||
|
MetricName: aws.String(name),
|
||||||
|
Dimensions: metric.Dimensions,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, name := range m.MetricNames {
|
|
||||||
metrics = append(metrics, &cloudwatch.Metric{
|
|
||||||
Namespace: aws.String(c.Namespace),
|
|
||||||
MetricName: aws.String(name),
|
|
||||||
Dimensions: dimensions,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
var err error
|
var err error
|
||||||
@@ -121,30 +168,35 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
metricCount := len(metrics)
|
metricCount := len(metrics)
|
||||||
var errChan = make(chan error, metricCount)
|
errChan := errchan.New(metricCount)
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
// limit concurrency or we can easily exhaust user connection limit
|
// limit concurrency or we can easily exhaust user connection limit
|
||||||
semaphore := make(chan byte, 64)
|
// see cloudwatch API request limits:
|
||||||
|
// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
|
||||||
|
lmtr := limiter.NewRateLimiter(10, time.Second)
|
||||||
|
defer lmtr.Stop()
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(metrics))
|
||||||
for _, m := range metrics {
|
for _, m := range metrics {
|
||||||
semaphore <- 0x1
|
<-lmtr.C
|
||||||
go c.gatherMetric(acc, m, now, semaphore, errChan)
|
go func(inm *cloudwatch.Metric) {
|
||||||
|
defer wg.Done()
|
||||||
|
c.gatherMetric(acc, inm, now, errChan.C)
|
||||||
|
}(m)
|
||||||
}
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
for i := 1; i <= metricCount; i++ {
|
return errChan.Error()
|
||||||
err := <-errChan
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
inputs.Add("cloudwatch", func() telegraf.Input {
|
inputs.Add("cloudwatch", func() telegraf.Input {
|
||||||
return &CloudWatch{}
|
ttl, _ := time.ParseDuration("1hr")
|
||||||
|
return &CloudWatch{
|
||||||
|
CacheTTL: internal.Duration{Duration: ttl},
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -152,17 +204,18 @@ func init() {
|
|||||||
* Initialize CloudWatch client
|
* Initialize CloudWatch client
|
||||||
*/
|
*/
|
||||||
func (c *CloudWatch) initializeCloudWatch() error {
|
func (c *CloudWatch) initializeCloudWatch() error {
|
||||||
config := &aws.Config{
|
credentialConfig := &internalaws.CredentialConfig{
|
||||||
Region: aws.String(c.Region),
|
Region: c.Region,
|
||||||
Credentials: credentials.NewChainCredentials(
|
AccessKey: c.AccessKey,
|
||||||
[]credentials.Provider{
|
SecretKey: c.SecretKey,
|
||||||
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
|
RoleARN: c.RoleARN,
|
||||||
&credentials.EnvProvider{},
|
Profile: c.Profile,
|
||||||
&credentials.SharedCredentialsProvider{},
|
Filename: c.Filename,
|
||||||
}),
|
Token: c.Token,
|
||||||
}
|
}
|
||||||
|
configProvider := credentialConfig.Credentials()
|
||||||
|
|
||||||
c.client = cloudwatch.New(session.New(config))
|
c.client = cloudwatch.New(configProvider)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -197,11 +250,10 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
|
|||||||
more = token != nil
|
more = token != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheTTL, _ := time.ParseDuration("1hr")
|
|
||||||
c.metricCache = &MetricCache{
|
c.metricCache = &MetricCache{
|
||||||
Metrics: metrics,
|
Metrics: metrics,
|
||||||
Fetched: time.Now(),
|
Fetched: time.Now(),
|
||||||
TTL: cacheTTL,
|
TTL: c.CacheTTL.Duration,
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
@@ -210,12 +262,16 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
|
|||||||
/*
|
/*
|
||||||
* Gather given Metric and emit any error
|
* Gather given Metric and emit any error
|
||||||
*/
|
*/
|
||||||
func (c *CloudWatch) gatherMetric(acc telegraf.Accumulator, metric *cloudwatch.Metric, now time.Time, semaphore chan byte, errChan chan error) {
|
func (c *CloudWatch) gatherMetric(
|
||||||
|
acc telegraf.Accumulator,
|
||||||
|
metric *cloudwatch.Metric,
|
||||||
|
now time.Time,
|
||||||
|
errChan chan error,
|
||||||
|
) {
|
||||||
params := c.getStatisticsInput(metric, now)
|
params := c.getStatisticsInput(metric, now)
|
||||||
resp, err := c.client.GetMetricStatistics(params)
|
resp, err := c.client.GetMetricStatistics(params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errChan <- err
|
errChan <- err
|
||||||
<-semaphore
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -252,7 +308,6 @@ func (c *CloudWatch) gatherMetric(acc telegraf.Accumulator, metric *cloudwatch.M
|
|||||||
}
|
}
|
||||||
|
|
||||||
errChan <- nil
|
errChan <- nil
|
||||||
<-semaphore
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -303,3 +358,32 @@ func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric, now time.Time
|
|||||||
func (c *MetricCache) IsValid() bool {
|
func (c *MetricCache) IsValid() bool {
|
||||||
return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
|
return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func hasWilcard(dimensions []*Dimension) bool {
|
||||||
|
for _, d := range dimensions {
|
||||||
|
if d.Value == "" || d.Value == "*" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSelected(metric *cloudwatch.Metric, dimensions []*Dimension) bool {
|
||||||
|
if len(metric.Dimensions) != len(dimensions) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, d := range dimensions {
|
||||||
|
selected := false
|
||||||
|
for _, d2 := range metric.Dimensions {
|
||||||
|
if d.Name == *d2.Name {
|
||||||
|
if d.Value == "" || d.Value == "*" || d.Value == *d2.Value {
|
||||||
|
selected = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !selected {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|||||||
56
plugins/inputs/conntrack/README.md
Normal file
56
plugins/inputs/conntrack/README.md
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
# Conntrack Plugin
|
||||||
|
|
||||||
|
Collects stats from Netfilter's conntrack-tools.
|
||||||
|
|
||||||
|
The conntrack-tools provide a mechanism for tracking various aspects of
|
||||||
|
network connections as they are processed by netfilter. At runtime,
|
||||||
|
conntrack exposes many of those connection statistics within /proc/sys/net.
|
||||||
|
Depending on your kernel version, these files can be found in either
|
||||||
|
/proc/sys/net/ipv4/netfilter or /proc/sys/net/netfilter and will be
|
||||||
|
prefixed with either ip_ or nf_. This plugin reads the files specified
|
||||||
|
in its configuration and publishes each one as a field, with the prefix
|
||||||
|
normalized to ip_.
|
||||||
|
|
||||||
|
In order to simplify configuration in a heterogeneous environment, a superset
|
||||||
|
of directory and filenames can be specified. Any locations that don't exist
|
||||||
|
will be ignored.
|
||||||
|
|
||||||
|
For more information on conntrack-tools, see the
|
||||||
|
[Netfilter Documentation](http://conntrack-tools.netfilter.org/).
|
||||||
|
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Collects conntrack stats from the configured directories and files.
|
||||||
|
[[inputs.conntrack]]
|
||||||
|
## The following defaults would work with multiple versions of conntrack.
|
||||||
|
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
|
||||||
|
## kernel versions, as are the directory locations.
|
||||||
|
|
||||||
|
## Superset of filenames to look for within the conntrack dirs.
|
||||||
|
## Missing files will be ignored.
|
||||||
|
files = ["ip_conntrack_count","ip_conntrack_max",
|
||||||
|
"nf_conntrack_count","nf_conntrack_max"]
|
||||||
|
|
||||||
|
## Directories to search within for the conntrack files above.
|
||||||
|
## Missing directories will be ignored.
|
||||||
|
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Measurements & Fields:
|
||||||
|
|
||||||
|
- conntrack
|
||||||
|
- ip_conntrack_count (int, count): the number of entries in the conntrack table
|
||||||
|
- ip_conntrack_max (int, size): the max capacity of the conntrack table
|
||||||
|
|
||||||
|
### Tags:
|
||||||
|
|
||||||
|
This input does not use tags.
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ ./telegraf -config telegraf.conf -input-filter conntrack -test
|
||||||
|
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
|
||||||
|
```
|
||||||
119
plugins/inputs/conntrack/conntrack.go
Normal file
119
plugins/inputs/conntrack/conntrack.go
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
// +build linux
|
||||||
|
|
||||||
|
package conntrack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
"log"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Conntrack collects netfilter conntrack counters by reading the
// configured files out of the /proc filesystem.
type Conntrack struct {
	// Path is declared but not referenced by the visible code —
	// presumably retained for config compatibility; TODO confirm.
	Path string
	// Dirs are the directories searched for conntrack counter files.
	Dirs []string
	// Files are the counter filenames looked up inside each directory.
	Files []string
}
|
||||||
|
|
||||||
|
const (
	// inputName is the measurement name under which conntrack stats are
	// reported and the key used to register the plugin.
	inputName = "conntrack"
)
|
||||||
|
|
||||||
|
// dfltDirs lists the proc locations where conntrack counter files may
// live; which one exists depends on the kernel version, and missing
// directories are ignored at gather time.
var dfltDirs = []string{
	"/proc/sys/net/ipv4/netfilter",
	"/proc/sys/net/netfilter",
}
|
||||||
|
|
||||||
|
// dfltFiles is the superset of counter filenames across kernel versions;
// the nf_ and ip_ prefixed variants are mutually exclusive on any given
// system, and missing files are ignored at gather time.
var dfltFiles = []string{
	"ip_conntrack_count",
	"ip_conntrack_max",
	"nf_conntrack_count",
	"nf_conntrack_max",
}
|
||||||
|
|
||||||
|
func (c *Conntrack) setDefaults() {
|
||||||
|
if len(c.Dirs) == 0 {
|
||||||
|
c.Dirs = dfltDirs
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.Files) == 0 {
|
||||||
|
c.Files = dfltFiles
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Description returns a one-line summary of the conntrack input plugin.
func (c *Conntrack) Description() string {
	return "Collects conntrack stats from the configured directories and files."
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
## The following defaults would work with multiple versions of conntrack.
|
||||||
|
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
|
||||||
|
## kernel versions, as are the directory locations.
|
||||||
|
|
||||||
|
## Superset of filenames to look for within the conntrack dirs.
|
||||||
|
## Missing files will be ignored.
|
||||||
|
files = ["ip_conntrack_count","ip_conntrack_max",
|
||||||
|
"nf_conntrack_count","nf_conntrack_max"]
|
||||||
|
|
||||||
|
## Directories to search within for the conntrack files above.
|
||||||
|
## Missing directrories will be ignored.
|
||||||
|
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
|
||||||
|
`
|
||||||
|
|
||||||
|
// SampleConfig returns the example configuration for this plugin.
func (c *Conntrack) SampleConfig() string {
	return sampleConfig
}
|
||||||
|
|
||||||
|
func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
|
||||||
|
c.setDefaults()
|
||||||
|
|
||||||
|
var metricKey string
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
|
||||||
|
for _, dir := range c.Dirs {
|
||||||
|
for _, file := range c.Files {
|
||||||
|
// NOTE: no system will have both nf_ and ip_ prefixes,
|
||||||
|
// so we're safe to branch on suffix only.
|
||||||
|
parts := strings.SplitN(file, "_", 2)
|
||||||
|
if len(parts) < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
metricKey = "ip_" + parts[1]
|
||||||
|
|
||||||
|
fName := filepath.Join(dir, file)
|
||||||
|
if _, err := os.Stat(fName); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
contents, err := ioutil.ReadFile(fName)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("failed to read file '%s': %v", fName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
v := strings.TrimSpace(string(contents))
|
||||||
|
fields[metricKey], err = strconv.ParseFloat(v, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("failed to parse metric, expected number but "+
|
||||||
|
" found '%s': %v", v, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return fmt.Errorf("Conntrack input failed to collect metrics. " +
|
||||||
|
"Is the conntrack kernel module loaded?")
|
||||||
|
}
|
||||||
|
|
||||||
|
acc.AddFields(inputName, fields, nil)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add(inputName, func() telegraf.Input { return &Conntrack{} })
|
||||||
|
}
|
||||||
3
plugins/inputs/conntrack/conntrack_notlinux.go
Normal file
3
plugins/inputs/conntrack/conntrack_notlinux.go
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package conntrack
|
||||||
90
plugins/inputs/conntrack/conntrack_test.go
Normal file
90
plugins/inputs/conntrack/conntrack_test.go
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
// +build linux
|
||||||
|
|
||||||
|
package conntrack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func restoreDflts(savedFiles, savedDirs []string) {
|
||||||
|
dfltFiles = savedFiles
|
||||||
|
dfltDirs = savedDirs
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNoFilesFound(t *testing.T) {
|
||||||
|
defer restoreDflts(dfltFiles, dfltDirs)
|
||||||
|
|
||||||
|
dfltFiles = []string{"baz.txt"}
|
||||||
|
dfltDirs = []string{"./foo/bar"}
|
||||||
|
c := &Conntrack{}
|
||||||
|
acc := &testutil.Accumulator{}
|
||||||
|
err := c.Gather(acc)
|
||||||
|
|
||||||
|
assert.EqualError(t, err, "Conntrack input failed to collect metrics. "+
|
||||||
|
"Is the conntrack kernel module loaded?")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDefaultsUsed(t *testing.T) {
|
||||||
|
defer restoreDflts(dfltFiles, dfltDirs)
|
||||||
|
tmpdir, err := ioutil.TempDir("", "tmp1")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
defer os.Remove(tmpdir)
|
||||||
|
|
||||||
|
tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
dfltDirs = []string{tmpdir}
|
||||||
|
fname := path.Base(tmpFile.Name())
|
||||||
|
dfltFiles = []string{fname}
|
||||||
|
|
||||||
|
count := 1234321
|
||||||
|
ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)
|
||||||
|
c := &Conntrack{}
|
||||||
|
acc := &testutil.Accumulator{}
|
||||||
|
|
||||||
|
c.Gather(acc)
|
||||||
|
acc.AssertContainsFields(t, inputName, map[string]interface{}{
|
||||||
|
fname: float64(count)})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigsUsed(t *testing.T) {
|
||||||
|
defer restoreDflts(dfltFiles, dfltDirs)
|
||||||
|
tmpdir, err := ioutil.TempDir("", "tmp1")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
defer os.Remove(tmpdir)
|
||||||
|
|
||||||
|
cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count")
|
||||||
|
maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
dfltDirs = []string{tmpdir}
|
||||||
|
cntFname := path.Base(cntFile.Name())
|
||||||
|
maxFname := path.Base(maxFile.Name())
|
||||||
|
dfltFiles = []string{cntFname, maxFname}
|
||||||
|
|
||||||
|
count := 1234321
|
||||||
|
max := 9999999
|
||||||
|
ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)
|
||||||
|
ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)
|
||||||
|
c := &Conntrack{}
|
||||||
|
acc := &testutil.Accumulator{}
|
||||||
|
|
||||||
|
c.Gather(acc)
|
||||||
|
|
||||||
|
fix := func(s string) string {
|
||||||
|
return strings.Replace(s, "nf_", "ip_", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
acc.AssertContainsFields(t, inputName,
|
||||||
|
map[string]interface{}{
|
||||||
|
fix(cntFname): float64(count),
|
||||||
|
fix(maxFname): float64(max),
|
||||||
|
})
|
||||||
|
}
|
||||||
46
plugins/inputs/consul/README.md
Normal file
46
plugins/inputs/consul/README.md
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# Telegraf Input Plugin: Consul
|
||||||
|
|
||||||
|
This plugin will collect statistics about all health checks registered in Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
|
||||||
|
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed.
|
||||||
|
|
||||||
|
## Configuration:
|
||||||
|
|
||||||
|
```
|
||||||
|
# Gather health check statuses from services registered in Consul
|
||||||
|
[[inputs.consul]]
|
||||||
|
## Most of these values default to those configured at the Consul agent level.
|
||||||
|
## Optional Consul server address (default: "")
|
||||||
|
# address = ""
|
||||||
|
## Optional URI scheme for the Consul server (default: "")
|
||||||
|
# scheme = ""
|
||||||
|
## Optional ACL token used in every request (default: "")
|
||||||
|
# token = ""
|
||||||
|
## Optional username used for request HTTP Basic Authentication (default: "")
|
||||||
|
# username = ""
|
||||||
|
## Optional password used for HTTP Basic Authentication (default: "")
|
||||||
|
# password = ""
|
||||||
|
## Optional data centre to query the health checks from (default: "")
|
||||||
|
# datacentre = ""
|
||||||
|
```
|
||||||
|
|
||||||
|
## Measurements:
|
||||||
|
|
||||||
|
### Consul:
|
||||||
|
Tags:
|
||||||
|
- node: on which node check/service is registered on
|
||||||
|
- service_name: name of the service (this is the service name not the service ID)
|
||||||
|
|
||||||
|
Fields:
|
||||||
|
- check_id
|
||||||
|
- check_name
|
||||||
|
- service_id
|
||||||
|
- status
|
||||||
|
|
||||||
|
## Example output
|
||||||
|
|
||||||
|
```
|
||||||
|
$ telegraf --config ./telegraf.conf -input-filter consul -test
|
||||||
|
* Plugin: consul, Collection 1
|
||||||
|
> consul_health_checks,host=wolfpit,node=consul-server-node check_id="serfHealth",check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
|
||||||
|
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com check_id="service:www-example-com.test01",check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
|
||||||
|
```
|
||||||
136
plugins/inputs/consul/consul.go
Normal file
136
plugins/inputs/consul/consul.go
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
package consul
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Consul is a telegraf input that reports the status of every health
// check registered with a Consul agent.
type Consul struct {
	Address    string // server address; overrides the client default when non-empty
	Scheme     string // URI scheme; overrides the client default when non-empty
	Token      string // ACL token — declared but not applied in the visible code; TODO confirm
	Username   string // HTTP basic-auth username
	Password   string // HTTP basic-auth password; only used when Username is set
	Datacentre string // datacenter to query; overrides the client default when non-empty

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	// client used to connect to the Consul agent; lazily created by the
	// first Gather call and reused afterwards.
	client *api.Client
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
## Most of these values defaults to the one configured on a Consul's agent level.
|
||||||
|
## Optional Consul server address (default: "localhost")
|
||||||
|
# address = "localhost"
|
||||||
|
## Optional URI scheme for the Consul server (default: "http")
|
||||||
|
# scheme = "http"
|
||||||
|
## Optional ACL token used in every request (default: "")
|
||||||
|
# token = ""
|
||||||
|
## Optional username used for request HTTP Basic Authentication (default: "")
|
||||||
|
# username = ""
|
||||||
|
## Optional password used for HTTP Basic Authentication (default: "")
|
||||||
|
# password = ""
|
||||||
|
## Optional data centre to query the health checks from (default: "")
|
||||||
|
# datacentre = ""
|
||||||
|
`
|
||||||
|
|
||||||
|
// Description returns a one-line summary of the consul input plugin.
func (c *Consul) Description() string {
	return "Gather health check statuses from services registered in Consul"
}
|
||||||
|
|
||||||
|
// SampleConfig returns the example configuration for this plugin.
func (c *Consul) SampleConfig() string {
	return sampleConfig
}
|
||||||
|
|
||||||
|
// createAPIClient builds a Consul API client from the plugin settings.
// Any field left empty keeps the corresponding value supplied by
// api.DefaultConfig().
func (c *Consul) createAPIClient() (*api.Client, error) {
	config := api.DefaultConfig()

	if c.Address != "" {
		config.Address = c.Address
	}

	if c.Scheme != "" {
		config.Scheme = c.Scheme
	}

	if c.Datacentre != "" {
		config.Datacenter = c.Datacentre
	}

	if c.Username != "" {
		// NOTE(review): Password is only honored together with Username;
		// password-only configurations are silently ignored — confirm
		// this is intended. Token is never applied here either.
		config.HttpAuth = &api.HttpBasicAuth{
			Username: c.Username,
			Password: c.Password,
		}
	}

	tlsCfg, err := internal.GetTLSConfig(
		c.SSLCert, c.SSLKey, c.SSLCA, c.InsecureSkipVerify)

	if err != nil {
		return nil, err
	}

	// NOTE(review): the transport is replaced unconditionally, discarding
	// any defaults of the library's HTTP client (e.g. proxy settings)
	// even when no TLS options were configured — confirm this is intended.
	config.HttpClient.Transport = &http.Transport{
		TLSClientConfig: tlsCfg,
	}

	return api.NewClient(config)
}
|
||||||
|
|
||||||
|
func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.HealthCheck) {
|
||||||
|
for _, check := range checks {
|
||||||
|
record := make(map[string]interface{})
|
||||||
|
tags := make(map[string]string)
|
||||||
|
|
||||||
|
record["check_id"] = check.CheckID
|
||||||
|
record["check_name"] = check.Name
|
||||||
|
record["service_id"] = check.ServiceID
|
||||||
|
record["status"] = check.Status
|
||||||
|
|
||||||
|
tags["node"] = check.Node
|
||||||
|
tags["service_name"] = check.ServiceName
|
||||||
|
|
||||||
|
acc.AddFields("consul_health_checks", record, tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Consul) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if c.client == nil {
|
||||||
|
newClient, err := c.createAPIClient()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.client = newClient
|
||||||
|
}
|
||||||
|
|
||||||
|
checks, _, err := c.client.Health().State("any", nil)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.GatherHealthCheck(acc, checks)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add("consul", func() telegraf.Input {
|
||||||
|
return &Consul{}
|
||||||
|
})
|
||||||
|
}
|
||||||
42
plugins/inputs/consul/consul_test.go
Normal file
42
plugins/inputs/consul/consul_test.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package consul
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
var sampleChecks = []*api.HealthCheck{
|
||||||
|
&api.HealthCheck{
|
||||||
|
Node: "localhost",
|
||||||
|
CheckID: "foo.health123",
|
||||||
|
Name: "foo.health",
|
||||||
|
Status: "passing",
|
||||||
|
Notes: "lorem ipsum",
|
||||||
|
Output: "OK",
|
||||||
|
ServiceID: "foo.123",
|
||||||
|
ServiceName: "foo",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGatherHealtCheck(t *testing.T) {
|
||||||
|
expectedFields := map[string]interface{}{
|
||||||
|
"check_id": "foo.health123",
|
||||||
|
"check_name": "foo.health",
|
||||||
|
"status": "passing",
|
||||||
|
"service_id": "foo.123",
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedTags := map[string]string{
|
||||||
|
"node": "localhost",
|
||||||
|
"service_name": "foo",
|
||||||
|
}
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
|
consul := &Consul{}
|
||||||
|
consul.GatherHealthCheck(&acc, sampleChecks)
|
||||||
|
|
||||||
|
acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
|
||||||
|
}
|
||||||
@@ -162,7 +162,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
|
|||||||
var read int
|
var read int
|
||||||
|
|
||||||
fields := make(map[string]interface{})
|
fields := make(map[string]interface{})
|
||||||
tags := map[string]string{"host": addr.String()}
|
tags := map[string]string{"disque_host": addr.String()}
|
||||||
for read < sz {
|
for read < sz {
|
||||||
line, err := r.ReadString('\n')
|
line, err := r.ReadString('\n')
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -15,6 +15,9 @@ var servers = []string{"8.8.8.8"}
|
|||||||
var domains = []string{"google.com"}
|
var domains = []string{"google.com"}
|
||||||
|
|
||||||
func TestGathering(t *testing.T) {
|
func TestGathering(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping network-dependent test in short mode.")
|
||||||
|
}
|
||||||
var dnsConfig = DnsQuery{
|
var dnsConfig = DnsQuery{
|
||||||
Servers: servers,
|
Servers: servers,
|
||||||
Domains: domains,
|
Domains: domains,
|
||||||
@@ -31,6 +34,9 @@ func TestGathering(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGatheringMxRecord(t *testing.T) {
|
func TestGatheringMxRecord(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping network-dependent test in short mode.")
|
||||||
|
}
|
||||||
var dnsConfig = DnsQuery{
|
var dnsConfig = DnsQuery{
|
||||||
Servers: servers,
|
Servers: servers,
|
||||||
Domains: domains,
|
Domains: domains,
|
||||||
@@ -48,6 +54,9 @@ func TestGatheringMxRecord(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGatheringRootDomain(t *testing.T) {
|
func TestGatheringRootDomain(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping network-dependent test in short mode.")
|
||||||
|
}
|
||||||
var dnsConfig = DnsQuery{
|
var dnsConfig = DnsQuery{
|
||||||
Servers: servers,
|
Servers: servers,
|
||||||
Domains: []string{"."},
|
Domains: []string{"."},
|
||||||
@@ -72,6 +81,9 @@ func TestGatheringRootDomain(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
|
func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping network-dependent test in short mode.")
|
||||||
|
}
|
||||||
var dnsConfig = DnsQuery{
|
var dnsConfig = DnsQuery{
|
||||||
Servers: servers,
|
Servers: servers,
|
||||||
Domains: domains,
|
Domains: domains,
|
||||||
@@ -95,6 +107,9 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGatheringTimeout(t *testing.T) {
|
func TestGatheringTimeout(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping network-dependent test in short mode.")
|
||||||
|
}
|
||||||
var dnsConfig = DnsQuery{
|
var dnsConfig = DnsQuery{
|
||||||
Servers: servers,
|
Servers: servers,
|
||||||
Domains: domains,
|
Domains: domains,
|
||||||
|
|||||||
@@ -29,10 +29,10 @@ for the stat structure can be found
|
|||||||
Every effort was made to preserve the names based on the JSON response from the
|
Every effort was made to preserve the names based on the JSON response from the
|
||||||
docker API.
|
docker API.
|
||||||
|
|
||||||
Note that the docker_cpu metric may appear multiple times per collection, based
|
Note that the docker_container_cpu metric may appear multiple times per collection,
|
||||||
on the availability of per-cpu stats on your system.
|
based on the availability of per-cpu stats on your system.
|
||||||
|
|
||||||
- docker_mem
|
- docker_container_mem
|
||||||
- total_pgmafault
|
- total_pgmafault
|
||||||
- cache
|
- cache
|
||||||
- mapped_file
|
- mapped_file
|
||||||
@@ -66,7 +66,8 @@ on the availability of per-cpu stats on your system.
|
|||||||
- usage
|
- usage
|
||||||
- failcnt
|
- failcnt
|
||||||
- limit
|
- limit
|
||||||
- docker_cpu
|
- container_id
|
||||||
|
- docker_container_cpu
|
||||||
- throttling_periods
|
- throttling_periods
|
||||||
- throttling_throttled_periods
|
- throttling_throttled_periods
|
||||||
- throttling_throttled_time
|
- throttling_throttled_time
|
||||||
@@ -75,7 +76,8 @@ on the availability of per-cpu stats on your system.
|
|||||||
- usage_system
|
- usage_system
|
||||||
- usage_total
|
- usage_total
|
||||||
- usage_percent
|
- usage_percent
|
||||||
- docker_net
|
- container_id
|
||||||
|
- docker_container_net
|
||||||
- rx_dropped
|
- rx_dropped
|
||||||
- rx_bytes
|
- rx_bytes
|
||||||
- rx_errors
|
- rx_errors
|
||||||
@@ -84,7 +86,8 @@ on the availability of per-cpu stats on your system.
|
|||||||
- rx_packets
|
- rx_packets
|
||||||
- tx_errors
|
- tx_errors
|
||||||
- tx_bytes
|
- tx_bytes
|
||||||
- docker_blkio
|
- container_id
|
||||||
|
- docker_container_blkio
|
||||||
- io_service_bytes_recursive_async
|
- io_service_bytes_recursive_async
|
||||||
- io_service_bytes_recursive_read
|
- io_service_bytes_recursive_read
|
||||||
- io_service_bytes_recursive_sync
|
- io_service_bytes_recursive_sync
|
||||||
@@ -95,6 +98,7 @@ on the availability of per-cpu stats on your system.
|
|||||||
- io_serviced_recursive_sync
|
- io_serviced_recursive_sync
|
||||||
- io_serviced_recursive_total
|
- io_serviced_recursive_total
|
||||||
- io_serviced_recursive_write
|
- io_serviced_recursive_write
|
||||||
|
- container_id
|
||||||
- docker_
|
- docker_
|
||||||
- n_used_file_descriptors
|
- n_used_file_descriptors
|
||||||
- n_cpus
|
- n_cpus
|
||||||
@@ -125,20 +129,20 @@ on the availability of per-cpu stats on your system.
|
|||||||
- docker_metadata
|
- docker_metadata
|
||||||
- unit=bytes
|
- unit=bytes
|
||||||
|
|
||||||
- docker_cpu specific:
|
- docker_container_mem specific:
|
||||||
- cont_id (container ID)
|
- container_image
|
||||||
- cont_image (container image)
|
- container_name
|
||||||
- cont_name (container name)
|
- docker_container_cpu specific:
|
||||||
|
- container_image
|
||||||
|
- container_name
|
||||||
- cpu
|
- cpu
|
||||||
- docker_net specific:
|
- docker_container_net specific:
|
||||||
- cont_id (container ID)
|
- container_image
|
||||||
- cont_image (container image)
|
- container_name
|
||||||
- cont_name (container name)
|
|
||||||
- network
|
- network
|
||||||
- docker_blkio specific:
|
- docker_container_blkio specific:
|
||||||
- cont_id (container ID)
|
- container_image
|
||||||
- cont_image (container image)
|
- container_name
|
||||||
- cont_name (container name)
|
|
||||||
- device
|
- device
|
||||||
|
|
||||||
### Example Output:
|
### Example Output:
|
||||||
@@ -156,8 +160,8 @@ on the availability of per-cpu stats on your system.
|
|||||||
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
|
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
|
||||||
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
|
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
|
||||||
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
|
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
|
||||||
> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
> docker_container_mem,
|
||||||
cont_image=spotify/kafka,cont_name=kafka \
|
container_image=spotify/kafka,container_name=kafka \
|
||||||
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
|
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
|
||||||
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
|
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
|
||||||
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
|
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
|
||||||
@@ -168,21 +172,21 @@ total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
|
|||||||
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
|
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
|
||||||
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
|
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
|
||||||
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
|
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
|
||||||
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
> docker_container_cpu,
|
||||||
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \
|
container_image=spotify/kafka,container_name=kafka,cpu=cpu-total \
|
||||||
throttling_periods=0i,throttling_throttled_periods=0i,\
|
throttling_periods=0i,throttling_throttled_periods=0i,\
|
||||||
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
|
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
|
||||||
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
|
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
|
||||||
usage_total=6628208865i 1453409536840126713
|
usage_total=6628208865i 1453409536840126713
|
||||||
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
> docker_container_cpu,
|
||||||
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \
|
container_image=spotify/kafka,container_name=kafka,cpu=cpu0 \
|
||||||
usage_total=6628208865i 1453409536840126713
|
usage_total=6628208865i 1453409536840126713
|
||||||
> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
> docker_container_net,\
|
||||||
cont_image=spotify/kafka,cont_name=kafka,network=eth0 \
|
container_image=spotify/kafka,container_name=kafka,network=eth0 \
|
||||||
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
|
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
|
||||||
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
|
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
|
||||||
> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
> docker_container_blkio,
|
||||||
cont_image=spotify/kafka,cont_name=kafka,device=8:0 \
|
container_image=spotify/kafka,container_name=kafka,device=8:0 \
|
||||||
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
|
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
|
||||||
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
|
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
|
||||||
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
|
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
"github.com/docker/engine-api/client"
|
"github.com/docker/engine-api/client"
|
||||||
"github.com/docker/engine-api/types"
|
"github.com/docker/engine-api/types"
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -23,6 +24,7 @@ import (
|
|||||||
type Docker struct {
|
type Docker struct {
|
||||||
Endpoint string
|
Endpoint string
|
||||||
ContainerNames []string
|
ContainerNames []string
|
||||||
|
Timeout internal.Duration
|
||||||
|
|
||||||
client DockerClient
|
client DockerClient
|
||||||
}
|
}
|
||||||
@@ -54,6 +56,8 @@ var sampleConfig = `
|
|||||||
endpoint = "unix:///var/run/docker.sock"
|
endpoint = "unix:///var/run/docker.sock"
|
||||||
## Only collect metrics for these containers, collect all if empty
|
## Only collect metrics for these containers, collect all if empty
|
||||||
container_names = []
|
container_names = []
|
||||||
|
## Timeout for docker list, info, and stats commands
|
||||||
|
timeout = "5s"
|
||||||
`
|
`
|
||||||
|
|
||||||
// Description returns input description
|
// Description returns input description
|
||||||
@@ -97,7 +101,9 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
|||||||
|
|
||||||
// List containers
|
// List containers
|
||||||
opts := types.ContainerListOptions{}
|
opts := types.ContainerListOptions{}
|
||||||
containers, err := d.client.ContainerList(context.Background(), opts)
|
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||||
|
defer cancel()
|
||||||
|
containers, err := d.client.ContainerList(ctx, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -106,12 +112,12 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
|||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(len(containers))
|
wg.Add(len(containers))
|
||||||
for _, container := range containers {
|
for _, container := range containers {
|
||||||
|
|
||||||
go func(c types.Container) {
|
go func(c types.Container) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
err := d.gatherContainer(c, acc)
|
err := d.gatherContainer(c, acc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err.Error())
|
log.Printf("Error gathering container %s stats: %s\n",
|
||||||
|
c.Names, err.Error())
|
||||||
}
|
}
|
||||||
}(container)
|
}(container)
|
||||||
}
|
}
|
||||||
@@ -126,7 +132,9 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
|||||||
metadataFields := make(map[string]interface{})
|
metadataFields := make(map[string]interface{})
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
// Get info from docker daemon
|
// Get info from docker daemon
|
||||||
info, err := d.client.Info(context.Background())
|
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||||
|
defer cancel()
|
||||||
|
info, err := d.client.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -200,9 +208,8 @@ func (d *Docker) gatherContainer(
|
|||||||
}
|
}
|
||||||
|
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
"cont_id": container.ID,
|
"container_name": cname,
|
||||||
"cont_name": cname,
|
"container_image": container.Image,
|
||||||
"cont_image": container.Image,
|
|
||||||
}
|
}
|
||||||
if len(d.ContainerNames) > 0 {
|
if len(d.ContainerNames) > 0 {
|
||||||
if !sliceContains(cname, d.ContainerNames) {
|
if !sliceContains(cname, d.ContainerNames) {
|
||||||
@@ -210,22 +217,27 @@ func (d *Docker) gatherContainer(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err := d.client.ContainerStats(context.Background(), container.ID, false)
|
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||||
|
defer cancel()
|
||||||
|
r, err := d.client.ContainerStats(ctx, container.ID, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Error getting docker stats: %s\n", err.Error())
|
return fmt.Errorf("Error getting docker stats: %s", err.Error())
|
||||||
}
|
}
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
dec := json.NewDecoder(r)
|
dec := json.NewDecoder(r)
|
||||||
if err = dec.Decode(&v); err != nil {
|
if err = dec.Decode(&v); err != nil {
|
||||||
log.Printf("Error decoding: %s\n", err.Error())
|
if err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Error decoding: %s", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add labels to tags
|
// Add labels to tags
|
||||||
for k, v := range container.Labels {
|
for k, label := range container.Labels {
|
||||||
tags[k] = v
|
tags[k] = label
|
||||||
}
|
}
|
||||||
|
|
||||||
gatherContainerStats(v, acc, tags)
|
gatherContainerStats(v, acc, tags, container.ID)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -234,6 +246,7 @@ func gatherContainerStats(
|
|||||||
stat *types.StatsJSON,
|
stat *types.StatsJSON,
|
||||||
acc telegraf.Accumulator,
|
acc telegraf.Accumulator,
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
|
id string,
|
||||||
) {
|
) {
|
||||||
now := stat.Read
|
now := stat.Read
|
||||||
|
|
||||||
@@ -272,8 +285,9 @@ func gatherContainerStats(
|
|||||||
"inactive_file": stat.MemoryStats.Stats["inactive_file"],
|
"inactive_file": stat.MemoryStats.Stats["inactive_file"],
|
||||||
"total_pgpgin": stat.MemoryStats.Stats["total_pgpgin"],
|
"total_pgpgin": stat.MemoryStats.Stats["total_pgpgin"],
|
||||||
"usage_percent": calculateMemPercent(stat),
|
"usage_percent": calculateMemPercent(stat),
|
||||||
|
"container_id": id,
|
||||||
}
|
}
|
||||||
acc.AddFields("docker_mem", memfields, tags, now)
|
acc.AddFields("docker_container_mem", memfields, tags, now)
|
||||||
|
|
||||||
cpufields := map[string]interface{}{
|
cpufields := map[string]interface{}{
|
||||||
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
|
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
|
||||||
@@ -284,35 +298,41 @@ func gatherContainerStats(
|
|||||||
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
|
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
|
||||||
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
|
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
|
||||||
"usage_percent": calculateCPUPercent(stat),
|
"usage_percent": calculateCPUPercent(stat),
|
||||||
|
"container_id": id,
|
||||||
}
|
}
|
||||||
cputags := copyTags(tags)
|
cputags := copyTags(tags)
|
||||||
cputags["cpu"] = "cpu-total"
|
cputags["cpu"] = "cpu-total"
|
||||||
acc.AddFields("docker_cpu", cpufields, cputags, now)
|
acc.AddFields("docker_container_cpu", cpufields, cputags, now)
|
||||||
|
|
||||||
for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
|
for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
|
||||||
percputags := copyTags(tags)
|
percputags := copyTags(tags)
|
||||||
percputags["cpu"] = fmt.Sprintf("cpu%d", i)
|
percputags["cpu"] = fmt.Sprintf("cpu%d", i)
|
||||||
acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
|
fields := map[string]interface{}{
|
||||||
|
"usage_total": percpu,
|
||||||
|
"container_id": id,
|
||||||
|
}
|
||||||
|
acc.AddFields("docker_container_cpu", fields, percputags, now)
|
||||||
}
|
}
|
||||||
|
|
||||||
for network, netstats := range stat.Networks {
|
for network, netstats := range stat.Networks {
|
||||||
netfields := map[string]interface{}{
|
netfields := map[string]interface{}{
|
||||||
"rx_dropped": netstats.RxDropped,
|
"rx_dropped": netstats.RxDropped,
|
||||||
"rx_bytes": netstats.RxBytes,
|
"rx_bytes": netstats.RxBytes,
|
||||||
"rx_errors": netstats.RxErrors,
|
"rx_errors": netstats.RxErrors,
|
||||||
"tx_packets": netstats.TxPackets,
|
"tx_packets": netstats.TxPackets,
|
||||||
"tx_dropped": netstats.TxDropped,
|
"tx_dropped": netstats.TxDropped,
|
||||||
"rx_packets": netstats.RxPackets,
|
"rx_packets": netstats.RxPackets,
|
||||||
"tx_errors": netstats.TxErrors,
|
"tx_errors": netstats.TxErrors,
|
||||||
"tx_bytes": netstats.TxBytes,
|
"tx_bytes": netstats.TxBytes,
|
||||||
|
"container_id": id,
|
||||||
}
|
}
|
||||||
// Create a new network tag dictionary for the "network" tag
|
// Create a new network tag dictionary for the "network" tag
|
||||||
nettags := copyTags(tags)
|
nettags := copyTags(tags)
|
||||||
nettags["network"] = network
|
nettags["network"] = network
|
||||||
acc.AddFields("docker_net", netfields, nettags, now)
|
acc.AddFields("docker_container_net", netfields, nettags, now)
|
||||||
}
|
}
|
||||||
|
|
||||||
gatherBlockIOMetrics(stat, acc, tags, now)
|
gatherBlockIOMetrics(stat, acc, tags, now, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
func calculateMemPercent(stat *types.StatsJSON) float64 {
|
func calculateMemPercent(stat *types.StatsJSON) float64 {
|
||||||
@@ -340,6 +360,7 @@ func gatherBlockIOMetrics(
|
|||||||
acc telegraf.Accumulator,
|
acc telegraf.Accumulator,
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
now time.Time,
|
now time.Time,
|
||||||
|
id string,
|
||||||
) {
|
) {
|
||||||
blkioStats := stat.BlkioStats
|
blkioStats := stat.BlkioStats
|
||||||
// Make a map of devices to their block io stats
|
// Make a map of devices to their block io stats
|
||||||
@@ -404,7 +425,8 @@ func gatherBlockIOMetrics(
|
|||||||
for device, fields := range deviceStatMap {
|
for device, fields := range deviceStatMap {
|
||||||
iotags := copyTags(tags)
|
iotags := copyTags(tags)
|
||||||
iotags["device"] = device
|
iotags["device"] = device
|
||||||
acc.AddFields("docker_blkio", fields, iotags, now)
|
fields["container_id"] = id
|
||||||
|
acc.AddFields("docker_container_blkio", fields, iotags, now)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -448,6 +470,8 @@ func parseSize(sizeStr string) (int64, error) {
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
inputs.Add("docker", func() telegraf.Input {
|
inputs.Add("docker", func() telegraf.Input {
|
||||||
return &Docker{}
|
return &Docker{
|
||||||
|
Timeout: internal.Duration{Duration: time.Second * 5},
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,26 +21,26 @@ func TestDockerGatherContainerStats(t *testing.T) {
|
|||||||
stats := testStats()
|
stats := testStats()
|
||||||
|
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
"cont_id": "foobarbaz",
|
"container_name": "redis",
|
||||||
"cont_name": "redis",
|
"container_image": "redis/image",
|
||||||
"cont_image": "redis/image",
|
|
||||||
}
|
}
|
||||||
gatherContainerStats(stats, &acc, tags)
|
gatherContainerStats(stats, &acc, tags, "123456789")
|
||||||
|
|
||||||
// test docker_net measurement
|
// test docker_container_net measurement
|
||||||
netfields := map[string]interface{}{
|
netfields := map[string]interface{}{
|
||||||
"rx_dropped": uint64(1),
|
"rx_dropped": uint64(1),
|
||||||
"rx_bytes": uint64(2),
|
"rx_bytes": uint64(2),
|
||||||
"rx_errors": uint64(3),
|
"rx_errors": uint64(3),
|
||||||
"tx_packets": uint64(4),
|
"tx_packets": uint64(4),
|
||||||
"tx_dropped": uint64(1),
|
"tx_dropped": uint64(1),
|
||||||
"rx_packets": uint64(2),
|
"rx_packets": uint64(2),
|
||||||
"tx_errors": uint64(3),
|
"tx_errors": uint64(3),
|
||||||
"tx_bytes": uint64(4),
|
"tx_bytes": uint64(4),
|
||||||
|
"container_id": "123456789",
|
||||||
}
|
}
|
||||||
nettags := copyTags(tags)
|
nettags := copyTags(tags)
|
||||||
nettags["network"] = "eth0"
|
nettags["network"] = "eth0"
|
||||||
acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags)
|
acc.AssertContainsTaggedFields(t, "docker_container_net", netfields, nettags)
|
||||||
|
|
||||||
// test docker_blkio measurement
|
// test docker_blkio measurement
|
||||||
blkiotags := copyTags(tags)
|
blkiotags := copyTags(tags)
|
||||||
@@ -48,10 +48,11 @@ func TestDockerGatherContainerStats(t *testing.T) {
|
|||||||
blkiofields := map[string]interface{}{
|
blkiofields := map[string]interface{}{
|
||||||
"io_service_bytes_recursive_read": uint64(100),
|
"io_service_bytes_recursive_read": uint64(100),
|
||||||
"io_serviced_recursive_write": uint64(101),
|
"io_serviced_recursive_write": uint64(101),
|
||||||
|
"container_id": "123456789",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags)
|
acc.AssertContainsTaggedFields(t, "docker_container_blkio", blkiofields, blkiotags)
|
||||||
|
|
||||||
// test docker_mem measurement
|
// test docker_container_mem measurement
|
||||||
memfields := map[string]interface{}{
|
memfields := map[string]interface{}{
|
||||||
"max_usage": uint64(1001),
|
"max_usage": uint64(1001),
|
||||||
"usage": uint64(1111),
|
"usage": uint64(1111),
|
||||||
@@ -87,11 +88,12 @@ func TestDockerGatherContainerStats(t *testing.T) {
|
|||||||
"inactive_file": uint64(3),
|
"inactive_file": uint64(3),
|
||||||
"total_pgpgin": uint64(4),
|
"total_pgpgin": uint64(4),
|
||||||
"usage_percent": float64(55.55),
|
"usage_percent": float64(55.55),
|
||||||
|
"container_id": "123456789",
|
||||||
}
|
}
|
||||||
|
|
||||||
acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)
|
acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags)
|
||||||
|
|
||||||
// test docker_cpu measurement
|
// test docker_container_cpu measurement
|
||||||
cputags := copyTags(tags)
|
cputags := copyTags(tags)
|
||||||
cputags["cpu"] = "cpu-total"
|
cputags["cpu"] = "cpu-total"
|
||||||
cpufields := map[string]interface{}{
|
cpufields := map[string]interface{}{
|
||||||
@@ -103,20 +105,23 @@ func TestDockerGatherContainerStats(t *testing.T) {
|
|||||||
"throttling_throttled_periods": uint64(0),
|
"throttling_throttled_periods": uint64(0),
|
||||||
"throttling_throttled_time": uint64(0),
|
"throttling_throttled_time": uint64(0),
|
||||||
"usage_percent": float64(400.0),
|
"usage_percent": float64(400.0),
|
||||||
|
"container_id": "123456789",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)
|
acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpufields, cputags)
|
||||||
|
|
||||||
cputags["cpu"] = "cpu0"
|
cputags["cpu"] = "cpu0"
|
||||||
cpu0fields := map[string]interface{}{
|
cpu0fields := map[string]interface{}{
|
||||||
"usage_total": uint64(1),
|
"usage_total": uint64(1),
|
||||||
|
"container_id": "123456789",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags)
|
acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu0fields, cputags)
|
||||||
|
|
||||||
cputags["cpu"] = "cpu1"
|
cputags["cpu"] = "cpu1"
|
||||||
cpu1fields := map[string]interface{}{
|
cpu1fields := map[string]interface{}{
|
||||||
"usage_total": uint64(1002),
|
"usage_total": uint64(1002),
|
||||||
|
"container_id": "123456789",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
|
acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu1fields, cputags)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testStats() *types.StatsJSON {
|
func testStats() *types.StatsJSON {
|
||||||
@@ -367,19 +372,19 @@ func TestDockerGatherInfo(t *testing.T) {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
acc.AssertContainsTaggedFields(t,
|
acc.AssertContainsTaggedFields(t,
|
||||||
"docker_cpu",
|
"docker_container_cpu",
|
||||||
map[string]interface{}{
|
map[string]interface{}{
|
||||||
"usage_total": uint64(1231652),
|
"usage_total": uint64(1231652),
|
||||||
|
"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||||
},
|
},
|
||||||
map[string]string{
|
map[string]string{
|
||||||
"cont_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
"container_name": "etcd2",
|
||||||
"cont_name": "etcd2",
|
"container_image": "quay.io/coreos/etcd:v2.2.2",
|
||||||
"cont_image": "quay.io/coreos/etcd:v2.2.2",
|
"cpu": "cpu3",
|
||||||
"cpu": "cpu3",
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
acc.AssertContainsTaggedFields(t,
|
acc.AssertContainsTaggedFields(t,
|
||||||
"docker_mem",
|
"docker_container_mem",
|
||||||
map[string]interface{}{
|
map[string]interface{}{
|
||||||
"total_pgpgout": uint64(0),
|
"total_pgpgout": uint64(0),
|
||||||
"usage_percent": float64(0),
|
"usage_percent": float64(0),
|
||||||
@@ -415,11 +420,11 @@ func TestDockerGatherInfo(t *testing.T) {
|
|||||||
"pgfault": uint64(0),
|
"pgfault": uint64(0),
|
||||||
"usage": uint64(0),
|
"usage": uint64(0),
|
||||||
"limit": uint64(18935443456),
|
"limit": uint64(18935443456),
|
||||||
|
"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||||
},
|
},
|
||||||
map[string]string{
|
map[string]string{
|
||||||
"cont_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
"container_name": "etcd2",
|
||||||
"cont_name": "etcd2",
|
"container_image": "quay.io/coreos/etcd:v2.2.2",
|
||||||
"cont_image": "quay.io/coreos/etcd:v2.2.2",
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -1,320 +1,307 @@
|
|||||||
# Elasticsearch plugin
|
# Elasticsearch input plugin
|
||||||
|
|
||||||
#### Plugin arguments:
|
|
||||||
- **servers** []string: list of one or more Elasticsearch servers
|
|
||||||
- **local** boolean: If false, it will read the indices stats from all nodes
|
|
||||||
- **cluster_health** boolean: If true, it will also obtain cluster level stats
|
|
||||||
|
|
||||||
#### Description
|
|
||||||
|
|
||||||
The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
|
The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
|
||||||
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
|
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
|
||||||
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.
|
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.
|
||||||
|
|
||||||
Example:
|
### Configuration:
|
||||||
|
|
||||||
```
|
```
|
||||||
[elasticsearch]
|
[[inputs.elasticsearch]]
|
||||||
|
servers = ["http://localhost:9200"]
|
||||||
servers = ["http://localhost:9200"]
|
local = true
|
||||||
|
cluster_health = true
|
||||||
local = true
|
|
||||||
|
|
||||||
cluster_health = true
|
|
||||||
```
|
```
|
||||||
|
|
||||||
# Measurements
|
### Measurements & Fields:
|
||||||
#### cluster measurements (utilizes fields instead of single values):
|
|
||||||
|
|
||||||
contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`,
|
|
||||||
`active_primary_shards`, `active_shards`, `relocating_shards`,
|
|
||||||
`initializing_shards`, `unassigned_shards` fields
|
|
||||||
- elasticsearch_cluster_health
|
|
||||||
|
|
||||||
contains `status`, `number_of_shards`, `number_of_replicas`,
|
|
||||||
`active_primary_shards`, `active_shards`, `relocating_shards`,
|
|
||||||
`initializing_shards`, `unassigned_shards` fields
|
|
||||||
- elasticsearch_indices
|
|
||||||
|
|
||||||
#### node measurements:
|
|
||||||
|
|
||||||
field data circuit breaker measurement names:
|
field data circuit breaker measurement names:
|
||||||
- elasticsearch_breakers_fielddata_estimated_size_in_bytes value=0
|
- elasticsearch_breakers
|
||||||
- elasticsearch_breakers_fielddata_overhead value=1.03
|
- fielddata_estimated_size_in_bytes value=0
|
||||||
- elasticsearch_breakers_fielddata_tripped value=0
|
- fielddata_overhead value=1.03
|
||||||
- elasticsearch_breakers_fielddata_limit_size_in_bytes value=623326003
|
- fielddata_tripped value=0
|
||||||
- elasticsearch_breakers_request_estimated_size_in_bytes value=0
|
- fielddata_limit_size_in_bytes value=623326003
|
||||||
- elasticsearch_breakers_request_overhead value=1.0
|
- request_estimated_size_in_bytes value=0
|
||||||
- elasticsearch_breakers_request_tripped value=0
|
- request_overhead value=1.0
|
||||||
- elasticsearch_breakers_request_limit_size_in_bytes value=415550668
|
- request_tripped value=0
|
||||||
- elasticsearch_breakers_parent_overhead value=1.0
|
- request_limit_size_in_bytes value=415550668
|
||||||
- elasticsearch_breakers_parent_tripped value=0
|
- parent_overhead value=1.0
|
||||||
- elasticsearch_breakers_parent_limit_size_in_bytes value=727213670
|
- parent_tripped value=0
|
||||||
- elasticsearch_breakers_parent_estimated_size_in_bytes value=0
|
- parent_limit_size_in_bytes value=727213670
|
||||||
|
- parent_estimated_size_in_bytes value=0
|
||||||
|
|
||||||
File system information, data path, free disk space, read/write measurement names:
|
File system information, data path, free disk space, read/write measurement names:
|
||||||
- elasticsearch_fs_timestamp value=1436460392946
|
- elasticsearch_fs
|
||||||
- elasticsearch_fs_total_free_in_bytes value=16909316096
|
- timestamp value=1436460392946
|
||||||
- elasticsearch_fs_total_available_in_bytes value=15894814720
|
- total_free_in_bytes value=16909316096
|
||||||
- elasticsearch_fs_total_total_in_bytes value=19507089408
|
- total_available_in_bytes value=15894814720
|
||||||
|
- total_total_in_bytes value=19507089408
|
||||||
|
|
||||||
indices size, document count, indexing and deletion times, search times,
|
indices size, document count, indexing and deletion times, search times,
|
||||||
field cache size, merges and flushes measurement names:
|
field cache size, merges and flushes measurement names:
|
||||||
- elasticsearch_indices_id_cache_memory_size_in_bytes value=0
|
- elasticsearch_indices
|
||||||
- elasticsearch_indices_completion_size_in_bytes value=0
|
- id_cache_memory_size_in_bytes value=0
|
||||||
- elasticsearch_indices_suggest_total value=0
|
- completion_size_in_bytes value=0
|
||||||
- elasticsearch_indices_suggest_time_in_millis value=0
|
- suggest_total value=0
|
||||||
- elasticsearch_indices_suggest_current value=0
|
- suggest_time_in_millis value=0
|
||||||
- elasticsearch_indices_query_cache_memory_size_in_bytes value=0
|
- suggest_current value=0
|
||||||
- elasticsearch_indices_query_cache_evictions value=0
|
- query_cache_memory_size_in_bytes value=0
|
||||||
- elasticsearch_indices_query_cache_hit_count value=0
|
- query_cache_evictions value=0
|
||||||
- elasticsearch_indices_query_cache_miss_count value=0
|
- query_cache_hit_count value=0
|
||||||
- elasticsearch_indices_store_size_in_bytes value=37715234
|
- query_cache_miss_count value=0
|
||||||
- elasticsearch_indices_store_throttle_time_in_millis value=215
|
- store_size_in_bytes value=37715234
|
||||||
- elasticsearch_indices_merges_current_docs value=0
|
- store_throttle_time_in_millis value=215
|
||||||
- elasticsearch_indices_merges_current_size_in_bytes value=0
|
- merges_current_docs value=0
|
||||||
- elasticsearch_indices_merges_total value=133
|
- merges_current_size_in_bytes value=0
|
||||||
- elasticsearch_indices_merges_total_time_in_millis value=21060
|
- merges_total value=133
|
||||||
- elasticsearch_indices_merges_total_docs value=203672
|
- merges_total_time_in_millis value=21060
|
||||||
- elasticsearch_indices_merges_total_size_in_bytes value=142900226
|
- merges_total_docs value=203672
|
||||||
- elasticsearch_indices_merges_current value=0
|
- merges_total_size_in_bytes value=142900226
|
||||||
- elasticsearch_indices_filter_cache_memory_size_in_bytes value=7384
|
- merges_current value=0
|
||||||
- elasticsearch_indices_filter_cache_evictions value=0
|
- filter_cache_memory_size_in_bytes value=7384
|
||||||
- elasticsearch_indices_indexing_index_total value=84790
|
- filter_cache_evictions value=0
|
||||||
- elasticsearch_indices_indexing_index_time_in_millis value=29680
|
- indexing_index_total value=84790
|
||||||
- elasticsearch_indices_indexing_index_current value=0
|
- indexing_index_time_in_millis value=29680
|
||||||
- elasticsearch_indices_indexing_noop_update_total value=0
|
- indexing_index_current value=0
|
||||||
- elasticsearch_indices_indexing_throttle_time_in_millis value=0
|
- indexing_noop_update_total value=0
|
||||||
- elasticsearch_indices_indexing_delete_tota value=13879
|
- indexing_throttle_time_in_millis value=0
|
||||||
- elasticsearch_indices_indexing_delete_time_in_millis value=1139
|
- indexing_delete_tota value=13879
|
||||||
- elasticsearch_indices_indexing_delete_current value=0
|
- indexing_delete_time_in_millis value=1139
|
||||||
- elasticsearch_indices_get_exists_time_in_millis value=0
|
- indexing_delete_current value=0
|
||||||
- elasticsearch_indices_get_missing_total value=1
|
- get_exists_time_in_millis value=0
|
||||||
- elasticsearch_indices_get_missing_time_in_millis value=2
|
- get_missing_total value=1
|
||||||
- elasticsearch_indices_get_current value=0
|
- get_missing_time_in_millis value=2
|
||||||
- elasticsearch_indices_get_total value=1
|
- get_current value=0
|
||||||
- elasticsearch_indices_get_time_in_millis value=2
|
- get_total value=1
|
||||||
- elasticsearch_indices_get_exists_total value=0
|
- get_time_in_millis value=2
|
||||||
- elasticsearch_indices_refresh_total value=1076
|
- get_exists_total value=0
|
||||||
- elasticsearch_indices_refresh_total_time_in_millis value=20078
|
- refresh_total value=1076
|
||||||
- elasticsearch_indices_percolate_current value=0
|
- refresh_total_time_in_millis value=20078
|
||||||
- elasticsearch_indices_percolate_memory_size_in_bytes value=-1
|
- percolate_current value=0
|
||||||
- elasticsearch_indices_percolate_queries value=0
|
- percolate_memory_size_in_bytes value=-1
|
||||||
- elasticsearch_indices_percolate_total value=0
|
- percolate_queries value=0
|
||||||
- elasticsearch_indices_percolate_time_in_millis value=0
|
- percolate_total value=0
|
||||||
- elasticsearch_indices_translog_operations value=17702
|
- percolate_time_in_millis value=0
|
||||||
- elasticsearch_indices_translog_size_in_bytes value=17
|
- translog_operations value=17702
|
||||||
- elasticsearch_indices_recovery_current_as_source value=0
|
- translog_size_in_bytes value=17
|
||||||
- elasticsearch_indices_recovery_current_as_target value=0
|
- recovery_current_as_source value=0
|
||||||
- elasticsearch_indices_recovery_throttle_time_in_millis value=0
|
- recovery_current_as_target value=0
|
||||||
- elasticsearch_indices_docs_count value=29652
|
- recovery_throttle_time_in_millis value=0
|
||||||
- elasticsearch_indices_docs_deleted value=5229
|
- docs_count value=29652
|
||||||
- elasticsearch_indices_flush_total_time_in_millis value=2401
|
- docs_deleted value=5229
|
||||||
- elasticsearch_indices_flush_total value=115
|
- flush_total_time_in_millis value=2401
|
||||||
- elasticsearch_indices_fielddata_memory_size_in_bytes value=12996
|
- flush_total value=115
|
||||||
- elasticsearch_indices_fielddata_evictions value=0
|
- fielddata_memory_size_in_bytes value=12996
|
||||||
- elasticsearch_indices_search_fetch_current value=0
|
- fielddata_evictions value=0
|
||||||
- elasticsearch_indices_search_open_contexts value=0
|
- search_fetch_current value=0
|
||||||
- elasticsearch_indices_search_query_total value=1452
|
- search_open_contexts value=0
|
||||||
- elasticsearch_indices_search_query_time_in_millis value=5695
|
- search_query_total value=1452
|
||||||
- elasticsearch_indices_search_query_current value=0
|
- search_query_time_in_millis value=5695
|
||||||
- elasticsearch_indices_search_fetch_total value=414
|
- search_query_current value=0
|
||||||
- elasticsearch_indices_search_fetch_time_in_millis value=146
|
- search_fetch_total value=414
|
||||||
- elasticsearch_indices_warmer_current value=0
|
- search_fetch_time_in_millis value=146
|
||||||
- elasticsearch_indices_warmer_total value=2319
|
- warmer_current value=0
|
||||||
- elasticsearch_indices_warmer_total_time_in_millis value=448
|
- warmer_total value=2319
|
||||||
- elasticsearch_indices_segments_count value=134
|
- warmer_total_time_in_millis value=448
|
||||||
- elasticsearch_indices_segments_memory_in_bytes value=1285212
|
- segments_count value=134
|
||||||
- elasticsearch_indices_segments_index_writer_memory_in_bytes value=0
|
- segments_memory_in_bytes value=1285212
|
||||||
- elasticsearch_indices_segments_index_writer_max_memory_in_bytes value=172368955
|
- segments_index_writer_memory_in_bytes value=0
|
||||||
- elasticsearch_indices_segments_version_map_memory_in_bytes value=611844
|
- segments_index_writer_max_memory_in_bytes value=172368955
|
||||||
- elasticsearch_indices_segments_fixed_bit_set_memory_in_bytes value=0
|
- segments_version_map_memory_in_bytes value=611844
|
||||||
|
- segments_fixed_bit_set_memory_in_bytes value=0
|
||||||
|
|
||||||
HTTP connection measurement names:
|
HTTP connection measurement names:
|
||||||
- elasticsearch_http_current_open value=3
|
- elasticsearch_http
|
||||||
- elasticsearch_http_total_opened value=3
|
- current_open value=3
|
||||||
|
- total_opened value=3
|
||||||
|
|
||||||
JVM stats, memory pool information, garbage collection, buffer pools measurement names:
|
JVM stats, memory pool information, garbage collection, buffer pools measurement names:
|
||||||
- elasticsearch_jvm_timestamp value=1436460392945
|
- elasticsearch_jvm
|
||||||
- elasticsearch_jvm_uptime_in_millis value=202245
|
- timestamp value=1436460392945
|
||||||
- elasticsearch_jvm_mem_non_heap_used_in_bytes value=39634576
|
- uptime_in_millis value=202245
|
||||||
- elasticsearch_jvm_mem_non_heap_committed_in_bytes value=40841216
|
- mem_non_heap_used_in_bytes value=39634576
|
||||||
- elasticsearch_jvm_mem_pools_young_max_in_bytes value=279183360
|
- mem_non_heap_committed_in_bytes value=40841216
|
||||||
- elasticsearch_jvm_mem_pools_young_peak_used_in_bytes value=71630848
|
- mem_pools_young_max_in_bytes value=279183360
|
||||||
- elasticsearch_jvm_mem_pools_young_peak_max_in_bytes value=279183360
|
- mem_pools_young_peak_used_in_bytes value=71630848
|
||||||
- elasticsearch_jvm_mem_pools_young_used_in_bytes value=32685760
|
- mem_pools_young_peak_max_in_bytes value=279183360
|
||||||
- elasticsearch_jvm_mem_pools_survivor_peak_used_in_bytes value=8912888
|
- mem_pools_young_used_in_bytes value=32685760
|
||||||
- elasticsearch_jvm_mem_pools_survivor_peak_max_in_bytes value=34865152
|
- mem_pools_survivor_peak_used_in_bytes value=8912888
|
||||||
- elasticsearch_jvm_mem_pools_survivor_used_in_bytes value=8912880
|
- mem_pools_survivor_peak_max_in_bytes value=34865152
|
||||||
- elasticsearch_jvm_mem_pools_survivor_max_in_bytes value=34865152
|
- mem_pools_survivor_used_in_bytes value=8912880
|
||||||
- elasticsearch_jvm_mem_pools_old_peak_max_in_bytes value=724828160
|
- mem_pools_survivor_max_in_bytes value=34865152
|
||||||
- elasticsearch_jvm_mem_pools_old_used_in_bytes value=11110928
|
- mem_pools_old_peak_max_in_bytes value=724828160
|
||||||
- elasticsearch_jvm_mem_pools_old_max_in_bytes value=724828160
|
- mem_pools_old_used_in_bytes value=11110928
|
||||||
- elasticsearch_jvm_mem_pools_old_peak_used_in_bytes value=14354608
|
- mem_pools_old_max_in_bytes value=724828160
|
||||||
- elasticsearch_jvm_mem_heap_used_in_bytes value=52709568
|
- mem_pools_old_peak_used_in_bytes value=14354608
|
||||||
- elasticsearch_jvm_mem_heap_used_percent value=5
|
- mem_heap_used_in_bytes value=52709568
|
||||||
- elasticsearch_jvm_mem_heap_committed_in_bytes value=259522560
|
- mem_heap_used_percent value=5
|
||||||
- elasticsearch_jvm_mem_heap_max_in_bytes value=1038876672
|
- mem_heap_committed_in_bytes value=259522560
|
||||||
- elasticsearch_jvm_threads_peak_count value=45
|
- mem_heap_max_in_bytes value=1038876672
|
||||||
- elasticsearch_jvm_threads_count value=44
|
- threads_peak_count value=45
|
||||||
- elasticsearch_jvm_gc_collectors_young_collection_count value=2
|
- threads_count value=44
|
||||||
- elasticsearch_jvm_gc_collectors_young_collection_time_in_millis value=98
|
- gc_collectors_young_collection_count value=2
|
||||||
- elasticsearch_jvm_gc_collectors_old_collection_count value=1
|
- gc_collectors_young_collection_time_in_millis value=98
|
||||||
- elasticsearch_jvm_gc_collectors_old_collection_time_in_millis value=24
|
- gc_collectors_old_collection_count value=1
|
||||||
- elasticsearch_jvm_buffer_pools_direct_count value=40
|
- gc_collectors_old_collection_time_in_millis value=24
|
||||||
- elasticsearch_jvm_buffer_pools_direct_used_in_bytes value=6304239
|
- buffer_pools_direct_count value=40
|
||||||
- elasticsearch_jvm_buffer_pools_direct_total_capacity_in_bytes value=6304239
|
- buffer_pools_direct_used_in_bytes value=6304239
|
||||||
- elasticsearch_jvm_buffer_pools_mapped_count value=0
|
- buffer_pools_direct_total_capacity_in_bytes value=6304239
|
||||||
- elasticsearch_jvm_buffer_pools_mapped_used_in_bytes value=0
|
- buffer_pools_mapped_count value=0
|
||||||
- elasticsearch_jvm_buffer_pools_mapped_total_capacity_in_bytes value=0
|
- buffer_pools_mapped_used_in_bytes value=0
|
||||||
|
- buffer_pools_mapped_total_capacity_in_bytes value=0
|
||||||
|
|
||||||
TCP information measurement names:
|
TCP information measurement names:
|
||||||
- elasticsearch_network_tcp_in_errs value=0
|
- elasticsearch_network
|
||||||
- elasticsearch_network_tcp_passive_opens value=16
|
- tcp_in_errs value=0
|
||||||
- elasticsearch_network_tcp_curr_estab value=29
|
- tcp_passive_opens value=16
|
||||||
- elasticsearch_network_tcp_in_segs value=113
|
- tcp_curr_estab value=29
|
||||||
- elasticsearch_network_tcp_out_segs value=97
|
- tcp_in_segs value=113
|
||||||
- elasticsearch_network_tcp_retrans_segs value=0
|
- tcp_out_segs value=97
|
||||||
- elasticsearch_network_tcp_attempt_fails value=0
|
- tcp_retrans_segs value=0
|
||||||
- elasticsearch_network_tcp_active_opens value=13
|
- tcp_attempt_fails value=0
|
||||||
- elasticsearch_network_tcp_estab_resets value=0
|
- tcp_active_opens value=13
|
||||||
- elasticsearch_network_tcp_out_rsts value=0
|
- tcp_estab_resets value=0
|
||||||
|
- tcp_out_rsts value=0
|
||||||
|
|
||||||
Operating system stats, load average, cpu, mem, swap measurement names:
|
Operating system stats, load average, cpu, mem, swap measurement names:
|
||||||
- elasticsearch_os_swap_used_in_bytes value=0
|
- elasticsearch_os
|
||||||
- elasticsearch_os_swap_free_in_bytes value=487997440
|
- swap_used_in_bytes value=0
|
||||||
- elasticsearch_os_timestamp value=1436460392944
|
- swap_free_in_bytes value=487997440
|
||||||
- elasticsearch_os_uptime_in_millis value=25092
|
- timestamp value=1436460392944
|
||||||
- elasticsearch_os_cpu_sys value=0
|
- uptime_in_millis value=25092
|
||||||
- elasticsearch_os_cpu_user value=0
|
- cpu_sys value=0
|
||||||
- elasticsearch_os_cpu_idle value=99
|
- cpu_user value=0
|
||||||
- elasticsearch_os_cpu_usage value=0
|
- cpu_idle value=99
|
||||||
- elasticsearch_os_cpu_stolen value=0
|
- cpu_usage value=0
|
||||||
- elasticsearch_os_mem_free_percent value=74
|
- cpu_stolen value=0
|
||||||
- elasticsearch_os_mem_used_percent value=25
|
- mem_free_percent value=74
|
||||||
- elasticsearch_os_mem_actual_free_in_bytes value=1565470720
|
- mem_used_percent value=25
|
||||||
- elasticsearch_os_mem_actual_used_in_bytes value=534159360
|
- mem_actual_free_in_bytes value=1565470720
|
||||||
- elasticsearch_os_mem_free_in_bytes value=477761536
|
- mem_actual_used_in_bytes value=534159360
|
||||||
- elasticsearch_os_mem_used_in_bytes value=1621868544
|
- mem_free_in_bytes value=477761536
|
||||||
|
- mem_used_in_bytes value=1621868544
|
||||||
|
|
||||||
Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
|
Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
|
||||||
- elasticsearch_process_mem_resident_in_bytes value=246382592
|
- elasticsearch_process
|
||||||
- elasticsearch_process_mem_share_in_bytes value=18747392
|
- mem_resident_in_bytes value=246382592
|
||||||
- elasticsearch_process_mem_total_virtual_in_bytes value=4747890688
|
- mem_share_in_bytes value=18747392
|
||||||
- elasticsearch_process_timestamp value=1436460392945
|
- mem_total_virtual_in_bytes value=4747890688
|
||||||
- elasticsearch_process_open_file_descriptors value=160
|
- timestamp value=1436460392945
|
||||||
- elasticsearch_process_cpu_total_in_millis value=15480
|
- open_file_descriptors value=160
|
||||||
- elasticsearch_process_cpu_percent value=2
|
- cpu_total_in_millis value=15480
|
||||||
- elasticsearch_process_cpu_sys_in_millis value=1870
|
- cpu_percent value=2
|
||||||
- elasticsearch_process_cpu_user_in_millis value=13610
|
- cpu_sys_in_millis value=1870
|
||||||
|
- cpu_user_in_millis value=13610
|
||||||
|
|
||||||
Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
|
Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
|
||||||
- elasticsearch_thread_pool_merge_threads value=6
|
- elasticsearch_thread_pool
|
||||||
- elasticsearch_thread_pool_merge_queue value=4
|
- merge_threads value=6
|
||||||
- elasticsearch_thread_pool_merge_active value=5
|
- merge_queue value=4
|
||||||
- elasticsearch_thread_pool_merge_rejected value=2
|
- merge_active value=5
|
||||||
- elasticsearch_thread_pool_merge_largest value=5
|
- merge_rejected value=2
|
||||||
- elasticsearch_thread_pool_merge_completed value=1
|
- merge_largest value=5
|
||||||
- elasticsearch_thread_pool_bulk_threads value=4
|
- merge_completed value=1
|
||||||
- elasticsearch_thread_pool_bulk_queue value=5
|
- bulk_threads value=4
|
||||||
- elasticsearch_thread_pool_bulk_active value=7
|
- bulk_queue value=5
|
||||||
- elasticsearch_thread_pool_bulk_rejected value=3
|
- bulk_active value=7
|
||||||
- elasticsearch_thread_pool_bulk_largest value=1
|
- bulk_rejected value=3
|
||||||
- elasticsearch_thread_pool_bulk_completed value=4
|
- bulk_largest value=1
|
||||||
- elasticsearch_thread_pool_warmer_threads value=2
|
- bulk_completed value=4
|
||||||
- elasticsearch_thread_pool_warmer_queue value=7
|
- warmer_threads value=2
|
||||||
- elasticsearch_thread_pool_warmer_active value=3
|
- warmer_queue value=7
|
||||||
- elasticsearch_thread_pool_warmer_rejected value=2
|
- warmer_active value=3
|
||||||
- elasticsearch_thread_pool_warmer_largest value=3
|
- warmer_rejected value=2
|
||||||
- elasticsearch_thread_pool_warmer_completed value=1
|
- warmer_largest value=3
|
||||||
- elasticsearch_thread_pool_get_largest value=2
|
- warmer_completed value=1
|
||||||
- elasticsearch_thread_pool_get_completed value=1
|
- get_largest value=2
|
||||||
- elasticsearch_thread_pool_get_threads value=1
|
- get_completed value=1
|
||||||
- elasticsearch_thread_pool_get_queue value=8
|
- get_threads value=1
|
||||||
- elasticsearch_thread_pool_get_active value=4
|
- get_queue value=8
|
||||||
- elasticsearch_thread_pool_get_rejected value=3
|
- get_active value=4
|
||||||
- elasticsearch_thread_pool_index_threads value=6
|
- get_rejected value=3
|
||||||
- elasticsearch_thread_pool_index_queue value=8
|
- index_threads value=6
|
||||||
- elasticsearch_thread_pool_index_active value=4
|
- index_queue value=8
|
||||||
- elasticsearch_thread_pool_index_rejected value=2
|
- index_active value=4
|
||||||
- elasticsearch_thread_pool_index_largest value=3
|
- index_rejected value=2
|
||||||
- elasticsearch_thread_pool_index_completed value=6
|
- index_largest value=3
|
||||||
- elasticsearch_thread_pool_suggest_threads value=2
|
- index_completed value=6
|
||||||
- elasticsearch_thread_pool_suggest_queue value=7
|
- suggest_threads value=2
|
||||||
- elasticsearch_thread_pool_suggest_active value=2
|
- suggest_queue value=7
|
||||||
- elasticsearch_thread_pool_suggest_rejected value=1
|
- suggest_active value=2
|
||||||
- elasticsearch_thread_pool_suggest_largest value=8
|
- suggest_rejected value=1
|
||||||
- elasticsearch_thread_pool_suggest_completed value=3
|
- suggest_largest value=8
|
||||||
- elasticsearch_thread_pool_fetch_shard_store_queue value=7
|
- suggest_completed value=3
|
||||||
- elasticsearch_thread_pool_fetch_shard_store_active value=4
|
- fetch_shard_store_queue value=7
|
||||||
- elasticsearch_thread_pool_fetch_shard_store_rejected value=2
|
- fetch_shard_store_active value=4
|
||||||
- elasticsearch_thread_pool_fetch_shard_store_largest value=4
|
- fetch_shard_store_rejected value=2
|
||||||
- elasticsearch_thread_pool_fetch_shard_store_completed value=1
|
- fetch_shard_store_largest value=4
|
||||||
- elasticsearch_thread_pool_fetch_shard_store_threads value=1
|
- fetch_shard_store_completed value=1
|
||||||
- elasticsearch_thread_pool_management_threads value=2
|
- fetch_shard_store_threads value=1
|
||||||
- elasticsearch_thread_pool_management_queue value=3
|
- management_threads value=2
|
||||||
- elasticsearch_thread_pool_management_active value=1
|
- management_queue value=3
|
||||||
- elasticsearch_thread_pool_management_rejected value=6
|
- management_active value=1
|
||||||
- elasticsearch_thread_pool_management_largest value=2
|
- management_rejected value=6
|
||||||
- elasticsearch_thread_pool_management_completed value=22
|
- management_largest value=2
|
||||||
- elasticsearch_thread_pool_percolate_queue value=23
|
- management_completed value=22
|
||||||
- elasticsearch_thread_pool_percolate_active value=13
|
- percolate_queue value=23
|
||||||
- elasticsearch_thread_pool_percolate_rejected value=235
|
- percolate_active value=13
|
||||||
- elasticsearch_thread_pool_percolate_largest value=23
|
- percolate_rejected value=235
|
||||||
- elasticsearch_thread_pool_percolate_completed value=33
|
- percolate_largest value=23
|
||||||
- elasticsearch_thread_pool_percolate_threads value=123
|
- percolate_completed value=33
|
||||||
- elasticsearch_thread_pool_listener_active value=4
|
- percolate_threads value=123
|
||||||
- elasticsearch_thread_pool_listener_rejected value=8
|
- listener_active value=4
|
||||||
- elasticsearch_thread_pool_listener_largest value=1
|
- listener_rejected value=8
|
||||||
- elasticsearch_thread_pool_listener_completed value=1
|
- listener_largest value=1
|
||||||
- elasticsearch_thread_pool_listener_threads value=1
|
- listener_completed value=1
|
||||||
- elasticsearch_thread_pool_listener_queue value=2
|
- listener_threads value=1
|
||||||
- elasticsearch_thread_pool_search_rejected value=7
|
- listener_queue value=2
|
||||||
- elasticsearch_thread_pool_search_largest value=2
|
- search_rejected value=7
|
||||||
- elasticsearch_thread_pool_search_completed value=4
|
- search_largest value=2
|
||||||
- elasticsearch_thread_pool_search_threads value=5
|
- search_completed value=4
|
||||||
- elasticsearch_thread_pool_search_queue value=7
|
- search_threads value=5
|
||||||
- elasticsearch_thread_pool_search_active value=2
|
- search_queue value=7
|
||||||
- elasticsearch_thread_pool_fetch_shard_started_threads value=3
|
- search_active value=2
|
||||||
- elasticsearch_thread_pool_fetch_shard_started_queue value=1
|
- fetch_shard_started_threads value=3
|
||||||
- elasticsearch_thread_pool_fetch_shard_started_active value=5
|
- fetch_shard_started_queue value=1
|
||||||
- elasticsearch_thread_pool_fetch_shard_started_rejected value=6
|
- fetch_shard_started_active value=5
|
||||||
- elasticsearch_thread_pool_fetch_shard_started_largest value=4
|
- fetch_shard_started_rejected value=6
|
||||||
- elasticsearch_thread_pool_fetch_shard_started_completed value=54
|
- fetch_shard_started_largest value=4
|
||||||
- elasticsearch_thread_pool_refresh_rejected value=4
|
- fetch_shard_started_completed value=54
|
||||||
- elasticsearch_thread_pool_refresh_largest value=8
|
- refresh_rejected value=4
|
||||||
- elasticsearch_thread_pool_refresh_completed value=3
|
- refresh_largest value=8
|
||||||
- elasticsearch_thread_pool_refresh_threads value=23
|
- refresh_completed value=3
|
||||||
- elasticsearch_thread_pool_refresh_queue value=7
|
- refresh_threads value=23
|
||||||
- elasticsearch_thread_pool_refresh_active value=3
|
- refresh_queue value=7
|
||||||
- elasticsearch_thread_pool_optimize_threads value=3
|
- refresh_active value=3
|
||||||
- elasticsearch_thread_pool_optimize_queue value=4
|
- optimize_threads value=3
|
||||||
- elasticsearch_thread_pool_optimize_active value=1
|
- optimize_queue value=4
|
||||||
- elasticsearch_thread_pool_optimize_rejected value=2
|
- optimize_active value=1
|
||||||
- elasticsearch_thread_pool_optimize_largest value=7
|
- optimize_rejected value=2
|
||||||
- elasticsearch_thread_pool_optimize_completed value=3
|
- optimize_largest value=7
|
||||||
- elasticsearch_thread_pool_snapshot_largest value=1
|
- optimize_completed value=3
|
||||||
- elasticsearch_thread_pool_snapshot_completed value=0
|
- snapshot_largest value=1
|
||||||
- elasticsearch_thread_pool_snapshot_threads value=8
|
- snapshot_completed value=0
|
||||||
- elasticsearch_thread_pool_snapshot_queue value=5
|
- snapshot_threads value=8
|
||||||
- elasticsearch_thread_pool_snapshot_active value=6
|
- snapshot_queue value=5
|
||||||
- elasticsearch_thread_pool_snapshot_rejected value=2
|
- snapshot_active value=6
|
||||||
- elasticsearch_thread_pool_generic_threads value=1
|
- snapshot_rejected value=2
|
||||||
- elasticsearch_thread_pool_generic_queue value=4
|
- generic_threads value=1
|
||||||
- elasticsearch_thread_pool_generic_active value=6
|
- generic_queue value=4
|
||||||
- elasticsearch_thread_pool_generic_rejected value=3
|
- generic_active value=6
|
||||||
- elasticsearch_thread_pool_generic_largest value=2
|
- generic_rejected value=3
|
||||||
- elasticsearch_thread_pool_generic_completed value=27
|
- generic_largest value=2
|
||||||
- elasticsearch_thread_pool_flush_threads value=3
|
- generic_completed value=27
|
||||||
- elasticsearch_thread_pool_flush_queue value=8
|
- flush_threads value=3
|
||||||
- elasticsearch_thread_pool_flush_active value=0
|
- flush_queue value=8
|
||||||
- elasticsearch_thread_pool_flush_rejected value=1
|
- flush_active value=0
|
||||||
- elasticsearch_thread_pool_flush_largest value=5
|
- flush_rejected value=1
|
||||||
- elasticsearch_thread_pool_flush_completed value=3
|
- flush_largest value=5
|
||||||
|
- flush_completed value=3
|
||||||
|
|
||||||
Transport statistics about sent and received bytes in cluster communication measurement names:
|
Transport statistics about sent and received bytes in cluster communication measurement names:
|
||||||
- elasticsearch_transport_server_open value=13
|
- elasticsearch_transport
|
||||||
- elasticsearch_transport_rx_count value=6
|
- server_open value=13
|
||||||
- elasticsearch_transport_rx_size_in_bytes value=1380
|
- rx_count value=6
|
||||||
- elasticsearch_transport_tx_count value=6
|
- rx_size_in_bytes value=1380
|
||||||
- elasticsearch_transport_tx_size_in_bytes value=1380
|
- tx_count value=6
|
||||||
|
- tx_size_in_bytes value=1380
|
||||||
|
|||||||
@@ -2,14 +2,13 @@ package elasticsearch
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/errchan"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
|
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||||
)
|
)
|
||||||
@@ -102,7 +101,7 @@ func (e *Elasticsearch) Description() string {
|
|||||||
// Gather reads the stats from Elasticsearch and writes it to the
|
// Gather reads the stats from Elasticsearch and writes it to the
|
||||||
// Accumulator.
|
// Accumulator.
|
||||||
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
|
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
|
||||||
errChan := make(chan error, len(e.Servers))
|
errChan := errchan.New(len(e.Servers))
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(len(e.Servers))
|
wg.Add(len(e.Servers))
|
||||||
|
|
||||||
@@ -116,7 +115,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
|
|||||||
url = s + statsPath
|
url = s + statsPath
|
||||||
}
|
}
|
||||||
if err := e.gatherNodeStats(url, acc); err != nil {
|
if err := e.gatherNodeStats(url, acc); err != nil {
|
||||||
errChan <- err
|
errChan.C <- err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if e.ClusterHealth {
|
if e.ClusterHealth {
|
||||||
@@ -126,17 +125,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
close(errChan)
|
return errChan.Error()
|
||||||
// Get all errors and return them as one giant error
|
|
||||||
errStrings := []string{}
|
|
||||||
for err := range errChan {
|
|
||||||
errStrings = append(errStrings, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errStrings) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return errors.New(strings.Join(errStrings, "\n"))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
|
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
|
||||||
|
|||||||
@@ -6,14 +6,20 @@ Please also see: [Telegraf Input Data Formats](https://github.com/influxdata/tel
|
|||||||
|
|
||||||
#### Configuration
|
#### Configuration
|
||||||
|
|
||||||
In this example a script called ```/tmp/test.sh``` and a script called ```/tmp/test2.sh```
|
In this example a script called ```/tmp/test.sh```, a script called ```/tmp/test2.sh```, and
|
||||||
are configured for ```[[inputs.exec]]``` in JSON format.
|
all scripts matching glob pattern ```/tmp/collect_*.sh``` are configured for ```[[inputs.exec]]```
|
||||||
|
in JSON format. Glob patterns are matched on every run, so adding new scripts that match the pattern
|
||||||
|
will cause them to be picked up immediately.
|
||||||
|
|
||||||
```
|
```toml
|
||||||
# Read flattened metrics from one or more commands that output JSON to stdout
|
# Read flattened metrics from one or more commands that output JSON to stdout
|
||||||
[[inputs.exec]]
|
[[inputs.exec]]
|
||||||
# Shell/commands array
|
# Shell/commands array
|
||||||
commands = ["/tmp/test.sh", "/tmp/test2.sh"]
|
# Full command line to executable with parameters, or a glob pattern to run all matching files.
|
||||||
|
commands = ["/tmp/test.sh", "/tmp/test2.sh", "/tmp/collect_*.sh"]
|
||||||
|
|
||||||
|
## Timeout for each command to complete.
|
||||||
|
timeout = "5s"
|
||||||
|
|
||||||
# Data format to consume.
|
# Data format to consume.
|
||||||
# NOTE json only reads numerical measurements, strings and booleans are ignored.
|
# NOTE json only reads numerical measurements, strings and booleans are ignored.
|
||||||
@@ -21,26 +27,6 @@ are configured for ```[[inputs.exec]]``` in JSON format.
|
|||||||
|
|
||||||
# measurement name suffix (for separating different commands)
|
# measurement name suffix (for separating different commands)
|
||||||
name_suffix = "_mycollector"
|
name_suffix = "_mycollector"
|
||||||
|
|
||||||
## Below configuration will be used for data_format = "graphite", can be ignored for other data_format
|
|
||||||
## If matching multiple measurement files, this string will be used to join the matched values.
|
|
||||||
#separator = "."
|
|
||||||
|
|
||||||
## Each template line requires a template pattern. It can have an optional
|
|
||||||
## filter before the template and separated by spaces. It can also have optional extra
|
|
||||||
## tags following the template. Multiple tags should be separated by commas and no spaces
|
|
||||||
## similar to the line protocol format. The can be only one default template.
|
|
||||||
## Templates support below format:
|
|
||||||
## 1. filter + template
|
|
||||||
## 2. filter + template + extra tag
|
|
||||||
## 3. filter + template with field key
|
|
||||||
## 4. default template
|
|
||||||
#templates = [
|
|
||||||
# "*.app env.service.resource.measurement",
|
|
||||||
# "stats.* .host.measurement* region=us-west,agent=sensu",
|
|
||||||
# "stats2.* .host.measurement.field",
|
|
||||||
# "measurement*"
|
|
||||||
#]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Other options for modifying the measurement names are:
|
Other options for modifying the measurement names are:
|
||||||
@@ -79,7 +65,7 @@ in influx line-protocol format.
|
|||||||
|
|
||||||
#### Configuration
|
#### Configuration
|
||||||
|
|
||||||
```
|
```toml
|
||||||
[[inputs.exec]]
|
[[inputs.exec]]
|
||||||
# Shell/commands array
|
# Shell/commands array
|
||||||
# compatible with old version
|
# compatible with old version
|
||||||
@@ -87,6 +73,9 @@ in influx line-protocol format.
|
|||||||
# command = "/usr/bin/line_protocol_collector"
|
# command = "/usr/bin/line_protocol_collector"
|
||||||
commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"]
|
commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"]
|
||||||
|
|
||||||
|
## Timeout for each command to complete.
|
||||||
|
timeout = "5s"
|
||||||
|
|
||||||
# Data format to consume.
|
# Data format to consume.
|
||||||
# NOTE json only reads numerical measurements, strings and booleans are ignored.
|
# NOTE json only reads numerical measurements, strings and booleans are ignored.
|
||||||
data_format = "influx"
|
data_format = "influx"
|
||||||
@@ -120,12 +109,16 @@ We can also change the data_format to "graphite" to use the metrics collecting s
|
|||||||
In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format.
|
In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format.
|
||||||
|
|
||||||
#### Configuration
|
#### Configuration
|
||||||
```
|
|
||||||
|
```toml
|
||||||
# Read flattened metrics from one or more commands that output JSON to stdout
|
# Read flattened metrics from one or more commands that output JSON to stdout
|
||||||
[[inputs.exec]]
|
[[inputs.exec]]
|
||||||
# Shell/commands array
|
# Shell/commands array
|
||||||
commands = ["/tmp/test.sh","/tmp/test2.sh"]
|
commands = ["/tmp/test.sh","/tmp/test2.sh"]
|
||||||
|
|
||||||
|
## Timeout for each command to complete.
|
||||||
|
timeout = "5s"
|
||||||
|
|
||||||
# Data format to consume.
|
# Data format to consume.
|
||||||
# NOTE json only reads numerical measurements, strings and booleans are ignored.
|
# NOTE json only reads numerical measurements, strings and booleans are ignored.
|
||||||
data_format = "graphite"
|
data_format = "graphite"
|
||||||
@@ -180,4 +173,3 @@ sensu.metric.net.server0.eth0.rx_dropped 0 1444234982
|
|||||||
The templates configuration will be used to parse the graphite metrics to support influxdb/opentsdb tagging store engines.
|
The templates configuration will be used to parse the graphite metrics to support influxdb/opentsdb tagging store engines.
|
||||||
|
|
||||||
More detail information about templates, please refer to [The graphite Input](https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md)
|
More detail information about templates, please refer to [The graphite Input](https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md)
|
||||||
|
|
||||||
|
|||||||
@@ -4,12 +4,17 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/gonuts/go-shellquote"
|
"github.com/gonuts/go-shellquote"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
"github.com/influxdata/telegraf/internal/errchan"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
"github.com/influxdata/telegraf/plugins/parsers"
|
"github.com/influxdata/telegraf/plugins/parsers"
|
||||||
"github.com/influxdata/telegraf/plugins/parsers/nagios"
|
"github.com/influxdata/telegraf/plugins/parsers/nagios"
|
||||||
@@ -17,7 +22,14 @@ import (
|
|||||||
|
|
||||||
const sampleConfig = `
|
const sampleConfig = `
|
||||||
## Commands array
|
## Commands array
|
||||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
commands = [
|
||||||
|
"/tmp/test.sh",
|
||||||
|
"/usr/bin/mycollector --foo=bar",
|
||||||
|
"/tmp/collect_*.sh"
|
||||||
|
]
|
||||||
|
|
||||||
|
## Timeout for each command to complete.
|
||||||
|
timeout = "5s"
|
||||||
|
|
||||||
## measurement name suffix (for separating different commands)
|
## measurement name suffix (for separating different commands)
|
||||||
name_suffix = "_mycollector"
|
name_suffix = "_mycollector"
|
||||||
@@ -32,6 +44,7 @@ const sampleConfig = `
|
|||||||
type Exec struct {
|
type Exec struct {
|
||||||
Commands []string
|
Commands []string
|
||||||
Command string
|
Command string
|
||||||
|
Timeout internal.Duration
|
||||||
|
|
||||||
parser parsers.Parser
|
parser parsers.Parser
|
||||||
|
|
||||||
@@ -43,7 +56,8 @@ type Exec struct {
|
|||||||
|
|
||||||
func NewExec() *Exec {
|
func NewExec() *Exec {
|
||||||
return &Exec{
|
return &Exec{
|
||||||
runner: CommandRunner{},
|
runner: CommandRunner{},
|
||||||
|
Timeout: internal.Duration{Duration: time.Second * 5},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -73,7 +87,11 @@ func AddNagiosState(exitCode error, acc telegraf.Accumulator) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c CommandRunner) Run(e *Exec, command string, acc telegraf.Accumulator) ([]byte, error) {
|
func (c CommandRunner) Run(
|
||||||
|
e *Exec,
|
||||||
|
command string,
|
||||||
|
acc telegraf.Accumulator,
|
||||||
|
) ([]byte, error) {
|
||||||
split_cmd, err := shellquote.Split(command)
|
split_cmd, err := shellquote.Split(command)
|
||||||
if err != nil || len(split_cmd) == 0 {
|
if err != nil || len(split_cmd) == 0 {
|
||||||
return nil, fmt.Errorf("exec: unable to parse command, %s", err)
|
return nil, fmt.Errorf("exec: unable to parse command, %s", err)
|
||||||
@@ -84,7 +102,7 @@ func (c CommandRunner) Run(e *Exec, command string, acc telegraf.Accumulator) ([
|
|||||||
var out bytes.Buffer
|
var out bytes.Buffer
|
||||||
cmd.Stdout = &out
|
cmd.Stdout = &out
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil {
|
||||||
switch e.parser.(type) {
|
switch e.parser.(type) {
|
||||||
case *nagios.NagiosParser:
|
case *nagios.NagiosParser:
|
||||||
AddNagiosState(err, acc)
|
AddNagiosState(err, acc)
|
||||||
@@ -139,23 +157,45 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
|
|||||||
e.Command = ""
|
e.Command = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
e.errChan = make(chan error, len(e.Commands))
|
commands := make([]string, 0, len(e.Commands))
|
||||||
|
for _, pattern := range e.Commands {
|
||||||
|
cmdAndArgs := strings.SplitN(pattern, " ", 2)
|
||||||
|
if len(cmdAndArgs) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
e.wg.Add(len(e.Commands))
|
matches, err := filepath.Glob(cmdAndArgs[0])
|
||||||
for _, command := range e.Commands {
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(matches) == 0 {
|
||||||
|
// There were no matches with the glob pattern, so let's assume
|
||||||
|
// that the command is in PATH and just run it as it is
|
||||||
|
commands = append(commands, pattern)
|
||||||
|
} else {
|
||||||
|
// There were matches, so we'll append each match together with
|
||||||
|
// the arguments to the commands slice
|
||||||
|
for _, match := range matches {
|
||||||
|
if len(cmdAndArgs) == 1 {
|
||||||
|
commands = append(commands, match)
|
||||||
|
} else {
|
||||||
|
commands = append(commands,
|
||||||
|
strings.Join([]string{match, cmdAndArgs[1]}, " "))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
errChan := errchan.New(len(commands))
|
||||||
|
e.errChan = errChan.C
|
||||||
|
|
||||||
|
e.wg.Add(len(commands))
|
||||||
|
for _, command := range commands {
|
||||||
go e.ProcessCommand(command, acc)
|
go e.ProcessCommand(command, acc)
|
||||||
}
|
}
|
||||||
e.wg.Wait()
|
e.wg.Wait()
|
||||||
|
return errChan.Error()
|
||||||
select {
|
|
||||||
default:
|
|
||||||
close(e.errChan)
|
|
||||||
return nil
|
|
||||||
case err := <-e.errChan:
|
|
||||||
close(e.errChan)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
@@ -169,3 +169,51 @@ func TestLineProtocolParseMultiple(t *testing.T) {
|
|||||||
acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExecCommandWithGlob(t *testing.T) {
|
||||||
|
parser, _ := parsers.NewValueParser("metric", "string", nil)
|
||||||
|
e := NewExec()
|
||||||
|
e.Commands = []string{"/bin/ech* metric_value"}
|
||||||
|
e.SetParser(parser)
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := e.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"value": "metric_value",
|
||||||
|
}
|
||||||
|
acc.AssertContainsFields(t, "metric", fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExecCommandWithoutGlob(t *testing.T) {
|
||||||
|
parser, _ := parsers.NewValueParser("metric", "string", nil)
|
||||||
|
e := NewExec()
|
||||||
|
e.Commands = []string{"/bin/echo metric_value"}
|
||||||
|
e.SetParser(parser)
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := e.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"value": "metric_value",
|
||||||
|
}
|
||||||
|
acc.AssertContainsFields(t, "metric", fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExecCommandWithoutGlobAndPath(t *testing.T) {
|
||||||
|
parser, _ := parsers.NewValueParser("metric", "string", nil)
|
||||||
|
e := NewExec()
|
||||||
|
e.Commands = []string{"echo metric_value"}
|
||||||
|
e.SetParser(parser)
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := e.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"value": "metric_value",
|
||||||
|
}
|
||||||
|
acc.AssertContainsFields(t, "metric", fields)
|
||||||
|
}
|
||||||
|
|||||||
37
plugins/inputs/filestat/README.md
Normal file
37
plugins/inputs/filestat/README.md
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
# filestat Input Plugin
|
||||||
|
|
||||||
|
The filestat plugin gathers metrics about file existence, size, and other stats.
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Read stats about given file(s)
|
||||||
|
[[inputs.filestat]]
|
||||||
|
## Files to gather stats about.
|
||||||
|
## These accept standard unix glob matching rules, but with the addition of
|
||||||
|
## ** as a "super asterisk". See https://github.com/gobwas/glob.
|
||||||
|
files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
|
||||||
|
## If true, read the entire file and calculate an md5 checksum.
|
||||||
|
md5 = false
|
||||||
|
```
|
||||||
|
|
||||||
|
### Measurements & Fields:
|
||||||
|
|
||||||
|
- filestat
|
||||||
|
- exists (int, 0 | 1)
|
||||||
|
- size_bytes (int, bytes)
|
||||||
|
- md5 (optional, string)
|
||||||
|
|
||||||
|
### Tags:
|
||||||
|
|
||||||
|
- All measurements have the following tags:
|
||||||
|
- file (the path the to file, as specified in the config)
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ telegraf -config /etc/telegraf/telegraf.conf -input-filter filestat -test
|
||||||
|
* Plugin: filestat, Collection 1
|
||||||
|
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1461203374493128216
|
||||||
|
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i 1461203374493199335
|
||||||
|
```
|
||||||
125
plugins/inputs/filestat/filestat.go
Normal file
125
plugins/inputs/filestat/filestat.go
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
package filestat
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/md5"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/globpath"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
const sampleConfig = `
|
||||||
|
## Files to gather stats about.
|
||||||
|
## These accept standard unix glob matching rules, but with the addition of
|
||||||
|
## ** as a "super asterisk". ie:
|
||||||
|
## "/var/log/**.log" -> recursively find all .log files in /var/log
|
||||||
|
## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
|
||||||
|
## "/var/log/apache.log" -> just tail the apache log file
|
||||||
|
##
|
||||||
|
## See https://github.com/gobwas/glob for more examples
|
||||||
|
##
|
||||||
|
files = ["/var/log/**.log"]
|
||||||
|
## If true, read the entire file and calculate an md5 checksum.
|
||||||
|
md5 = false
|
||||||
|
`
|
||||||
|
|
||||||
|
type FileStat struct {
|
||||||
|
Md5 bool
|
||||||
|
Files []string
|
||||||
|
|
||||||
|
// maps full file paths to globmatch obj
|
||||||
|
globs map[string]*globpath.GlobPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFileStat() *FileStat {
|
||||||
|
return &FileStat{
|
||||||
|
globs: make(map[string]*globpath.GlobPath),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *FileStat) Description() string {
|
||||||
|
return "Read stats about given file(s)"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *FileStat) SampleConfig() string { return sampleConfig }
|
||||||
|
|
||||||
|
func (f *FileStat) Gather(acc telegraf.Accumulator) error {
|
||||||
|
var errS string
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for _, filepath := range f.Files {
|
||||||
|
// Get the compiled glob object for this filepath
|
||||||
|
g, ok := f.globs[filepath]
|
||||||
|
if !ok {
|
||||||
|
if g, err = globpath.Compile(filepath); err != nil {
|
||||||
|
errS += err.Error() + " "
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
f.globs[filepath] = g
|
||||||
|
}
|
||||||
|
|
||||||
|
files := g.Match()
|
||||||
|
if len(files) == 0 {
|
||||||
|
acc.AddFields("filestat",
|
||||||
|
map[string]interface{}{
|
||||||
|
"exists": int64(0),
|
||||||
|
},
|
||||||
|
map[string]string{
|
||||||
|
"file": filepath,
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for fileName, fileInfo := range files {
|
||||||
|
tags := map[string]string{
|
||||||
|
"file": fileName,
|
||||||
|
}
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"exists": int64(1),
|
||||||
|
"size_bytes": fileInfo.Size(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.Md5 {
|
||||||
|
md5, err := getMd5(fileName)
|
||||||
|
if err != nil {
|
||||||
|
errS += err.Error() + " "
|
||||||
|
} else {
|
||||||
|
fields["md5_sum"] = md5
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
acc.AddFields("filestat", fields, tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if errS != "" {
|
||||||
|
return fmt.Errorf(errS)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read given file and calculate an md5 hash.
|
||||||
|
func getMd5(file string) (string, error) {
|
||||||
|
of, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer of.Close()
|
||||||
|
|
||||||
|
hash := md5.New()
|
||||||
|
_, err = io.Copy(hash, of)
|
||||||
|
if err != nil {
|
||||||
|
// fatal error
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%x", hash.Sum(nil)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add("filestat", func() telegraf.Input {
|
||||||
|
return NewFileStat()
|
||||||
|
})
|
||||||
|
}
|
||||||
180
plugins/inputs/filestat/filestat_test.go
Normal file
180
plugins/inputs/filestat/filestat_test.go
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
package filestat
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGatherNoMd5(t *testing.T) {
|
||||||
|
dir := getTestdataDir()
|
||||||
|
fs := NewFileStat()
|
||||||
|
fs.Files = []string{
|
||||||
|
dir + "log1.log",
|
||||||
|
dir + "log2.log",
|
||||||
|
"/non/existant/file",
|
||||||
|
}
|
||||||
|
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
fs.Gather(&acc)
|
||||||
|
|
||||||
|
tags1 := map[string]string{
|
||||||
|
"file": dir + "log1.log",
|
||||||
|
}
|
||||||
|
fields1 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
|
||||||
|
|
||||||
|
tags2 := map[string]string{
|
||||||
|
"file": dir + "log2.log",
|
||||||
|
}
|
||||||
|
fields2 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
|
||||||
|
|
||||||
|
tags3 := map[string]string{
|
||||||
|
"file": "/non/existant/file",
|
||||||
|
}
|
||||||
|
fields3 := map[string]interface{}{
|
||||||
|
"exists": int64(0),
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGatherExplicitFiles(t *testing.T) {
|
||||||
|
dir := getTestdataDir()
|
||||||
|
fs := NewFileStat()
|
||||||
|
fs.Md5 = true
|
||||||
|
fs.Files = []string{
|
||||||
|
dir + "log1.log",
|
||||||
|
dir + "log2.log",
|
||||||
|
"/non/existant/file",
|
||||||
|
}
|
||||||
|
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
fs.Gather(&acc)
|
||||||
|
|
||||||
|
tags1 := map[string]string{
|
||||||
|
"file": dir + "log1.log",
|
||||||
|
}
|
||||||
|
fields1 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
|
||||||
|
|
||||||
|
tags2 := map[string]string{
|
||||||
|
"file": dir + "log2.log",
|
||||||
|
}
|
||||||
|
fields2 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
|
||||||
|
|
||||||
|
tags3 := map[string]string{
|
||||||
|
"file": "/non/existant/file",
|
||||||
|
}
|
||||||
|
fields3 := map[string]interface{}{
|
||||||
|
"exists": int64(0),
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGatherGlob(t *testing.T) {
|
||||||
|
dir := getTestdataDir()
|
||||||
|
fs := NewFileStat()
|
||||||
|
fs.Md5 = true
|
||||||
|
fs.Files = []string{
|
||||||
|
dir + "*.log",
|
||||||
|
}
|
||||||
|
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
fs.Gather(&acc)
|
||||||
|
|
||||||
|
tags1 := map[string]string{
|
||||||
|
"file": dir + "log1.log",
|
||||||
|
}
|
||||||
|
fields1 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
|
||||||
|
|
||||||
|
tags2 := map[string]string{
|
||||||
|
"file": dir + "log2.log",
|
||||||
|
}
|
||||||
|
fields2 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGatherSuperAsterisk(t *testing.T) {
|
||||||
|
dir := getTestdataDir()
|
||||||
|
fs := NewFileStat()
|
||||||
|
fs.Md5 = true
|
||||||
|
fs.Files = []string{
|
||||||
|
dir + "**",
|
||||||
|
}
|
||||||
|
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
fs.Gather(&acc)
|
||||||
|
|
||||||
|
tags1 := map[string]string{
|
||||||
|
"file": dir + "log1.log",
|
||||||
|
}
|
||||||
|
fields1 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
|
||||||
|
|
||||||
|
tags2 := map[string]string{
|
||||||
|
"file": dir + "log2.log",
|
||||||
|
}
|
||||||
|
fields2 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(0),
|
||||||
|
"exists": int64(1),
|
||||||
|
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
|
||||||
|
|
||||||
|
tags3 := map[string]string{
|
||||||
|
"file": dir + "test.conf",
|
||||||
|
}
|
||||||
|
fields3 := map[string]interface{}{
|
||||||
|
"size_bytes": int64(104),
|
||||||
|
"exists": int64(1),
|
||||||
|
"md5_sum": "5a7e9b77fa25e7bb411dbd17cf403c1f",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetMd5(t *testing.T) {
|
||||||
|
dir := getTestdataDir()
|
||||||
|
md5, err := getMd5(dir + "test.conf")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5)
|
||||||
|
|
||||||
|
md5, err = getMd5("/tmp/foo/bar/fooooo")
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTestdataDir() string {
|
||||||
|
_, filename, _, _ := runtime.Caller(1)
|
||||||
|
return strings.Replace(filename, "filestat_test.go", "testdata/", 1)
|
||||||
|
}
|
||||||
0
plugins/inputs/filestat/testdata/log1.log
vendored
Normal file
0
plugins/inputs/filestat/testdata/log1.log
vendored
Normal file
0
plugins/inputs/filestat/testdata/log2.log
vendored
Normal file
0
plugins/inputs/filestat/testdata/log2.log
vendored
Normal file
5
plugins/inputs/filestat/testdata/test.conf
vendored
Normal file
5
plugins/inputs/filestat/testdata/test.conf
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# this is a fake testing config file
|
||||||
|
# for testing the filestat plugin
|
||||||
|
|
||||||
|
option1 = "foo"
|
||||||
|
option2 = "bar"
|
||||||
@@ -91,193 +91,12 @@ func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
|
|||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCommitComment(data []byte) (Event, error) {
|
func generateEvent(data []byte, event Event) (Event, error) {
|
||||||
commitCommentStruct := CommitCommentEvent{}
|
err := json.Unmarshal(data, event)
|
||||||
err := json.Unmarshal(data, &commitCommentStruct)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return commitCommentStruct, nil
|
return event, nil
|
||||||
}
|
|
||||||
|
|
||||||
func newCreate(data []byte) (Event, error) {
|
|
||||||
createStruct := CreateEvent{}
|
|
||||||
err := json.Unmarshal(data, &createStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return createStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDelete(data []byte) (Event, error) {
|
|
||||||
deleteStruct := DeleteEvent{}
|
|
||||||
err := json.Unmarshal(data, &deleteStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return deleteStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDeployment(data []byte) (Event, error) {
|
|
||||||
deploymentStruct := DeploymentEvent{}
|
|
||||||
err := json.Unmarshal(data, &deploymentStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return deploymentStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDeploymentStatus(data []byte) (Event, error) {
|
|
||||||
deploymentStatusStruct := DeploymentStatusEvent{}
|
|
||||||
err := json.Unmarshal(data, &deploymentStatusStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return deploymentStatusStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newFork(data []byte) (Event, error) {
|
|
||||||
forkStruct := ForkEvent{}
|
|
||||||
err := json.Unmarshal(data, &forkStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return forkStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newGollum(data []byte) (Event, error) {
|
|
||||||
gollumStruct := GollumEvent{}
|
|
||||||
err := json.Unmarshal(data, &gollumStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return gollumStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newIssueComment(data []byte) (Event, error) {
|
|
||||||
issueCommentStruct := IssueCommentEvent{}
|
|
||||||
err := json.Unmarshal(data, &issueCommentStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return issueCommentStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newIssues(data []byte) (Event, error) {
|
|
||||||
issuesStruct := IssuesEvent{}
|
|
||||||
err := json.Unmarshal(data, &issuesStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return issuesStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newMember(data []byte) (Event, error) {
|
|
||||||
memberStruct := MemberEvent{}
|
|
||||||
err := json.Unmarshal(data, &memberStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return memberStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newMembership(data []byte) (Event, error) {
|
|
||||||
membershipStruct := MembershipEvent{}
|
|
||||||
err := json.Unmarshal(data, &membershipStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return membershipStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPageBuild(data []byte) (Event, error) {
|
|
||||||
pageBuildEvent := PageBuildEvent{}
|
|
||||||
err := json.Unmarshal(data, &pageBuildEvent)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return pageBuildEvent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPublic(data []byte) (Event, error) {
|
|
||||||
publicEvent := PublicEvent{}
|
|
||||||
err := json.Unmarshal(data, &publicEvent)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return publicEvent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPullRequest(data []byte) (Event, error) {
|
|
||||||
pullRequestStruct := PullRequestEvent{}
|
|
||||||
err := json.Unmarshal(data, &pullRequestStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return pullRequestStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPullRequestReviewComment(data []byte) (Event, error) {
|
|
||||||
pullRequestReviewCommentStruct := PullRequestReviewCommentEvent{}
|
|
||||||
err := json.Unmarshal(data, &pullRequestReviewCommentStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return pullRequestReviewCommentStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPush(data []byte) (Event, error) {
|
|
||||||
pushStruct := PushEvent{}
|
|
||||||
err := json.Unmarshal(data, &pushStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return pushStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRelease(data []byte) (Event, error) {
|
|
||||||
releaseStruct := ReleaseEvent{}
|
|
||||||
err := json.Unmarshal(data, &releaseStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return releaseStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRepository(data []byte) (Event, error) {
|
|
||||||
repositoryStruct := RepositoryEvent{}
|
|
||||||
err := json.Unmarshal(data, &repositoryStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return repositoryStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStatus(data []byte) (Event, error) {
|
|
||||||
statusStruct := StatusEvent{}
|
|
||||||
err := json.Unmarshal(data, &statusStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return statusStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTeamAdd(data []byte) (Event, error) {
|
|
||||||
teamAddStruct := TeamAddEvent{}
|
|
||||||
err := json.Unmarshal(data, &teamAddStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return teamAddStruct, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newWatch(data []byte) (Event, error) {
|
|
||||||
watchStruct := WatchEvent{}
|
|
||||||
err := json.Unmarshal(data, &watchStruct)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return watchStruct, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type newEventError struct {
|
type newEventError struct {
|
||||||
@@ -288,51 +107,51 @@ func (e *newEventError) Error() string {
|
|||||||
return e.s
|
return e.s
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEvent(r []byte, t string) (Event, error) {
|
func NewEvent(data []byte, name string) (Event, error) {
|
||||||
log.Printf("New %v event recieved", t)
|
log.Printf("New %v event received", name)
|
||||||
switch t {
|
switch name {
|
||||||
case "commit_comment":
|
case "commit_comment":
|
||||||
return newCommitComment(r)
|
return generateEvent(data, &CommitCommentEvent{})
|
||||||
case "create":
|
case "create":
|
||||||
return newCreate(r)
|
return generateEvent(data, &CreateEvent{})
|
||||||
case "delete":
|
case "delete":
|
||||||
return newDelete(r)
|
return generateEvent(data, &DeleteEvent{})
|
||||||
case "deployment":
|
case "deployment":
|
||||||
return newDeployment(r)
|
return generateEvent(data, &DeploymentEvent{})
|
||||||
case "deployment_status":
|
case "deployment_status":
|
||||||
return newDeploymentStatus(r)
|
return generateEvent(data, &DeploymentStatusEvent{})
|
||||||
case "fork":
|
case "fork":
|
||||||
return newFork(r)
|
return generateEvent(data, &ForkEvent{})
|
||||||
case "gollum":
|
case "gollum":
|
||||||
return newGollum(r)
|
return generateEvent(data, &GollumEvent{})
|
||||||
case "issue_comment":
|
case "issue_comment":
|
||||||
return newIssueComment(r)
|
return generateEvent(data, &IssueCommentEvent{})
|
||||||
case "issues":
|
case "issues":
|
||||||
return newIssues(r)
|
return generateEvent(data, &IssuesEvent{})
|
||||||
case "member":
|
case "member":
|
||||||
return newMember(r)
|
return generateEvent(data, &MemberEvent{})
|
||||||
case "membership":
|
case "membership":
|
||||||
return newMembership(r)
|
return generateEvent(data, &MembershipEvent{})
|
||||||
case "page_build":
|
case "page_build":
|
||||||
return newPageBuild(r)
|
return generateEvent(data, &PageBuildEvent{})
|
||||||
case "public":
|
case "public":
|
||||||
return newPublic(r)
|
return generateEvent(data, &PublicEvent{})
|
||||||
case "pull_request":
|
case "pull_request":
|
||||||
return newPullRequest(r)
|
return generateEvent(data, &PullRequestEvent{})
|
||||||
case "pull_request_review_comment":
|
case "pull_request_review_comment":
|
||||||
return newPullRequestReviewComment(r)
|
return generateEvent(data, &PullRequestReviewCommentEvent{})
|
||||||
case "push":
|
case "push":
|
||||||
return newPush(r)
|
return generateEvent(data, &PushEvent{})
|
||||||
case "release":
|
case "release":
|
||||||
return newRelease(r)
|
return generateEvent(data, &ReleaseEvent{})
|
||||||
case "repository":
|
case "repository":
|
||||||
return newRepository(r)
|
return generateEvent(data, &RepositoryEvent{})
|
||||||
case "status":
|
case "status":
|
||||||
return newStatus(r)
|
return generateEvent(data, &StatusEvent{})
|
||||||
case "team_add":
|
case "team_add":
|
||||||
return newTeamAdd(r)
|
return generateEvent(data, &TeamAddEvent{})
|
||||||
case "watch":
|
case "watch":
|
||||||
return newWatch(r)
|
return generateEvent(data, &WatchEvent{})
|
||||||
}
|
}
|
||||||
return nil, &newEventError{"Not a recgonized event type"}
|
return nil, &newEventError{"Not a recognized event type"}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,231 +7,89 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCommitCommentEvent(t *testing.T) {
|
func GithubWebhookRequest(event string, jsonString string, t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
gh := NewGithubWebhooks()
|
||||||
jsonString := CommitCommentEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
||||||
req.Header.Add("X-Github-Event", "commit_comment")
|
req.Header.Add("X-Github-Event", event)
|
||||||
w := httptest.NewRecorder()
|
w := httptest.NewRecorder()
|
||||||
gh.eventHandler(w, req)
|
gh.eventHandler(w, req)
|
||||||
if w.Code != http.StatusOK {
|
if w.Code != http.StatusOK {
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
t.Errorf("POST "+event+" returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCommitCommentEvent(t *testing.T) {
|
||||||
|
GithubWebhookRequest("commit_comment", CommitCommentEventJSON(), t)
|
||||||
|
}
|
||||||
|
|
||||||
func TestDeleteEvent(t *testing.T) {
|
func TestDeleteEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("delete", DeleteEventJSON(), t)
|
||||||
jsonString := DeleteEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "delete")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeploymentEvent(t *testing.T) {
|
func TestDeploymentEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("deployment", DeploymentEventJSON(), t)
|
||||||
jsonString := DeploymentEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "deployment")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeploymentStatusEvent(t *testing.T) {
|
func TestDeploymentStatusEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("deployment_status", DeploymentStatusEventJSON(), t)
|
||||||
jsonString := DeploymentStatusEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "deployment_status")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestForkEvent(t *testing.T) {
|
func TestForkEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("fork", ForkEventJSON(), t)
|
||||||
jsonString := ForkEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "fork")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGollumEvent(t *testing.T) {
|
func TestGollumEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("gollum", GollumEventJSON(), t)
|
||||||
jsonString := GollumEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "gollum")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIssueCommentEvent(t *testing.T) {
|
func TestIssueCommentEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("issue_comment", IssueCommentEventJSON(), t)
|
||||||
jsonString := IssueCommentEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "issue_comment")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIssuesEvent(t *testing.T) {
|
func TestIssuesEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("issues", IssuesEventJSON(), t)
|
||||||
jsonString := IssuesEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "issues")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMemberEvent(t *testing.T) {
|
func TestMemberEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("member", MemberEventJSON(), t)
|
||||||
jsonString := MemberEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "member")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMembershipEvent(t *testing.T) {
|
func TestMembershipEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("membership", MembershipEventJSON(), t)
|
||||||
jsonString := MembershipEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "membership")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPageBuildEvent(t *testing.T) {
|
func TestPageBuildEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("page_build", PageBuildEventJSON(), t)
|
||||||
jsonString := PageBuildEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "page_build")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublicEvent(t *testing.T) {
|
func TestPublicEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("public", PublicEventJSON(), t)
|
||||||
jsonString := PublicEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "public")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPullRequestReviewCommentEvent(t *testing.T) {
|
func TestPullRequestReviewCommentEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("pull_request_review_comment", PullRequestReviewCommentEventJSON(), t)
|
||||||
jsonString := PullRequestReviewCommentEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "pull_request_review_comment")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPushEvent(t *testing.T) {
|
func TestPushEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("push", PushEventJSON(), t)
|
||||||
jsonString := PushEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "push")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReleaseEvent(t *testing.T) {
|
func TestReleaseEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("release", ReleaseEventJSON(), t)
|
||||||
jsonString := ReleaseEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "release")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRepositoryEvent(t *testing.T) {
|
func TestRepositoryEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("repository", RepositoryEventJSON(), t)
|
||||||
jsonString := RepositoryEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "repository")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStatusEvent(t *testing.T) {
|
func TestStatusEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("status", StatusEventJSON(), t)
|
||||||
|
|
||||||
jsonString := StatusEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "status")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTeamAddEvent(t *testing.T) {
|
func TestTeamAddEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("team_add", TeamAddEventJSON(), t)
|
||||||
jsonString := TeamAddEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "team_add")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWatchEvent(t *testing.T) {
|
func TestWatchEvent(t *testing.T) {
|
||||||
gh := NewGithubWebhooks()
|
GithubWebhookRequest("watch", WatchEventJSON(), t)
|
||||||
jsonString := WatchEventJSON()
|
|
||||||
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
|
|
||||||
req.Header.Add("X-Github-Event", "watch")
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
gh.eventHandler(w, req)
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
55
plugins/inputs/graylog/README.md
Normal file
55
plugins/inputs/graylog/README.md
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
# GrayLog plugin
|
||||||
|
|
||||||
|
The Graylog plugin can collect data from remote Graylog service URLs.
|
||||||
|
|
||||||
|
Plugin currently support two type of end points:-
|
||||||
|
|
||||||
|
- multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple)
|
||||||
|
- namespace (Ex http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace})
|
||||||
|
|
||||||
|
End Point can be a mixe of one multiple end point and several namespaces end points
|
||||||
|
|
||||||
|
|
||||||
|
Note: if namespace end point specified metrics array will be ignored for that call.
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Read flattened metrics from one or more GrayLog HTTP endpoints
|
||||||
|
[[inputs.graylog]]
|
||||||
|
## API endpoint, currently supported API:
|
||||||
|
##
|
||||||
|
## - multiple (Ex http://<host>:12900/system/metrics/multiple)
|
||||||
|
## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
|
||||||
|
##
|
||||||
|
## For namespace endpoint, the metrics array will be ignored for that call.
|
||||||
|
## Endpoint can contain namespace and multiple type calls.
|
||||||
|
##
|
||||||
|
## Please check http://[graylog-server-ip]:12900/api-browser for full list
|
||||||
|
## of endpoints
|
||||||
|
servers = [
|
||||||
|
"http://[graylog-server-ip]:12900/system/metrics/multiple",
|
||||||
|
]
|
||||||
|
|
||||||
|
## Metrics list
|
||||||
|
## List of metrics can be found on Graylog webservice documentation.
|
||||||
|
## Or by hitting the the web service api at:
|
||||||
|
## http://[graylog-host]:12900/system/metrics
|
||||||
|
metrics = [
|
||||||
|
"jvm.cl.loaded",
|
||||||
|
"jvm.memory.pools.Metaspace.committed"
|
||||||
|
]
|
||||||
|
|
||||||
|
## Username and password
|
||||||
|
username = ""
|
||||||
|
password = ""
|
||||||
|
|
||||||
|
## Optional SSL Config
|
||||||
|
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# ssl_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use SSL but skip chain & host verification
|
||||||
|
# insecure_skip_verify = false
|
||||||
|
```
|
||||||
|
|
||||||
|
Please refer to GrayLog metrics api browser for full metric end points http://host:12900/api-browser
|
||||||
312
plugins/inputs/graylog/graylog.go
Normal file
312
plugins/inputs/graylog/graylog.go
Normal file
@@ -0,0 +1,312 @@
|
|||||||
|
package graylog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ResponseMetrics struct {
|
||||||
|
total int
|
||||||
|
Metrics []Metric `json:"metrics"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Metric struct {
|
||||||
|
FullName string `json:"full_name"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Fields map[string]interface{} `json:"metric"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GrayLog struct {
|
||||||
|
Servers []string
|
||||||
|
Metrics []string
|
||||||
|
Username string
|
||||||
|
Password string
|
||||||
|
|
||||||
|
// Path to CA file
|
||||||
|
SSLCA string `toml:"ssl_ca"`
|
||||||
|
// Path to host cert file
|
||||||
|
SSLCert string `toml:"ssl_cert"`
|
||||||
|
// Path to cert key file
|
||||||
|
SSLKey string `toml:"ssl_key"`
|
||||||
|
// Use SSL but skip chain & host verification
|
||||||
|
InsecureSkipVerify bool
|
||||||
|
|
||||||
|
client HTTPClient
|
||||||
|
}
|
||||||
|
|
||||||
|
type HTTPClient interface {
|
||||||
|
// Returns the result of an http request
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// req: HTTP request object
|
||||||
|
//
|
||||||
|
// Returns:
|
||||||
|
// http.Response: HTTP respons object
|
||||||
|
// error : Any error that may have occurred
|
||||||
|
MakeRequest(req *http.Request) (*http.Response, error)
|
||||||
|
|
||||||
|
SetHTTPClient(client *http.Client)
|
||||||
|
HTTPClient() *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
type Messagebody struct {
|
||||||
|
Metrics []string `json:"metrics"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RealHTTPClient is the production HTTPClient implementation backed by
// a standard *http.Client.
type RealHTTPClient struct {
	client *http.Client
}

// MakeRequest executes req with the wrapped client.
func (c *RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
	return c.client.Do(req)
}

// SetHTTPClient stores the client to be used by MakeRequest.
func (c *RealHTTPClient) SetHTTPClient(client *http.Client) {
	c.client = client
}

// HTTPClient returns the stored client (nil until SetHTTPClient is called).
func (c *RealHTTPClient) HTTPClient() *http.Client {
	return c.client
}
|
||||||
|
|
||||||
|
// sampleConfig is the TOML snippet shown by `telegraf -sample-config`
// for this plugin. (Fixed a duplicated word in the help text.)
var sampleConfig = `
  ## API endpoint, currently supported API:
  ##
  ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
  ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
  ##
  ## For namespace endpoint, the metrics array will be ignored for that call.
  ## Endpoint can contain namespace and multiple type calls.
  ##
  ## Please check http://[graylog-server-ip]:12900/api-browser for full list
  ## of endpoints
  servers = [
    "http://[graylog-server-ip]:12900/system/metrics/multiple",
  ]

  ## Metrics list
  ## List of metrics can be found on Graylog webservice documentation.
  ## Or by hitting the web service api at:
  ##   http://[graylog-host]:12900/system/metrics
  metrics = [
    "jvm.cl.loaded",
    "jvm.memory.pools.Metaspace.committed"
  ]

  ## Username and password
  username = ""
  password = ""

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`
|
||||||
|
|
||||||
|
// SampleConfig returns the example TOML configuration for this plugin.
func (h *GrayLog) SampleConfig() string {
	return sampleConfig
}
|
||||||
|
|
||||||
|
// Description returns the one-line summary shown in plugin listings.
func (h *GrayLog) Description() string {
	return "Read flattened metrics from one or more GrayLog HTTP endpoints"
}
|
||||||
|
|
||||||
|
// Gathers data for all servers.
|
||||||
|
func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
if h.client.HTTPClient() == nil {
|
||||||
|
tlsCfg, err := internal.GetTLSConfig(
|
||||||
|
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tr := &http.Transport{
|
||||||
|
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||||
|
TLSClientConfig: tlsCfg,
|
||||||
|
}
|
||||||
|
client := &http.Client{
|
||||||
|
Transport: tr,
|
||||||
|
Timeout: time.Duration(4 * time.Second),
|
||||||
|
}
|
||||||
|
h.client.SetHTTPClient(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
errorChannel := make(chan error, len(h.Servers))
|
||||||
|
|
||||||
|
for _, server := range h.Servers {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(server string) {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := h.gatherServer(acc, server); err != nil {
|
||||||
|
errorChannel <- err
|
||||||
|
}
|
||||||
|
}(server)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
close(errorChannel)
|
||||||
|
|
||||||
|
// Get all errors and return them as one giant error
|
||||||
|
errorStrings := []string{}
|
||||||
|
for err := range errorChannel {
|
||||||
|
errorStrings = append(errorStrings, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(errorStrings) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.New(strings.Join(errorStrings, "\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gathers data from a particular server
|
||||||
|
// Parameters:
|
||||||
|
// acc : The telegraf Accumulator to use
|
||||||
|
// serverURL: endpoint to send request to
|
||||||
|
// service : the service being queried
|
||||||
|
//
|
||||||
|
// Returns:
|
||||||
|
// error: Any error that may have occurred
|
||||||
|
func (h *GrayLog) gatherServer(
|
||||||
|
acc telegraf.Accumulator,
|
||||||
|
serverURL string,
|
||||||
|
) error {
|
||||||
|
resp, _, err := h.sendRequest(serverURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
requestURL, err := url.Parse(serverURL)
|
||||||
|
host, port, _ := net.SplitHostPort(requestURL.Host)
|
||||||
|
var dat ResponseMetrics
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(resp), &dat); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, m_item := range dat.Metrics {
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags := map[string]string{
|
||||||
|
"server": host,
|
||||||
|
"port": port,
|
||||||
|
"name": m_item.Name,
|
||||||
|
"type": m_item.Type,
|
||||||
|
}
|
||||||
|
h.flatten(m_item.Fields, fields, "")
|
||||||
|
acc.AddFields(m_item.FullName, fields, tags)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flatten JSON hierarchy to produce field name and field value
|
||||||
|
// Parameters:
|
||||||
|
// item: Item map to flatten
|
||||||
|
// fields: Map to store generated fields.
|
||||||
|
// id: Prefix for top level metric (empty string "")
|
||||||
|
// Returns:
|
||||||
|
// void
|
||||||
|
func (h *GrayLog) flatten(item map[string]interface{}, fields map[string]interface{}, id string) {
|
||||||
|
if id != "" {
|
||||||
|
id = id + "_"
|
||||||
|
}
|
||||||
|
for k, i := range item {
|
||||||
|
switch i.(type) {
|
||||||
|
case int:
|
||||||
|
fields[id+k] = i.(float64)
|
||||||
|
case float64:
|
||||||
|
fields[id+k] = i.(float64)
|
||||||
|
case map[string]interface{}:
|
||||||
|
h.flatten(i.(map[string]interface{}), fields, id+k)
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sends an HTTP request to the server using the GrayLog object's HTTPClient.
|
||||||
|
// Parameters:
|
||||||
|
// serverURL: endpoint to send request to
|
||||||
|
//
|
||||||
|
// Returns:
|
||||||
|
// string: body of the response
|
||||||
|
// error : Any error that may have occurred
|
||||||
|
func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) {
|
||||||
|
headers := map[string]string{
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Accept": "application/json",
|
||||||
|
}
|
||||||
|
method := "GET"
|
||||||
|
content := bytes.NewBufferString("")
|
||||||
|
headers["Authorization"] = "Basic " + base64.URLEncoding.EncodeToString([]byte(h.Username+":"+h.Password))
|
||||||
|
// Prepare URL
|
||||||
|
requestURL, err := url.Parse(serverURL)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
|
||||||
|
}
|
||||||
|
if strings.Contains(requestURL.String(), "multiple") {
|
||||||
|
m := &Messagebody{Metrics: h.Metrics}
|
||||||
|
http_body, err := json.Marshal(m)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, fmt.Errorf("Invalid list of Metrics %s", h.Metrics)
|
||||||
|
}
|
||||||
|
method = "POST"
|
||||||
|
content = bytes.NewBuffer(http_body)
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(method, requestURL.String(), content)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
// Add header parameters
|
||||||
|
for k, v := range headers {
|
||||||
|
req.Header.Add(k, v)
|
||||||
|
}
|
||||||
|
start := time.Now()
|
||||||
|
resp, err := h.client.MakeRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer resp.Body.Close()
|
||||||
|
responseTime := time.Since(start).Seconds()
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return string(body), responseTime, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process response
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||||
|
requestURL.String(),
|
||||||
|
resp.StatusCode,
|
||||||
|
http.StatusText(resp.StatusCode),
|
||||||
|
http.StatusOK,
|
||||||
|
http.StatusText(http.StatusOK))
|
||||||
|
return string(body), responseTime, err
|
||||||
|
}
|
||||||
|
return string(body), responseTime, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers the plugin with telegraf under the "graylog" name,
// wiring in the real HTTP client implementation.
func init() {
	inputs.Add("graylog", func() telegraf.Input {
		return &GrayLog{
			client: &RealHTTPClient{},
		}
	})
}
|
||||||
199
plugins/inputs/graylog/graylog_test.go
Normal file
199
plugins/inputs/graylog/graylog_test.go
Normal file
@@ -0,0 +1,199 @@
|
|||||||
|
package graylog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// validJSON is a canned GrayLog metrics response containing two flat
// gauges and one nested timer (exercises the flatten logic).
const validJSON = `
  {
    "total": 3,
    "metrics": [
      {
        "full_name": "jvm.cl.loaded",
        "metric": {
          "value": 18910
        },
        "name": "loaded",
        "type": "gauge"
      },
      {
        "full_name": "jvm.memory.pools.Metaspace.committed",
        "metric": {
          "value": 108040192
        },
        "name": "committed",
        "type": "gauge"
      },
      {
        "full_name": "org.graylog2.shared.journal.KafkaJournal.writeTime",
        "metric": {
          "time": {
            "min": 99
          },
          "rate": {
            "total": 10,
            "mean": 2
          },
          "duration_unit": "microseconds",
          "rate_unit": "events/second"
        },
        "name": "writeTime",
        "type": "hdrtimer"
      }
    ]
  }`
|
||||||
|
|
||||||
|
// validTags maps each measurement in validJSON to the tag set the
// plugin is expected to emit for it.
var validTags = map[string]map[string]string{
	"jvm.cl.loaded": {
		"name":   "loaded",
		"type":   "gauge",
		"port":   "12900",
		"server": "localhost",
	},
	"jvm.memory.pools.Metaspace.committed": {
		"name":   "committed",
		"type":   "gauge",
		"port":   "12900",
		"server": "localhost",
	},
	"org.graylog2.shared.journal.KafkaJournal.writeTime": {
		"name":   "writeTime",
		"type":   "hdrtimer",
		"port":   "12900",
		"server": "localhost",
	},
}
|
||||||
|
|
||||||
|
// expectedFields maps each measurement in validJSON to the flattened
// field set the plugin should produce (nested keys joined with "_";
// non-numeric leaves like duration_unit are dropped).
var expectedFields = map[string]map[string]interface{}{
	"jvm.cl.loaded": {
		"value": float64(18910),
	},
	"jvm.memory.pools.Metaspace.committed": {
		"value": float64(108040192),
	},
	"org.graylog2.shared.journal.KafkaJournal.writeTime": {
		"time_min":   float64(99),
		"rate_total": float64(10),
		"rate_mean":  float64(2),
	},
}
|
||||||
|
|
||||||
|
// invalidJSON triggers the json.Unmarshal error path.
const invalidJSON = "I don't think this is JSON"

// empty triggers the same error path via a zero-length body.
const empty = ""
|
||||||
|
|
||||||
|
// mockHTTPClient is a test double for HTTPClient that always returns a
// canned body and status code.
type mockHTTPClient struct {
	responseBody string
	statusCode   int
}
|
||||||
|
|
||||||
|
// Mock implementation of MakeRequest. Usually returns an http.Response with
|
||||||
|
// hard-coded responseBody and statusCode. However, if the request uses a
|
||||||
|
// nonstandard method, it uses status code 405 (method not allowed)
|
||||||
|
func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||||
|
resp := http.Response{}
|
||||||
|
resp.StatusCode = c.statusCode
|
||||||
|
|
||||||
|
// basic error checking on request method
|
||||||
|
allowedMethods := []string{"GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"}
|
||||||
|
methodValid := false
|
||||||
|
for _, method := range allowedMethods {
|
||||||
|
if req.Method == method {
|
||||||
|
methodValid = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !methodValid {
|
||||||
|
resp.StatusCode = 405 // Method not allowed
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetHTTPClient is a no-op: the mock never uses a real *http.Client.
func (c *mockHTTPClient) SetHTTPClient(_ *http.Client) {
}

// HTTPClient returns nil; Gather treats nil as "needs initialization"
// but the mock serves responses without one.
func (c *mockHTTPClient) HTTPClient() *http.Client {
	return nil
}
|
||||||
|
|
||||||
|
// Generates a pointer to an HttpJson object that uses a mock HTTP client.
|
||||||
|
// Parameters:
|
||||||
|
// response : Body of the response that the mock HTTP client should return
|
||||||
|
// statusCode: HTTP status code the mock HTTP client should return
|
||||||
|
//
|
||||||
|
// Returns:
|
||||||
|
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
|
||||||
|
func genMockGrayLog(response string, statusCode int) []*GrayLog {
|
||||||
|
return []*GrayLog{
|
||||||
|
&GrayLog{
|
||||||
|
client: &mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||||
|
Servers: []string{
|
||||||
|
"http://localhost:12900/system/metrics/multiple",
|
||||||
|
},
|
||||||
|
Metrics: []string{
|
||||||
|
"jvm.memory.pools.Metaspace.committed",
|
||||||
|
"jvm.cl.loaded",
|
||||||
|
"org.graylog2.shared.journal.KafkaJournal.writeTime",
|
||||||
|
},
|
||||||
|
Username: "test",
|
||||||
|
Password: "test",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the proper values are ignored or collected
|
||||||
|
func TestNormalResponse(t *testing.T) {
|
||||||
|
graylog := genMockGrayLog(validJSON, 200)
|
||||||
|
|
||||||
|
for _, service := range graylog {
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := service.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
for k, v := range expectedFields {
|
||||||
|
acc.AssertContainsTaggedFields(t, k, v, validTags[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test response to HTTP 500
|
||||||
|
func TestHttpJson500(t *testing.T) {
|
||||||
|
graylog := genMockGrayLog(validJSON, 500)
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := graylog[0].Gather(&acc)
|
||||||
|
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
assert.Equal(t, 0, acc.NFields())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test response to malformed JSON
|
||||||
|
func TestHttpJsonBadJson(t *testing.T) {
|
||||||
|
graylog := genMockGrayLog(invalidJSON, 200)
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := graylog[0].Gather(&acc)
|
||||||
|
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
assert.Equal(t, 0, acc.NFields())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test response to empty string as response objectgT
|
||||||
|
func TestHttpJsonEmptyResponse(t *testing.T) {
|
||||||
|
graylog := genMockGrayLog(empty, 200)
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := graylog[0].Gather(&acc)
|
||||||
|
|
||||||
|
assert.NotNil(t, err)
|
||||||
|
assert.Equal(t, 0, acc.NFields())
|
||||||
|
}
|
||||||
@@ -3,14 +3,18 @@ package haproxy
|
|||||||
import (
|
import (
|
||||||
"encoding/csv"
|
"encoding/csv"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
|
||||||
"io"
|
"io"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/errchan"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
|
//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
|
||||||
@@ -47,7 +51,7 @@ const (
|
|||||||
HF_THROTTLE = 29 //29. throttle [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
|
HF_THROTTLE = 29 //29. throttle [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
|
||||||
HF_LBTOT = 30 //30. lbtot [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected.
|
HF_LBTOT = 30 //30. lbtot [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected.
|
||||||
HF_TRACKED = 31 //31. tracked [...S]: id of proxy/server if tracking is enabled.
|
HF_TRACKED = 31 //31. tracked [...S]: id of proxy/server if tracking is enabled.
|
||||||
HF_TYPE = 32 //32. type [LFBS]: (0 = frontend, 1 = backend, 2 = server, 3 = socket/listener)
|
HF_TYPE = 32 //32. type [LFBS]: (0 = frontend, 1 = backend, 2 = server, 3 = socket/listener)
|
||||||
HF_RATE = 33 //33. rate [.FBS]: number of sessions per second over last elapsed second
|
HF_RATE = 33 //33. rate [.FBS]: number of sessions per second over last elapsed second
|
||||||
HF_RATE_LIM = 34 //34. rate_lim [.F..]: configured limit on new sessions per second
|
HF_RATE_LIM = 34 //34. rate_lim [.F..]: configured limit on new sessions per second
|
||||||
HF_RATE_MAX = 35 //35. rate_max [.FBS]: max number of new sessions per second
|
HF_RATE_MAX = 35 //35. rate_max [.FBS]: max number of new sessions per second
|
||||||
@@ -91,8 +95,8 @@ var sampleConfig = `
|
|||||||
|
|
||||||
## If no servers are specified, then default to 127.0.0.1:1936
|
## If no servers are specified, then default to 127.0.0.1:1936
|
||||||
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
|
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
|
||||||
## Or you can also use local socket(not work yet)
|
## Or you can also use local socket
|
||||||
## servers = ["socket://run/haproxy/admin.sock"]
|
## servers = ["socket:/run/haproxy/admin.sock"]
|
||||||
`
|
`
|
||||||
|
|
||||||
func (r *haproxy) SampleConfig() string {
|
func (r *haproxy) SampleConfig() string {
|
||||||
@@ -111,23 +115,49 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
|
errChan := errchan.New(len(g.Servers))
|
||||||
var outerr error
|
wg.Add(len(g.Servers))
|
||||||
|
for _, server := range g.Servers {
|
||||||
for _, serv := range g.Servers {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(serv string) {
|
go func(serv string) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
outerr = g.gatherServer(serv, acc)
|
errChan.C <- g.gatherServer(serv, acc)
|
||||||
}(serv)
|
}(server)
|
||||||
}
|
}
|
||||||
|
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
return errChan.Error()
|
||||||
|
}
|
||||||
|
|
||||||
return outerr
|
func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
|
||||||
|
var socketPath string
|
||||||
|
socketAddr := strings.Split(addr, ":")
|
||||||
|
|
||||||
|
if len(socketAddr) >= 2 {
|
||||||
|
socketPath = socketAddr[1]
|
||||||
|
} else {
|
||||||
|
socketPath = socketAddr[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := net.Dial("unix", socketPath)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not connect to socket '%s': %s", addr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, errw := c.Write([]byte("show stat\n"))
|
||||||
|
|
||||||
|
if errw != nil {
|
||||||
|
return fmt.Errorf("Could not write to socket '%s': %s", addr, errw)
|
||||||
|
}
|
||||||
|
|
||||||
|
return importCsvResult(c, acc, socketPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
|
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
|
||||||
|
if !strings.HasPrefix(addr, "http") {
|
||||||
|
return g.gatherServerSocket(addr, acc)
|
||||||
|
}
|
||||||
|
|
||||||
if g.client == nil {
|
if g.client == nil {
|
||||||
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
|
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
|
||||||
client := &http.Client{
|
client := &http.Client{
|
||||||
|
|||||||
@@ -1,17 +1,42 @@
|
|||||||
package haproxy
|
package haproxy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdata/telegraf/testutil"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type statServer struct{}
|
||||||
|
|
||||||
|
func (s statServer) serverSocket(l net.Listener) {
|
||||||
|
for {
|
||||||
|
conn, err := l.Accept()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
go func(c net.Conn) {
|
||||||
|
buf := make([]byte, 1024)
|
||||||
|
n, _ := c.Read(buf)
|
||||||
|
|
||||||
|
data := buf[:n]
|
||||||
|
if string(data) == "show stat\n" {
|
||||||
|
c.Write([]byte(csvOutputSample))
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
}(conn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
|
func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
|
||||||
//We create a fake server to return test data
|
//We create a fake server to return test data
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -146,6 +171,69 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
|
|||||||
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
|
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestHaproxyGeneratesMetricsUsingSocket spins up a fake haproxy admin
// socket in /tmp and verifies the plugin gathers the expected row of
// fields over the socket transport.
func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
	// Random suffix avoids collisions between concurrent test runs.
	var randomNumber int64
	binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
	sock, err := net.Listen("unix", fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber))
	if err != nil {
		t.Fatal("Cannot initialize socket ")
	}

	defer sock.Close()

	s := statServer{}
	go s.serverSocket(sock)

	r := &haproxy{
		Servers: []string{sock.Addr().String()},
	}

	var acc testutil.Accumulator

	err = r.Gather(&acc)
	require.NoError(t, err)

	// Tags/fields expected from the csvOutputSample row for be_app/host0.
	tags := map[string]string{
		"proxy":  "be_app",
		"server": sock.Addr().String(),
		"sv":     "host0",
	}

	fields := map[string]interface{}{
		"active_servers":    uint64(1),
		"backup_servers":    uint64(0),
		"bin":               uint64(510913516),
		"bout":              uint64(2193856571),
		"check_duration":    uint64(10),
		"cli_abort":         uint64(73),
		"ctime":             uint64(2),
		"downtime":          uint64(0),
		"dresp":             uint64(0),
		"econ":              uint64(0),
		"eresp":             uint64(1),
		"http_response.1xx": uint64(0),
		"http_response.2xx": uint64(119534),
		"http_response.3xx": uint64(48051),
		"http_response.4xx": uint64(2345),
		"http_response.5xx": uint64(1056),
		"lbtot":             uint64(171013),
		"qcur":              uint64(0),
		"qmax":              uint64(0),
		"qtime":             uint64(0),
		"rate":              uint64(3),
		"rate_max":          uint64(12),
		"rtime":             uint64(312),
		"scur":              uint64(1),
		"smax":              uint64(32),
		"srv_abort":         uint64(1),
		"stot":              uint64(171014),
		"ttime":             uint64(2341),
		"wredis":            uint64(0),
		"wretr":             uint64(1),
	}
	acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
}
|
||||||
|
|
||||||
//When not passing server config, we default to localhost
|
//When not passing server config, we default to localhost
|
||||||
//We just want to make sure we did request stat from localhost
|
//We just want to make sure we did request stat from localhost
|
||||||
func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
|
func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
|
||||||
|
|||||||
@@ -5,23 +5,30 @@ This input plugin will test HTTP/HTTPS connections.
|
|||||||
### Configuration:
|
### Configuration:
|
||||||
|
|
||||||
```
|
```
|
||||||
# List of UDP/TCP connections you want to check
|
# HTTP/HTTPS request given an address a method and a timeout
|
||||||
[[inputs.http_response]]
|
[[inputs.http_response]]
|
||||||
## Server address (default http://localhost)
|
## Server address (default http://localhost)
|
||||||
address = "http://github.com"
|
address = "http://github.com"
|
||||||
## Set response_timeout (default 5 seconds)
|
## Set response_timeout (default 5 seconds)
|
||||||
response_timeout = 5
|
response_timeout = "5s"
|
||||||
## HTTP Request Method
|
## HTTP Request Method
|
||||||
method = "GET"
|
method = "GET"
|
||||||
## HTTP Request Headers
|
|
||||||
[inputs.http_response.headers]
|
|
||||||
Host = github.com
|
|
||||||
## Whether to follow redirects from the server (defaults to false)
|
## Whether to follow redirects from the server (defaults to false)
|
||||||
follow_redirects = true
|
follow_redirects = true
|
||||||
|
## HTTP Request Headers (all values must be strings)
|
||||||
|
# [inputs.http_response.headers]
|
||||||
|
# Host = "github.com"
|
||||||
## Optional HTTP Request Body
|
## Optional HTTP Request Body
|
||||||
body = '''
|
# body = '''
|
||||||
{'fake':'data'}
|
# {'fake':'data'}
|
||||||
'''
|
# '''
|
||||||
|
|
||||||
|
## Optional SSL Config
|
||||||
|
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# ssl_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use SSL but skip chain & host verification
|
||||||
|
# insecure_skip_verify = false
|
||||||
```
|
```
|
||||||
|
|
||||||
### Measurements & Fields:
|
### Measurements & Fields:
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -17,9 +18,18 @@ type HTTPResponse struct {
|
|||||||
Address string
|
Address string
|
||||||
Body string
|
Body string
|
||||||
Method string
|
Method string
|
||||||
ResponseTimeout int
|
ResponseTimeout internal.Duration
|
||||||
Headers map[string]string
|
Headers map[string]string
|
||||||
FollowRedirects bool
|
FollowRedirects bool
|
||||||
|
|
||||||
|
// Path to CA file
|
||||||
|
SSLCA string `toml:"ssl_ca"`
|
||||||
|
// Path to host cert file
|
||||||
|
SSLCert string `toml:"ssl_cert"`
|
||||||
|
// Path to cert key file
|
||||||
|
SSLKey string `toml:"ssl_key"`
|
||||||
|
// Use SSL but skip chain & host verification
|
||||||
|
InsecureSkipVerify bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Description returns the plugin Description
|
// Description returns the plugin Description
|
||||||
@@ -31,7 +41,7 @@ var sampleConfig = `
|
|||||||
## Server address (default http://localhost)
|
## Server address (default http://localhost)
|
||||||
address = "http://github.com"
|
address = "http://github.com"
|
||||||
## Set response_timeout (default 5 seconds)
|
## Set response_timeout (default 5 seconds)
|
||||||
response_timeout = 5
|
response_timeout = "5s"
|
||||||
## HTTP Request Method
|
## HTTP Request Method
|
||||||
method = "GET"
|
method = "GET"
|
||||||
## Whether to follow redirects from the server (defaults to false)
|
## Whether to follow redirects from the server (defaults to false)
|
||||||
@@ -43,6 +53,13 @@ var sampleConfig = `
|
|||||||
# body = '''
|
# body = '''
|
||||||
# {'fake':'data'}
|
# {'fake':'data'}
|
||||||
# '''
|
# '''
|
||||||
|
|
||||||
|
## Optional SSL Config
|
||||||
|
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# ssl_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use SSL but skip chain & host verification
|
||||||
|
# insecure_skip_verify = false
|
||||||
`
|
`
|
||||||
|
|
||||||
// SampleConfig returns the plugin SampleConfig
|
// SampleConfig returns the plugin SampleConfig
|
||||||
@@ -55,27 +72,27 @@ var ErrRedirectAttempted = errors.New("redirect")
|
|||||||
|
|
||||||
// CreateHttpClient creates an http client which will timeout at the specified
|
// CreateHttpClient creates an http client which will timeout at the specified
|
||||||
// timeout period and can follow redirects if specified
|
// timeout period and can follow redirects if specified
|
||||||
func CreateHttpClient(followRedirects bool, ResponseTimeout time.Duration) *http.Client {
|
func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||||
|
tlsCfg, err := internal.GetTLSConfig(
|
||||||
|
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tr := &http.Transport{
|
||||||
|
ResponseHeaderTimeout: h.ResponseTimeout.Duration,
|
||||||
|
TLSClientConfig: tlsCfg,
|
||||||
|
}
|
||||||
client := &http.Client{
|
client := &http.Client{
|
||||||
Timeout: time.Second * ResponseTimeout,
|
Transport: tr,
|
||||||
|
Timeout: h.ResponseTimeout.Duration,
|
||||||
}
|
}
|
||||||
|
|
||||||
if followRedirects == false {
|
if h.FollowRedirects == false {
|
||||||
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||||
return ErrRedirectAttempted
|
return ErrRedirectAttempted
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return client
|
return client, nil
|
||||||
}
|
|
||||||
|
|
||||||
// CreateHeaders takes a map of header strings and puts them
|
|
||||||
// into a http.Header Object
|
|
||||||
func CreateHeaders(headers map[string]string) http.Header {
|
|
||||||
httpHeaders := make(http.Header)
|
|
||||||
for key := range headers {
|
|
||||||
httpHeaders.Add(key, headers[key])
|
|
||||||
}
|
|
||||||
return httpHeaders
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HTTPGather gathers all fields and returns any errors it encounters
|
// HTTPGather gathers all fields and returns any errors it encounters
|
||||||
@@ -83,7 +100,10 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) {
|
|||||||
// Prepare fields
|
// Prepare fields
|
||||||
fields := make(map[string]interface{})
|
fields := make(map[string]interface{})
|
||||||
|
|
||||||
client := CreateHttpClient(h.FollowRedirects, time.Duration(h.ResponseTimeout))
|
client, err := h.createHttpClient()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
var body io.Reader
|
var body io.Reader
|
||||||
if h.Body != "" {
|
if h.Body != "" {
|
||||||
@@ -93,7 +113,13 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
request.Header = CreateHeaders(h.Headers)
|
|
||||||
|
for key, val := range h.Headers {
|
||||||
|
request.Header.Add(key, val)
|
||||||
|
if key == "Host" {
|
||||||
|
request.Host = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Start Timer
|
// Start Timer
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
@@ -117,8 +143,8 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) {
|
|||||||
// Gather gets all metric fields and tags and returns any errors it encounters
|
// Gather gets all metric fields and tags and returns any errors it encounters
|
||||||
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
|
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
|
||||||
// Set default values
|
// Set default values
|
||||||
if h.ResponseTimeout < 1 {
|
if h.ResponseTimeout.Duration < time.Second {
|
||||||
h.ResponseTimeout = 5
|
h.ResponseTimeout.Duration = time.Second * 5
|
||||||
}
|
}
|
||||||
// Check send and expected string
|
// Check send and expected string
|
||||||
if h.Method == "" {
|
if h.Method == "" {
|
||||||
|
|||||||
@@ -2,28 +2,17 @@ package http_response
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
)
|
|
||||||
|
|
||||||
func TestCreateHeaders(t *testing.T) {
|
"github.com/influxdata/telegraf/internal"
|
||||||
fakeHeaders := map[string]string{
|
|
||||||
"Accept": "text/plain",
|
"github.com/stretchr/testify/assert"
|
||||||
"Content-Type": "application/json",
|
"github.com/stretchr/testify/require"
|
||||||
"Cache-Control": "no-cache",
|
)
|
||||||
}
|
|
||||||
headers := CreateHeaders(fakeHeaders)
|
|
||||||
testHeaders := make(http.Header)
|
|
||||||
testHeaders.Add("Accept", "text/plain")
|
|
||||||
testHeaders.Add("Content-Type", "application/json")
|
|
||||||
testHeaders.Add("Cache-Control", "no-cache")
|
|
||||||
assert.Equal(t, testHeaders, headers)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setUpTestMux() http.Handler {
|
func setUpTestMux() http.Handler {
|
||||||
mux := http.NewServeMux()
|
mux := http.NewServeMux()
|
||||||
@@ -63,6 +52,33 @@ func setUpTestMux() http.Handler {
|
|||||||
return mux
|
return mux
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHeaders(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
cHeader := r.Header.Get("Content-Type")
|
||||||
|
assert.Equal(t, "Hello", r.Host)
|
||||||
|
assert.Equal(t, "application/json", cHeader)
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
h := &HTTPResponse{
|
||||||
|
Address: ts.URL,
|
||||||
|
Method: "GET",
|
||||||
|
ResponseTimeout: internal.Duration{Duration: time.Second * 2},
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Host": "Hello",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
fields, err := h.HTTPGather()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.NotEmpty(t, fields)
|
||||||
|
if assert.NotNil(t, fields["http_response_code"]) {
|
||||||
|
assert.Equal(t, http.StatusOK, fields["http_response_code"])
|
||||||
|
}
|
||||||
|
assert.NotNil(t, fields["response_time"])
|
||||||
|
}
|
||||||
|
|
||||||
func TestFields(t *testing.T) {
|
func TestFields(t *testing.T) {
|
||||||
mux := setUpTestMux()
|
mux := setUpTestMux()
|
||||||
ts := httptest.NewServer(mux)
|
ts := httptest.NewServer(mux)
|
||||||
@@ -72,7 +88,7 @@ func TestFields(t *testing.T) {
|
|||||||
Address: ts.URL + "/good",
|
Address: ts.URL + "/good",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -85,7 +101,6 @@ func TestFields(t *testing.T) {
|
|||||||
assert.Equal(t, http.StatusOK, fields["http_response_code"])
|
assert.Equal(t, http.StatusOK, fields["http_response_code"])
|
||||||
}
|
}
|
||||||
assert.NotNil(t, fields["response_time"])
|
assert.NotNil(t, fields["response_time"])
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRedirects(t *testing.T) {
|
func TestRedirects(t *testing.T) {
|
||||||
@@ -97,7 +112,7 @@ func TestRedirects(t *testing.T) {
|
|||||||
Address: ts.URL + "/redirect",
|
Address: ts.URL + "/redirect",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -114,7 +129,7 @@ func TestRedirects(t *testing.T) {
|
|||||||
Address: ts.URL + "/badredirect",
|
Address: ts.URL + "/badredirect",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -133,7 +148,7 @@ func TestMethod(t *testing.T) {
|
|||||||
Address: ts.URL + "/mustbepostmethod",
|
Address: ts.URL + "/mustbepostmethod",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -150,7 +165,7 @@ func TestMethod(t *testing.T) {
|
|||||||
Address: ts.URL + "/mustbepostmethod",
|
Address: ts.URL + "/mustbepostmethod",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -168,7 +183,7 @@ func TestMethod(t *testing.T) {
|
|||||||
Address: ts.URL + "/mustbepostmethod",
|
Address: ts.URL + "/mustbepostmethod",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "head",
|
Method: "head",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -191,7 +206,7 @@ func TestBody(t *testing.T) {
|
|||||||
Address: ts.URL + "/musthaveabody",
|
Address: ts.URL + "/musthaveabody",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -207,7 +222,7 @@ func TestBody(t *testing.T) {
|
|||||||
h = &HTTPResponse{
|
h = &HTTPResponse{
|
||||||
Address: ts.URL + "/musthaveabody",
|
Address: ts.URL + "/musthaveabody",
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
ResponseTimeout: 20,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
@@ -230,7 +245,7 @@ func TestTimeout(t *testing.T) {
|
|||||||
Address: ts.URL + "/twosecondnap",
|
Address: ts.URL + "/twosecondnap",
|
||||||
Body: "{ 'test': 'data'}",
|
Body: "{ 'test': 'data'}",
|
||||||
Method: "GET",
|
Method: "GET",
|
||||||
ResponseTimeout: 1,
|
ResponseTimeout: internal.Duration{Duration: time.Second * 1},
|
||||||
Headers: map[string]string{
|
Headers: map[string]string{
|
||||||
"Content-Type": "application/json",
|
"Content-Type": "application/json",
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ InfluxDB-formatted endpoints. See below for more information.
|
|||||||
## See the influxdb plugin's README for more details.
|
## See the influxdb plugin's README for more details.
|
||||||
|
|
||||||
## Multiple URLs from which to read InfluxDB-formatted JSON
|
## Multiple URLs from which to read InfluxDB-formatted JSON
|
||||||
|
## Default is "http://localhost:8086/debug/vars".
|
||||||
urls = [
|
urls = [
|
||||||
"http://localhost:8086/debug/vars"
|
"http://localhost:8086/debug/vars"
|
||||||
]
|
]
|
||||||
@@ -22,16 +23,78 @@ InfluxDB-formatted endpoints. See below for more information.
|
|||||||
|
|
||||||
### Measurements & Fields
|
### Measurements & Fields
|
||||||
|
|
||||||
|
- influxdb
|
||||||
|
- n_shards
|
||||||
- influxdb_database
|
- influxdb_database
|
||||||
- influxdb_httpd
|
- influxdb_httpd
|
||||||
- influxdb_measurement
|
- influxdb_measurement
|
||||||
- influxdb_memstats
|
- influxdb_memstats
|
||||||
|
- heap_inuse
|
||||||
|
- heap_released
|
||||||
|
- mspan_inuse
|
||||||
|
- total_alloc
|
||||||
|
- sys
|
||||||
|
- mallocs
|
||||||
|
- frees
|
||||||
|
- heap_idle
|
||||||
|
- pause_total_ns
|
||||||
|
- lookups
|
||||||
|
- heap_sys
|
||||||
|
- mcache_sys
|
||||||
|
- next_gc
|
||||||
|
- gcc_pu_fraction
|
||||||
|
- other_sys
|
||||||
|
- alloc
|
||||||
|
- stack_inuse
|
||||||
|
- stack_sys
|
||||||
|
- buck_hash_sys
|
||||||
|
- gc_sys
|
||||||
|
- num_gc
|
||||||
|
- heap_alloc
|
||||||
|
- heap_objects
|
||||||
|
- mspan_sys
|
||||||
|
- mcache_inuse
|
||||||
|
- last_gc
|
||||||
- influxdb_shard
|
- influxdb_shard
|
||||||
- influxdb_subscriber
|
- influxdb_subscriber
|
||||||
- influxdb_tsm1_cache
|
- influxdb_tsm1_cache
|
||||||
- influxdb_tsm1_wal
|
- influxdb_tsm1_wal
|
||||||
- influxdb_write
|
- influxdb_write
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
|
||||||
|
```
|
||||||
|
telegraf -config ~/ws/telegraf.conf -input-filter influxdb -test
|
||||||
|
* Plugin: influxdb, Collection 1
|
||||||
|
> influxdb_database,database=_internal,host=tyrion,url=http://localhost:8086/debug/vars numMeasurements=10,numSeries=29 1463590500247354636
|
||||||
|
> influxdb_httpd,bind=:8086,host=tyrion,url=http://localhost:8086/debug/vars req=7,reqActive=1,reqDurationNs=14227734 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=database,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=httpd,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=measurement,url=http://localhost:8086/debug/vars numSeries=10 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=runtime,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=shard,url=http://localhost:8086/debug/vars numSeries=4 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=subscriber,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_cache,url=http://localhost:8086/debug/vars numSeries=4 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_filestore,url=http://localhost:8086/debug/vars numSeries=2 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_wal,url=http://localhost:8086/debug/vars numSeries=4 1463590500247354636
|
||||||
|
> influxdb_measurement,database=_internal,host=tyrion,measurement=write,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636
|
||||||
|
> influxdb_memstats,host=tyrion,url=http://localhost:8086/debug/vars alloc=7642384i,buck_hash_sys=1463471i,frees=1169558i,gc_sys=653312i,gcc_pu_fraction=0.00003825652361068311,heap_alloc=7642384i,heap_idle=9912320i,heap_inuse=9125888i,heap_objects=48276i,heap_released=0i,heap_sys=19038208i,last_gc=1463590480877651621i,lookups=90i,mallocs=1217834i,mcache_inuse=4800i,mcache_sys=16384i,mspan_inuse=70920i,mspan_sys=81920i,next_gc=11679787i,num_gc=141i,other_sys=1244233i,pause_total_ns=24034027i,stack_inuse=884736i,stack_sys=884736i,sys=23382264i,total_alloc=679012200i 1463590500277918755
|
||||||
|
> influxdb_shard,database=_internal,engine=tsm1,host=tyrion,id=4,path=/Users/sparrc/.influxdb/data/_internal/monitor/4,retentionPolicy=monitor,url=http://localhost:8086/debug/vars fieldsCreate=65,seriesCreate=26,writePointsOk=7274,writeReq=280 1463590500247354636
|
||||||
|
> influxdb_subscriber,host=tyrion,url=http://localhost:8086/debug/vars pointsWritten=7274 1463590500247354636
|
||||||
|
> influxdb_tsm1_cache,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/1,retentionPolicy=monitor,url=http://localhost:8086/debug/vars WALCompactionTimeMs=0,cacheAgeMs=2809192,cachedBytes=0,diskBytes=0,memBytes=0,snapshotCount=0 1463590500247354636
|
||||||
|
> influxdb_tsm1_cache,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/2,retentionPolicy=monitor,url=http://localhost:8086/debug/vars WALCompactionTimeMs=0,cacheAgeMs=2809184,cachedBytes=0,diskBytes=0,memBytes=0,snapshotCount=0 1463590500247354636
|
||||||
|
> influxdb_tsm1_cache,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/3,retentionPolicy=monitor,url=http://localhost:8086/debug/vars WALCompactionTimeMs=0,cacheAgeMs=2809180,cachedBytes=0,diskBytes=0,memBytes=42368,snapshotCount=0 1463590500247354636
|
||||||
|
> influxdb_tsm1_cache,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/4,retentionPolicy=monitor,url=http://localhost:8086/debug/vars WALCompactionTimeMs=0,cacheAgeMs=2799155,cachedBytes=0,diskBytes=0,memBytes=331216,snapshotCount=0 1463590500247354636
|
||||||
|
> influxdb_tsm1_filestore,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/1,retentionPolicy=monitor,url=http://localhost:8086/debug/vars diskBytes=37892 1463590500247354636
|
||||||
|
> influxdb_tsm1_filestore,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/2,retentionPolicy=monitor,url=http://localhost:8086/debug/vars diskBytes=52907 1463590500247354636
|
||||||
|
> influxdb_tsm1_wal,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/wal/_internal/monitor/1,retentionPolicy=monitor,url=http://localhost:8086/debug/vars currentSegmentDiskBytes=0,oldSegmentsDiskBytes=0 1463590500247354636
|
||||||
|
> influxdb_tsm1_wal,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/wal/_internal/monitor/2,retentionPolicy=monitor,url=http://localhost:8086/debug/vars currentSegmentDiskBytes=0,oldSegmentsDiskBytes=0 1463590500247354636
|
||||||
|
> influxdb_tsm1_wal,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/wal/_internal/monitor/3,retentionPolicy=monitor,url=http://localhost:8086/debug/vars currentSegmentDiskBytes=0,oldSegmentsDiskBytes=65651 1463590500247354636
|
||||||
|
> influxdb_tsm1_wal,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/wal/_internal/monitor/4,retentionPolicy=monitor,url=http://localhost:8086/debug/vars currentSegmentDiskBytes=495687,oldSegmentsDiskBytes=0 1463590500247354636
|
||||||
|
> influxdb_write,host=tyrion,url=http://localhost:8086/debug/vars pointReq=7274,pointReqLocal=7274,req=280,subWriteOk=280,writeOk=280 1463590500247354636
|
||||||
|
> influxdb_shard,host=tyrion n_shards=4i 1463590500247354636
|
||||||
|
```
|
||||||
|
|
||||||
### InfluxDB-formatted endpoints
|
### InfluxDB-formatted endpoints
|
||||||
|
|
||||||
The influxdb plugin can collect InfluxDB-formatted data from JSON endpoints.
|
The influxdb plugin can collect InfluxDB-formatted data from JSON endpoints.
|
||||||
@@ -46,65 +109,3 @@ With a configuration of:
|
|||||||
"http://192.168.2.1:8086/debug/vars"
|
"http://192.168.2.1:8086/debug/vars"
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
And if 127.0.0.1 responds with this JSON:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"k1": {
|
|
||||||
"name": "fruit",
|
|
||||||
"tags": {
|
|
||||||
"kind": "apple"
|
|
||||||
},
|
|
||||||
"values": {
|
|
||||||
"inventory": 371,
|
|
||||||
"sold": 112
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"k2": {
|
|
||||||
"name": "fruit",
|
|
||||||
"tags": {
|
|
||||||
"kind": "banana"
|
|
||||||
},
|
|
||||||
"values": {
|
|
||||||
"inventory": 1000,
|
|
||||||
"sold": 403
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And if 192.168.2.1 responds like so:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"k3": {
|
|
||||||
"name": "transactions",
|
|
||||||
"tags": {},
|
|
||||||
"values": {
|
|
||||||
"total": 100,
|
|
||||||
"balance": 184.75
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Then the collected metrics will be:
|
|
||||||
|
|
||||||
```
|
|
||||||
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='apple' inventory=371.0,sold=112.0
|
|
||||||
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='banana' inventory=1000.0,sold=403.0
|
|
||||||
|
|
||||||
influxdb_transactions,url='http://192.168.2.1:8086/debug/vars' total=100.0,balance=184.75
|
|
||||||
```
|
|
||||||
|
|
||||||
There are two important details to note about the collected metrics:
|
|
||||||
|
|
||||||
1. Even though the values in JSON are being displayed as integers,
|
|
||||||
the metrics are reported as floats.
|
|
||||||
JSON encoders usually don't print the fractional part for round floats.
|
|
||||||
Because you cannot change the type of an existing field in InfluxDB,
|
|
||||||
we assume all numbers are floats.
|
|
||||||
|
|
||||||
2. The top-level keys' names (in the example above, `"k1"`, `"k2"`, and `"k3"`)
|
|
||||||
are not considered when recording the metrics.
|
|
||||||
|
|||||||
@@ -28,6 +28,7 @@ func (*InfluxDB) SampleConfig() string {
|
|||||||
## See the influxdb plugin's README for more details.
|
## See the influxdb plugin's README for more details.
|
||||||
|
|
||||||
## Multiple URLs from which to read InfluxDB-formatted JSON
|
## Multiple URLs from which to read InfluxDB-formatted JSON
|
||||||
|
## Default is "http://localhost:8086/debug/vars".
|
||||||
urls = [
|
urls = [
|
||||||
"http://localhost:8086/debug/vars"
|
"http://localhost:8086/debug/vars"
|
||||||
]
|
]
|
||||||
@@ -35,6 +36,9 @@ func (*InfluxDB) SampleConfig() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
|
func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if len(i.URLs) == 0 {
|
||||||
|
i.URLs = []string{"http://localhost:8086/debug/vars"}
|
||||||
|
}
|
||||||
errorChannel := make(chan error, len(i.URLs))
|
errorChannel := make(chan error, len(i.URLs))
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
@@ -120,6 +124,9 @@ func (i *InfluxDB) gatherURL(
|
|||||||
acc telegraf.Accumulator,
|
acc telegraf.Accumulator,
|
||||||
url string,
|
url string,
|
||||||
) error {
|
) error {
|
||||||
|
shardCounter := 0
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
resp, err := client.Get(url)
|
resp, err := client.Get(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -154,43 +161,45 @@ func (i *InfluxDB) gatherURL(
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if key.(string) == "memstats" {
|
if keyStr, ok := key.(string); ok {
|
||||||
var m memstats
|
if keyStr == "memstats" {
|
||||||
if err := dec.Decode(&m); err != nil {
|
var m memstats
|
||||||
continue
|
if err := dec.Decode(&m); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
acc.AddFields("influxdb_memstats",
|
||||||
|
map[string]interface{}{
|
||||||
|
"alloc": m.Alloc,
|
||||||
|
"total_alloc": m.TotalAlloc,
|
||||||
|
"sys": m.Sys,
|
||||||
|
"lookups": m.Lookups,
|
||||||
|
"mallocs": m.Mallocs,
|
||||||
|
"frees": m.Frees,
|
||||||
|
"heap_alloc": m.HeapAlloc,
|
||||||
|
"heap_sys": m.HeapSys,
|
||||||
|
"heap_idle": m.HeapIdle,
|
||||||
|
"heap_inuse": m.HeapInuse,
|
||||||
|
"heap_released": m.HeapReleased,
|
||||||
|
"heap_objects": m.HeapObjects,
|
||||||
|
"stack_inuse": m.StackInuse,
|
||||||
|
"stack_sys": m.StackSys,
|
||||||
|
"mspan_inuse": m.MSpanInuse,
|
||||||
|
"mspan_sys": m.MSpanSys,
|
||||||
|
"mcache_inuse": m.MCacheInuse,
|
||||||
|
"mcache_sys": m.MCacheSys,
|
||||||
|
"buck_hash_sys": m.BuckHashSys,
|
||||||
|
"gc_sys": m.GCSys,
|
||||||
|
"other_sys": m.OtherSys,
|
||||||
|
"next_gc": m.NextGC,
|
||||||
|
"last_gc": m.LastGC,
|
||||||
|
"pause_total_ns": m.PauseTotalNs,
|
||||||
|
"num_gc": m.NumGC,
|
||||||
|
"gcc_pu_fraction": m.GCCPUFraction,
|
||||||
|
},
|
||||||
|
map[string]string{
|
||||||
|
"url": url,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
acc.AddFields("influxdb_memstats",
|
|
||||||
map[string]interface{}{
|
|
||||||
"alloc": m.Alloc,
|
|
||||||
"total_alloc": m.TotalAlloc,
|
|
||||||
"sys": m.Sys,
|
|
||||||
"lookups": m.Lookups,
|
|
||||||
"mallocs": m.Mallocs,
|
|
||||||
"frees": m.Frees,
|
|
||||||
"heap_alloc": m.HeapAlloc,
|
|
||||||
"heap_sys": m.HeapSys,
|
|
||||||
"heap_idle": m.HeapIdle,
|
|
||||||
"heap_inuse": m.HeapInuse,
|
|
||||||
"heap_released": m.HeapReleased,
|
|
||||||
"heap_objects": m.HeapObjects,
|
|
||||||
"stack_inuse": m.StackInuse,
|
|
||||||
"stack_sys": m.StackSys,
|
|
||||||
"mspan_inuse": m.MSpanInuse,
|
|
||||||
"mspan_sys": m.MSpanSys,
|
|
||||||
"mcache_inuse": m.MCacheInuse,
|
|
||||||
"mcache_sys": m.MCacheSys,
|
|
||||||
"buck_hash_sys": m.BuckHashSys,
|
|
||||||
"gc_sys": m.GCSys,
|
|
||||||
"other_sys": m.OtherSys,
|
|
||||||
"next_gc": m.NextGC,
|
|
||||||
"last_gc": m.LastGC,
|
|
||||||
"pause_total_ns": m.PauseTotalNs,
|
|
||||||
"num_gc": m.NumGC,
|
|
||||||
"gcc_pu_fraction": m.GCCPUFraction,
|
|
||||||
},
|
|
||||||
map[string]string{
|
|
||||||
"url": url,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Attempt to parse a whole object into a point.
|
// Attempt to parse a whole object into a point.
|
||||||
@@ -207,6 +216,10 @@ func (i *InfluxDB) gatherURL(
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if p.Name == "shard" {
|
||||||
|
shardCounter++
|
||||||
|
}
|
||||||
|
|
||||||
// Add a tag to indicate the source of the data.
|
// Add a tag to indicate the source of the data.
|
||||||
p.Tags["url"] = url
|
p.Tags["url"] = url
|
||||||
|
|
||||||
@@ -214,9 +227,18 @@ func (i *InfluxDB) gatherURL(
|
|||||||
"influxdb_"+p.Name,
|
"influxdb_"+p.Name,
|
||||||
p.Values,
|
p.Values,
|
||||||
p.Tags,
|
p.Tags,
|
||||||
|
now,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
acc.AddFields("influxdb",
|
||||||
|
map[string]interface{}{
|
||||||
|
"n_shards": shardCounter,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ func TestBasic(t *testing.T) {
|
|||||||
var acc testutil.Accumulator
|
var acc testutil.Accumulator
|
||||||
require.NoError(t, plugin.Gather(&acc))
|
require.NoError(t, plugin.Gather(&acc))
|
||||||
|
|
||||||
require.Len(t, acc.Metrics, 2)
|
require.Len(t, acc.Metrics, 3)
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
// JSON will truncate floats to integer representations.
|
// JSON will truncate floats to integer representations.
|
||||||
// Since there's no distinction in JSON, we can't assume it's an int.
|
// Since there's no distinction in JSON, we can't assume it's an int.
|
||||||
@@ -50,6 +50,11 @@ func TestBasic(t *testing.T) {
|
|||||||
"url": fakeServer.URL + "/endpoint",
|
"url": fakeServer.URL + "/endpoint",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "influxdb_bar", fields, tags)
|
acc.AssertContainsTaggedFields(t, "influxdb_bar", fields, tags)
|
||||||
|
|
||||||
|
acc.AssertContainsTaggedFields(t, "influxdb",
|
||||||
|
map[string]interface{}{
|
||||||
|
"n_shards": 0,
|
||||||
|
}, map[string]string{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInfluxDB(t *testing.T) {
|
func TestInfluxDB(t *testing.T) {
|
||||||
@@ -69,7 +74,7 @@ func TestInfluxDB(t *testing.T) {
|
|||||||
var acc testutil.Accumulator
|
var acc testutil.Accumulator
|
||||||
require.NoError(t, plugin.Gather(&acc))
|
require.NoError(t, plugin.Gather(&acc))
|
||||||
|
|
||||||
require.Len(t, acc.Metrics, 33)
|
require.Len(t, acc.Metrics, 34)
|
||||||
|
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"heap_inuse": int64(18046976),
|
"heap_inuse": int64(18046976),
|
||||||
@@ -104,6 +109,11 @@ func TestInfluxDB(t *testing.T) {
|
|||||||
"url": fakeInfluxServer.URL + "/endpoint",
|
"url": fakeInfluxServer.URL + "/endpoint",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "influxdb_memstats", fields, tags)
|
acc.AssertContainsTaggedFields(t, "influxdb_memstats", fields, tags)
|
||||||
|
|
||||||
|
acc.AssertContainsTaggedFields(t, "influxdb",
|
||||||
|
map[string]interface{}{
|
||||||
|
"n_shards": 1,
|
||||||
|
}, map[string]string{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestErrorHandling(t *testing.T) {
|
func TestErrorHandling(t *testing.T) {
|
||||||
|
|||||||
@@ -1,10 +1,12 @@
|
|||||||
package ipmi_sensor
|
package ipmi_sensor
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CommandRunner struct{}
|
type CommandRunner struct{}
|
||||||
@@ -18,21 +20,16 @@ func (t CommandRunner) cmd(conn *Connection, args ...string) *exec.Cmd {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return exec.Command(path, opts...)
|
return exec.Command(path, opts...)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t CommandRunner) Run(conn *Connection, args ...string) (string, error) {
|
func (t CommandRunner) Run(conn *Connection, args ...string) (string, error) {
|
||||||
cmd := t.cmd(conn, args...)
|
cmd := t.cmd(conn, args...)
|
||||||
var stdout bytes.Buffer
|
|
||||||
var stderr bytes.Buffer
|
|
||||||
cmd.Stdout = &stdout
|
|
||||||
cmd.Stderr = &stderr
|
|
||||||
|
|
||||||
err := cmd.Run()
|
output, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("run %s %s: %s (%s)",
|
return "", fmt.Errorf("run %s %s: %s (%s)",
|
||||||
cmd.Path, strings.Join(cmd.Args, " "), stderr.String(), err)
|
cmd.Path, strings.Join(cmd.Args, " "), string(output), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return stdout.String(), err
|
return string(output), err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,15 +3,27 @@
|
|||||||
#### Configuration
|
#### Configuration
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
|
# Read JMX metrics through Jolokia
|
||||||
[[inputs.jolokia]]
|
[[inputs.jolokia]]
|
||||||
## This is the context root used to compose the jolokia url
|
## This is the context root used to compose the jolokia url
|
||||||
context = "/jolokia/read"
|
context = "/jolokia"
|
||||||
|
|
||||||
|
## This specifies the mode used
|
||||||
|
# mode = "proxy"
|
||||||
|
#
|
||||||
|
## When in proxy mode this section is used to specify further
|
||||||
|
## proxy address configurations.
|
||||||
|
## Remember to change host address to fit your environment.
|
||||||
|
# [inputs.jolokia.proxy]
|
||||||
|
# host = "127.0.0.1"
|
||||||
|
# port = "8080"
|
||||||
|
|
||||||
|
|
||||||
## List of servers exposing jolokia read service
|
## List of servers exposing jolokia read service
|
||||||
[[inputs.jolokia.servers]]
|
[[inputs.jolokia.servers]]
|
||||||
name = "stable"
|
name = "as-server-01"
|
||||||
host = "192.168.103.2"
|
host = "127.0.0.1"
|
||||||
port = "8180"
|
port = "8080"
|
||||||
# username = "myuser"
|
# username = "myuser"
|
||||||
# password = "mypassword"
|
# password = "mypassword"
|
||||||
|
|
||||||
@@ -21,25 +33,29 @@
|
|||||||
## This collect all heap memory usage metrics.
|
## This collect all heap memory usage metrics.
|
||||||
[[inputs.jolokia.metrics]]
|
[[inputs.jolokia.metrics]]
|
||||||
name = "heap_memory_usage"
|
name = "heap_memory_usage"
|
||||||
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
|
mbean = "java.lang:type=Memory"
|
||||||
|
attribute = "HeapMemoryUsage"
|
||||||
|
|
||||||
## This collect thread counts metrics.
|
## This collect thread counts metrics.
|
||||||
[[inputs.jolokia.metrics]]
|
[[inputs.jolokia.metrics]]
|
||||||
name = "thread_count"
|
name = "thread_count"
|
||||||
jmx = "/java.lang:type=Threading/TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
|
mbean = "java.lang:type=Threading"
|
||||||
|
attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
|
||||||
|
|
||||||
## This collect number of class loaded/unloaded counts metrics.
|
## This collect number of class loaded/unloaded counts metrics.
|
||||||
[[inputs.jolokia.metrics]]
|
[[inputs.jolokia.metrics]]
|
||||||
name = "class_count"
|
name = "class_count"
|
||||||
jmx = "/java.lang:type=ClassLoading/LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
|
mbean = "java.lang:type=ClassLoading"
|
||||||
|
attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Description
|
#### Description
|
||||||
|
|
||||||
The Jolokia plugin collects JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics
|
The Jolokia plugin collects JVM metrics exposed as MBean's attributes through
|
||||||
are collected for each server configured.
|
jolokia REST endpoint. All metrics are collected for each server configured.
|
||||||
|
|
||||||
See: https://jolokia.org/
|
See: https://jolokia.org/
|
||||||
|
|
||||||
# Measurements:
|
# Measurements:
|
||||||
Jolokia plugin produces one measure for each metric configured, adding Server's `name`, `host` and `port` as tags.
|
Jolokia plugin produces one measure for each metric configured,
|
||||||
|
adding Server's `jolokia_name`, `jolokia_host` and `jolokia_port` as tags.
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package jolokia
|
package jolokia
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -22,8 +23,10 @@ type Server struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Metric struct {
|
type Metric struct {
|
||||||
Name string
|
Name string
|
||||||
Jmx string
|
Mbean string
|
||||||
|
Attribute string
|
||||||
|
Path string
|
||||||
}
|
}
|
||||||
|
|
||||||
type JolokiaClient interface {
|
type JolokiaClient interface {
|
||||||
@@ -41,20 +44,32 @@ func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error
|
|||||||
type Jolokia struct {
|
type Jolokia struct {
|
||||||
jClient JolokiaClient
|
jClient JolokiaClient
|
||||||
Context string
|
Context string
|
||||||
|
Mode string
|
||||||
Servers []Server
|
Servers []Server
|
||||||
Metrics []Metric
|
Metrics []Metric
|
||||||
|
Proxy Server
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j *Jolokia) SampleConfig() string {
|
const sampleConfig = `
|
||||||
return `
|
|
||||||
## This is the context root used to compose the jolokia url
|
## This is the context root used to compose the jolokia url
|
||||||
context = "/jolokia/read"
|
context = "/jolokia"
|
||||||
|
|
||||||
|
## This specifies the mode used
|
||||||
|
# mode = "proxy"
|
||||||
|
#
|
||||||
|
## When in proxy mode this section is used to specify further
|
||||||
|
## proxy address configurations.
|
||||||
|
## Remember to change host address to fit your environment.
|
||||||
|
# [inputs.jolokia.proxy]
|
||||||
|
# host = "127.0.0.1"
|
||||||
|
# port = "8080"
|
||||||
|
|
||||||
|
|
||||||
## List of servers exposing jolokia read service
|
## List of servers exposing jolokia read service
|
||||||
[[inputs.jolokia.servers]]
|
[[inputs.jolokia.servers]]
|
||||||
name = "stable"
|
name = "as-server-01"
|
||||||
host = "192.168.103.2"
|
host = "127.0.0.1"
|
||||||
port = "8180"
|
port = "8080"
|
||||||
# username = "myuser"
|
# username = "myuser"
|
||||||
# password = "mypassword"
|
# password = "mypassword"
|
||||||
|
|
||||||
@@ -64,30 +79,31 @@ func (j *Jolokia) SampleConfig() string {
|
|||||||
## This collect all heap memory usage metrics.
|
## This collect all heap memory usage metrics.
|
||||||
[[inputs.jolokia.metrics]]
|
[[inputs.jolokia.metrics]]
|
||||||
name = "heap_memory_usage"
|
name = "heap_memory_usage"
|
||||||
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
|
mbean = "java.lang:type=Memory"
|
||||||
|
attribute = "HeapMemoryUsage"
|
||||||
|
|
||||||
## This collect thread counts metrics.
|
## This collect thread counts metrics.
|
||||||
[[inputs.jolokia.metrics]]
|
[[inputs.jolokia.metrics]]
|
||||||
name = "thread_count"
|
name = "thread_count"
|
||||||
jmx = "/java.lang:type=Threading/TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
|
mbean = "java.lang:type=Threading"
|
||||||
|
attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
|
||||||
|
|
||||||
## This collect number of class loaded/unloaded counts metrics.
|
## This collect number of class loaded/unloaded counts metrics.
|
||||||
[[inputs.jolokia.metrics]]
|
[[inputs.jolokia.metrics]]
|
||||||
name = "class_count"
|
name = "class_count"
|
||||||
jmx = "/java.lang:type=ClassLoading/LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
|
mbean = "java.lang:type=ClassLoading"
|
||||||
|
attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
|
||||||
`
|
`
|
||||||
|
|
||||||
|
func (j *Jolokia) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j *Jolokia) Description() string {
|
func (j *Jolokia) Description() string {
|
||||||
return "Read JMX metrics through Jolokia"
|
return "Read JMX metrics through Jolokia"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
|
func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
|
||||||
// Create + send request
|
|
||||||
req, err := http.NewRequest("GET", requestUrl.String(), nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := j.jClient.MakeRequest(req)
|
resp, err := j.jClient.MakeRequest(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -98,7 +114,7 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
|
|||||||
// Process response
|
// Process response
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||||
requestUrl,
|
req.RequestURI,
|
||||||
resp.StatusCode,
|
resp.StatusCode,
|
||||||
http.StatusText(resp.StatusCode),
|
http.StatusText(resp.StatusCode),
|
||||||
http.StatusOK,
|
http.StatusOK,
|
||||||
@@ -118,51 +134,133 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
|
|||||||
return nil, errors.New("Error decoding JSON response")
|
return nil, errors.New("Error decoding JSON response")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if status, ok := jsonOut["status"]; ok {
|
||||||
|
if status != float64(200) {
|
||||||
|
return nil, fmt.Errorf("Not expected status value in response body: %3.f",
|
||||||
|
status)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("Missing status in response body")
|
||||||
|
}
|
||||||
|
|
||||||
return jsonOut, nil
|
return jsonOut, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, error) {
|
||||||
|
var jolokiaUrl *url.URL
|
||||||
|
context := j.Context // Usually "/jolokia"
|
||||||
|
|
||||||
|
// Create bodyContent
|
||||||
|
bodyContent := map[string]interface{}{
|
||||||
|
"type": "read",
|
||||||
|
"mbean": metric.Mbean,
|
||||||
|
}
|
||||||
|
|
||||||
|
if metric.Attribute != "" {
|
||||||
|
bodyContent["attribute"] = metric.Attribute
|
||||||
|
if metric.Path != "" {
|
||||||
|
bodyContent["path"] = metric.Path
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add target, only in proxy mode
|
||||||
|
if j.Mode == "proxy" {
|
||||||
|
serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi",
|
||||||
|
server.Host, server.Port)
|
||||||
|
|
||||||
|
target := map[string]string{
|
||||||
|
"url": serviceUrl,
|
||||||
|
}
|
||||||
|
|
||||||
|
if server.Username != "" {
|
||||||
|
target["user"] = server.Username
|
||||||
|
}
|
||||||
|
|
||||||
|
if server.Password != "" {
|
||||||
|
target["password"] = server.Password
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyContent["target"] = target
|
||||||
|
|
||||||
|
proxy := j.Proxy
|
||||||
|
|
||||||
|
// Prepare ProxyURL
|
||||||
|
proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if proxy.Username != "" || proxy.Password != "" {
|
||||||
|
proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password)
|
||||||
|
}
|
||||||
|
|
||||||
|
jolokiaUrl = proxyUrl
|
||||||
|
|
||||||
|
} else {
|
||||||
|
serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if server.Username != "" || server.Password != "" {
|
||||||
|
serverUrl.User = url.UserPassword(server.Username, server.Password)
|
||||||
|
}
|
||||||
|
|
||||||
|
jolokiaUrl = serverUrl
|
||||||
|
}
|
||||||
|
|
||||||
|
requestBody, err := json.Marshal(bodyContent)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", jolokiaUrl.String(), bytes.NewBuffer(requestBody))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Add("Content-type", "application/json")
|
||||||
|
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
|
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
|
||||||
context := j.Context //"/jolokia/read"
|
|
||||||
servers := j.Servers
|
servers := j.Servers
|
||||||
metrics := j.Metrics
|
metrics := j.Metrics
|
||||||
tags := make(map[string]string)
|
tags := make(map[string]string)
|
||||||
|
|
||||||
for _, server := range servers {
|
for _, server := range servers {
|
||||||
tags["server"] = server.Name
|
tags["jolokia_name"] = server.Name
|
||||||
tags["port"] = server.Port
|
tags["jolokia_port"] = server.Port
|
||||||
tags["host"] = server.Host
|
tags["jolokia_host"] = server.Host
|
||||||
fields := make(map[string]interface{})
|
fields := make(map[string]interface{})
|
||||||
|
|
||||||
for _, metric := range metrics {
|
for _, metric := range metrics {
|
||||||
|
|
||||||
measurement := metric.Name
|
measurement := metric.Name
|
||||||
jmxPath := metric.Jmx
|
|
||||||
|
|
||||||
// Prepare URL
|
req, err := j.prepareRequest(server, metric)
|
||||||
requestUrl, err := url.Parse("http://" + server.Host + ":" +
|
|
||||||
server.Port + context + jmxPath)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if server.Username != "" || server.Password != "" {
|
|
||||||
requestUrl.User = url.UserPassword(server.Username, server.Password)
|
|
||||||
}
|
|
||||||
|
|
||||||
out, _ := j.getAttr(requestUrl)
|
out, err := j.doRequest(req)
|
||||||
|
|
||||||
if values, ok := out["value"]; ok {
|
if err != nil {
|
||||||
switch t := values.(type) {
|
fmt.Printf("Error handling response: %s\n", err)
|
||||||
case map[string]interface{}:
|
|
||||||
for k, v := range t {
|
|
||||||
fields[measurement+"_"+k] = v
|
|
||||||
}
|
|
||||||
case interface{}:
|
|
||||||
fields[measurement] = t
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("Missing key 'value' in '%s' output response\n",
|
|
||||||
requestUrl.String())
|
if values, ok := out["value"]; ok {
|
||||||
|
switch t := values.(type) {
|
||||||
|
case map[string]interface{}:
|
||||||
|
for k, v := range t {
|
||||||
|
fields[measurement+"_"+k] = v
|
||||||
|
}
|
||||||
|
case interface{}:
|
||||||
|
fields[measurement] = t
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Missing key 'value' in output response\n")
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
acc.AddFields("jolokia", fields, tags)
|
acc.AddFields("jolokia", fields, tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -47,8 +47,10 @@ const invalidJSON = "I don't think this is JSON"
|
|||||||
const empty = ""
|
const empty = ""
|
||||||
|
|
||||||
var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
|
var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
|
||||||
var HeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
|
var HeapMetric = Metric{Name: "heap_memory_usage",
|
||||||
var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
|
Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}
|
||||||
|
var UsedHeapMetric = Metric{Name: "heap_memory_usage",
|
||||||
|
Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}
|
||||||
|
|
||||||
type jolokiaClientStub struct {
|
type jolokiaClientStub struct {
|
||||||
responseBody string
|
responseBody string
|
||||||
@@ -94,9 +96,9 @@ func TestHttpJsonMultiValue(t *testing.T) {
|
|||||||
"heap_memory_usage_used": 203288528.0,
|
"heap_memory_usage_used": 203288528.0,
|
||||||
}
|
}
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
"host": "127.0.0.1",
|
"jolokia_host": "127.0.0.1",
|
||||||
"port": "8080",
|
"jolokia_port": "8080",
|
||||||
"server": "as1",
|
"jolokia_name": "as1",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
|
acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
|
||||||
}
|
}
|
||||||
@@ -114,3 +116,17 @@ func TestHttpJsonOn404(t *testing.T) {
|
|||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, 0, len(acc.Metrics))
|
assert.Equal(t, 0, len(acc.Metrics))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Test that the proper values are ignored or collected
|
||||||
|
func TestHttpInvalidJson(t *testing.T) {
|
||||||
|
|
||||||
|
jolokia := genJolokiaClientStub(invalidJSON, 200, Servers,
|
||||||
|
[]Metric{UsedHeapMetric})
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
acc.SetDebug(true)
|
||||||
|
err := jolokia.Gather(&acc)
|
||||||
|
|
||||||
|
assert.Nil(t, err)
|
||||||
|
assert.Equal(t, 0, len(acc.Metrics))
|
||||||
|
}
|
||||||
|
|||||||
@@ -50,7 +50,7 @@ var sampleConfig = `
|
|||||||
## an array of Zookeeper connection strings
|
## an array of Zookeeper connection strings
|
||||||
zookeeper_peers = ["localhost:2181"]
|
zookeeper_peers = ["localhost:2181"]
|
||||||
## Zookeeper Chroot
|
## Zookeeper Chroot
|
||||||
zookeeper_chroot = "/"
|
zookeeper_chroot = ""
|
||||||
## the name of the consumer group
|
## the name of the consumer group
|
||||||
consumer_group = "telegraf_metrics_consumers"
|
consumer_group = "telegraf_metrics_consumers"
|
||||||
## Offset (must be either "oldest" or "newest")
|
## Offset (must be either "oldest" or "newest")
|
||||||
|
|||||||
@@ -3,13 +3,16 @@ package leofs
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
const oid = ".1.3.6.1.4.1.35450"
|
const oid = ".1.3.6.1.4.1.35450"
|
||||||
@@ -175,14 +178,18 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
|
|||||||
return outerr
|
return outerr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc telegraf.Accumulator) error {
|
func (l *LeoFS) gatherServer(
|
||||||
|
endpoint string,
|
||||||
|
serverType ServerType,
|
||||||
|
acc telegraf.Accumulator,
|
||||||
|
) error {
|
||||||
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
|
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
|
||||||
stdout, err := cmd.StdoutPipe()
|
stdout, err := cmd.StdoutPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
cmd.Start()
|
cmd.Start()
|
||||||
defer cmd.Wait()
|
defer internal.WaitTimeout(cmd, time.Second*5)
|
||||||
scanner := bufio.NewScanner(stdout)
|
scanner := bufio.NewScanner(stdout)
|
||||||
if !scanner.Scan() {
|
if !scanner.Scan() {
|
||||||
return fmt.Errorf("Unable to retrieve the node name")
|
return fmt.Errorf("Unable to retrieve the node name")
|
||||||
|
|||||||
89
plugins/inputs/logparser/README.md
Normal file
89
plugins/inputs/logparser/README.md
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
# logparser Input Plugin
|
||||||
|
|
||||||
|
The logparser plugin streams and parses the given logfiles. Currently it only
|
||||||
|
has the capability of parsing "grok" patterns from logfiles, which also supports
|
||||||
|
regex patterns.
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.logparser]]
|
||||||
|
## Log files to parse.
|
||||||
|
## These accept standard unix glob matching rules, but with the addition of
|
||||||
|
## ** as a "super asterisk". ie:
|
||||||
|
## /var/log/**.log -> recursively find all .log files in /var/log
|
||||||
|
## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
||||||
|
## /var/log/apache.log -> only tail the apache log file
|
||||||
|
files = ["/var/log/influxdb/influxdb.log"]
|
||||||
|
## Read file from beginning.
|
||||||
|
from_beginning = false
|
||||||
|
|
||||||
|
## Parse logstash-style "grok" patterns:
|
||||||
|
## Telegraf builtin parsing patterns: https://goo.gl/dkay10
|
||||||
|
[inputs.logparser.grok]
|
||||||
|
## This is a list of patterns to check the given log file(s) for.
|
||||||
|
## Note that adding patterns here increases processing time. The most
|
||||||
|
## efficient configuration is to have one file & pattern per logparser.
|
||||||
|
patterns = ["%{INFLUXDB_HTTPD_LOG}"]
|
||||||
|
## Full path(s) to custom pattern files.
|
||||||
|
custom_pattern_files = []
|
||||||
|
## Custom patterns can also be defined here. Put one pattern per line.
|
||||||
|
custom_patterns = '''
|
||||||
|
'''
|
||||||
|
```
|
||||||
|
|
||||||
|
## Grok Parser
|
||||||
|
|
||||||
|
The grok parser uses a slightly modified version of logstash "grok" patterns,
|
||||||
|
with the format `%{<capture_syntax>[:<semantic_name>][:<modifier>]}`
|
||||||
|
|
||||||
|
|
||||||
|
Telegraf has many of it's own
|
||||||
|
[built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/grok/patterns/influx-patterns),
|
||||||
|
as well as supporting
|
||||||
|
[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
|
||||||
|
|
||||||
|
|
||||||
|
The best way to get acquainted with grok patterns is to read the logstash docs,
|
||||||
|
which are available here:
|
||||||
|
https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
|
||||||
|
|
||||||
|
|
||||||
|
If you need help building patterns to match your logs,
|
||||||
|
you will find the http://grokdebug.herokuapp.com application quite useful!
|
||||||
|
|
||||||
|
|
||||||
|
By default all named captures are converted into string fields.
|
||||||
|
Modifiers can be used to convert captures to other types or tags.
|
||||||
|
Timestamp modifiers can be used to convert captures to the timestamp of the
|
||||||
|
parsed metric.
|
||||||
|
|
||||||
|
|
||||||
|
- Available modifiers:
|
||||||
|
- string (default if nothing is specified)
|
||||||
|
- int
|
||||||
|
- float
|
||||||
|
- duration (ie, 5.23ms gets converted to int nanoseconds)
|
||||||
|
- tag (converts the field into a tag)
|
||||||
|
- drop (drops the field completely)
|
||||||
|
- Timestamp modifiers:
|
||||||
|
- ts-ansic ("Mon Jan _2 15:04:05 2006")
|
||||||
|
- ts-unix ("Mon Jan _2 15:04:05 MST 2006")
|
||||||
|
- ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
|
||||||
|
- ts-rfc822 ("02 Jan 06 15:04 MST")
|
||||||
|
- ts-rfc822z ("02 Jan 06 15:04 -0700")
|
||||||
|
- ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
|
||||||
|
- ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
|
||||||
|
- ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
|
||||||
|
- ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
|
||||||
|
- ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
|
||||||
|
- ts-httpd ("02/Jan/2006:15:04:05 -0700")
|
||||||
|
- ts-epoch (seconds since unix epoch)
|
||||||
|
- ts-epochnano (nanoseconds since unix epoch)
|
||||||
|
- ts-"CUSTOM"
|
||||||
|
|
||||||
|
|
||||||
|
CUSTOM time layouts must be within quotes and be the representation of the
|
||||||
|
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
|
||||||
|
See https://golang.org/pkg/time/#Parse for more details.
|
||||||
|
|
||||||
373
plugins/inputs/logparser/grok/grok.go
Normal file
373
plugins/inputs/logparser/grok/grok.go
Normal file
@@ -0,0 +1,373 @@
|
|||||||
|
package grok
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/vjeantet/grok"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
)
|
||||||
|
|
||||||
|
var timeFormats = map[string]string{
|
||||||
|
"ts-ansic": "Mon Jan _2 15:04:05 2006",
|
||||||
|
"ts-unix": "Mon Jan _2 15:04:05 MST 2006",
|
||||||
|
"ts-ruby": "Mon Jan 02 15:04:05 -0700 2006",
|
||||||
|
"ts-rfc822": "02 Jan 06 15:04 MST",
|
||||||
|
"ts-rfc822z": "02 Jan 06 15:04 -0700", // RFC822 with numeric zone
|
||||||
|
"ts-rfc850": "Monday, 02-Jan-06 15:04:05 MST",
|
||||||
|
"ts-rfc1123": "Mon, 02 Jan 2006 15:04:05 MST",
|
||||||
|
"ts-rfc1123z": "Mon, 02 Jan 2006 15:04:05 -0700", // RFC1123 with numeric zone
|
||||||
|
"ts-rfc3339": "2006-01-02T15:04:05Z07:00",
|
||||||
|
"ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00",
|
||||||
|
"ts-httpd": "02/Jan/2006:15:04:05 -0700",
|
||||||
|
"ts-epoch": "EPOCH",
|
||||||
|
"ts-epochnano": "EPOCH_NANO",
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
INT = "int"
|
||||||
|
TAG = "tag"
|
||||||
|
FLOAT = "float"
|
||||||
|
STRING = "string"
|
||||||
|
DURATION = "duration"
|
||||||
|
DROP = "drop"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// matches named captures that contain a type.
|
||||||
|
// ie,
|
||||||
|
// %{NUMBER:bytes:int}
|
||||||
|
// %{IPORHOST:clientip:tag}
|
||||||
|
// %{HTTPDATE:ts1:ts-http}
|
||||||
|
// %{HTTPDATE:ts2:ts-"02 Jan 06 15:04"}
|
||||||
|
typedRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`)
|
||||||
|
// matches a plain pattern name. ie, %{NUMBER}
|
||||||
|
patternOnlyRe = regexp.MustCompile(`%{(\w+)}`)
|
||||||
|
)
|
||||||
|
|
||||||
|
type Parser struct {
|
||||||
|
Patterns []string
|
||||||
|
CustomPatterns string
|
||||||
|
CustomPatternFiles []string
|
||||||
|
|
||||||
|
// typeMap is a map of patterns -> capture name -> modifier,
|
||||||
|
// ie, {
|
||||||
|
// "%{TESTLOG}":
|
||||||
|
// {
|
||||||
|
// "bytes": "int",
|
||||||
|
// "clientip": "tag"
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
typeMap map[string]map[string]string
|
||||||
|
// tsMap is a map of patterns -> capture name -> timestamp layout.
|
||||||
|
// ie, {
|
||||||
|
// "%{TESTLOG}":
|
||||||
|
// {
|
||||||
|
// "httptime": "02/Jan/2006:15:04:05 -0700"
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
tsMap map[string]map[string]string
|
||||||
|
// patterns is a map of all of the parsed patterns from CustomPatterns
|
||||||
|
// and CustomPatternFiles.
|
||||||
|
// ie, {
|
||||||
|
// "DURATION": "%{NUMBER}[nuµm]?s"
|
||||||
|
// "RESPONSE_CODE": "%{NUMBER:rc:tag}"
|
||||||
|
// }
|
||||||
|
patterns map[string]string
|
||||||
|
|
||||||
|
g *grok.Grok
|
||||||
|
tsModder *tsModder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) Compile() error {
|
||||||
|
p.typeMap = make(map[string]map[string]string)
|
||||||
|
p.tsMap = make(map[string]map[string]string)
|
||||||
|
p.patterns = make(map[string]string)
|
||||||
|
p.tsModder = &tsModder{}
|
||||||
|
var err error
|
||||||
|
p.g, err = grok.NewWithConfig(&grok.Config{NamedCapturesOnly: true})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns
|
||||||
|
|
||||||
|
if len(p.CustomPatterns) != 0 {
|
||||||
|
scanner := bufio.NewScanner(strings.NewReader(p.CustomPatterns))
|
||||||
|
p.addCustomPatterns(scanner)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, filename := range p.CustomPatternFiles {
|
||||||
|
file, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(bufio.NewReader(file))
|
||||||
|
p.addCustomPatterns(scanner)
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.compileCustomPatterns()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
|
||||||
|
var err error
|
||||||
|
var values map[string]string
|
||||||
|
// the matching pattern string
|
||||||
|
var patternName string
|
||||||
|
for _, pattern := range p.Patterns {
|
||||||
|
if values, err = p.g.Parse(pattern, line); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(values) != 0 {
|
||||||
|
patternName = pattern
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(values) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags := make(map[string]string)
|
||||||
|
timestamp := time.Now()
|
||||||
|
for k, v := range values {
|
||||||
|
if k == "" || v == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var t string
|
||||||
|
// check if pattern has some modifiers
|
||||||
|
if types, ok := p.typeMap[patternName]; ok {
|
||||||
|
t = types[k]
|
||||||
|
}
|
||||||
|
// if we didn't find a modifier, check if we have a timestamp layout
|
||||||
|
if t == "" {
|
||||||
|
if ts, ok := p.tsMap[patternName]; ok {
|
||||||
|
// check if the modifier is a timestamp layout
|
||||||
|
if layout, ok := ts[k]; ok {
|
||||||
|
t = layout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if we didn't find a type OR timestamp modifier, assume string
|
||||||
|
if t == "" {
|
||||||
|
t = STRING
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case INT:
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = iv
|
||||||
|
}
|
||||||
|
case FLOAT:
|
||||||
|
fv, err := strconv.ParseFloat(v, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to float: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = fv
|
||||||
|
}
|
||||||
|
case DURATION:
|
||||||
|
d, err := time.ParseDuration(v)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to duration: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = int64(d)
|
||||||
|
}
|
||||||
|
case TAG:
|
||||||
|
tags[k] = v
|
||||||
|
case STRING:
|
||||||
|
fields[k] = strings.Trim(v, `"`)
|
||||||
|
case "EPOCH":
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
timestamp = time.Unix(iv, 0)
|
||||||
|
}
|
||||||
|
case "EPOCH_NANO":
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
timestamp = time.Unix(0, iv)
|
||||||
|
}
|
||||||
|
case DROP:
|
||||||
|
// goodbye!
|
||||||
|
default:
|
||||||
|
ts, err := time.Parse(t, v)
|
||||||
|
if err == nil {
|
||||||
|
timestamp = ts
|
||||||
|
} else {
|
||||||
|
log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return telegraf.NewMetric("logparser_grok", tags, fields, p.tsModder.tsMod(timestamp))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) {
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
if len(line) > 0 && line[0] != '#' {
|
||||||
|
names := strings.SplitN(line, " ", 2)
|
||||||
|
p.patterns[names[0]] = names[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) compileCustomPatterns() error {
|
||||||
|
var err error
|
||||||
|
// check if the pattern contains a subpattern that is already defined
|
||||||
|
// replace it with the subpattern for modifier inheritance.
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
for name, pattern := range p.patterns {
|
||||||
|
subNames := patternOnlyRe.FindAllStringSubmatch(pattern, -1)
|
||||||
|
for _, subName := range subNames {
|
||||||
|
if subPattern, ok := p.patterns[subName[1]]; ok {
|
||||||
|
pattern = strings.Replace(pattern, subName[0], subPattern, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.patterns[name] = pattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if pattern contains modifiers. Parse them out if it does.
|
||||||
|
for name, pattern := range p.patterns {
|
||||||
|
if typedRe.MatchString(pattern) {
|
||||||
|
// this pattern has modifiers, so parse out the modifiers
|
||||||
|
pattern, err = p.parseTypedCaptures(name, pattern)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
p.patterns[name] = pattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.g.AddPatternsFromMap(p.patterns)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTypedCaptures parses the capture types, and then deletes the type from
|
||||||
|
// the line so that it is a valid "grok" pattern again.
|
||||||
|
// ie,
|
||||||
|
// %{NUMBER:bytes:int} => %{NUMBER:bytes} (stores %{NUMBER}->bytes->int)
|
||||||
|
// %{IPORHOST:clientip:tag} => %{IPORHOST:clientip} (stores %{IPORHOST}->clientip->tag)
|
||||||
|
func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) {
|
||||||
|
matches := typedRe.FindAllStringSubmatch(pattern, -1)
|
||||||
|
|
||||||
|
// grab the name of the capture pattern
|
||||||
|
patternName := "%{" + name + "}"
|
||||||
|
// create type map for this pattern
|
||||||
|
p.typeMap[patternName] = make(map[string]string)
|
||||||
|
p.tsMap[patternName] = make(map[string]string)
|
||||||
|
|
||||||
|
// boolean to verify that each pattern only has a single ts- data type.
|
||||||
|
hasTimestamp := false
|
||||||
|
for _, match := range matches {
|
||||||
|
// regex capture 1 is the name of the capture
|
||||||
|
// regex capture 2 is the type of the capture
|
||||||
|
if strings.HasPrefix(match[2], "ts-") {
|
||||||
|
if hasTimestamp {
|
||||||
|
return pattern, fmt.Errorf("logparser pattern compile error: "+
|
||||||
|
"Each pattern is allowed only one named "+
|
||||||
|
"timestamp data type. pattern: %s", pattern)
|
||||||
|
}
|
||||||
|
if f, ok := timeFormats[match[2]]; ok {
|
||||||
|
p.tsMap[patternName][match[1]] = f
|
||||||
|
} else {
|
||||||
|
p.tsMap[patternName][match[1]] = strings.TrimSuffix(strings.TrimPrefix(match[2], `ts-"`), `"`)
|
||||||
|
}
|
||||||
|
hasTimestamp = true
|
||||||
|
} else {
|
||||||
|
p.typeMap[patternName][match[1]] = match[2]
|
||||||
|
}
|
||||||
|
|
||||||
|
// the modifier is not a valid part of a "grok" pattern, so remove it
|
||||||
|
// from the pattern.
|
||||||
|
pattern = strings.Replace(pattern, ":"+match[2]+"}", "}", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pattern, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// tsModder is a struct for incrementing identical timestamps of log lines
|
||||||
|
// so that we don't push identical metrics that will get overwritten.
|
||||||
|
type tsModder struct {
|
||||||
|
dupe time.Time
|
||||||
|
last time.Time
|
||||||
|
incr time.Duration
|
||||||
|
incrn time.Duration
|
||||||
|
rollover time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// tsMod increments the given timestamp one unit more from the previous
|
||||||
|
// duplicate timestamp.
|
||||||
|
// the increment unit is determined as the next smallest time unit below the
|
||||||
|
// most significant time unit of ts.
|
||||||
|
// ie, if the input is at ms precision, it will increment it 1µs.
|
||||||
|
func (t *tsModder) tsMod(ts time.Time) time.Time {
|
||||||
|
defer func() { t.last = ts }()
|
||||||
|
// don't mod the time if we don't need to
|
||||||
|
if t.last.IsZero() || ts.IsZero() {
|
||||||
|
t.incrn = 0
|
||||||
|
t.rollover = 0
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
if !ts.Equal(t.last) && !ts.Equal(t.dupe) {
|
||||||
|
t.incr = 0
|
||||||
|
t.incrn = 0
|
||||||
|
t.rollover = 0
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.Equal(t.last) {
|
||||||
|
t.dupe = ts
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.Equal(t.dupe) && t.incr == time.Duration(0) {
|
||||||
|
tsNano := ts.UnixNano()
|
||||||
|
|
||||||
|
d := int64(10)
|
||||||
|
counter := 1
|
||||||
|
for {
|
||||||
|
a := tsNano % d
|
||||||
|
if a > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d = d * 10
|
||||||
|
counter++
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case counter <= 6:
|
||||||
|
t.incr = time.Nanosecond
|
||||||
|
case counter <= 9:
|
||||||
|
t.incr = time.Microsecond
|
||||||
|
case counter > 9:
|
||||||
|
t.incr = time.Millisecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.incrn++
|
||||||
|
if t.incrn == 999 && t.incr > time.Nanosecond {
|
||||||
|
t.rollover = t.incr * t.incrn
|
||||||
|
t.incrn = 1
|
||||||
|
t.incr = t.incr / 1000
|
||||||
|
if t.incr < time.Nanosecond {
|
||||||
|
t.incr = time.Nanosecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ts.Add(t.incr*t.incrn + t.rollover)
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user