Compare commits: 0.13.0-rc1...kb-procsta

323 commits
| SHA1 |
|---|
| f2eea71c1f |
| ce5054c850 |
| da9b719889 |
| c7834209d2 |
| 07f57710df |
| dc84cce95c |
| 78ced6bc30 |
| ca8e512e5b |
| 573628dbdd |
| e477620dc5 |
| 32268fb25b |
| 80391bfe1f |
| e19845c202 |
| 52134555d6 |
| e7e39df6a0 |
| 055ef168ae |
| 2778b7be30 |
| 953db51b2c |
| c043461f6c |
| ddc07f9ef8 |
| 2cf1db0837 |
| 17e6496830 |
| 1d10eda84e |
| 9ea3dbeee8 |
| 100501ba72 |
| f12368698b |
| 6b25a73629 |
| 90c7475c68 |
| 6648c101dd |
| 8d3285522c |
| b613405f42 |
| e999298078 |
| 0f0ab953f6 |
| aaddbd153e |
| 9b2e2cc41f |
| bc22309459 |
| b6f81b538a |
| c3aa43a6bd |
| b2ea39077e |
| 811567a2f4 |
| ca8fb440cc |
| ac58a6bb3c |
| 9757d39240 |
| 5a9e7d77b8 |
| e963b7f01b |
| e7899d4dc5 |
| 301c79e57c |
| 67c288abda |
| 8dd2a8527a |
| 2fe427b3b3 |
| 6b1cc67664 |
| 1271f9d71a |
| 49ea4e9f39 |
| 50ef3282b6 |
| b63dedb74d |
| 5628049440 |
| 54c9ba7639 |
| b18d375d6c |
| 6dbbe65897 |
| 03d8abccdd |
| 0f6d317a8e |
| 792682590c |
| 2d3da343b3 |
| 094eda22c0 |
| 4886109d9c |
| 2dc47285bd |
| 6e33a6d62f |
| a8f9eb23cc |
| 41a5ee6571 |
| 7d8de4b8e1 |
| cc2b53abf4 |
| 32aa1cc814 |
| 38d877165a |
| 5c5984bfe1 |
| 30cdc31a27 |
| 602a36e241 |
| b863ee1d65 |
| ca49babf3a |
| cf37b5cdcf |
| 969f388ef2 |
| 0589a1d0a5 |
| 4e019a176d |
| a0e23d30fe |
| e931706249 |
| 2457d95262 |
| e9d33726a9 |
| 2462e04bf2 |
| 7fac74919c |
| b022b5567d |
| dbf6380e4b |
| a0e42f8a61 |
| 94e673fe85 |
| 7600757f16 |
| 4ce8dd5f9a |
| 26315bfbea |
| a282fb8524 |
| dee98612e2 |
| 69e4e862a3 |
| c0e895c3a7 |
| fec9760f72 |
| 1989a5855d |
| abcd19493e |
| e457b7a8df |
| 3853d0d065 |
| 53e31cf1b5 |
| c99c22534b |
| 8e22526756 |
| 7b6713b094 |
| b0ef506a88 |
| 22c293de62 |
| d3bb1e7010 |
| 49988b15a3 |
| f0357b7a12 |
| 9d3ad6309e |
| b55e9e78e3 |
| 4bc6fdb09e |
| 2b43b385de |
| 13865f9e04 |
| 497353e586 |
| 2d86dfba8b |
| 30dbfd9af8 |
| c991b579d2 |
| 841729c0f9 |
| 412f5b5acb |
| 0b3958d3cd |
| e68f251df7 |
| 986735234b |
| 4363eebc1b |
| 1be6ea5696 |
| 8acda0da8f |
| ee240a5599 |
| 29ea433763 |
| 0462af164e |
| 1c24665b29 |
| 0af0fa7c2e |
| 191608041f |
| 42d9d5d237 |
| d54b169d67 |
| 82166a36d0 |
| cbf5a55c7d |
| 5f14ad9fa1 |
| 0be69b8a44 |
| 375710488d |
| 03d02fa67a |
| b58cd78c79 |
| dabb6f5466 |
| 281a4d5500 |
| 1c2965703d |
| 5dc4cce157 |
| 8c7edeb53b |
| 1d9745ee98 |
| 2d6c8767f7 |
| b4a6d9c647 |
| 6afe9ceef1 |
| 704d9ad76c |
| 300d9adbd0 |
| 207c5498e7 |
| d5e7439343 |
| 21add2c799 |
| 4651ab88ad |
| 53f40063b3 |
| 97d92bba67 |
| bfdd665435 |
| 821d3fafa6 |
| 7c9b312cee |
| 69ab8a645c |
| 7b550c11cb |
| bb4f18ca88 |
| 6efe91ea9c |
| 5f0a63f554 |
| d14e7536ab |
| c873937356 |
| e1c3800cd9 |
| c046232425 |
| 2d4864e126 |
| 048448aa93 |
| 755b2ec953 |
| f62c493c77 |
| a6365a6086 |
| f7e057ec55 |
| 30cc00d11b |
| d641c42029 |
| 9c2ca805da |
| b0484d8a0c |
| 5ddd61d2e2 |
| 50ea7f4a9d |
| b18134a4e3 |
| 7825df4771 |
| d6951dacdc |
| e603825e37 |
| e3448153e1 |
| 25848c545a |
| 3098564896 |
| 4b6f9b93dd |
| 2beef21231 |
| cb3c54a1ae |
| d50a1e83ac |
| 1f10639222 |
| af0979cce5 |
| 5b43901bd8 |
| d7efb7a71d |
| 4d242836ee |
| 06cb5a041e |
| ea2521bf27 |
| 4cd1f7a104 |
| 137843b2f6 |
| 008ed17a79 |
| 75e6cb9064 |
| ad88a9421a |
| 346deb30a3 |
| 8c3d7cd145 |
| 821b30eb92 |
| a362352587 |
| 94f952787f |
| 3ff184c061 |
| 80368e3936 |
| 2c448e22e1 |
| 1aabd38eb2 |
| 675457873a |
| 8173338f8a |
| c4841843a9 |
| f08a27be5d |
| a4b36d12dd |
| c842724b61 |
| fb5f40319e |
| 52b9fc837c |
| 6f991ec78a |
| 7921d87a45 |
| 9f7a758bf9 |
| 0aff7a0bc1 |
| c4cfdb8a25 |
| 342cfc4087 |
| bd1282eddf |
| 892abec025 |
| e809c4e445 |
| 9ff536d94d |
| 4f27315720 |
| 958ef2f872 |
| 069764f05e |
| eeeab5192b |
| a7dfbce3d3 |
| ed2d1d9bb7 |
| 0fb2d2ffae |
| 3af65e7abb |
| 984b6cb0fb |
| ca504a19ec |
| c2797c85d1 |
| d5add07c0b |
| 0ebf1c1ad7 |
| 42d7fc5e16 |
| 6828fc48e1 |
| 98d91b1c89 |
| 9bbdb2d562 |
| a8334c3261 |
| 9144f9630b |
| 3e4a19539a |
| 5fe7e6e40e |
| 58f2ba1247 |
| 5f3a91bffd |
| 6351aa5167 |
| 9966099d1a |
| 1ef5599361 |
| c78b6cdb4e |
| d736c7235a |
| 475252d873 |
| e103923430 |
| cb59517ceb |
| 1248934f3e |
| 204ebf6bf6 |
| 52d5b19219 |
| 8e92d3a4a0 |
| c44ecf54a5 |
| c6699c36d3 |
| d6ceae7005 |
| 4dcb82bf08 |
| 4f5d5926d9 |
| 3c5c3b98df |
| 56aee1ceee |
| f176c28a56 |
| 2e68bd1412 |
| 35eb65460d |
| ab54064689 |
| debf7bf149 |
| 1dbe3b8231 |
| b065573e23 |
| e94e50181c |
| 69dfe63809 |
| f32916a5bd |
| be7ca56872 |
| 33cacc71b8 |
| c292e3931a |
| a87d6f0545 |
| 3a01b6d5b7 |
| 39df2635bd |
| 08ecfb8a67 |
| a59bf7246a |
| 281296cd3f |
| 61d190b1ae |
| dc89f029ad |
| 7557056a31 |
| 20c45a150c |
| 46bf0ef271 |
| a7b632eb5e |
| 90a98c76a0 |
| 12357ee8c5 |
| bb254fc2b9 |
| aeadc2c43a |
| ed492fe950 |
| 775daba8f5 |
| 677dd7ad53 |
| 85dee02a3b |
| afdebbc3a2 |
| 5deb22a539 |
| 36b9e2e077 |
| 5348937c3d |
| 72fcacbbc7 |
| 4c28f15b35 |
| 095ef04c04 |
| 7d49979658 |
| 7a36695a21 |
| 5865587bd0 |
| 219bf93566 |
| 8371546a66 |
.gitattributes (vendored, 4 changes)

@@ -1,2 +1,4 @@
CHANGELOG.md merge=union
README.md merge=union
plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union
.github/ISSUE_TEMPLATE.md (vendored, new file, 44 changes)

@@ -0,0 +1,44 @@
## Directions

GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).

Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
Erase the other section and everything on and above this line.

*Please note, the quickest way to fix a bug is to open a Pull Request.*

## Bug report

### Relevant telegraf.conf:

### System info:

[Include Telegraf version, operating system name, and other relevant details]

### Steps to reproduce:

1. ...
2. ...

### Expected behavior:

### Actual behavior:

### Additional info:

[Include gist of relevant config, logs, etc.]

## Feature Request

Opening a feature request kicks off a discussion.

### Proposal:

### Current behavior:

### Desired behavior:

### Use case: [Why is this important (helps with prioritizing requests)]
.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 5 changes)

@@ -0,0 +1,5 @@
### Required for all PRs:

- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
- [ ] README.md updated (if adding a new plugin)
.gitignore (vendored, 1 change)

@@ -1,3 +1,4 @@
build
tivan
.vagrant
/telegraf
CHANGELOG.md (247 changes)

@@ -1,4 +1,238 @@
## v0.13 [2016-05-09]
## v1.1 [unreleased]

### Release Notes

### Features

- [#1782](https://github.com/influxdata/telegraf/pull/1782): Allow numeric and non-string values for tag_keys.
- [#1694](https://github.com/influxdata/telegraf/pull/1694): Adding Gauge and Counter metric types.
- [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carriage returns from exec plugin output on Windows
- [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout.
- [#1607](https://github.com/influxdata/telegraf/pull/1607): Massage metric names in Instrumental output plugin
- [#1572](https://github.com/influxdata/telegraf/pull/1572): mesos improvements.
- [#1513](https://github.com/influxdata/telegraf/issues/1513): Add Ceph Cluster Performance Statistics
- [#1650](https://github.com/influxdata/telegraf/issues/1650): Ability to configure response_timeout in httpjson input.
- [#1685](https://github.com/influxdata/telegraf/issues/1685): Add additional redis metrics.
- [#1539](https://github.com/influxdata/telegraf/pull/1539): Added capability to send metrics through Http API for OpenTSDB.
- [#1471](https://github.com/influxdata/telegraf/pull/1471): iptables input plugin.
- [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin.
- [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname for each docker measurement.
- [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin.
- [#1407](https://github.com/influxdata/telegraf/pull/1407): HTTP service listener input plugin.
- [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql
- [#1791](https://github.com/influxdata/telegraf/pull/1791): Add Docker container state metrics to Docker input plugin output
- [#1755](https://github.com/influxdata/telegraf/issues/1755): Add support to SNMP for IP & MAC address conversion.
- [#1729](https://github.com/influxdata/telegraf/issues/1729): Add support to SNMP for OID index suffixes.
- [#1813](https://github.com/influxdata/telegraf/pull/1813): Change default arguments for SNMP plugin.
- [#1686](https://github.com/influxdata/telegraf/pull/1686): Mesos input plugin: very high-cardinality mesos-task metrics removed.
- [#1839](https://github.com/influxdata/telegraf/pull/1839): Exact match with pgrep -x option in procstat
- [#1838](https://github.com/influxdata/telegraf/pull/1838): Logging overhaul to centralize the logger & log levels, & provide a logfile config option.

### Bugfixes

- [#1746](https://github.com/influxdata/telegraf/issues/1746): Fix handling of non-string values for JSON keys listed in tag_keys.
- [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2.
- [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing
- [#1716](https://github.com/influxdata/telegraf/issues/1716): Sensors plugin strconv.ParseFloat: parsing "": invalid syntax
- [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic
- [#1764](https://github.com/influxdata/telegraf/issues/1764): Fix kafka consumer panic when nil error is returned down errs channel.
- [#1768](https://github.com/influxdata/telegraf/pull/1768): Speed up statsd parsing.
- [#1751](https://github.com/influxdata/telegraf/issues/1751): Fix powerdns integer parse error handling.
- [#1752](https://github.com/influxdata/telegraf/issues/1752): Fix varnish plugin defaults not being used.
- [#1517](https://github.com/influxdata/telegraf/issues/1517): Fix windows glob paths.
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows.
- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated.
- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with huge payloads.

## v1.0.1 [unreleased]

### Bugfixes

- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes.
- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
- [#1773](https://github.com/influxdata/telegraf/issues/1773): Add configurable timeout to influxdb input plugin.
- [#1785](https://github.com/influxdata/telegraf/pull/1785): Fix statsd no default value panic.

## v1.0 [2016-09-08]

### Release Notes

**Breaking Change** The SNMP plugin is being deprecated in its current form.
There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
which fixes many of the issues and confusions
of its predecessor. For users wanting to continue to use the deprecated SNMP
plugin, you will need to change your config file from `[[inputs.snmp]]` to
`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
backwards-compatible.

**Breaking Change**: Aerospike main server node measurements have been renamed
aerospike_node. Aerospike namespace measurements have been renamed to
aerospike_namespace. They will also now be tagged with the node_name
that they correspond to. This has been done to differentiate measurements
that pertain to node vs. namespace statistics.

**Breaking Change**: users of github_webhooks must change to the new
`[[inputs.webhooks]]` plugin.

This means that the default github_webhooks config:

```
# A Github Webhook Event collector
[[inputs.github_webhooks]]
  ## Address and port to host Webhook listener on
  service_address = ":1618"
```

should now look like:

```
# A Webhooks Event collector
[[inputs.webhooks]]
  ## Address and port to host Webhook listener on
  service_address = ":1618"

  [inputs.webhooks.github]
    path = "/"
```

- Telegraf now supports being installed as an official windows service,
which can be installed via
`> C:\Program Files\Telegraf\telegraf.exe --service install`

- `flush_jitter` behavior has been changed. The random jitter will now be
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter`.
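A minimal sketch of the new per-interval jitter behavior; the interval and jitter durations below are stand-ins for the `flush_interval` and `flush_jitter` settings, not Telegraf's actual implementation:

```go
package main

import (
	"math/rand"
	"time"
)

// jitterSleep draws a fresh random jitter before each flush, rather than
// reusing one random offset chosen once at startup.
func jitterSleep(flushJitter time.Duration) {
	if flushJitter <= 0 {
		return // rand.Int63n panics on non-positive input
	}
	time.Sleep(time.Duration(rand.Int63n(int64(flushJitter))))
}

func main() {
	ticker := time.NewTicker(10 * time.Second) // flush_interval stand-in
	defer ticker.Stop()
	for range ticker.C {
		jitterSleep(500 * time.Millisecond) // flush_jitter stand-in
		// flush buffered metrics here
	}
}
```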
### Features

- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
- [#1503](https://github.com/influxdata/telegraf/pull/1503): Add tls support for certs to RabbitMQ input plugin
- [#1289](https://github.com/influxdata/telegraf/pull/1289): webhooks input plugin. Thanks @francois2metz and @cduez!
- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar webhook plugin.
- [#1408](https://github.com/influxdata/telegraf/pull/1408): mandrill webhook plugin.
- [#1402](https://github.com/influxdata/telegraf/pull/1402): docker-machine/boot2docker no longer required for unit tests.
- [#1350](https://github.com/influxdata/telegraf/pull/1350): cgroup input plugin.
- [#1369](https://github.com/influxdata/telegraf/pull/1369): Add input plugin for consuming metrics from NSQD.
- [#1480](https://github.com/influxdata/telegraf/pull/1480): add ability to read redis from a socket.
- [#1387](https://github.com/influxdata/telegraf/pull/1387): **Breaking Change** - Redis `role` tag renamed to `replication_role` to avoid global_tags override
- [#1437](https://github.com/influxdata/telegraf/pull/1437): Fetching Galera status metrics in MySQL
- [#1500](https://github.com/influxdata/telegraf/pull/1500): Aerospike plugin refactored to use official client lib.
- [#1434](https://github.com/influxdata/telegraf/pull/1434): Add measurement name arg to logparser plugin.
- [#1479](https://github.com/influxdata/telegraf/pull/1479): logparser: change resp_code from a field to a tag.
- [#1411](https://github.com/influxdata/telegraf/pull/1411): Implement support for fetching hddtemp data
- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren!
- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified
- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified
- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
- [#1543](https://github.com/influxdata/telegraf/pull/1543): Official Windows service.
- [#1414](https://github.com/influxdata/telegraf/pull/1414): Forking sensors command to remove C package dependency.
- [#1389](https://github.com/influxdata/telegraf/pull/1389): Add a new SNMP plugin.

### Bugfixes

- [#1619](https://github.com/influxdata/telegraf/issues/1619): Fix `make windows` build target
- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures.
- [#1477](https://github.com/influxdata/telegraf/issues/1477): nstat: fix inaccurate config panic.
- [#1481](https://github.com/influxdata/telegraf/issues/1481): jolokia: fix handling multiple multi-dimensional attributes.
- [#1430](https://github.com/influxdata/telegraf/issues/1430): Fix prometheus character sanitizing. Sanitize more win_perf_counters characters.
- [#1534](https://github.com/influxdata/telegraf/pull/1534): Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does).
- [#1379](https://github.com/influxdata/telegraf/issues/1379): Fix covering Amazon Linux for post remove flow.
- [#1584](https://github.com/influxdata/telegraf/issues/1584): procstat missing fields: read/write bytes & count
- [#1472](https://github.com/influxdata/telegraf/pull/1472): diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality.
- [#1426](https://github.com/influxdata/telegraf/pull/1426): nil metrics panic fix.
- [#1384](https://github.com/influxdata/telegraf/pull/1384): Fix datarace in apache input plugin.
- [#1399](https://github.com/influxdata/telegraf/issues/1399): Add `read_repairs` statistics to riak plugin.
- [#1405](https://github.com/influxdata/telegraf/issues/1405): Fix memory/connection leak in prometheus input plugin.
- [#1378](https://github.com/influxdata/telegraf/issues/1378): Trim BOM from config file for Windows support.
- [#1339](https://github.com/influxdata/telegraf/issues/1339): Prometheus client output panic on service reload.
- [#1461](https://github.com/influxdata/telegraf/pull/1461): Prometheus parser, protobuf format header fix.
- [#1334](https://github.com/influxdata/telegraf/issues/1334): Prometheus output, metric refresh and caching fixes.
- [#1432](https://github.com/influxdata/telegraf/issues/1432): Panic fix for multiple graphite outputs under very high load.
- [#1412](https://github.com/influxdata/telegraf/pull/1412): Instrumental output has better reconnect behavior
- [#1460](https://github.com/influxdata/telegraf/issues/1460): Remove PID from procstat plugin to fix cardinality issues.
- [#1427](https://github.com/influxdata/telegraf/issues/1427): Cassandra input: version 2.x "column family" fix.
- [#1463](https://github.com/influxdata/telegraf/issues/1463): Shared WaitGroup in Exec plugin
- [#1436](https://github.com/influxdata/telegraf/issues/1436): logparser: honor modifiers in "pattern" config.
- [#1418](https://github.com/influxdata/telegraf/issues/1418): logparser: error and exit on file permissions/missing errors.
- [#1499](https://github.com/influxdata/telegraf/pull/1499): Make the user able to specify full path for HAproxy stats
- [#1521](https://github.com/influxdata/telegraf/pull/1521): Fix Redis url, an extra "tcp://" was added.
- [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.
- [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection.
- [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string.
- [#1335](https://github.com/influxdata/telegraf/issues/1335): Fix overall ping timeout to be calculated based on per-ping timeout.
- [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
- [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
- [#1396](https://github.com/influxdata/telegraf/pull/1396): Prometheus input plugin now supports x509 certs authentication
- [#1252](https://github.com/influxdata/telegraf/pull/1252) & [#1279](https://github.com/influxdata/telegraf/pull/1279): Fix systemd service. Thanks @zbindenren & @PierreF!
- [#1221](https://github.com/influxdata/telegraf/pull/1221): Fix influxdb n_shards counter.
- [#1258](https://github.com/influxdata/telegraf/pull/1258): Fix potential kernel plugin integer parse error.
- [#1268](https://github.com/influxdata/telegraf/pull/1268): Fix potential influxdb input type assertion panic.
- [#1283](https://github.com/influxdata/telegraf/pull/1283): Still send processes metrics if a process exited during metric collection.
- [#1297](https://github.com/influxdata/telegraf/issues/1297): disk plugin panic when usage grab fails.
- [#1316](https://github.com/influxdata/telegraf/pull/1316): Removed leaked "database" tag on redis metrics. Thanks @PierreF!
- [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory.
- [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.
- [#1586](https://github.com/influxdata/telegraf/pull/1586): Remove IF NOT EXISTS from influxdb output database creation.
- [#1600](https://github.com/influxdata/telegraf/issues/1600): Fix quoting with text values in postgresql_extensible plugin.
- [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic.
- [#1634](https://github.com/influxdata/telegraf/issues/1634): Fix ntpq panic when field is missing.
- [#1637](https://github.com/influxdata/telegraf/issues/1637): Sanitize graphite output field names.
- [#1695](https://github.com/influxdata/telegraf/pull/1695): Fix MySQL plugin not sending 0 value fields.

## v0.13.1 [2016-05-24]

### Release Notes

- net_response and http_response plugins timeouts will now accept duration
strings, i.e. "2s" or "500ms".
- Input plugin Gathers will no longer be logged by default, but a Gather for
_each_ plugin will be logged in Debug mode.
- Debug mode will no longer print every point added to the accumulator. This
functionality can be duplicated using the `file` output plugin and printing
to "stdout".

### Features

- [#1173](https://github.com/influxdata/telegraf/pull/1173): varnish input plugin. Thanks @sfox-xmatters!
- [#1138](https://github.com/influxdata/telegraf/pull/1138): nstat input plugin. Thanks @Maksadbek!
- [#1139](https://github.com/influxdata/telegraf/pull/1139): instrumental output plugin. Thanks @jasonroelofs!
- [#1172](https://github.com/influxdata/telegraf/pull/1172): Ceph storage stats. Thanks @robinpercy!
- [#1233](https://github.com/influxdata/telegraf/pull/1233): Updated golint gopsutil dependency.
- [#1238](https://github.com/influxdata/telegraf/pull/1238): chrony input plugin. Thanks @zbindenren!
- [#479](https://github.com/influxdata/telegraf/issues/479): per-plugin execution time added to debug output.
- [#1249](https://github.com/influxdata/telegraf/issues/1249): influxdb output: added write_consistency argument.

### Bugfixes

- [#1195](https://github.com/influxdata/telegraf/pull/1195): Docker panic on timeout. Thanks @zstyblik!
- [#1211](https://github.com/influxdata/telegraf/pull/1211): mongodb input. Fix possible panic. Thanks @kols!
- [#1215](https://github.com/influxdata/telegraf/pull/1215): Fix for possible gopsutil-dependent plugin hangs.
- [#1228](https://github.com/influxdata/telegraf/pull/1228): Fix service plugin host tag overwrite.
- [#1198](https://github.com/influxdata/telegraf/pull/1198): http_response: override request Host header properly
- [#1230](https://github.com/influxdata/telegraf/issues/1230): Fix Telegraf process hangup due to a single plugin hanging.
- [#1214](https://github.com/influxdata/telegraf/issues/1214): Use TCP timeout argument in net_response plugin.
- [#1243](https://github.com/influxdata/telegraf/pull/1243): Logfile not created on systemd.

## v0.13 [2016-05-11]

### Release Notes

@@ -48,7 +282,15 @@ based on _prefix_ in addition to globs. This means that a filter like
- disque: `host -> disque_host`
- rethinkdb: `host -> rethinkdb_host`

- **Breaking Change**: The `win_perf_counters` input has been changed to sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as spaces with underscores. This is needed because Graphite doesn't like slashes and spaces, and was failing to accept metrics that had them. The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.
- **Breaking Change**: The `win_perf_counters` input has been changed to
sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as
spaces with underscores. This is needed because Graphite doesn't like slashes
and spaces, and was failing to accept metrics that had them.
The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.

- **Breaking Change**: snmp plugin. The `host` tag of the snmp plugin has been
changed to the `snmp_host` tag.

- The `disk` input plugin can now be configured with the `HOST_MOUNT_PREFIX` environment variable.
This value is prepended to any mountpaths discovered before retrieving stats.
It is not included on the report path. This is necessary for reporting host disk stats when running from within a container.
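A small sketch of the path handling this note implies; the prefix value and mountpoint below are examples, not Telegraf's actual code or defaults:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// e.g. HOST_MOUNT_PREFIX=/hostfs when the host's filesystem is
	// bind-mounted into the container at /hostfs.
	prefix := os.Getenv("HOST_MOUNT_PREFIX")
	mountpoint := "/var" // an example discovered mountpath

	statPath := filepath.Join(prefix, mountpoint)    // path stats are read from
	reported := strings.TrimPrefix(statPath, prefix) // path reported in the metric

	fmt.Println(statPath, reported) // "/hostfs/var" "/var" when the prefix is set
}
```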
@@ -71,6 +313,7 @@ It is not included on the report path. This is necessary for reporting host disk
- [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja!
- [#1122](https://github.com/influxdata/telegraf/pull/1122): Support setting config path through env variable and default paths.
- [#1128](https://github.com/influxdata/telegraf/pull/1128): MongoDB jumbo chunks metric for MongoDB input plugin
- [#1146](https://github.com/influxdata/telegraf/pull/1146): HAProxy socket support. Thanks weshmashian!

### Bugfixes
CONTRIBUTING.md

@@ -11,6 +11,8 @@ Output plugins READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.

## GoDoc

@@ -30,7 +32,7 @@ Assuming you can already build the project, run these in the telegraf directory:

1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`
1. `GOOS=linux gdm save`

## Input Plugins

@@ -82,9 +84,9 @@ func (s *Simple) SampleConfig() string {

func (s *Simple) Gather(acc telegraf.Accumulator) error {
	if s.Ok {
		acc.Add("state", "pretty good", nil)
		acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
	} else {
		acc.Add("state", "not great", nil)
		acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
	}

	return nil

@@ -95,6 +97,13 @@ func init() {
}

## Adding Typed Metrics

In addition to the `AddFields` function, the accumulator also supports
`AddGauge` and `AddCounter` functions. These functions are for adding _typed_
metrics. Metric types are ignored for the InfluxDB output, but can be used
for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
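Reusing the `Simple` plugin from the example above, a `Gather` that emits typed metrics might look like the sketch below; `AddGauge` and `AddCounter` take the same arguments as `AddFields`, and the field names and values here are illustrative:

```go
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "localhost"}

	// A gauge: a sampled value that can move up or down between collections.
	acc.AddGauge("simple", map[string]interface{}{"queue_depth": 42}, tags)

	// A counter: a monotonically increasing total.
	acc.AddCounter("simple", map[string]interface{}{"requests_total": 1027}, tags)

	return nil
}
```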
## Input Plugins Accepting Arbitrary Data Formats

Some input plugins (such as

@@ -114,7 +123,7 @@ creating the `Parser` object.
You should also add the following to your SampleConfig() return:

```toml
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
```
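For plugins that consume arbitrary data formats, the usual pattern is a `SetParser` method plus a `parser.Parse` call inside `Gather`. A minimal sketch, assuming the `parsers.Parser` interface from the parsers package; the raw input bytes are a made-up line-protocol sample:

```go
type Simple struct {
	parser parsers.Parser
}

// SetParser is called by the config loader with the parser built from the
// plugin's data_format settings.
func (s *Simple) SetParser(parser parsers.Parser) {
	s.parser = parser
}

func (s *Simple) Gather(acc telegraf.Accumulator) error {
	raw := []byte("simple value=42i") // stand-in for bytes read from the real source
	metrics, err := s.parser.Parse(raw)
	if err != nil {
		return err
	}
	for _, m := range metrics {
		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
	return nil
}
```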
@@ -212,8 +221,8 @@ func (s *Simple) Close() error {
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
	for _, pt := range points {
		// write `pt` to the output sink here
	for _, metric := range metrics {
		// write `metric` to the output sink here
	}
	return nil
}

@@ -244,7 +253,7 @@ instantiating and creating the `Serializer` object.
You should also add the following to your SampleConfig() return:

```toml
  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
```
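On the output side the mirror-image pattern applies: a `SetSerializer` method plus a per-metric `Serialize` call inside `Write`. A rough sketch, assuming a `serializers.Serializer` whose `Serialize` renders one metric into a slice of line strings (as the graphite and influx serializers of this era did); printing stands in for the real sink:

```go
type Simple struct {
	serializer serializers.Serializer
}

// SetSerializer is called by the config loader with the serializer built
// from the plugin's data_format settings.
func (s *Simple) SetSerializer(serializer serializers.Serializer) {
	s.serializer = serializer
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
	for _, metric := range metrics {
		lines, err := s.serializer.Serialize(metric)
		if err != nil {
			return err
		}
		for _, line := range lines {
			fmt.Println(line) // stand-in for the real output sink
		}
	}
	return nil
}
```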
@@ -290,10 +299,6 @@ To execute Telegraf tests follow these simple steps:
instructions
- execute `make test`

**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.

### Unit test troubleshooting

Try cleaning up your test environment by executing `make docker-kill` and
Godeps (19 changes)

@@ -1,5 +1,6 @@
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
@@ -16,25 +17,29 @@ github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob d877f6352135181470c40c73ebb81aefa22115fa
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb 21db76b3374c733f37ed16ad93f3484020034351
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
@@ -42,12 +47,14 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
github.com/soniah/gosnmp eb32571c2410868d85849ad67d1e51d01273eb84
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
Godeps_windows

@@ -1,59 +1,12 @@
github.com/Microsoft/go-winio 9f57cbbcbcb41dea496528872a4f0e37a4f7ae98
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
Makefile (42 changes)

@@ -1,5 +1,6 @@
UNAME := $(shell sh -c 'uname')
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
@@ -14,21 +15,18 @@ windows: prepare-windows build-windows

# Only run the build (no dependency grabbing)
build:
    go install -ldflags "-X main.Version=$(VERSION)" ./...
    go install -ldflags \
        "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...

build-windows:
    go build -o telegraf.exe -ldflags \
        "-X main.Version=$(VERSION)" \
    GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
        "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
        ./cmd/telegraf/telegraf.go

build-for-docker:
    CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
        "-s -X main.Version=$(VERSION)" \
        ./cmd/telegraf/telegraf.go

# Build with race detector
dev: prepare
    go build -race -ldflags "-X main.Version=$(VERSION)" ./...
        "-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
        ./cmd/telegraf/telegraf.go
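The renamed `-X` flags above set package-level string variables in `package main` at link time. A minimal sketch of the variables those flags target; the print format is illustrative, not Telegraf's actual output:

```go
package main

import "fmt"

// Populated at build time via:
//   go build -ldflags "-X main.version=... -X main.commit=... -X main.branch=..."
// Each -X flag must name an existing string variable, package path included.
var (
	version string
	commit  string
	branch  string
)

func main() {
	fmt.Printf("Telegraf %s (branch %s, commit %s)\n", version, branch, commit)
}
```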
# run package script
package:

@@ -42,54 +40,44 @@ prepare:
# Use the windows godeps file to prepare dependencies
prepare-windows:
    go get github.com/sparrc/gdm
    gdm restore
    gdm restore -f Godeps_windows

# Run all docker containers necessary for unit tests
docker-run:
ifeq ($(UNAME), Darwin)
    docker run --name kafka \
        -e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
        -e ADVERTISED_PORT=9092 \
        -p "2181:2181" -p "9092:9092" \
        -d spotify/kafka
endif
ifeq ($(UNAME), Linux)
    docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
    docker run --name kafka \
        -e ADVERTISED_HOST=localhost \
        -e ADVERTISED_PORT=9092 \
        -p "2181:2181" -p "9092:9092" \
        -d spotify/kafka
endif
    docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
    docker run --name memcached -p "11211:11211" -d memcached
    docker run --name postgres -p "5432:5432" -d postgres
    docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
    docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
    docker run --name redis -p "6379:6379" -d redis
    docker run --name aerospike -p "3000:3000" -d aerospike
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d blalor/riemann
    docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
    docker run --name nats -p "4222:4222" -d nats

# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
    docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
    docker run --name kafka \
        -e ADVERTISED_HOST=localhost \
        -e ADVERTISED_PORT=9092 \
        -p "2181:2181" -p "9092:9092" \
        -d spotify/kafka
    docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
    docker run --name aerospike -p "3000:3000" -d aerospike
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d blalor/riemann
    docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
    docker run --name nats -p "4222:4222" -d nats

# Kill all docker containers, ignore errors
docker-kill:
    -docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
    -docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
    -docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
    -docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats

# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
README.md (72 changes)

@@ -1,4 +1,4 @@
# Telegraf [](https://circleci.com/gh/influxdata/telegraf)
# Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)

Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other

@@ -20,12 +20,12 @@ new plugins.
### Linux deb and rpm Packages:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_0.13.0-1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1.x86_64.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.x86_64.rpm

Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_0.13.0-1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1.armhf.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.armhf.rpm

##### Package Instructions:

@@ -46,32 +46,14 @@ to use this repo to install & update telegraf.
### Linux tarballs:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_armhf.tar.gz

##### tarball Instructions:

To install the full directory structure with config file, run:

```
sudo tar -C / -zxvf ./telegraf-0.13.0-1_linux_amd64.tar.gz
```

To extract only the binary, run:

```
tar -zxvf telegraf-0.13.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_armhf.tar.gz

### FreeBSD tarball:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_freebsd_amd64.tar.gz

##### tarball Instructions:

See linux instructions above.
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_freebsd_amd64.tar.gz

### Ansible Role:

@@ -87,8 +69,7 @@ brew install telegraf
### Windows Binaries (EXPERIMENTAL)

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_windows_amd64.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_windows_i386.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_windows_amd64.zip

### From Source:

@@ -161,6 +142,10 @@ Currently implemented sources:
* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
* [ceph](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ceph)
* [chrony](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/chrony)
* [consul](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul)
* [conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack)
* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)

@@ -171,10 +156,12 @@ Currently implemented sources:
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
* [hddtemp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hddtemp)
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)

@@ -186,6 +173,7 @@ Currently implemented sources:
* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
* [nstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nstat)
* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)

@@ -201,10 +189,12 @@ Currently implemented sources:
* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source)
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy)
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
* [win_perf_counters](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)

@@ -219,16 +209,25 @@ Currently implemented sources:
* swap
* processes
* kernel (/proc/stat)
* kernel (/proc/vmstat)

Telegraf can also collect metrics via the following service plugins:

* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
  * [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
  * [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
  * [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
  * [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)

We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.

@@ -243,9 +242,12 @@ want to add support for another service or third-party API.
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
|
||||
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
|
||||
|
||||
@@ -2,20 +2,39 @@ package telegraf

import "time"

// Accumulator is an interface for "accumulating" metrics from input plugin(s).
// The metrics are sent down a channel shared between all input plugins and then
// flushed on the configured flush_interval.
type Accumulator interface {
    // AddFields adds a metric to the accumulator with the given measurement
    // name, fields, and tags (and timestamp). If a timestamp is not provided,
    // then the accumulator sets it to "now".
    // Create a point with a value, decorating it with tags
    // NOTE: tags is expected to be owned by the caller, don't mutate
    // it after passing to Add.
    Add(measurement string,
        value interface{},
        tags map[string]string,
        t ...time.Time)

    AddFields(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    // AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
    AddGauge(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    // AddCounter is the same as AddFields, but will add the metric as a "Counter" type
    AddCounter(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    AddError(err error)

    Debug() bool
    SetDebug(enabled bool)

    SetPrecision(precision, interval time.Duration)

    DisablePrecision()
}

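To see how this interface is consumed, here is a minimal, hedged sketch of an input plugin; the `Mock` type, measurement name, and field values are hypothetical, and the `Description`/`SampleConfig`/`Gather` methods are assumed from the input-plugin convention used throughout this changeset.

```go
package mock

import "github.com/influxdata/telegraf"

type Mock struct{}

func (m *Mock) Description() string  { return "a hypothetical input plugin" }
func (m *Mock) SampleConfig() string { return "" }

// Gather is called once per collection interval; it hands metrics to the
// shared accumulator, which applies filtering, precision, and default tags.
func (m *Mock) Gather(acc telegraf.Accumulator) error {
    fields := map[string]interface{}{"value": 42}
    tags := map[string]string{"host": "example"}
    acc.AddFields("mock", fields, tags) // timestamp defaults to "now"
    return nil
}
```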
@@ -4,7 +4,7 @@ import (
    "fmt"
    "log"
    "math"
    "sync"
    "sync/atomic"
    "time"

    "github.com/influxdata/telegraf"
@@ -12,43 +12,30 @@ import (
)

func NewAccumulator(
    inputConfig *internal_models.InputConfig,
    inputConfig *models.InputConfig,
    metrics chan telegraf.Metric,
) *accumulator {
    acc := accumulator{}
    acc.metrics = metrics
    acc.inputConfig = inputConfig
    acc.precision = time.Nanosecond
    return &acc
}

type accumulator struct {
    sync.Mutex

    metrics chan telegraf.Metric

    defaultTags map[string]string

    debug bool
    // print every point added to the accumulator
    trace bool

    inputConfig *internal_models.InputConfig
    inputConfig *models.InputConfig

    prefix string
}
    precision time.Duration

func (ac *accumulator) Add(
    measurement string,
    value interface{},
    tags map[string]string,
    t ...time.Time,
) {
    fields := make(map[string]interface{})
    fields["value"] = value

    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
        return
    }

    ac.AddFields(measurement, fields, tags, t...)
    errCount uint64
}

func (ac *accumulator) AddFields(
@@ -57,16 +44,47 @@ func (ac *accumulator) AddFields(
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.makeMetric(measurement, fields, tags, telegraf.Untyped, t...); m != nil {
        ac.metrics <- m
    }
}

func (ac *accumulator) AddGauge(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.makeMetric(measurement, fields, tags, telegraf.Gauge, t...); m != nil {
        ac.metrics <- m
    }
}

func (ac *accumulator) AddCounter(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.makeMetric(measurement, fields, tags, telegraf.Counter, t...); m != nil {
        ac.metrics <- m
    }
}

// makeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (ac *accumulator) makeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t ...time.Time,
) telegraf.Metric {
    if len(fields) == 0 || len(measurement) == 0 {
        return
        return nil
    }

    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
        return
    }

    if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
        return
    if tags == nil {
        tags = make(map[string]string)
    }

    // Override measurement name if set
@@ -81,55 +99,49 @@ func (ac *accumulator) AddFields(
        measurement = measurement + ac.inputConfig.MeasurementSuffix
    }

    if tags == nil {
        tags = make(map[string]string)
    // Apply plugin-wide tags if set
    for k, v := range ac.inputConfig.Tags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }
    // Apply daemon-wide tags if set
    for k, v := range ac.defaultTags {
        tags[k] = v
    }
    // Apply plugin-wide tags if set
    for k, v := range ac.inputConfig.Tags {
        tags[k] = v
    }
    ac.inputConfig.Filter.FilterTags(tags)

    result := make(map[string]interface{})
    for k, v := range fields {
        // Filter out any filtered fields
        if ac.inputConfig != nil {
            if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
                continue
            }
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    // Apply the metric filter(s)
    if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok {
        return nil
    }

    for k, v := range fields {
        // Validate uint64 and float64 fields
        switch val := v.(type) {
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                result[k] = int64(val)
                fields[k] = int64(val)
            } else {
                result[k] = int64(9223372036854775807)
                fields[k] = int64(9223372036854775807)
            }
            continue
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if ac.debug {
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                    log.Printf("I! Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                delete(fields, k)
                continue
            }
        }

        result[k] = v
    }
    fields = nil
    if len(result) == 0 {
        return
        fields[k] = v
    }

    var timestamp time.Time
@@ -138,20 +150,39 @@ func (ac *accumulator) AddFields(
    } else {
        timestamp = time.Now()
    }
    timestamp = timestamp.Round(ac.precision)

    if ac.prefix != "" {
        measurement = ac.prefix + measurement
    var m telegraf.Metric
    var err error
    switch mType {
    case telegraf.Counter:
        m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp)
    case telegraf.Gauge:
        m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp)
    default:
        m, err = telegraf.NewMetric(measurement, tags, fields, timestamp)
    }

    m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return
        log.Printf("E! Error adding point [%s]: %s\n", measurement, err.Error())
        return nil
    }
    if ac.debug {

    if ac.trace {
        fmt.Println("> " + m.String())
    }
    ac.metrics <- m

    return m
}

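The uint64 handling inside makeMetric above is easy to miss among the interleaved old/new lines. Here is the same clamp rule in isolation; the package and helper names are invented for illustration, but the arithmetic mirrors the diff: values below 2^63 convert directly, anything larger pins to the largest int64, because the InfluxDB line protocol of this era has no unsigned integer type.

```go
package main

import (
    "fmt"
    "math"
)

// clampUint64 converts a uint64 field value to the int64 range,
// pinning overflowing values to math.MaxInt64.
func clampUint64(v uint64) int64 {
    if v <= math.MaxInt64 {
        return int64(v)
    }
    return math.MaxInt64
}

func main() {
    fmt.Println(clampUint64(99))                  // 99
    fmt.Println(clampUint64(9223372036854775808)) // 9223372036854775807
}
```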
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {
    if err == nil {
        return
    }
    atomic.AddUint64(&ac.errCount, 1)
    //TODO suppress/throttle consecutive duplicate errors?
    log.Printf("E! Error in input [%s]: %s", ac.inputConfig.Name, err)
}

func (ac *accumulator) Debug() bool {
@@ -162,6 +193,39 @@ func (ac *accumulator) SetDebug(debug bool) {
    ac.debug = debug
}

func (ac *accumulator) Trace() bool {
    return ac.trace
}

func (ac *accumulator) SetTrace(trace bool) {
    ac.trace = trace
}

// SetPrecision takes two time.Duration objects. If the first is non-zero,
// it sets that as the precision. Otherwise, it takes the second argument
// as the order of time that the metrics should be rounded to, with the
// maximum being 1s.
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
    if precision > 0 {
        ac.precision = precision
        return
    }
    switch {
    case interval >= time.Second:
        ac.precision = time.Second
    case interval >= time.Millisecond:
        ac.precision = time.Millisecond
    case interval >= time.Microsecond:
        ac.precision = time.Microsecond
    default:
        ac.precision = time.Nanosecond
    }
}

func (ac *accumulator) DisablePrecision() {
    ac.precision = time.Nanosecond
}

func (ac *accumulator) setDefaultTags(tags map[string]string) {
    ac.defaultTags = tags
}

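SetPrecision ultimately controls the time.Round call in makeMetric. The standard-library rounding alone reproduces the timestamps asserted by the tests in the next file, which is worth seeing in isolation:

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    // The same instant the accumulator tests below use.
    ts := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    fmt.Println(ts.Round(time.Second).UnixNano())      // 1139572800000000000
    fmt.Println(ts.Round(time.Millisecond).UnixNano()) // 1139572800083000000
    fmt.Println(ts.Round(time.Microsecond).UnixNano()) // 1139572800082913000
    fmt.Println(ts.UnixNano())                         // 1139572800082912748
}
```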
@@ -1,8 +1,11 @@
package agent

import (
    "bytes"
    "fmt"
    "log"
    "math"
    "os"
    "testing"
    "time"

@@ -10,6 +13,7 @@ import (
    "github.com/influxdata/telegraf/internal/models"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestAdd(t *testing.T) {
@@ -17,11 +21,17 @@ func TestAdd(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    a.Add("acctest", float64(101), map[string]string{})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
@@ -38,17 +48,241 @@ func TestAdd(t *testing.T) {
        actual)
}

func TestAddGauge(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddGauge("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Gauge)
}

func TestAddCounter(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Counter)
}

func TestAddNoPrecisionWithInterval(t *testing.T) {
    a := accumulator{}
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.SetPrecision(0, time.Second)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
        actual)
}

func TestAddNoIntervalWithPrecision(t *testing.T) {
    a := accumulator{}
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.SetPrecision(time.Second, time.Millisecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
        actual)
}

func TestAddDisablePrecision(t *testing.T) {
    a := accumulator{}
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.SetPrecision(time.Second, time.Millisecond)
    a.DisablePrecision()
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
        actual)
}

func TestDifferentPrecisions(t *testing.T) {
    a := accumulator{}
    now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.SetPrecision(0, time.Second)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm := <-a.metrics
    actual := testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
        actual)

    a.SetPrecision(0, time.Millisecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
        actual)

    a.SetPrecision(0, time.Microsecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
        actual)

    a.SetPrecision(0, time.Nanosecond)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)
    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
        actual)
}

func TestAddDefaultTags(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    a.Add("acctest", float64(101), map[string]string{})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
@@ -70,7 +304,7 @@ func TestAddFields(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    fields := map[string]interface{}{
        "usage": float64(99),
@@ -103,7 +337,7 @@ func TestAddInfFields(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    fields := map[string]interface{}{
        "usage": inf,
@@ -131,7 +365,7 @@ func TestAddNaNFields(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    fields := map[string]interface{}{
        "usage": nan,
@@ -155,7 +389,7 @@ func TestAddUint64Fields(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    fields := map[string]interface{}{
        "usage": uint64(99),
@@ -184,7 +418,7 @@ func TestAddUint64Overflow(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    fields := map[string]interface{}{
        "usage": uint64(9223372036854775808),
@@ -214,11 +448,17 @@ func TestAddInts(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    a.Add("acctest", int(101), map[string]string{})
    a.Add("acctest", int32(101), map[string]string{"acc": "test"})
    a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)
    a.AddFields("acctest",
        map[string]interface{}{"value": int(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": int32(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": int64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
@@ -241,10 +481,14 @@ func TestAddFloats(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    a.Add("acctest", float32(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
    a.AddFields("acctest",
        map[string]interface{}{"value": float32(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
@@ -263,10 +507,14 @@ func TestAddStrings(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    a.Add("acctest", "test", map[string]string{"acc": "test"})
    a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)
    a.AddFields("acctest",
        map[string]interface{}{"value": "test"},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": "foo"},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
@@ -285,10 +533,12 @@ func TestAddBools(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig = &models.InputConfig{}

    a.Add("acctest", true, map[string]string{"acc": "test"})
    a.Add("acctest", false, map[string]string{"acc": "test"}, now)
    a.AddFields("acctest",
        map[string]interface{}{"value": true}, map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": false}, map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
@@ -307,16 +557,22 @@ func TestAccFilterTags(t *testing.T) {
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    filter := internal_models.Filter{
    filter := models.Filter{
        TagExclude: []string{"acc"},
    }
    assert.NoError(t, filter.CompileFilter())
    a.inputConfig = &internal_models.InputConfig{}
    assert.NoError(t, filter.Compile())
    a.inputConfig = &models.InputConfig{}
    a.inputConfig.Filter = filter

    a.Add("acctest", float64(101), map[string]string{})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
@@ -332,3 +588,27 @@ func TestAccFilterTags(t *testing.T) {
        fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
        actual)
}

func TestAccAddError(t *testing.T) {
    errBuf := bytes.NewBuffer(nil)
    log.SetOutput(errBuf)
    defer log.SetOutput(os.Stderr)

    a := accumulator{}
    a.inputConfig = &models.InputConfig{}
    a.inputConfig.Name = "mock_plugin"

    a.AddError(fmt.Errorf("foo"))
    a.AddError(fmt.Errorf("bar"))
    a.AddError(fmt.Errorf("baz"))

    errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
    assert.EqualValues(t, 3, a.errCount)
    require.Len(t, errs, 4) // 4 because of trailing newline
    assert.Contains(t, string(errs[0]), "mock_plugin")
    assert.Contains(t, string(errs[0]), "foo")
    assert.Contains(t, string(errs[1]), "mock_plugin")
    assert.Contains(t, string(errs[1]), "bar")
    assert.Contains(t, string(errs[2]), "mock_plugin")
    assert.Contains(t, string(errs[2]), "baz")
}

242 agent/agent.go
@@ -1,17 +1,15 @@
package agent

import (
    cryptorand "crypto/rand"
    "fmt"
    "log"
    "math/big"
    "math/rand"
    "os"
    "runtime"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/internal/models"
)
@@ -51,18 +49,16 @@ func (a *Agent) Connect() error {
        switch ot := o.Output.(type) {
        case telegraf.ServiceOutput:
            if err := ot.Start(); err != nil {
                log.Printf("Service for output %s failed to start, exiting\n%s\n",
                log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
                    o.Name, err.Error())
                return err
            }
        }

        if a.Config.Agent.Debug {
            log.Printf("Attempting connection to output: %s\n", o.Name)
        }
        log.Printf("D! Attempting connection to output: %s\n", o.Name)
        err := o.Output.Connect()
        if err != nil {
            log.Printf("Failed to connect to output %s, retrying in 15s, "+
            log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
                "error was '%s' \n", o.Name, err)
            time.Sleep(15 * time.Second)
            err = o.Output.Connect()
@@ -70,9 +66,7 @@ func (a *Agent) Connect() error {
                return err
            }
        }
        if a.Config.Agent.Debug {
            log.Printf("Successfully connected to output: %s\n", o.Name)
        }
        log.Printf("D! Successfully connected to output: %s\n", o.Name)
    }
    return nil
}
@@ -90,105 +84,50 @@ func (a *Agent) Close() error {
    return err
}

func panicRecover(input *internal_models.RunningInput) {
func panicRecover(input *models.RunningInput) {
    if err := recover(); err != nil {
        trace := make([]byte, 2048)
        runtime.Stack(trace, true)
        log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
        log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
            input.Name, err, trace)
        log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
        log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
            "stack trace, configuration, and OS information: " +
            "https://github.com/influxdata/telegraf/issues/new")
    }
}

// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
    var wg sync.WaitGroup

    start := time.Now()
    counter := 0
    jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
    for _, input := range a.Config.Inputs {
        if input.Config.Interval != 0 {
            continue
        }

        wg.Add(1)
        counter++
        go func(input *internal_models.RunningInput) {
            defer panicRecover(input)
            defer wg.Done()

            acc := NewAccumulator(input.Config, metricC)
            acc.SetDebug(a.Config.Agent.Debug)
            acc.setDefaultTags(a.Config.Tags)

            if jitter != 0 {
                nanoSleep := rand.Int63n(jitter)
                d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
                if err != nil {
                    log.Printf("Jittering collection interval failed for plugin %s",
                        input.Name)
                } else {
                    time.Sleep(d)
                }
            }

            if err := input.Input.Gather(acc); err != nil {
                log.Printf("Error in input [%s]: %s", input.Name, err)
            }

        }(input)
    }

    if counter == 0 {
        return nil
    }

    wg.Wait()

    elapsed := time.Since(start)
    if !a.Config.Agent.Quiet {
        log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
            a.Config.Agent.Interval.Duration, counter, elapsed)
    }
    return nil
}

// gatherSeparate runs the inputs that have been configured with their own
// gatherer runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
func (a *Agent) gatherer(
    shutdown chan struct{},
    input *internal_models.RunningInput,
    input *models.RunningInput,
    interval time.Duration,
    metricC chan telegraf.Metric,
) error {
    defer panicRecover(input)

    ticker := time.NewTicker(input.Config.Interval)
    ticker := time.NewTicker(interval)
    defer ticker.Stop()

    for {
        var outerr error
        start := time.Now()

        acc := NewAccumulator(input.Config, metricC)
        acc.SetDebug(a.Config.Agent.Debug)
        acc.SetPrecision(a.Config.Agent.Precision.Duration,
            a.Config.Agent.Interval.Duration)
        acc.setDefaultTags(a.Config.Tags)

        if err := input.Input.Gather(acc); err != nil {
            log.Printf("Error in input [%s]: %s", input.Name, err)
        }
        internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)

        start := time.Now()
        gatherWithTimeout(shutdown, input, acc, interval)
        elapsed := time.Since(start)
        if !a.Config.Agent.Quiet {
            log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
                input.Config.Interval, input.Name, elapsed)
        }

        if outerr != nil {
            return outerr
        }
        log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
            input.Name, interval, elapsed)

        select {
        case <-shutdown:
@@ -199,6 +138,42 @@ func (a *Agent) gatherSeparate(
        }
    }
}

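The gatherer loop above replaces the hand-rolled jitter block with a call to internal.RandomSleep. As a reading aid, here is a hedged sketch of what such a helper plausibly does; the actual implementation lives in telegraf's internal package and may differ in detail.

```go
package internal

import (
    "math/rand"
    "time"
)

// RandomSleep sleeps for a random duration in [0, max), returning early
// if the shutdown channel is closed. Assumed behavior, for illustration.
func RandomSleep(max time.Duration, shutdown chan struct{}) {
    if max == 0 {
        return
    }
    d := time.Duration(rand.Int63n(max.Nanoseconds()))
    select {
    case <-time.After(d):
    case <-shutdown:
    }
}
```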
// gatherWithTimeout gathers from the given input, with the given timeout.
// when the given timeout is reached, gatherWithTimeout logs an error message
// but continues waiting for it to return. This is to avoid leaving behind
// hung processes, and to prevent re-calling the same hung process over and
// over.
func gatherWithTimeout(
    shutdown chan struct{},
    input *models.RunningInput,
    acc *accumulator,
    timeout time.Duration,
) {
    ticker := time.NewTicker(timeout)
    defer ticker.Stop()
    done := make(chan error)
    go func() {
        done <- input.Input.Gather(acc)
    }()

    for {
        select {
        case err := <-done:
            if err != nil {
                log.Printf("E! ERROR in input [%s]: %s", input.Name, err)
            }
            return
        case <-ticker.C:
            log.Printf("E! ERROR: input [%s] took longer to collect than "+
                "collection interval (%s)",
                input.Name, timeout)
            continue
        case <-shutdown:
            return
        }
    }
}

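Distilled out of Telegraf, the pattern in gatherWithTimeout above looks like the sketch below (names are mine, not the project's): run the work in a goroutine, log each time the ticker fires before the work returns, and keep waiting instead of re-invoking a possibly hung collector.

```go
package main

import (
    "log"
    "time"
)

// withTimeout runs work once; it logs on every timeout tick but never
// abandons or restarts the work, so a hung call is not re-entered.
func withTimeout(work func() error, timeout time.Duration) {
    ticker := time.NewTicker(timeout)
    defer ticker.Stop()
    done := make(chan error, 1) // buffered so the goroutine never leaks
    go func() { done <- work() }()
    for {
        select {
        case err := <-done:
            if err != nil {
                log.Printf("E! %s", err)
            }
            return
        case <-ticker.C:
            log.Printf("E! work exceeded %s, still waiting", timeout)
        }
    }
}

func main() {
    slow := func() error { time.Sleep(150 * time.Millisecond); return nil }
    withTimeout(slow, 100*time.Millisecond) // logs once, then returns
}
```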
// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
@@ -220,7 +195,9 @@ func (a *Agent) Test() error {

    for _, input := range a.Config.Inputs {
        acc := NewAccumulator(input.Config, metricC)
        acc.SetDebug(true)
        acc.SetTrace(true)
        acc.SetPrecision(a.Config.Agent.Precision.Duration,
            a.Config.Agent.Interval.Duration)
        acc.setDefaultTags(a.Config.Tags)

        fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
@@ -231,6 +208,9 @@ func (a *Agent) Test() error {
        if err := input.Input.Gather(acc); err != nil {
            return err
        }
        if acc.errCount > 0 {
            return fmt.Errorf("Errors encountered during processing")
        }

        // Special instructions for some inputs. cpu, for example, needs to be
        // run twice in order to return cpu usage percentages.
@@ -253,11 +233,11 @@ func (a *Agent) flush() {

    wg.Add(len(a.Config.Outputs))
    for _, o := range a.Config.Outputs {
        go func(output *internal_models.RunningOutput) {
        go func(output *models.RunningOutput) {
            defer wg.Done()
            err := output.Write()
            if err != nil {
                log.Printf("Error writing to output [%s]: %s\n",
                log.Printf("E! Error writing to output [%s]: %s\n",
                    output.Name, err.Error())
            }
        }(o)
@@ -277,51 +257,47 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
    for {
        select {
        case <-shutdown:
            log.Println("Hang on, flushing any cached metrics before shutdown")
            log.Println("I! Hang on, flushing any cached metrics before shutdown")
            a.flush()
            return nil
        case <-ticker.C:
            internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
            a.flush()
        case m := <-metricC:
            for _, o := range a.Config.Outputs {
                o.AddMetric(m)
            for i, o := range a.Config.Outputs {
                if i == len(a.Config.Outputs)-1 {
                    o.AddMetric(m)
                } else {
                    o.AddMetric(copyMetric(m))
                }
            }
        }
    }
}

// jitterInterval applies the the interval jitter to the flush interval using
// crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
    var jitter int64
    outinterval := ininterval
    if injitter.Nanoseconds() != 0 {
        maxjitter := big.NewInt(injitter.Nanoseconds())
        if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
            jitter = j.Int64()
        }
        outinterval = time.Duration(jitter + ininterval.Nanoseconds())
func copyMetric(m telegraf.Metric) telegraf.Metric {
    t := time.Time(m.Time())

    tags := make(map[string]string)
    fields := make(map[string]interface{})
    for k, v := range m.Tags() {
        tags[k] = v
    }
    for k, v := range m.Fields() {
        fields[k] = v
    }

    if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
        log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
        outinterval = time.Duration(500 * time.Millisecond)
    }

    return outinterval
    out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
    return out
}

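copyMetric above exists so that, in the flusher, each output gets its own metric instance; only the last output may take ownership of the original. A minimal, self-contained sketch of that fan-out rule, with types and names invented for illustration:

```go
package main

import "fmt"

type metric map[string]interface{}

// copyMetric makes a shallow per-output copy so no two outputs share
// a mutable map.
func copyMetric(m metric) metric {
    out := make(metric, len(m))
    for k, v := range m {
        out[k] = v
    }
    return out
}

// fanOut hands copies to all outputs except the last, which can safely
// take ownership of the original.
func fanOut(m metric, outputs []chan metric) {
    for i, o := range outputs {
        if i == len(outputs)-1 {
            o <- m
        } else {
            o <- copyMetric(m)
        }
    }
}

func main() {
    outs := []chan metric{make(chan metric, 1), make(chan metric, 1)}
    fanOut(metric{"value": 42}, outs)
    fmt.Println(<-outs[0], <-outs[1])
}
```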
// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
    var wg sync.WaitGroup

    a.Config.Agent.FlushInterval.Duration = jitterInterval(
        a.Config.Agent.FlushInterval.Duration,
        a.Config.Agent.FlushJitter.Duration)

    log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
    log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
        "Flush Interval:%s \n",
        a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
        a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
        a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

    // channel shared between all input threads for accumulating metrics
@@ -332,10 +308,12 @@ func (a *Agent) Run(shutdown chan struct{}) error {
        switch p := input.Input.(type) {
        case telegraf.ServiceInput:
            acc := NewAccumulator(input.Config, metricC)
            acc.SetDebug(a.Config.Agent.Debug)
            // Service input plugins should set their own precision of their
            // metrics.
            acc.DisablePrecision()
            acc.setDefaultTags(a.Config.Tags)
            if err := p.Start(acc); err != nil {
                log.Printf("Service for input %s failed to start, exiting\n%s\n",
                log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
                    input.Name, err.Error())
                return err
            }
@@ -348,43 +326,31 @@ func (a *Agent) Run(shutdown chan struct{}) error {
        i := int64(a.Config.Agent.Interval.Duration)
        time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
    }
    ticker := time.NewTicker(a.Config.Agent.Interval.Duration)

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := a.flusher(shutdown, metricC); err != nil {
            log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
            log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
            close(shutdown)
        }
    }()

    wg.Add(len(a.Config.Inputs))
    for _, input := range a.Config.Inputs {
        // Special handling for inputs that have their own collection interval
        // configured. Default intervals are handled below with gatherParallel
        interval := a.Config.Agent.Interval.Duration
        // overwrite global interval if this plugin has it's own.
        if input.Config.Interval != 0 {
            wg.Add(1)
            go func(input *internal_models.RunningInput) {
                defer wg.Done()
                if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
                    log.Printf(err.Error())
                }
            }(input)
            interval = input.Config.Interval
        }
        go func(in *models.RunningInput, interv time.Duration) {
            defer wg.Done()
            if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
                log.Printf("E! " + err.Error())
            }
        }(input, interval)
    }

    defer wg.Wait()

    for {
        if err := a.gatherParallel(metricC); err != nil {
            log.Printf(err.Error())
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
    wg.Wait()
    return nil
}

@@ -2,7 +2,6 @@ package agent

import (
    "testing"
    "time"

    "github.com/influxdata/telegraf/internal/config"

@@ -110,75 +109,3 @@ func TestAgent_LoadOutput(t *testing.T) {
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))
}

func TestAgent_ZeroJitter(t *testing.T) {
    flushinterval := jitterInterval(time.Duration(10*time.Second),
        time.Duration(0*time.Second))

    actual := flushinterval.Nanoseconds()
    exp := time.Duration(10 * time.Second).Nanoseconds()

    if actual != exp {
        t.Errorf("Actual %v, expected %v", actual, exp)
    }
}

func TestAgent_ZeroInterval(t *testing.T) {
    min := time.Duration(500 * time.Millisecond).Nanoseconds()
    max := time.Duration(5 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(0*time.Second),
            time.Duration(5*time.Second))
        actual := flushinterval.Nanoseconds()

        if actual > max {
            t.Errorf("Didn't expect interval %d to be > %d", actual, max)
            break
        }
        if actual < min {
            t.Errorf("Didn't expect interval %d to be < %d", actual, min)
            break
        }
    }
}

func TestAgent_ZeroBoth(t *testing.T) {
    flushinterval := jitterInterval(time.Duration(0*time.Second),
        time.Duration(0*time.Second))

    actual := flushinterval
    exp := time.Duration(500 * time.Millisecond)

    if actual != exp {
        t.Errorf("Actual %v, expected %v", actual, exp)
    }
}

func TestAgent_JitterMax(t *testing.T) {
    max := time.Duration(32 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(30*time.Second),
            time.Duration(2*time.Second))
        actual := flushinterval.Nanoseconds()
        if actual > max {
            t.Errorf("Didn't expect interval %d to be > %d", actual, max)
            break
        }
    }
}

func TestAgent_JitterMin(t *testing.T) {
    min := time.Duration(30 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(30*time.Second),
            time.Duration(2*time.Second))
        actual := flushinterval.Nanoseconds()
        if actual < min {
            t.Errorf("Didn't expect interval %d to be < %d", actual, min)
            break
        }
    }
}

@@ -4,9 +4,9 @@ machine:
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.6.2 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz
    - go version | grep 1.7.1 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.7.1.linux-amd64.tar.gz
    - go version

dependencies:

@@ -6,19 +6,23 @@ import (
    "log"
    "os"
    "os/signal"
    "runtime"
    "strings"
    "syscall"

    "github.com/influxdata/telegraf/agent"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/logger"
    "github.com/influxdata/telegraf/plugins/inputs"
    _ "github.com/influxdata/telegraf/plugins/inputs/all"
    "github.com/influxdata/telegraf/plugins/outputs"
    _ "github.com/influxdata/telegraf/plugins/outputs/all"

    "github.com/kardianos/service"
)

var fDebug = flag.Bool("debug", false,
    "show metrics as they're generated to stdout")
    "turn on debug logging")
var fQuiet = flag.Bool("quiet", false,
    "run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -39,16 +43,26 @@ var fOutputList = flag.Bool("output-list", false,
    "print available output plugins.")
var fUsage = flag.String("usage", "",
    "print usage for a plugin, ie, 'telegraf -usage mysql'")
var fInputFiltersLegacy = flag.String("filter", "",
    "filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
    "filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
    "directory containing additional *.conf files")
var fService = flag.String("service", "",
    "operate on the service")

// Telegraf version
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string
// Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
    version string
    commit  string
    branch  string
)

func init() {
    // If commit or branch are not set, make that clear.
    if commit == "" {
        commit = "unknown"
    }
    if branch == "" {
        branch = "unknown"
    }
}

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

@@ -70,6 +84,7 @@ The flags are:
    -debug      print metrics as they're generated to stdout
    -quiet      run in quiet mode
    -version    print the version to stdout
    -service    Control the service, ie, 'telegraf -service install (windows only)'

In addition to the -config flag, telegraf will also load the config file from
an environment variable or default location. Precedence is:
@@ -96,7 +111,19 @@ Examples:
    telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
`

func main() {
var stop chan struct{}

var srvc service.Service

type program struct{}

func reloadLoop(stop chan struct{}, s service.Service) {
    defer func() {
        if service.Interactive() {
            os.Exit(0)
        }
        return
    }()
    reload := make(chan bool, 1)
    reload <- true
    for <-reload {
@@ -106,24 +133,11 @@ func main() {
        args := flag.Args()

        var inputFilters []string
        if *fInputFiltersLegacy != "" {
            fmt.Printf("WARNING '--filter' flag is deprecated, please use" +
                " '--input-filter'")
            inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
            inputFilters = strings.Split(":"+inputFilter+":", ":")
        }
        if *fInputFilters != "" {
            inputFilter := strings.TrimSpace(*fInputFilters)
            inputFilters = strings.Split(":"+inputFilter+":", ":")
        }

        var outputFilters []string
        if *fOutputFiltersLegacy != "" {
            fmt.Printf("WARNING '--outputfilter' flag is deprecated, please use" +
                " '--output-filter'")
            outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
            outputFilters = strings.Split(":"+outputFilter+":", ":")
        }
        if *fOutputFilters != "" {
            outputFilter := strings.TrimSpace(*fOutputFilters)
            outputFilters = strings.Split(":"+outputFilter+":", ":")
@@ -132,8 +146,7 @@ func main() {
        if len(args) > 0 {
            switch args[0] {
            case "version":
                v := fmt.Sprintf("Telegraf - Version %s", Version)
                fmt.Println(v)
                fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
                return
            case "config":
                config.PrintSampleConfig(inputFilters, outputFilters)
@@ -141,34 +154,27 @@ func main() {
            }
        }

        if *fOutputList {
        // switch for flags which just do something and exit immediately
        switch {
        case *fOutputList:
            fmt.Println("Available Output Plugins:")
            for k, _ := range outputs.Outputs {
                fmt.Printf("  %s\n", k)
            }
            return
        }

        if *fInputList {
        case *fInputList:
            fmt.Println("Available Input Plugins:")
            for k, _ := range inputs.Inputs {
                fmt.Printf("  %s\n", k)
            }
            return
        }

        if *fVersion {
            v := fmt.Sprintf("Telegraf - Version %s", Version)
            fmt.Println(v)
        case *fVersion:
            fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
            return
        }

        if *fSampleConfig {
        case *fSampleConfig:
            config.PrintSampleConfig(inputFilters, outputFilters)
            return
        }

        if *fUsage != "" {
        case *fUsage != "":
            if err := config.PrintInputConfig(*fUsage); err != nil {
                if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
                    log.Fatalf("%s and %s", err, err2)
@@ -187,15 +193,6 @@ func main() {
            os.Exit(1)
        }

        if *fConfigDirectoryLegacy != "" {
            fmt.Printf("WARNING '--configdirectory' flag is deprecated, please use" +
                " '--config-directory'")
            err = c.LoadDirectory(*fConfigDirectoryLegacy)
            if err != nil {
                log.Fatal(err)
            }
        }

        if *fConfigDirectory != "" {
            err = c.LoadDirectory(*fConfigDirectory)
            if err != nil {
@@ -214,13 +211,12 @@ func main() {
            log.Fatal(err)
        }

        if *fDebug {
            ag.Config.Agent.Debug = true
        }

        if *fQuiet {
            ag.Config.Agent.Quiet = true
        }
        // Setup logging
        logger.SetupLogging(
            ag.Config.Agent.Debug || *fDebug,
            ag.Config.Agent.Quiet || *fQuiet,
            ag.Config.Agent.Logfile,
        )

        if *fTest {
            err = ag.Test()
@@ -239,22 +235,26 @@ func main() {
        signals := make(chan os.Signal)
        signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
        go func() {
            sig := <-signals
            if sig == os.Interrupt {
                close(shutdown)
            }
            if sig == syscall.SIGHUP {
                log.Printf("Reloading Telegraf config\n")
                <-reload
                reload <- true
            select {
            case sig := <-signals:
                if sig == os.Interrupt {
                    close(shutdown)
                }
                if sig == syscall.SIGHUP {
                    log.Printf("I! Reloading Telegraf config\n")
                    <-reload
                    reload <- true
                    close(shutdown)
                }
            case <-stop:
                close(shutdown)
            }
        }()

        log.Printf("Starting Telegraf (version %s)\n", Version)
        log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
        log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
        log.Printf("Tags enabled: %s", c.ListTags())
        log.Printf("I! Starting Telegraf (version %s)\n", version)
        log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
        log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
        log.Printf("I! Tags enabled: %s", c.ListTags())

        if *fPidfile != "" {
            f, err := os.Create(*fPidfile)
@@ -275,3 +275,55 @@ func usageExit(rc int) {
    fmt.Println(usage)
    os.Exit(rc)
}

func (p *program) Start(s service.Service) error {
    srvc = s
    go p.run()
    return nil
}
func (p *program) run() {
    stop = make(chan struct{})
    reloadLoop(stop, srvc)
}
func (p *program) Stop(s service.Service) error {
    close(stop)
    return nil
}

func main() {
    flag.Parse()
    if runtime.GOOS == "windows" {
        svcConfig := &service.Config{
            Name:        "telegraf",
            DisplayName: "Telegraf Data Collector Service",
            Description: "Collects data using a series of plugins and publishes it to" +
                "another series of plugins.",
            Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
        }

        prg := &program{}
        s, err := service.New(prg, svcConfig)
        if err != nil {
            log.Fatal(err)
        }
        // Handle the -service flag here to prevent any issues with tooling that
        // may not have an interactive session, e.g. installing from Ansible.
        if *fService != "" {
            if *fConfig != "" {
                (*svcConfig).Arguments = []string{"-config", *fConfig}
            }
            err := service.Control(s, *fService)
            if err != nil {
                log.Fatal(err)
            }
        } else {
            err = s.Run()
            if err != nil {
                log.Println("E! " + err.Error())
            }
        }
    } else {
        stop = make(chan struct{})
        reloadLoop(stop, nil)
    }
}

@@ -86,6 +86,10 @@ as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.

**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.

## Input Configuration

Some configuration options are configurable per input:
@@ -129,6 +133,10 @@ fields which begin with `time_`.

#### Input Config: tagpass and tagdrop

**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.

```toml
[[inputs.cpu]]
  percpu = true

@@ -186,49 +186,59 @@ name of the plugin.
# Graphite:

The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags. For
more advanced options, Telegraf supports specifying "templates" to translate
telegraf measurement names, with a single value field, and without any tags.
By default, the separator is left as ".", but this can be changed using the
"separator" argument. For more advanced options,
Telegraf supports specifying "templates" to translate
graphite buckets into Telegraf metrics.

#### Separator:

You can specify a separator to use for the parsed metrics.
By default, it will leave the metrics with a "." separator.
Setting `separator = "_"` will translate:
Templates are of the form:

```
cpu.usage.idle 99
=> cpu_usage_idle value=99
"host.mytag.mytag.measurement.measurement.field*"
```

#### Measurement/Tag Templates:
Where the following keywords exist:

1. `measurement`: specifies that this section of the graphite bucket corresponds
to the measurement name. This can be specified multiple times.
2. `field`: specifies that this section of the graphite bucket corresponds
to the field name. This can be specified multiple times.
3. `measurement*`: specifies that all remaining elements of the graphite bucket
correspond to the measurement name.
4. `field*`: specifies that all remaining elements of the graphite bucket
correspond to the field name.

Any part of the template that is not a keyword is treated as a tag key. This
can also be specified multiple times.

NOTE: `field*` cannot be used in conjunction with `measurement*`!

#### Measurement & Tag Templates:

The most basic template is to specify a single transformation to apply to all
incoming metrics. _measurement_ is a special keyword that tells Telegraf which
parts of the graphite bucket to combine into the measurement name. It can have a
trailing `*` to indicate that the remainder of the metric should be used.
Other words are considered tag keys. So the following template:
incoming metrics. So the following template:

```toml
templates = [
    "region.measurement*"
    "region.region.measurement*"
]
```

would result in the following Graphite -> Telegraf transformation.

```
us-west.cpu.load 100
=> cpu.load,region=us-west value=100
us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```
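To make the keyword rules above concrete, here is a small standalone Go sketch that reproduces the transformation just shown. The `applyTemplate` helper and its simplified logic are illustrative only, not Telegraf's actual graphite parser (for instance, it ignores the `field` keyword entirely):

```go
package main

import (
	"fmt"
	"strings"
)

// applyTemplate maps one graphite bucket through a template, treating
// "measurement"/"measurement*" as measurement parts and any other word
// as a tag key (a deliberately simplified re-implementation).
func applyTemplate(template, bucket, sep string) (string, map[string]string) {
	tparts := strings.Split(template, ".")
	bparts := strings.Split(bucket, ".")
	tags := map[string]string{}
	var mparts []string
	for i, t := range tparts {
		if i >= len(bparts) {
			break
		}
		switch t {
		case "measurement":
			mparts = append(mparts, bparts[i])
		case "measurement*":
			mparts = append(mparts, bparts[i:]...)
		default:
			tags[t] = bparts[i]
		}
	}
	return strings.Join(mparts, sep), tags
}

func main() {
	m, tags := applyTemplate("region.measurement*", "us-west.cpu.load", ".")
	fmt.Println(m, tags) // cpu.load map[region:us-west]
}
```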
#### Field Templates:

There is also a _field_ keyword, which can only be specified once.
The field keyword tells Telegraf to give the metric that field name.
So the following template:

```toml
separator = "_"
templates = [
    "measurement.measurement.field.field.region"
]
@@ -237,24 +247,26 @@ templates = [
would result in the following Graphite -> Telegraf transformation.

```
cpu.usage.idle.percent.us-west 100
=> cpu_usage,region=us-west idle_percent=100
cpu.usage.idle.percent.eu-east 100
=> cpu_usage,region=eu-east idle_percent=100
```

The field key can also be derived from the second "half" of the input metric-name by specifying `field*`:
The field key can also be derived from all remaining elements of the graphite
bucket by specifying `field*`:

```toml
separator = "_"
templates = [
    "measurement.measurement.region.field*"
]
```

would result in the following Graphite -> Telegraf transformation.
which would result in the following Graphite -> Telegraf transformation.

```
cpu.usage.us-west.idle.percentage 100
=> cpu_usage,region=us-west idle_percentage=100
cpu.usage.eu-east.idle.percentage 100
=> cpu_usage,region=eu-east idle_percentage=100
```
(This cannot be used in conjunction with "measurement*"!)

#### Filter Templates:

@@ -271,8 +283,8 @@ templates = [
which would result in the following transformation:

```
cpu.load.us-west 100
=> cpu_load,region=us-west value=100
cpu.load.eu-east 100
=> cpu_load,region=eu-east value=100

mem.cached.localhost 256
=> mem_cached,host=localhost value=256
@@ -294,8 +306,8 @@ templates = [
would result in the following Graphite -> Telegraf transformation.

```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west,datacenter=1a idle=100
cpu.usage.idle.eu-east 100
=> cpu_usage,region=eu-east,datacenter=1a idle=100
```

There are many more options available,
@@ -326,12 +338,12 @@ There are many more options available,
## similar to the line protocol format. There can be only one default template.
## Templates support below format:
## 1. filter + template
## 2. filter + template + extra tag
## 2. filter + template + extra tag(s)
## 3. filter + template with field key
## 4. default template
templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=us-west,agent=sensu",
    "stats.* .host.measurement* region=eu-east,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
]
@@ -16,6 +16,7 @@
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
@@ -1,36 +1,39 @@
# Running Telegraf as a Windows Service

If you have tried to install Go binaries as Windows Services with the **sc.exe**
tool you may have seen that the service errors and stops running after a while.
Telegraf natively supports running as a Windows Service. Outlined below are
the general steps to set it up.

**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a
[number of scenarios](http://nssm.cc/scenarios) including running Go binaries
that were not specifically designed to run only on Windows platforms.
1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
location simply specify the `-config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (if necessary, you can wrap any spaces in the file paths in double quotes ""):

## NSSM Installation via Chocolatey
```
> C:\"Program Files"\Telegraf\telegraf.exe --service install
```

You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/)
with these commands:
5. Edit the configuration file to meet your needs
6. To check that it works, run:

```powershell
iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
choco install -y nssm
```
```
> C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
```

## Installing Telegraf as a Windows Service with NSSM
7. To start collecting data, run:

You can download the latest Telegraf Windows binaries (still experimental at
the moment) from [the Telegraf GitHub repo](https://github.com/influxdata/telegraf).
```
> net start telegraf
```

Then you can create a C:\telegraf folder, unzip the binary there and modify the
**telegraf.conf** sample to allocate the metrics you want to send to **InfluxDB**.
## Other supported operations

Once you have NSSM installed in your system, the process is quite straightforward.
You only need to type this command in your Windows shell:
Telegraf can manage its own service through the --service flag:

```powershell
nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.config
```
| Command                            | Effect                        |
|------------------------------------|-------------------------------|
| `telegraf.exe --service install`   | Install telegraf as a service |
| `telegraf.exe --service uninstall` | Remove the telegraf service   |
| `telegraf.exe --service start`     | Start the telegraf service    |
| `telegraf.exe --service stop`      | Stop the telegraf service     |

And now your service will be installed in Windows and you will be able to start and
stop it gracefully.

File diff suppressed because it is too large
@@ -42,10 +42,14 @@
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Logging configuration:
  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Specify the log file name. The empty string means to log to stdout.
  logfile = "/Program Files/Telegraf/telegraf.log"

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""

@@ -85,7 +89,7 @@
# Windows Performance Counters plugin.
# These are the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources.
# which utilize more system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
@@ -95,70 +99,104 @@
    # Processor usage, alternative to native, reports on a per core.
    ObjectName = "Processor"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
    Counters = [
      "% Idle Time",
      "% Interrupt Time",
      "% Privileged Time",
      "% User Time",
      "% Processor Time",
    ]
    Measurement = "win_cpu"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Disk times and queues
    ObjectName = "LogicalDisk"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
    Counters = [
      "% Idle Time",
      "% Disk Time","% Disk Read Time",
      "% Disk Write Time",
      "% User Time",
      "Current Disk Queue Length",
    ]
    Measurement = "win_disk"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = ["Context Switches/sec","System Calls/sec"]
    Counters = [
      "Context Switches/sec",
      "System Calls/sec",
    ]
    Instances = ["------"]
    Measurement = "win_system"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back, such as from the Memory object.
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Memory object.
    ObjectName = "Memory"
    Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
    Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
    Counters = [
      "Available Bytes",
      "Cache Faults/sec",
      "Demand Zero Faults/sec",
      "Page Faults/sec",
      "Pages/sec",
      "Transition Faults/sec",
      "Pool Nonpaged Bytes",
      "Pool Paged Bytes",
    ]
    # Use 6 x - to remove the Instance bit from the query.
    Instances = ["------"]
    Measurement = "win_mem"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false


# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

# Read metrics about cpu usage
#[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  #percpu = true
  ## Whether to report total system cpu stats or not
  #totalcpu = true
  ## Comment this line if you want the raw CPU time metrics
  #fielddrop = ["time_*"]
# # Read metrics about cpu usage
# [[inputs.cpu]]
#   ## Whether to report per-cpu stats or not
#   percpu = true
#   ## Whether to report total system cpu stats or not
#   totalcpu = true
#   ## Comment this line if you want the raw CPU time metrics
#   fielddrop = ["time_*"]

# Read metrics about disk usage by mount point
#[[inputs.disk]]
  ## By default, telegraf gather stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  ## mount_points=["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  #ignore_fs = ["tmpfs", "devtmpfs"]
# # Read metrics about disk usage by mount point
# [[inputs.disk]]
#   ## By default, telegraf gather stats for all mountpoints.
#   ## Setting mountpoints will restrict the stats to the specified mountpoints.
#   ## mount_points=["/"]
#
#   ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
#   ## present on /run, /var/run, /dev/shm or /dev).
#   # ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
#[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  ## devices = ["sda", "sdb"]
  ## Uncomment the following line if you do not need disk serial numbers.
  ## skip_serial_number = true

# Read metrics about memory usage
#[[inputs.mem]]
  # no configuration
# # Read metrics about disk IO by device
# [[inputs.diskio]]
#   ## By default, telegraf will gather stats for all devices including
#   ## disk partitions.
#   ## Setting devices will restrict the stats to the specified devices.
#   ## devices = ["sda", "sdb"]
#   ## Uncomment the following line if you do not need disk serial numbers.
#   ## skip_serial_number = true

# Read metrics about swap memory usage
#[[inputs.swap]]
  # no configuration

# # Read metrics about memory usage
# [[inputs.mem]]
#   # no configuration


# # Read metrics about swap memory usage
# [[inputs.swap]]
#   # no configuration
filter/filter.go (new file, 79 lines)
@@ -0,0 +1,79 @@
package filter

import (
	"strings"

	"github.com/gobwas/glob"
)

type Filter interface {
	Match(string) bool
}

// Compile takes a list of string filters and returns a Filter interface
// for matching a given string against the filter list. The filter list
// supports glob matching too, ie:
//
//   f, _ := Compile([]string{"cpu", "mem", "net*"})
//   f.Match("cpu")     // true
//   f.Match("network") // true
//   f.Match("memory")  // false
//
func Compile(filters []string) (Filter, error) {
	// return if there is nothing to compile
	if len(filters) == 0 {
		return nil, nil
	}

	// check if we can compile a non-glob filter
	noGlob := true
	for _, filter := range filters {
		if hasMeta(filter) {
			noGlob = false
			break
		}
	}

	switch {
	case noGlob:
		// return non-globbing filter if not needed.
		return compileFilterNoGlob(filters), nil
	case len(filters) == 1:
		return glob.Compile(filters[0])
	default:
		return glob.Compile("{" + strings.Join(filters, ",") + "}")
	}
}

// hasMeta reports whether path contains any magic glob characters.
func hasMeta(s string) bool {
	return strings.IndexAny(s, "*?[") >= 0
}

type filter struct {
	m map[string]struct{}
}

func (f *filter) Match(s string) bool {
	_, ok := f.m[s]
	return ok
}

type filtersingle struct {
	s string
}

func (f *filtersingle) Match(s string) bool {
	return f.s == s
}

func compileFilterNoGlob(filters []string) Filter {
	if len(filters) == 1 {
		return &filtersingle{s: filters[0]}
	}
	out := filter{m: make(map[string]struct{})}
	for _, filter := range filters {
		out.m[filter] = struct{}{}
	}
	return &out
}
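The `default` branch of `Compile` above leans on gobwas/glob's brace-set syntax: joining the filter strings with commas inside `{...}` yields a single pattern that matches if any alternative matches. A minimal standalone sketch of that underlying behavior:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// "{cpu,mem,net*}" matches any one of the three patterns, which is
	// how Compile collapses a whole filter list into one glob object.
	g := glob.MustCompile("{cpu,mem,net*}")
	fmt.Println(g.Match("network")) // true
	fmt.Println(g.Match("disk"))    // false
}
```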
filter/filter_test.go (new file, 96 lines)
@@ -0,0 +1,96 @@
package filter

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCompile(t *testing.T) {
	f, err := Compile([]string{})
	assert.NoError(t, err)
	assert.Nil(t, f)

	f, err = Compile([]string{"cpu"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.False(t, f.Match("mem"))

	f, err = Compile([]string{"cpu*"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.True(t, f.Match("cpu0"))
	assert.False(t, f.Match("mem"))

	f, err = Compile([]string{"cpu", "mem"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.True(t, f.Match("mem"))

	f, err = Compile([]string{"cpu", "mem", "net*"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.True(t, f.Match("mem"))
	assert.True(t, f.Match("network"))
}

var benchbool bool

func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
	f, _ := Compile([]string{"cpu"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("network")
	}
	benchbool = tmp
}

func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
	f, _ := Compile([]string{"cpu"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("cpu")
	}
	benchbool = tmp
}

func BenchmarkFilter(b *testing.B) {
	f, _ := Compile([]string{"cpu", "mem", "net*"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("network")
	}
	benchbool = tmp
}

func BenchmarkFilterNoGlob(b *testing.B) {
	f, _ := Compile([]string{"cpu", "mem", "net"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("net")
	}
	benchbool = tmp
}

func BenchmarkFilter2(b *testing.B) {
	f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
		"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("network")
	}
	benchbool = tmp
}

func BenchmarkFilter2NoGlob(b *testing.B) {
	f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
		"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
	var tmp bool
	for n := 0; n < b.N; n++ {
		tmp = f.Match("net")
	}
	benchbool = tmp
}
internal/config/aws/credentials.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

type CredentialConfig struct {
	Region    string
	AccessKey string
	SecretKey string
	RoleARN   string
	Profile   string
	Filename  string
	Token     string
}

func (c *CredentialConfig) Credentials() client.ConfigProvider {
	if c.RoleARN != "" {
		return c.assumeCredentials()
	} else {
		return c.rootCredentials()
	}
}

func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
	config := &aws.Config{
		Region: aws.String(c.Region),
	}
	if c.AccessKey != "" || c.SecretKey != "" {
		config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
	} else if c.Profile != "" || c.Filename != "" {
		config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile)
	}

	return session.New(config)
}

func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
	rootCredentials := c.rootCredentials()
	config := &aws.Config{
		Region: aws.String(c.Region),
	}
	config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
	return session.New(config)
}
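A hedged sketch of how a plugin might consume this package. The cloudwatch client is just an example; any AWS service constructor that accepts a `client.ConfigProvider` works the same way, and note that `internal` packages are only importable from within the telegraf module itself:

```go
package main

import (
	"github.com/aws/aws-sdk-go/service/cloudwatch"

	internalaws "github.com/influxdata/telegraf/internal/config/aws"
)

func main() {
	// Role assumption kicks in automatically when RoleARN is set;
	// otherwise static keys or a shared-credentials profile are used.
	creds := internalaws.CredentialConfig{
		Region:  "us-east-1",
		Profile: "default",
	}
	svc := cloudwatch.New(creds.Credentials())
	_ = svc
}
```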
@@ -9,6 +9,7 @@ import (
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strings"
	"time"
@@ -47,8 +48,8 @@ type Config struct {
	OutputFilters []string

	Agent *AgentConfig
	Inputs []*internal_models.RunningInput
	Outputs []*internal_models.RunningOutput
	Inputs []*models.RunningInput
	Outputs []*models.RunningOutput
}

func NewConfig() *Config {
@@ -58,12 +59,11 @@ func NewConfig() *Config {
			Interval: internal.Duration{Duration: 10 * time.Second},
			RoundInterval: true,
			FlushInterval: internal.Duration{Duration: 10 * time.Second},
			FlushJitter: internal.Duration{Duration: 5 * time.Second},
		},

		Tags: make(map[string]string),
		Inputs: make([]*internal_models.RunningInput, 0),
		Outputs: make([]*internal_models.RunningOutput, 0),
		Inputs: make([]*models.RunningInput, 0),
		Outputs: make([]*models.RunningOutput, 0),
		InputFilters: make([]string, 0),
		OutputFilters: make([]string, 0),
	}
@@ -78,6 +78,14 @@ type AgentConfig struct {
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// By default, precision will be set to the same timestamp order as the
	// collection interval, with the maximum being 1s.
	// ie, when interval = "10s", precision will be "1s"
	// when interval = "250ms", precision will be "1ms"
	// Precision will NOT be used for service inputs. It is up to each individual
	// service input to set the timestamp at the appropriate precision.
	Precision internal.Duration

	// CollectionJitter is used to jitter the collection by a random amount.
	// Each plugin will sleep for a random time within jitter before collecting.
	// This can be used to avoid many plugins querying things like sysfs at the
@@ -109,15 +117,17 @@ type AgentConfig struct {
	// does _not_ deactivate FlushInterval.
	FlushBufferWhenFull bool

	// TODO(cam): Remove UTC and Precision parameters, they are no longer
	// TODO(cam): Remove the UTC parameter, it is no longer
	// valid for the agent config. Leaving them here for now for backwards-
	// compatibility
	UTC bool `toml:"utc"`
	Precision string
	UTC bool `toml:"utc"`

	// Debug is the option for running in debug mode
	Debug bool

	// Logfile specifies the file to send logs to
	Logfile string

	// Quiet is the option for running in quiet mode
	Quiet bool
	Hostname string
@@ -133,7 +143,7 @@ func (c *Config) InputNames() []string {
	return name
}

// Outputs returns a list of strings of the configured inputs.
// Outputs returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
	var name []string
	for _, output := range c.Outputs {
@@ -188,12 +198,15 @@ var header = `# Telegraf Configuration
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at
  ## most metric_batch_size metrics.
  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  ## This buffer only fills when writes fail to output plugin(s).
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
@@ -210,10 +223,20 @@ var header = `# Telegraf Configuration
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Run telegraf in debug mode
  ## By default, precision will be set to the same timestamp order as the
  ## collection interval, with the maximum being 1s.
  ## Precision will NOT be used for service inputs, such as logparser and statsd.
  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
  ## Run telegraf with debug log messages.
  debug = false
  ## Run telegraf in quiet mode
  ## Run telegraf in quiet mode (error log messages only).
  quiet = false
  ## Specify the log file name. The empty string means to log to stdout.
  logfile = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do no set the "host" tag in the telegraf agent.
@@ -357,7 +380,7 @@ func printConfig(name string, p printer, op string, commented bool) {
			fmt.Print("\n")
			continue
		}
		fmt.Print(comment + line + "\n")
		fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
	}
}
}
@@ -392,24 +415,21 @@ func PrintOutputConfig(name string) error {
}

func (c *Config) LoadDirectory(path string) error {
	directoryEntries, err := ioutil.ReadDir(path)
	if err != nil {
		return err
	}
	for _, entry := range directoryEntries {
		if entry.IsDir() {
			continue
	walkfn := func(thispath string, info os.FileInfo, _ error) error {
		if info.IsDir() {
			return nil
		}
		name := entry.Name()
		name := info.Name()
		if len(name) < 6 || name[len(name)-5:] != ".conf" {
			continue
			return nil
		}
		err := c.LoadConfig(filepath.Join(path, name))
		err := c.LoadConfig(thispath)
		if err != nil {
			return err
		}
		return nil
	}
	return nil
	return filepath.Walk(path, walkfn)
}

// Try to find a default config file at these locations (in order):
@@ -421,9 +441,12 @@ func getDefaultConfigPath() (string, error) {
	envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
	homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
	etcfile := "/etc/telegraf/telegraf.conf"
	if runtime.GOOS == "windows" {
		etcfile = `C:\Program Files\Telegraf\telegraf.conf`
	}
	for _, path := range []string{envfile, homefile, etcfile} {
		if _, err := os.Stat(path); err == nil {
			log.Printf("Using config file: %s", path)
			log.Printf("I! Using config file: %s", path)
			return path, nil
		}
	}
@@ -454,7 +477,7 @@ func (c *Config) LoadConfig(path string) error {
				return fmt.Errorf("%s: invalid configuration", path)
			}
			if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
				log.Printf("Could not parse [global_tags] config\n")
				log.Printf("E! Could not parse [global_tags] config\n")
				return fmt.Errorf("Error parsing %s, %s", path, err)
			}
		}
@@ -467,7 +490,7 @@ func (c *Config) LoadConfig(path string) error {
				return fmt.Errorf("%s: invalid configuration", path)
			}
			if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
				log.Printf("Could not parse [agent] config\n")
				log.Printf("E! Could not parse [agent] config\n")
				return fmt.Errorf("Error parsing %s, %s", path, err)
			}
		}
@@ -528,6 +551,13 @@ func (c *Config) LoadConfig(path string) error {
	return nil
}

// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
@@ -536,6 +566,8 @@ func parseFile(fpath string) (*ast.Table, error) {
	if err != nil {
		return nil, err
	}
	// ugh windows why
	contents = trimBOM(contents)

	env_vars := envVarRe.FindAll(contents, -1)
	for _, env_var := range env_vars {
@@ -578,7 +610,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
		return err
	}

	ro := internal_models.NewRunningOutput(name, output, outputConfig,
	ro := models.NewRunningOutput(name, output, outputConfig,
		c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
	c.Outputs = append(c.Outputs, ro)
	return nil
@@ -619,7 +651,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
		return err
	}

	rp := &internal_models.RunningInput{
	rp := &models.RunningInput{
		Name: name,
		Input: input,
		Config: pluginConfig,
@@ -630,10 +662,10 @@ func (c *Config) addInput(name string, table *ast.Table) error {

// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
	f := internal_models.Filter{}
func buildFilter(tbl *ast.Table) (models.Filter, error) {
	f := models.Filter{}

	if node, ok := tbl.Fields["namepass"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
@@ -641,7 +673,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.NamePass = append(f.NamePass, str.Value)
						f.IsActive = true
					}
				}
			}
@@ -654,7 +685,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.NameDrop = append(f.NameDrop, str.Value)
						f.IsActive = true
					}
				}
			}
@@ -669,7 +699,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.FieldPass = append(f.FieldPass, str.Value)
						f.IsActive = true
					}
				}
			}
@@ -685,7 +714,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.FieldDrop = append(f.FieldDrop, str.Value)
						f.IsActive = true
					}
				}
			}
@@ -697,7 +725,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
		if subtbl, ok := node.(*ast.Table); ok {
			for name, val := range subtbl.Fields {
				if kv, ok := val.(*ast.KeyValue); ok {
					tagfilter := &internal_models.TagFilter{Name: name}
					tagfilter := &models.TagFilter{Name: name}
					if ary, ok := kv.Value.(*ast.Array); ok {
						for _, elem := range ary.Value {
							if str, ok := elem.(*ast.String); ok {
@@ -706,7 +734,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
						}
					}
					f.TagPass = append(f.TagPass, *tagfilter)
					f.IsActive = true
				}
			}
		}
@@ -716,7 +743,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
		if subtbl, ok := node.(*ast.Table); ok {
			for name, val := range subtbl.Fields {
				if kv, ok := val.(*ast.KeyValue); ok {
					tagfilter := &internal_models.TagFilter{Name: name}
					tagfilter := &models.TagFilter{Name: name}
					if ary, ok := kv.Value.(*ast.Array); ok {
						for _, elem := range ary.Value {
							if str, ok := elem.(*ast.String); ok {
@@ -725,7 +752,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
						}
					}
					f.TagDrop = append(f.TagDrop, *tagfilter)
					f.IsActive = true
				}
			}
		}
@@ -754,7 +780,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
			}
		}
	}
	if err := f.CompileFilter(); err != nil {
	if err := f.Compile(); err != nil {
		return f, err
	}

@@ -773,9 +799,9 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {

// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// internal_models.InputConfig to be inserted into internal_models.RunningInput
func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) {
	cp := &internal_models.InputConfig{Name: name}
// models.InputConfig to be inserted into models.RunningInput
func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
	cp := &models.InputConfig{Name: name}
	if node, ok := tbl.Fields["interval"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
@@ -817,7 +843,7 @@ func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, erro
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
				log.Printf("Could not parse tags for input %s\n", name)
				log.Printf("E! Could not parse tags for input %s\n", name)
			}
		}
	}
@@ -949,14 +975,14 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error

// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns an
// internal_models.OutputConfig to be inserted into internal_models.RunningInput
// models.OutputConfig to be inserted into models.RunningInput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
	filter, err := buildFilter(tbl)
	if err != nil {
		return nil, err
	}
	oc := &internal_models.OutputConfig{
	oc := &models.OutputConfig{
		Name: name,
		Filter: filter,
	}

@@ -26,27 +26,26 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
	memcached.Servers = []string{"192.168.1.1"}

	filter := internal_models.Filter{
	filter := models.Filter{
		NameDrop: []string{"metricname2"},
		NamePass: []string{"metricname1"},
		FieldDrop: []string{"other", "stuff"},
		FieldPass: []string{"some", "strings"},
		TagDrop: []internal_models.TagFilter{
			internal_models.TagFilter{
		TagDrop: []models.TagFilter{
			models.TagFilter{
				Name: "badtag",
				Filter: []string{"othertag"},
			},
		},
		TagPass: []internal_models.TagFilter{
			internal_models.TagFilter{
		TagPass: []models.TagFilter{
			models.TagFilter{
				Name: "goodtag",
				Filter: []string{"mytag"},
			},
		},
		IsActive: true,
	}
	assert.NoError(t, filter.CompileFilter())
	mConfig := &internal_models.InputConfig{
	assert.NoError(t, filter.Compile())
	mConfig := &models.InputConfig{
		Name: "memcached",
		Filter: filter,
		Interval: 10 * time.Second,
@@ -66,27 +65,26 @@ func TestConfig_LoadSingleInput(t *testing.T) {
	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
	memcached.Servers = []string{"localhost"}

	filter := internal_models.Filter{
	filter := models.Filter{
		NameDrop: []string{"metricname2"},
		NamePass: []string{"metricname1"},
		FieldDrop: []string{"other", "stuff"},
		FieldPass: []string{"some", "strings"},
		TagDrop: []internal_models.TagFilter{
			internal_models.TagFilter{
		TagDrop: []models.TagFilter{
			models.TagFilter{
				Name: "badtag",
				Filter: []string{"othertag"},
			},
		},
		TagPass: []internal_models.TagFilter{
			internal_models.TagFilter{
		TagPass: []models.TagFilter{
			models.TagFilter{
				Name: "goodtag",
				Filter: []string{"mytag"},
			},
		},
		IsActive: true,
	}
	assert.NoError(t, filter.CompileFilter())
	mConfig := &internal_models.InputConfig{
	assert.NoError(t, filter.Compile())
	mConfig := &models.InputConfig{
		Name: "memcached",
		Filter: filter,
		Interval: 5 * time.Second,
@@ -113,27 +111,26 @@ func TestConfig_LoadDirectory(t *testing.T) {
	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
	memcached.Servers = []string{"localhost"}

	filter := internal_models.Filter{
	filter := models.Filter{
		NameDrop: []string{"metricname2"},
		NamePass: []string{"metricname1"},
		FieldDrop: []string{"other", "stuff"},
		FieldPass: []string{"some", "strings"},
		TagDrop: []internal_models.TagFilter{
			internal_models.TagFilter{
		TagDrop: []models.TagFilter{
			models.TagFilter{
				Name: "badtag",
				Filter: []string{"othertag"},
			},
		},
		TagPass: []internal_models.TagFilter{
			internal_models.TagFilter{
		TagPass: []models.TagFilter{
			models.TagFilter{
				Name: "goodtag",
				Filter: []string{"mytag"},
			},
		},
		IsActive: true,
	}
	assert.NoError(t, filter.CompileFilter())
	mConfig := &internal_models.InputConfig{
	assert.NoError(t, filter.Compile())
	mConfig := &models.InputConfig{
		Name: "memcached",
		Filter: filter,
		Interval: 5 * time.Second,
@@ -150,7 +147,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
	assert.NoError(t, err)
	ex.SetParser(p)
	ex.Command = "/usr/bin/myothercollector --foo=bar"
	eConfig := &internal_models.InputConfig{
	eConfig := &models.InputConfig{
		Name: "exec",
		MeasurementSuffix: "_myothercollector",
	}
@@ -169,7 +166,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
	pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
	pstat.PidFile = "/var/run/grafana-server.pid"

	pConfig := &internal_models.InputConfig{Name: "procstat"}
	pConfig := &models.InputConfig{Name: "procstat"}
	pConfig.Tags = make(map[string]string)

	assert.Equal(t, pstat, c.Inputs[3].Input,
internal/errchan/errchan.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package errchan

import (
	"fmt"
	"strings"
)

type ErrChan struct {
	C chan error
}

// New returns an error channel of max length 'n'.
// Errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
	return &ErrChan{
		C: make(chan error, n),
	}
}

// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
	close(e.C)

	var out string
	for err := range e.C {
		if err != nil {
			out += "[" + err.Error() + "], "
		}
	}

	if out != "" {
		return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
	}
	return nil
}
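A brief usage sketch, assuming the errchan package above: fan out workers, let each send one error (possibly nil) on `C`, then collect a combined error. Note that every send must complete before `Error()` is called, since `Error()` closes the channel:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/influxdata/telegraf/internal/errchan"
)

func main() {
	jobs := []string{"a", "b", "c"}
	errc := errchan.New(len(jobs)) // buffer must cover every send
	var wg sync.WaitGroup
	for _, j := range jobs {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			// each worker sends exactly one error (possibly nil)
			errc.C <- fmt.Errorf("job %s failed", name)
		}(j)
	}
	wg.Wait() // all sends must finish before Error() closes C
	fmt.Println(errc.Error())
}
```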
@@ -12,21 +12,23 @@ import (
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))

type GlobPath struct {
	path    string
	hasMeta bool
	g       glob.Glob
	root    string
	path         string
	hasMeta      bool
	hasSuperMeta bool
	g            glob.Glob
	root         string
}

func Compile(path string) (*GlobPath, error) {
	out := GlobPath{
		hasMeta: hasMeta(path),
		path:    path,
		hasMeta:      hasMeta(path),
		hasSuperMeta: hasSuperMeta(path),
		path:         path,
	}

	// if there are no glob meta characters in the path, don't bother compiling
	// a glob object or finding the root directory. (see short-circuit in Match)
	if !out.hasMeta {
	if !out.hasMeta || !out.hasSuperMeta {
		return &out, nil
	}

@@ -48,6 +50,17 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
		}
		return out
	}
	if !g.hasSuperMeta {
		out := make(map[string]os.FileInfo)
		files, _ := filepath.Glob(g.path)
		for _, file := range files {
			info, err := os.Stat(file)
			if !os.IsNotExist(err) {
				out[file] = info
			}
		}
		return out
	}
	return walkFilePath(g.root, g.g)
}

@@ -96,3 +109,8 @@ func findRootDir(path string) string {
func hasMeta(path string) bool {
	return strings.IndexAny(path, "*?[") >= 0
}

// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
	return strings.Index(path, "**") >= 0
}
@@ -10,8 +10,10 @@ import (
	"fmt"
	"io/ioutil"
	"log"
	"math/big"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"
	"unicode"
@@ -32,12 +34,25 @@ type Duration struct {

// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
	dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
	if err != nil {
		return err
	var err error
	// Parse string duration, ie, "1s"
	d.Duration, err = time.ParseDuration(string(b[1 : len(b)-1]))
	if err == nil {
		return nil
	}

	d.Duration = dur
	// First try parsing as integer seconds
	sI, err := strconv.ParseInt(string(b), 10, 64)
	if err == nil {
		d.Duration = time.Second * time.Duration(sI)
		return nil
	}
	// Second try parsing as float seconds
	sF, err := strconv.ParseFloat(string(b), 64)
	if err == nil {
		d.Duration = time.Second * time.Duration(sF)
		return nil
	}

	return nil
}
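For reference, a small sketch of the three value forms the rewritten `UnmarshalTOML` now accepts. The raw byte literals mirror what the TOML parser hands over, and the results follow the parsing order above (this assumes the `internal` package path within the telegraf module; errors are ignored for brevity):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	var d internal.Duration

	d.UnmarshalTOML([]byte(`"1m30s"`)) // quoted duration string
	fmt.Println(d.Duration)            // 1m30s

	d.UnmarshalTOML([]byte(`15`)) // bare integer -> seconds
	fmt.Println(d.Duration)       // 15s

	d.UnmarshalTOML([]byte(`2.5`)) // bare float -> seconds (truncated)
	fmt.Println(d.Duration)        // 2s
}
```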
@@ -118,8 +133,8 @@ func GetTLSConfig(
	cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
	if err != nil {
		return nil, errors.New(fmt.Sprintf(
			"Could not load TLS client key/certificate: %s",
			err))
			"Could not load TLS client key/certificate from %s:%s: %s",
			SSLKey, SSLCert, err))
	}

	t.Certificates = []tls.Certificate{cert}
@@ -183,7 +198,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
		return err
	case <-timer.C:
		if err := c.Process.Kill(); err != nil {
			log.Printf("FATAL error killing process: %s", err)
			log.Printf("E! FATAL error killing process: %s", err)
			return err
		}
		// wait for the command to return after killing it
@@ -191,3 +206,27 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
		return TimeoutErr
	}
}

// RandomSleep will sleep for a random amount of time up to max.
// If the shutdown channel is closed, it will return before it has finished
// sleeping.
func RandomSleep(max time.Duration, shutdown chan struct{}) {
	if max == 0 {
		return
	}
	maxSleep := big.NewInt(max.Nanoseconds())

	var sleepns int64
	if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
		sleepns = j.Int64()
	}

	t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
	select {
	case <-t.C:
		return
	case <-shutdown:
		t.Stop()
		return
	}
}
@@ -106,3 +106,28 @@ func TestRunError(t *testing.T) {

	assert.Error(t, err)
}

func TestRandomSleep(t *testing.T) {
	// test that zero max returns immediately
	s := time.Now()
	RandomSleep(time.Duration(0), make(chan struct{}))
	elapsed := time.Since(s)
	assert.True(t, elapsed < time.Millisecond)

	// test that max sleep is respected
	s = time.Now()
	RandomSleep(time.Millisecond*50, make(chan struct{}))
	elapsed = time.Since(s)
	assert.True(t, elapsed < time.Millisecond*100)

	// test that shutdown is respected
	s = time.Now()
	shutdown := make(chan struct{})
	go func() {
		time.Sleep(time.Millisecond * 100)
		close(shutdown)
	}()
	RandomSleep(time.Second, shutdown)
	elapsed = time.Since(s)
	assert.True(t, elapsed < time.Millisecond*150)
}
internal/limiter/limiter.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package limiter

import (
	"sync"
	"time"
)

// NewRateLimiter returns a rate limiter that will emit from the C
// channel only 'n' times every 'rate' interval.
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
	r := &rateLimiter{
		C:        make(chan bool),
		rate:     rate,
		n:        n,
		shutdown: make(chan bool),
	}
	r.wg.Add(1)
	go r.limiter()
	return r
}

type rateLimiter struct {
	C    chan bool
	rate time.Duration
	n    int

	shutdown chan bool
	wg       sync.WaitGroup
}

func (r *rateLimiter) Stop() {
	close(r.shutdown)
	r.wg.Wait()
	close(r.C)
}

func (r *rateLimiter) limiter() {
	defer r.wg.Done()
	ticker := time.NewTicker(r.rate)
	defer ticker.Stop()
	counter := 0
	for {
		select {
		case <-r.shutdown:
			return
		case <-ticker.C:
			counter = 0
		default:
			if counter < r.n {
				select {
				case r.C <- true:
					counter++
				case <-r.shutdown:
					return
				}
			}
		}
	}
}
internal/limiter/limiter_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package limiter

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestRateLimiter(t *testing.T) {
	r := NewRateLimiter(5, time.Second)
	ticker := time.NewTicker(time.Millisecond * 75)

	// test that we can only get 5 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-r.C:
			counter++
		case <-ticker.C:
			break outer
		}
	}

	assert.Equal(t, 5, counter)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

func TestRateLimiterMultipleIterations(t *testing.T) {
	r := NewRateLimiter(5, time.Millisecond*50)
	ticker := time.NewTicker(time.Millisecond * 250)

	// test that we can get 15 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-ticker.C:
			break outer
		case <-r.C:
			counter++
		}
	}

	assert.True(t, counter > 10)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}
@@ -1,82 +1,91 @@
package internal_models
package models

import (
	"fmt"
	"strings"

	"github.com/gobwas/glob"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
)

// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
	Name   string
	Filter []string
	filter glob.Glob
	filter filter.Filter
}

// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
	NameDrop []string
	nameDrop glob.Glob
	nameDrop filter.Filter
	NamePass []string
	namePass glob.Glob
	namePass filter.Filter

	FieldDrop []string
	fieldDrop glob.Glob
	fieldDrop filter.Filter
	FieldPass []string
	fieldPass glob.Glob
	fieldPass filter.Filter

	TagDrop []TagFilter
	TagPass []TagFilter

	TagExclude []string
	tagExclude glob.Glob
	tagExclude filter.Filter
	TagInclude []string
	tagInclude glob.Glob
	tagInclude filter.Filter

	IsActive bool
	isActive bool
}

// Compile all Filter lists into glob.Glob objects.
func (f *Filter) CompileFilter() error {
// Compile all Filter lists into filter.Filter objects.
func (f *Filter) Compile() error {
	if len(f.NameDrop) == 0 &&
		len(f.NamePass) == 0 &&
		len(f.FieldDrop) == 0 &&
		len(f.FieldPass) == 0 &&
		len(f.TagInclude) == 0 &&
		len(f.TagExclude) == 0 &&
		len(f.TagPass) == 0 &&
		len(f.TagDrop) == 0 {
		return nil
	}

	f.isActive = true
	var err error
	f.nameDrop, err = compileFilter(f.NameDrop)
	f.nameDrop, err = filter.Compile(f.NameDrop)
	if err != nil {
		return fmt.Errorf("Error compiling 'namedrop', %s", err)
	}
	f.namePass, err = compileFilter(f.NamePass)
	f.namePass, err = filter.Compile(f.NamePass)
	if err != nil {
		return fmt.Errorf("Error compiling 'namepass', %s", err)
	}

	f.fieldDrop, err = compileFilter(f.FieldDrop)
	f.fieldDrop, err = filter.Compile(f.FieldDrop)
	if err != nil {
		return fmt.Errorf("Error compiling 'fielddrop', %s", err)
	}
	f.fieldPass, err = compileFilter(f.FieldPass)
	f.fieldPass, err = filter.Compile(f.FieldPass)
	if err != nil {
		return fmt.Errorf("Error compiling 'fieldpass', %s", err)
	}

	f.tagExclude, err = compileFilter(f.TagExclude)
	f.tagExclude, err = filter.Compile(f.TagExclude)
	if err != nil {
		return fmt.Errorf("Error compiling 'tagexclude', %s", err)
	}
	f.tagInclude, err = compileFilter(f.TagInclude)
	f.tagInclude, err = filter.Compile(f.TagInclude)
	if err != nil {
		return fmt.Errorf("Error compiling 'taginclude', %s", err)
	}

	for i, _ := range f.TagDrop {
		f.TagDrop[i].filter, err = compileFilter(f.TagDrop[i].Filter)
		f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
		if err != nil {
			return fmt.Errorf("Error compiling 'tagdrop', %s", err)
		}
	}
	for i, _ := range f.TagPass {
		f.TagPass[i].filter, err = compileFilter(f.TagPass[i].Filter)
		f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
		if err != nil {
			return fmt.Errorf("Error compiling 'tagpass', %s", err)
		}
@@ -84,30 +93,52 @@ func (f *Filter) CompileFilter() error {
	return nil
}

func compileFilter(filter []string) (glob.Glob, error) {
	if len(filter) == 0 {
		return nil, nil
	}
	var g glob.Glob
	var err error
	if len(filter) == 1 {
		g, err = glob.Compile(filter[0])
	} else {
		g, err = glob.Compile("{" + strings.Join(filter, ",") + "}")
	}
	return g, err
}

func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool {
	if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
// Apply applies the filter to the given measurement name, fields map, and
// tags map. It will return false if the metric should be "filtered out", and
// true if the metric should "pass".
// It will modify tags in-place if they need to be deleted.
func (f *Filter) Apply(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
) bool {
	if !f.isActive {
		return true
	}
	return false

	// check if the measurement name should pass
	if !f.shouldNamePass(measurement) {
		return false
	}

	// check if the tags should pass
	if !f.shouldTagsPass(tags) {
		return false
	}

	// filter fields
	for fieldkey, _ := range fields {
		if !f.shouldFieldPass(fieldkey) {
			delete(fields, fieldkey)
		}
	}
	if len(fields) == 0 {
		return false
	}

	// filter tags
	f.filterTags(tags)

	return true
}
|
||||
|
||||
// ShouldFieldsPass returns true if the metric should pass, false if should drop
|
||||
func (f *Filter) IsActive() bool {
|
||||
return f.isActive
|
||||
}
|
||||
|
||||
// shouldNamePass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f *Filter) ShouldNamePass(key string) bool {
|
||||
func (f *Filter) shouldNamePass(key string) bool {
|
||||
if f.namePass != nil {
|
||||
if f.namePass.Match(key) {
|
||||
return true
|
||||
@@ -123,9 +154,9 @@ func (f *Filter) ShouldNamePass(key string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ShouldFieldsPass returns true if the metric should pass, false if should drop
|
||||
// shouldFieldPass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f *Filter) ShouldFieldsPass(key string) bool {
|
||||
func (f *Filter) shouldFieldPass(key string) bool {
|
||||
if f.fieldPass != nil {
|
||||
if f.fieldPass.Match(key) {
|
||||
return true
|
||||
@@ -141,9 +172,9 @@ func (f *Filter) ShouldFieldsPass(key string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ShouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// shouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// based on the tagdrop/tagpass filter parameters
|
||||
func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
|
||||
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
|
||||
if f.TagPass != nil {
|
||||
for _, pat := range f.TagPass {
|
||||
if pat.filter == nil {
|
||||
@@ -177,7 +208,7 @@ func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
|
||||
|
||||
// Apply TagInclude and TagExclude filters.
|
||||
// modifies the tags map in-place.
|
||||
func (f *Filter) FilterTags(tags map[string]string) {
|
||||
func (f *Filter) filterTags(tags map[string]string) {
|
||||
if f.tagInclude != nil {
|
||||
for k, _ := range tags {
|
||||
if !f.tagInclude.Match(k) {
|
||||
|
||||
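Taken together, Compile and Apply replace the old three-call dance (CompileFilter, ShouldMetricPass, FilterTags) with a single entry point. A sketch of the intended call pattern, written as a hypothetical helper inside the models package (the function and its values are illustrative, not part of the diff):

```go
package models

import "log"

// filterUsageSketch demonstrates the new Compile/Apply flow; the
// field values here are examples only.
func filterUsageSketch() {
	f := Filter{
		NamePass:  []string{"cpu*"},
		FieldDrop: []string{"usage_guest*"},
	}
	if err := f.Compile(); err != nil {
		log.Fatalf("E! compiling filter: %s", err)
	}

	fields := map[string]interface{}{
		"usage_idle":       float64(99),
		"usage_guest_nice": float64(0),
	}
	tags := map[string]string{"host": "localhost"}

	// Apply deletes dropped fields and excluded tags in place, and
	// reports whether the metric should be kept at all.
	if f.Apply("cpu", fields, tags) {
		log.Printf("I! kept fields: %v", fields) // only usage_idle remains
	}
}
```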
@@ -1,14 +1,64 @@
package internal_models
package models

import (
"testing"

"github.com/influxdata/telegraf/testutil"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestFilter_ApplyEmpty(t *testing.T) {
f := Filter{}
require.NoError(t, f.Compile())
assert.False(t, f.IsActive())

assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
}

func TestFilter_ApplyTagsDontPass(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
}
f := Filter{
TagDrop: filters,
}
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())

assert.False(t, f.Apply("m",
map[string]interface{}{"value": int64(1)},
map[string]string{"cpu": "cpu-total"}))
}

func TestFilter_ApplyDeleteFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value"},
}
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())

fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.True(t, f.Apply("m", fields, nil))
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
}

func TestFilter_ApplyDeleteAllFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value*"},
}
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())

fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.False(t, f.Apply("m", fields, nil))
}

func TestFilter_Empty(t *testing.T) {
f := Filter{}

@@ -23,7 +73,7 @@ func TestFilter_Empty(t *testing.T) {
}

for _, measurement := range measurements {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
@@ -33,7 +83,7 @@ func TestFilter_NamePass(t *testing.T) {
f := Filter{
NamePass: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

passes := []string{
"foo",
@@ -51,13 +101,13 @@ func TestFilter_NamePass(t *testing.T) {
}

for _, measurement := range passes {
if !f.ShouldNamePass(measurement) {
if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}

for _, measurement := range drops {
if f.ShouldNamePass(measurement) {
if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -67,7 +117,7 @@ func TestFilter_NameDrop(t *testing.T) {
f := Filter{
NameDrop: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

drops := []string{
"foo",
@@ -85,13 +135,13 @@ func TestFilter_NameDrop(t *testing.T) {
}

for _, measurement := range passes {
if !f.ShouldNamePass(measurement) {
if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}

for _, measurement := range drops {
if f.ShouldNamePass(measurement) {
if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -101,7 +151,7 @@ func TestFilter_FieldPass(t *testing.T) {
f := Filter{
FieldPass: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

passes := []string{
"foo",
@@ -119,13 +169,13 @@ func TestFilter_FieldPass(t *testing.T) {
}

for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}

for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -135,7 +185,7 @@ func TestFilter_FieldDrop(t *testing.T) {
f := Filter{
FieldDrop: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

drops := []string{
"foo",
@@ -153,13 +203,13 @@ func TestFilter_FieldDrop(t *testing.T) {
}

for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}

for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -178,7 +228,7 @@ func TestFilter_TagPass(t *testing.T) {
f := Filter{
TagPass: filters,
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

passes := []map[string]string{
{"cpu": "cpu-total"},
@@ -197,13 +247,13 @@ func TestFilter_TagPass(t *testing.T) {
}

for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}

for _, tags := range drops {
if f.ShouldTagsPass(tags) {
if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
@@ -222,7 +272,7 @@ func TestFilter_TagDrop(t *testing.T) {
f := Filter{
TagDrop: filters,
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

drops := []map[string]string{
{"cpu": "cpu-total"},
@@ -241,75 +291,18 @@ func TestFilter_TagDrop(t *testing.T) {
}

for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}

for _, tags := range drops {
if f.ShouldTagsPass(tags) {
if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
}

func TestFilter_CompileFilterError(t *testing.T) {
f := Filter{
NameDrop: []string{"", ""},
}
assert.Error(t, f.CompileFilter())
f = Filter{
NamePass: []string{"", ""},
}
assert.Error(t, f.CompileFilter())
f = Filter{
FieldDrop: []string{"", ""},
}
assert.Error(t, f.CompileFilter())
f = Filter{
FieldPass: []string{"", ""},
}
assert.Error(t, f.CompileFilter())
f = Filter{
TagExclude: []string{"", ""},
}
assert.Error(t, f.CompileFilter())
f = Filter{
TagInclude: []string{"", ""},
}
assert.Error(t, f.CompileFilter())
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"{foobar}"},
}}
f = Filter{
TagDrop: filters,
}
require.Error(t, f.CompileFilter())
filters = []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"{foobar}"},
}}
f = Filter{
TagPass: filters,
}
require.Error(t, f.CompileFilter())
}

func TestFilter_ShouldMetricsPass(t *testing.T) {
m := testutil.TestMetric(1, "testmetric")
f := Filter{
NameDrop: []string{"foobar"},
}
require.NoError(t, f.CompileFilter())
require.True(t, f.ShouldMetricPass(m))

m = testutil.TestMetric(1, "foobar")
require.False(t, f.ShouldMetricPass(m))
}

func TestFilter_FilterTagsNoMatches(t *testing.T) {
pretags := map[string]string{
"host": "localhost",
@@ -318,9 +311,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
f := Filter{
TagExclude: []string{"nomatch"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"host": "localhost",
"mytag": "foobar",
@@ -329,9 +322,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
f = Filter{
TagInclude: []string{"nomatch"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{}, pretags)
}

@@ -343,9 +336,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
f := Filter{
TagExclude: []string{"ho*"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, pretags)
@@ -357,9 +350,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
f = Filter{
TagInclude: []string{"my*"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())

f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, pretags)
@@ -1,4 +1,4 @@
package internal_models
package models

import (
"time"
@@ -1,4 +1,4 @@
package internal_models
package models

import (
"log"
@@ -57,21 +57,17 @@ func NewRunningOutput(
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
if ro.Config.Filter.IsActive {
if !ro.Config.Filter.ShouldMetricPass(metric) {
return
}
}

// Filter any tagexclude/taginclude parameters before adding metric
if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since
// metrics are immutable once created.
name := metric.Name()
tags := metric.Tags()
fields := metric.Fields()
t := metric.Time()
name := metric.Name()
ro.Config.Filter.FilterTags(tags)
if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
return
}
// error is not possible if creating from another metric, so ignore.
metric, _ = telegraf.NewMetric(name, tags, fields, t)
}
@@ -89,7 +85,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
if !ro.Quiet {
log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+
log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
"Total gathered metrics: %d. Total dropped metrics: %d.",
ro.Name,
ro.failMetrics.Len()+ro.metrics.Len(),
@@ -138,7 +134,7 @@ func (ro *RunningOutput) Write() error {
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
if len(metrics) == 0 {
if metrics == nil || len(metrics) == 0 {
return nil
}
start := time.Now()
@@ -146,7 +142,7 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
elapsed := time.Since(start)
if err == nil {
if !ro.Quiet {
log.Printf("Output [%s] wrote batch of %d metrics in %s\n",
log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, len(metrics), elapsed)
}
}
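Because telegraf.Metric values are immutable once created, AddMetric now copies the metric's parts, filters them, and recreates the metric. A condensed sketch of that copy, filter, recreate pattern (a hypothetical helper; the diff inlines this directly in AddMetric):

```go
package models

import "github.com/influxdata/telegraf"

// rebuildFiltered mirrors the pattern used by AddMetric above:
// Tags() and Fields() are assumed to return copies, so they can be
// mutated freely before a new metric is assembled.
func rebuildFiltered(f Filter, m telegraf.Metric) (telegraf.Metric, bool) {
	name := m.Name()
	tags := m.Tags()
	fields := m.Fields()
	t := m.Time()

	if !f.Apply(name, fields, tags) {
		return nil, false // the whole metric is filtered out
	}
	// Error is not possible when recreating from an existing metric.
	out, _ := telegraf.NewMetric(name, tags, fields, t)
	return out, true
}
```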
@@ -1,4 +1,4 @@
package internal_models
package models

import (
"fmt"
@@ -31,9 +31,7 @@ var next5 = []telegraf.Metric{
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &perfOutput{}
@@ -49,9 +47,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &perfOutput{}
@@ -69,9 +65,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
// Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &perfOutput{}
@@ -88,11 +82,10 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
func TestRunningOutput_DropFilter(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
NameDrop: []string{"metric1", "metric2"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())

m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -114,11 +107,10 @@ func TestRunningOutput_DropFilter(t *testing.T) {
func TestRunningOutput_PassFilter(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
NameDrop: []string{"metric1000", "foo*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())

m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -140,11 +132,11 @@ func TestRunningOutput_PassFilter(t *testing.T) {
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,

TagInclude: []string{"nothing*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())

m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -162,11 +154,11 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,

TagExclude: []string{"tag*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())

m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -184,11 +176,11 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,

TagExclude: []string{"nothing*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())

m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -206,11 +198,11 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,

TagInclude: []string{"tag*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())

m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -227,9 +219,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &mockOutput{}
@@ -252,9 +242,7 @@ func TestRunningOutputDefault(t *testing.T) {
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &mockOutput{}
@@ -283,9 +271,7 @@ func TestRunningOutputFlushWhenFull(t *testing.T) {
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &mockOutput{}
@@ -304,9 +290,7 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {

func TestRunningOutputWriteFail(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &mockOutput{}
@@ -339,9 +323,7 @@ func TestRunningOutputWriteFail(t *testing.T) {
// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &mockOutput{}
@@ -379,9 +361,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &mockOutput{}
@@ -452,9 +432,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}

m := &mockOutput{}
58
logger/logger.go
Normal file
@@ -0,0 +1,58 @@
package logger

import (
"io"
"log"
"os"

"github.com/influxdata/wlog"
)

// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
return &telegrafLog{
writer: wlog.NewWriter(w),
}
}

type telegrafLog struct {
writer io.Writer
}

func (t *telegrafLog) Write(p []byte) (n int, err error) {
return t.writer.Write(p)
}

// SetupLogging configures the logging output.
// debug will set the log level to DEBUG
// quiet will set the log level to ERROR
// logfile will direct the logging output to a file. Empty string is
// interpreted as stdout. If there is an error opening the file the
// logger will fallback to stdout.
func SetupLogging(debug, quiet bool, logfile string) {
if debug {
wlog.SetLevel(wlog.DEBUG)
}
if quiet {
wlog.SetLevel(wlog.ERROR)
}

var oFile *os.File
if logfile != "" {
if _, err := os.Stat(logfile); os.IsNotExist(err) {
if oFile, err = os.Create(logfile); err != nil {
log.Printf("E! Unable to create %s (%s), using stdout", logfile, err)
oFile = os.Stdout
}
} else {
if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
log.Printf("E! Unable to append to %s (%s), using stdout", logfile, err)
oFile = os.Stdout
}
}
} else {
oFile = os.Stdout
}

log.SetOutput(newTelegrafWriter(oFile))
}
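The new logger delegates level filtering to influxdata/wlog, which keys off a single-letter prefix at the start of each log line; the "D!", "I!", "E!" prefixes that appear throughout this changeset are that convention. A small usage sketch (assuming wlog's default level passes I! and E! but drops D! when neither debug nor quiet is set):

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/logger"
)

func main() {
	// debug=false, quiet=false, logfile="" -> leveled output to stdout.
	logger.SetupLogging(false, false, "")

	log.Printf("D! dropped unless --debug raised the level")
	log.Printf("I! connected to output")
	log.Printf("E! write failed, will retry")
}
```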
75
metric.go
@@ -6,6 +6,17 @@ import (
"github.com/influxdata/influxdb/client/v2"
)

// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int

// Possible values for the ValueType enum.
const (
_ ValueType = iota
Counter
Gauge
Untyped
)

type Metric interface {
// Name returns the measurement name of the metric
Name() string
@@ -16,6 +27,9 @@ type Metric interface {
// Time return the timestamp for the metric
Time() time.Time

// Type returns the metric type. Can be either telegraf.Gauge or telegraf.Counter
Type() ValueType

// UnixNano returns the unix nano time of the metric
UnixNano() int64

@@ -35,29 +49,62 @@ type Metric interface {
// metric is a wrapper of the influxdb client.Point struct
type metric struct {
pt *client.Point

mType ValueType
}

// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
// NewMetric returns an untyped metric.
func NewMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t ...time.Time,
t time.Time,
) (Metric, error) {
var T time.Time
if len(t) > 0 {
T = t[0]
}

pt, err := client.NewPoint(name, tags, fields, T)
pt, err := client.NewPoint(name, tags, fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
pt: pt,
mType: Untyped,
}, nil
}

// NewGaugeMetric returns a gauge metric.
// Gauge metrics should be used when the metric can arbitrarily go up and
// down. ie, temperature, memory usage, cpu usage, etc.
func NewGaugeMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := client.NewPoint(name, tags, fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Gauge,
}, nil
}

// NewCounterMetric returns a Counter metric.
// Counter metrics should be used when the metric being created is an
// always-increasing counter. ie, net bytes received, requests served, errors, etc.
func NewCounterMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := client.NewPoint(name, tags, fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Counter,
}, nil
}

@@ -73,6 +120,10 @@ func (m *metric) Time() time.Time {
return m.pt.Time()
}

func (m *metric) Type() ValueType {
return m.mType
}

func (m *metric) UnixNano() int64 {
return m.pt.UnixNano()
}
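A short sketch of the three constructors side by side (signatures as in the diff; the metric values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
)

func main() {
	now := time.Now()
	tags := map[string]string{"host": "localhost"}

	// NewMetric stays the default (Untyped); the new Gauge/Counter
	// constructors only differ in the ValueType they record.
	m1, _ := telegraf.NewMetric("cpu", tags, map[string]interface{}{"usage_idle": 99.0}, now)
	m2, _ := telegraf.NewGaugeMetric("mem", tags, map[string]interface{}{"used_percent": 42.0}, now)
	m3, _ := telegraf.NewCounterMetric("net", tags, map[string]interface{}{"bytes_recv": int64(1024)}, now)

	// Prints the raw enum values for Untyped, Gauge, and Counter.
	fmt.Println(m1.Type(), m2.Type(), m3.Type())
}
```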
@@ -23,6 +23,51 @@ func TestNewMetric(t *testing.T) {
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)

assert.Equal(t, Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewGaugeMetric(t *testing.T) {
now := time.Now()

tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewGaugeMetric("cpu", tags, fields, now)
assert.NoError(t, err)

assert.Equal(t, Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
now := time.Now()

tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewCounterMetric("cpu", tags, fields, now)
assert.NoError(t, err)

assert.Equal(t, Counter, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
@@ -51,23 +96,6 @@ func TestNewMetricString(t *testing.T) {
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricStringNoTime(t *testing.T) {
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := NewMetric("cpu", tags, fields)
assert.NoError(t, err)

lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
assert.Equal(t, lineProto, m.String())

lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricFailNaN(t *testing.T) {
now := time.Now()
@@ -27,6 +27,14 @@ The example plugin gathers metrics about example things
- tag2
- measurement2 has the following tags:
- tag3

### Sample Queries:

These are some useful queries (to generate dashboards or other visualizations) to run against data from this plugin:

```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
```

### Example Output:
File diff suppressed because one or more lines are too long
@@ -1,104 +1,21 @@
package aerospike

import (
"bytes"
"encoding/binary"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"errors"
"log"
"net"
"strconv"
"strings"
"sync"
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"

as "github.com/aerospike/aerospike-client-go"
)

const (
MSG_HEADER_SIZE = 8
MSG_TYPE = 1 // Info is 1
MSG_VERSION = 2
)

var (
STATISTICS_COMMAND = []byte("statistics\n")
NAMESPACES_COMMAND = []byte("namespaces\n")
)

type aerospikeMessageHeader struct {
Version uint8
Type uint8
DataLen [6]byte
}

type aerospikeMessage struct {
aerospikeMessageHeader
Data []byte
}

// Taken from aerospike-client-go/types/message.go
func (msg *aerospikeMessage) Serialize() []byte {
msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
buf := bytes.NewBuffer([]byte{})
binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
binary.Write(buf, binary.BigEndian, msg.Data[:])
return buf.Bytes()
}

type aerospikeInfoCommand struct {
msg *aerospikeMessage
}

// Taken from aerospike-client-go/info.go
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
responses := make(map[string]string)
offset := int64(0)
begin := int64(0)

dataLen := int64(len(nfo.msg.Data))

// Create reusable StringBuilder for performance.
for offset < dataLen {
b := nfo.msg.Data[offset]

if b == '\t' {
name := nfo.msg.Data[begin:offset]
offset++
begin = offset

// Parse field value.
for offset < dataLen {
if nfo.msg.Data[offset] == '\n' {
break
}
offset++
}

if offset > begin {
value := nfo.msg.Data[begin:offset]
responses[string(name)] = string(value)
} else {
responses[string(name)] = ""
}
offset++
begin = offset
} else if b == '\n' {
if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
offset++
begin = offset
} else {
offset++
}
}

if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
return responses, nil
}

type Aerospike struct {
Servers []string
}
@@ -115,7 +32,7 @@ func (a *Aerospike) SampleConfig() string {
}

func (a *Aerospike) Description() string {
return "Read stats from an aerospike server"
return "Read stats from aerospike server(s)"
}

func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
@@ -124,214 +41,114 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
}

var wg sync.WaitGroup

var outerr error

errChan := errchan.New(len(a.Servers))
wg.Add(len(a.Servers))
for _, server := range a.Servers {
wg.Add(1)
go func(server string) {
go func(serv string) {
defer wg.Done()
outerr = a.gatherServer(server, acc)
errChan.C <- a.gatherServer(serv, acc)
}(server)
}

wg.Wait()
return outerr
return errChan.Error()
}
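The Gather rewrite replaces a shared outerr variable, which every goroutine raced to overwrite, with internal/errchan: one buffered slot per server, one send per goroutine, and a single fold into an error at the end. The same fan-out shape in isolation (errchan API exactly as used above):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/influxdata/telegraf/internal/errchan"
)

func gatherAll(servers []string, gather func(string) error) error {
	// One buffered slot per server, so no goroutine blocks on send.
	errChan := errchan.New(len(servers))
	var wg sync.WaitGroup
	wg.Add(len(servers))
	for _, server := range servers {
		go func(serv string) {
			defer wg.Done()
			errChan.C <- gather(serv)
		}(server)
	}
	wg.Wait()
	// Error() drains the channel and folds any failures into one error.
	return errChan.Error()
}

func main() {
	err := gatherAll([]string{"a:3000", "b:3000"}, func(s string) error {
		fmt.Println("gathering", s)
		return nil
	})
	fmt.Println("err:", err)
}
```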
func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return fmt.Errorf("Aerospike info failed: %s", err)
return err
}
readAerospikeStats(aerospikeInfo, acc, host, "")
namespaces, err := getList(NAMESPACES_COMMAND, host)

iport, err := strconv.Atoi(port)
if err != nil {
return fmt.Errorf("Aerospike namespace list failed: %s", err)
iport = 3000
}
for ix := range namespaces {
nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
if err != nil {
return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)

c, err := as.NewClient(host, iport)
if err != nil {
return err
}
defer c.Close()

nodes := c.GetNodes()
for _, n := range nodes {
tags := map[string]string{
"aerospike_host": hostport,
}
fields := map[string]interface{}{
"node_name": n.GetName(),
}
stats, err := as.RequestNodeStats(n)
if err != nil {
return err
}
for k, v := range stats {
val, err := parseValue(v)
if err == nil {
fields[strings.Replace(k, "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", k)
}
}
acc.AddFields("aerospike_node", fields, tags, time.Now())

info, err := as.RequestNodeInfo(n, "namespaces")
if err != nil {
return err
}
namespaces := strings.Split(info["namespaces"], ";")

for _, namespace := range namespaces {
nTags := map[string]string{
"aerospike_host": hostport,
}
nTags["namespace"] = namespace
nFields := map[string]interface{}{
"node_name": n.GetName(),
}
info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
if err != nil {
continue
}
stats := strings.Split(info["namespace/"+namespace], ";")
for _, stat := range stats {
parts := strings.Split(stat, "=")
if len(parts) < 2 {
continue
}
val, err := parseValue(parts[1])
if err == nil {
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
}
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
}
readAerospikeStats(nsInfo, acc, host, namespaces[ix])
}
return nil
}

func getMap(key []byte, host string) (map[string]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
func parseValue(v string) (interface{}, error) {
if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
return parsed, nil
} else if _, err := strconv.ParseUint(v, 10, 64); err == nil {
// int64 overflow, yet valid uint64
return nil, errors.New("Number is too large")
} else if parsed, err := strconv.ParseBool(v); err == nil {
return parsed, nil
} else {
return v, nil
}
parsed, err := unmarshalMapInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}

return parsed, nil
}

func getList(key []byte, host string) ([]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
func copyTags(m map[string]string) map[string]string {
out := make(map[string]string)
for k, v := range m {
out[k] = v
}
parsed, err := unmarshalListInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}

return parsed, nil
}

func get(key []byte, host string) (map[string]string, error) {
var err error
var data map[string]string

asInfo := &aerospikeInfoCommand{
msg: &aerospikeMessage{
aerospikeMessageHeader: aerospikeMessageHeader{
Version: uint8(MSG_VERSION),
Type: uint8(MSG_TYPE),
DataLen: msgLenToBytes(int64(len(key))),
},
Data: key,
},
}

cmd := asInfo.msg.Serialize()
addr, err := net.ResolveTCPAddr("tcp", host)
if err != nil {
return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
}

conn, err := net.DialTCP("tcp", nil, addr)
if err != nil {
return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
}
defer conn.Close()

_, err = conn.Write(cmd)
if err != nil {
return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
}

msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
if err != nil {
return data, fmt.Errorf("Failed to read header: %s", err)
}
err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
if err != nil {
return data, fmt.Errorf("Failed to unmarshal header: %s", err)
}

msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)

if int64(len(asInfo.msg.Data)) != msgLen {
asInfo.msg.Data = make([]byte, msgLen)
}

_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
if err != nil {
return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
}

data, err = asInfo.parseMultiResponse()
if err != nil {
return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
}

return data, err
}

func readAerospikeStats(
stats map[string]string,
acc telegraf.Accumulator,
host string,
namespace string,
) {
fields := make(map[string]interface{})
tags := map[string]string{
"aerospike_host": host,
"namespace": "_service",
}

if namespace != "" {
tags["namespace"] = namespace
}
for key, value := range stats {
// We are going to ignore all string based keys
val, err := strconv.ParseInt(value, 10, 64)
if err == nil {
if strings.Contains(key, "-") {
key = strings.Replace(key, "-", "_", -1)
}
fields[key] = val
}
}
acc.AddFields("aerospike", fields, tags)
}

func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
key = strings.TrimSuffix(key, "\n")
res := map[string]string{}

v, exists := infoMap[key]
if !exists {
return res, fmt.Errorf("Key '%s' missing from info", key)
}

values := strings.Split(v, ";")
for i := range values {
kv := strings.Split(values[i], "=")
if len(kv) > 1 {
res[kv[0]] = kv[1]
}
}

return res, nil
}

func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
key = strings.TrimSuffix(key, "\n")

v, exists := infoMap[key]
if !exists {
return []string{}, fmt.Errorf("Key '%s' missing from info", key)
}

values := strings.Split(v, ";")
return values, nil
}

func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
var r int
for total < length {
r, err = c.Read(buffer[total:length])
total += r
if err != nil {
break
}
}
return
}

// Taken from aerospike-client-go/types/message.go
func msgLenToBytes(DataLen int64) [6]byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(DataLen))
res := [6]byte{}
copy(res[:], b[2:])
return res
}

// Taken from aerospike-client-go/types/message.go
func msgLenFromBytes(buf [6]byte) int64 {
nbytes := append([]byte{0, 0}, buf[:]...)
DataLen := binary.BigEndian.Uint64(nbytes)
return int64(DataLen)
return out
}

func init() {
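The new parseValue keeps stats typed: int64 where the string fits, bool where it parses, string otherwise, and an explicit error for uint64 values that would overflow int64 (those are skipped with an "I!" log line rather than stored truncated). A small walk through those cases (parseValue as defined in the diff):

```go
package aerospike

import "fmt"

// demoParseValue exercises the four branches of parseValue;
// illustrative only, not part of the diff.
func demoParseValue() {
	for _, v := range []string{
		"42",                   // fits int64         -> int64(42)
		"true",                 // parses as bool     -> true
		"BB977942A2CA502",      // anything else      -> kept as string
		"18446744041841121751", // uint64 > int64 max -> nil, error
	} {
		val, err := parseValue(v)
		fmt.Printf("%q => %v (err: %v)\n", v, val, err)
	}
}
```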
@@ -1,7 +1,6 @@
package aerospike

import (
"reflect"
"testing"

"github.com/influxdata/telegraf/testutil"
@@ -11,7 +10,7 @@ import (

func TestAerospikeStatistics(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
t.Skip("Skipping aerospike integration tests.")
}

a := &Aerospike{
@@ -23,96 +22,46 @@ func TestAerospikeStatistics(t *testing.T) {
err := a.Gather(&acc)
require.NoError(t, err)

// Only use a few of the metrics
asMetrics := []string{
"transactions",
"stat_write_errs",
"stat_read_reqs",
"stat_write_reqs",
}

for _, metric := range asMetrics {
assert.True(t, acc.HasIntField("aerospike", metric), metric)
}

assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
}

func TestAerospikeMsgLenFromToBytes(t *testing.T) {
var i int64 = 8
assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
}
func TestAerospikeStatisticsPartialErr(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}

a := &Aerospike{
Servers: []string{
testutil.GetLocalHost() + ":3000",
testutil.GetLocalHost() + ":9999",
},
}

func TestReadAerospikeStatsNoNamespace(t *testing.T) {
// Also test for re-writing
var acc testutil.Accumulator
stats := map[string]string{
"stat-write-errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "")

fields := map[string]interface{}{
"stat_write_errs": int64(12345),
"stat_read_reqs": int64(12345),
}
tags := map[string]string{
"aerospike_host": "host1",
"namespace": "_service",
}
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
err := a.Gather(&acc)
require.Error(t, err)

assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
}

func TestReadAerospikeStatsNamespace(t *testing.T) {
var acc testutil.Accumulator
stats := map[string]string{
"stat_write_errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "test")
func TestAerospikeParseValue(t *testing.T) {
// uint64 with value bigger than int64 max
val, err := parseValue("18446744041841121751")
assert.Nil(t, val)
assert.Error(t, err)

fields := map[string]interface{}{
"stat_write_errs": int64(12345),
"stat_read_reqs": int64(12345),
}
tags := map[string]string{
"aerospike_host": "host1",
"namespace": "test",
}
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}

func TestAerospikeUnmarshalList(t *testing.T) {
i := map[string]string{
"test": "one;two;three",
}

expected := []string{"one", "two", "three"}

list, err := unmarshalListInfo(i, "test2")
assert.True(t, err != nil)

list, err = unmarshalListInfo(i, "test")
assert.True(t, err == nil)
equal := true
for ix := range expected {
if list[ix] != expected[ix] {
equal = false
break
}
}
assert.True(t, equal)
}

func TestAerospikeUnmarshalMap(t *testing.T) {
i := map[string]string{
"test": "key1=value1;key2=value2",
}

expected := map[string]string{
"key1": "value1",
"key2": "value2",
}
m, err := unmarshalMapInfo(i, "test")
assert.True(t, err == nil)
assert.True(t, reflect.DeepEqual(m, expected))
// int values
val, err = parseValue("42")
assert.NoError(t, err)
assert.Equal(t, val, int64(42), "must be parsed as int")

// string values
val, err = parseValue("BB977942A2CA502")
assert.NoError(t, err)
assert.Equal(t, val, `BB977942A2CA502`, "must be left as string")
}
@@ -5,7 +5,12 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
_ "github.com/influxdata/telegraf/plugins/inputs/chrony"
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
_ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
_ "github.com/influxdata/telegraf/plugins/inputs/consul"
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
@@ -15,15 +20,19 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
@@ -35,6 +44,8 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
@@ -52,6 +63,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
@@ -61,6 +73,8 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
@@ -1,7 +1,7 @@
# Telegraf plugin: Apache

#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from.
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".

#### Description

@@ -8,7 +8,6 @@ import (
"net/url"
"strconv"
"strings"
"sync"
"time"

"github.com/influxdata/telegraf"
@@ -21,6 +20,7 @@ type Apache struct {

var sampleConfig = `
## An array of Apache status URI to gather stats.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
`

@@ -33,8 +33,12 @@ func (n *Apache) Description() string {
}

func (n *Apache) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
if len(n.Urls) == 0 {
n.Urls = []string{"http://localhost/server-status?auto"}
}

var outerr error
var errch = make(chan error)

for _, u := range n.Urls {
addr, err := url.Parse(u)
@@ -42,14 +46,17 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
}

wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
outerr = n.gatherUrl(addr, acc)
errch <- n.gatherUrl(addr, acc)
}(addr)
}

wg.Wait()
// Drain channel, waiting for all requests to finish and save last error.
for range n.Urls {
if err := <-errch; err != nil {
outerr = err
}
}

return outerr
}
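The apache change fixes the same class of bug: outerr was written concurrently by every goroutine. Here each goroutine sends exactly one result on an unbuffered channel and the parent receives exactly len(Urls) values, which both synchronizes the work and keeps the last error seen. The pattern in isolation:

```go
package main

import (
	"errors"
	"fmt"
)

func gatherURLs(urls []string, fetch func(string) error) error {
	errch := make(chan error)

	for _, u := range urls {
		go func(u string) {
			errch <- fetch(u) // exactly one send per URL
		}(u)
	}

	// One receive per URL: unblocks every sender and records the
	// last non-nil error, matching the diff's drain loop.
	var outerr error
	for range urls {
		if err := <-errch; err != nil {
			outerr = err
		}
	}
	return outerr
}

func main() {
	err := gatherURLs([]string{"a", "b"}, func(u string) error {
		if u == "b" {
			return errors.New("fetch failed: " + u)
		}
		return nil
	})
	fmt.Println(err)
}
```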
@@ -36,7 +36,8 @@ func TestHTTPApache(t *testing.T) {
defer ts.Close()

a := Apache{
Urls: []string{ts.URL},
// Fetch it 2 times to catch possible data races.
Urls: []string{ts.URL, ts.URL},
}

var acc testutil.Accumulator
@@ -148,7 +148,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))
// Requests with wildcards for keyspace or table names will return nested
// maps in the json response
if tokens["type"] == "Table" && (tokens["keyspace"] == "*" ||
if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" ||
tokens["scope"] == "*") {
if valuesMap, ok := out["value"]; ok {
for k, v := range valuesMap.(map[string]interface{}) {
@@ -274,7 +274,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
m = newCassandraMetric(serverTokens["host"], metric, acc)
} else {
// unsupported metric type
log.Printf("Unsupported Cassandra metric [%s], skipping",
log.Printf("I! Unsupported Cassandra metric [%s], skipping",
metric)
continue
}
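When a wildcard keyspace or scope is used, Jolokia returns the "value" as a nested map keyed by the concrete mbean strings, which is what the widened condition above now detects for both Table and ColumnFamily types. A hedged sketch of walking such a response (the exact JSON shape is inferred from the diff; the mbean strings are hypothetical):

```go
package main

import "fmt"

func main() {
	// Inferred shape of a wildcard response: one entry per concrete
	// mbean that matched the pattern.
	out := map[string]interface{}{
		"value": map[string]interface{}{
			"org.apache.cassandra.metrics:keyspace=ks1,scope=t1,type=Table": 1.5,
			"org.apache.cassandra.metrics:keyspace=ks2,scope=t2,type=Table": 2.5,
		},
	}

	if valuesMap, ok := out["value"]; ok {
		for mbean, v := range valuesMap.(map[string]interface{}) {
			fmt.Printf("%s => %v\n", mbean, v)
		}
	}
}
```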
222
plugins/inputs/ceph/README.md
Normal file
@@ -0,0 +1,222 @@
# Ceph Storage Input Plugin

Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.

*Admin Socket Stats*

This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**.

The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are
used as collection tags, and all sub-keys are flattened. For example:

```
{
"paxos": {
"refresh": 9363435,
"refresh_latency": {
"avgcount": 9363435,
"sum": 5378.794002000
}
}
}
```

Would be parsed into the following metrics, all of which would be tagged with collection=paxos:

- refresh = 9363435
- refresh_latency.avgcount: 9363435
- refresh_latency.sum: 5378.794002000
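A sketch of that flattening in Go, under the assumption (suggested by the example above) that nested keys are joined with a dot and all leaf values are numeric:

```go
package main

import "fmt"

// flatten walks a decoded JSON object and joins nested keys with ".",
// producing the refresh_latency.avgcount style names shown above.
func flatten(prefix string, in map[string]interface{}, out map[string]float64) {
	for k, v := range in {
		key := k
		if prefix != "" {
			key = prefix + "." + k
		}
		switch val := v.(type) {
		case map[string]interface{}:
			flatten(key, val, out)
		case float64: // encoding/json decodes all JSON numbers as float64
			out[key] = val
		}
	}
}

func main() {
	paxos := map[string]interface{}{
		"refresh": float64(9363435),
		"refresh_latency": map[string]interface{}{
			"avgcount": float64(9363435),
			"sum":      5378.794002,
		},
	}
	fields := map[string]float64{}
	flatten("", paxos, fields)
	fmt.Println(fields)
}
```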
*Cluster Stats*

This gatherer works by invoking ceph commands against the cluster, so it requires only the ceph client, a valid
ceph configuration, and an access key to function (the ceph_config and ceph_user configuration variables together
specify these prerequisites). It may be run on any server with access to
the cluster. The currently supported commands are:

* ceph status
* ceph df
* ceph osd pool stats

### Configuration:

```
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]]
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'

## All configuration values are optional, defaults are shown below

## location of ceph binary
ceph_binary = "/usr/bin/ceph"

## directory in which to look for socket files
socket_dir = "/var/run/ceph"

## prefix of MON and OSD socket files, used to determine socket type
mon_prefix = "ceph-mon"
osd_prefix = "ceph-osd"

## suffix used to identify socket files
socket_suffix = "asok"

## Ceph user to authenticate as, ceph will search for the corresponding keyring
## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
## client section of ceph.conf for example:
##
## [client.telegraf]
## keyring = /etc/ceph/client.telegraf.keyring
##
## Consult the ceph documentation for more detail on keyring generation.
ceph_user = "client.admin"

## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"

## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true

## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = true
```

### Measurements & Fields:

*Admin Socket Stats*

All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.

*Cluster Stats*

* ceph\_osdmap
* epoch (float)
* full (boolean)
* nearfull (boolean)
* num\_in\_osds (float)
* num\_osds (float)
* num\_remapped\_pgs (float)
* num\_up\_osds (float)

* ceph\_pgmap
* bytes\_avail (float)
* bytes\_total (float)
* bytes\_used (float)
* data\_bytes (float)
* num\_pgs (float)
* op\_per\_sec (float)
* read\_bytes\_sec (float)
* version (float)
* write\_bytes\_sec (float)
* recovering\_bytes\_per\_sec (float)
* recovering\_keys\_per\_sec (float)
* recovering\_objects\_per\_sec (float)

* ceph\_pgmap\_state
* state name e.g. active+clean (float)

* ceph\_usage
* bytes\_used (float)
* kb\_used (float)
* max\_avail (float)
* objects (float)

* ceph\_pool\_usage
* bytes\_used (float)
* kb\_used (float)
* max\_avail (float)
* objects (float)

* ceph\_pool\_stats
* op\_per\_sec (float)
* read\_bytes\_sec (float)
* write\_bytes\_sec (float)
* recovering\_object\_per\_sec (float)
* recovering\_bytes\_per\_sec (float)
* recovering\_keys\_per\_sec (float)

### Tags:

*Admin Socket Stats*

All measurements will have the following tags:

- type: either 'osd' or 'mon' to indicate which type of node was queried
- id: a unique string identifier, parsed from the socket file name for the node
- collection: the top-level key under which these fields were reported. Possible values are:
- for MON nodes:
- cluster
- leveldb
- mon
- paxos
- throttle-mon_client_bytes
- throttle-mon_daemon_bytes
- throttle-msgr_dispatch_throttler-mon
- for OSD nodes:
- WBThrottle
- filestore
- leveldb
- mutex-FileJournal::completions_lock
- mutex-FileJournal::finisher_lock
- mutex-FileJournal::write_lock
- mutex-FileJournal::writeq_lock
- mutex-JOS::ApplyManager::apply_lock
- mutex-JOS::ApplyManager::com_lock
- mutex-JOS::SubmitManager::lock
- mutex-WBThrottle::lock
- objecter
- osd
- recoverystate_perf
- throttle-filestore_bytes
- throttle-filestore_ops
- throttle-msgr_dispatch_throttler-client
- throttle-msgr_dispatch_throttler-cluster
- throttle-msgr_dispatch_throttler-hb_back_server
- throttle-msgr_dispatch_throttler-hb_front_serve
- throttle-msgr_dispatch_throttler-hbclient
- throttle-msgr_dispatch_throttler-ms_objecter
- throttle-objecter_bytes
- throttle-objecter_ops
- throttle-osd_client_bytes
- throttle-osd_client_messages
|
||||
|
||||
*Cluster Stats*
|
||||
|
||||
* ceph\_pg\_state has the following tags:
|
||||
* state (state for which the value applies e.g. active+clean, active+remapped+backfill)
|
||||
* ceph\_pool\_usage has the following tags:
|
||||
* id
|
||||
* name
|
||||
* ceph\_pool\_stats has the following tags:
|
||||
* id
|
||||
* name
|
||||
|
||||
### Example Output:
|
||||
|
||||
*Admin Socket Stats*
|
||||
|
||||
<pre>
|
||||
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
|
||||
* Plugin: ceph, Collection 1
|
||||
> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
|
||||
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
|
||||
> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
|
||||
> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
|
||||
</pre>
|
||||
|
||||
*Cluster Stats*
|
||||
|
||||
<pre>
|
||||
> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
|
||||
> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
|
||||
> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
|
||||
> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
|
||||
> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
|
||||
> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
|
||||
> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
|
||||
> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
|
||||
</pre>
|
||||
489
plugins/inputs/ceph/ceph.go
Normal file
489
plugins/inputs/ceph/ceph.go
Normal file
@@ -0,0 +1,489 @@
|
||||
package ceph
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
measurement = "ceph"
|
||||
typeMon = "monitor"
|
||||
typeOsd = "osd"
|
||||
osdPrefix = "ceph-osd"
|
||||
monPrefix = "ceph-mon"
|
||||
sockSuffix = "asok"
|
||||
)
|
||||
|
||||
type Ceph struct {
|
||||
CephBinary string
|
||||
OsdPrefix string
|
||||
MonPrefix string
|
||||
SocketDir string
|
||||
SocketSuffix string
|
||||
CephUser string
|
||||
CephConfig string
|
||||
GatherAdminSocketStats bool
|
||||
GatherClusterStats bool
|
||||
}
|
||||
|
||||
func (c *Ceph) Description() string {
|
||||
return "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster."
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## This is the recommended interval to poll. Too frequent and you will lose
|
||||
## data points due to timeouts during rebalancing and recovery
|
||||
interval = '1m'
|
||||
|
||||
## All configuration values are optional, defaults are shown below
|
||||
|
||||
## location of ceph binary
|
||||
ceph_binary = "/usr/bin/ceph"
|
||||
|
||||
## directory in which to look for socket files
|
||||
socket_dir = "/var/run/ceph"
|
||||
|
||||
## prefix of MON and OSD socket files, used to determine socket type
|
||||
mon_prefix = "ceph-mon"
|
||||
osd_prefix = "ceph-osd"
|
||||
|
||||
## suffix used to identify socket files
|
||||
socket_suffix = "asok"
|
||||
|
||||
## Ceph user to authenticate as
|
||||
ceph_user = "client.admin"
|
||||
|
||||
## Ceph configuration to use to locate the cluster
|
||||
ceph_config = "/etc/ceph/ceph.conf"
|
||||
|
||||
## Whether to gather statistics via the admin socket
|
||||
gather_admin_socket_stats = true
|
||||
|
||||
## Whether to gather statistics via ceph commands
|
||||
gather_cluster_stats = true
|
||||
`
|
||||
|
||||
func (c *Ceph) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (c *Ceph) Gather(acc telegraf.Accumulator) error {
|
||||
if c.GatherAdminSocketStats {
|
||||
if err := c.gatherAdminSocketStats(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.GatherClusterStats {
|
||||
if err := c.gatherClusterStats(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
|
||||
sockets, err := findSockets(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
|
||||
}
|
||||
|
||||
for _, s := range sockets {
|
||||
dump, err := perfDump(c.CephBinary, s)
|
||||
if err != nil {
|
||||
log.Printf("E! error reading from socket '%s': %v", s.socket, err)
|
||||
continue
|
||||
}
|
||||
data, err := parseDump(dump)
|
||||
if err != nil {
|
||||
log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
|
||||
continue
|
||||
}
|
||||
for tag, metrics := range *data {
|
||||
acc.AddFields(measurement,
|
||||
map[string]interface{}(metrics),
|
||||
map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error {
|
||||
jobs := []struct {
|
||||
command string
|
||||
parser func(telegraf.Accumulator, string) error
|
||||
}{
|
||||
{"status", decodeStatus},
|
||||
{"df", decodeDf},
|
||||
{"osd pool stats", decodeOsdPoolStats},
|
||||
}
|
||||
|
||||
// For each job, execute against the cluster, parse and accumulate the data points
|
||||
for _, job := range jobs {
|
||||
output, err := c.exec(job.command)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error executing command: %v", err)
|
||||
}
|
||||
err = job.parser(acc, output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing output: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
c := Ceph{
|
||||
CephBinary: "/usr/bin/ceph",
|
||||
OsdPrefix: osdPrefix,
|
||||
MonPrefix: monPrefix,
|
||||
SocketDir: "/var/run/ceph",
|
||||
SocketSuffix: sockSuffix,
|
||||
CephUser: "client.admin",
|
||||
CephConfig: "/etc/ceph/ceph.conf",
|
||||
GatherAdminSocketStats: true,
|
||||
GatherClusterStats: false,
|
||||
}
|
||||
|
||||
inputs.Add(measurement, func() telegraf.Input { return &c })
|
||||
|
||||
}
|
||||
|
||||
var perfDump = func(binary string, socket *socket) (string, error) {
|
||||
cmdArgs := []string{"--admin-daemon", socket.socket}
|
||||
if socket.sockType == typeOsd {
|
||||
cmdArgs = append(cmdArgs, "perf", "dump")
|
||||
} else if socket.sockType == typeMon {
|
||||
cmdArgs = append(cmdArgs, "perfcounters_dump")
|
||||
} else {
|
||||
return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType)
|
||||
}
|
||||
|
||||
cmd := exec.Command(binary, cmdArgs...)
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error running ceph dump: %s", err)
|
||||
}
|
||||
|
||||
return out.String(), nil
|
||||
}
|
||||
|
||||
var findSockets = func(c *Ceph) ([]*socket, error) {
|
||||
listing, err := ioutil.ReadDir(c.SocketDir)
|
||||
if err != nil {
|
||||
return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err)
|
||||
}
|
||||
sockets := make([]*socket, 0, len(listing))
|
||||
for _, info := range listing {
|
||||
f := info.Name()
|
||||
var sockType string
|
||||
var sockPrefix string
|
||||
if strings.HasPrefix(f, c.MonPrefix) {
|
||||
sockType = typeMon
|
||||
sockPrefix = monPrefix
|
||||
}
|
||||
if strings.HasPrefix(f, c.OsdPrefix) {
|
||||
sockType = typeOsd
|
||||
sockPrefix = osdPrefix
|
||||
|
||||
}
|
||||
if sockType == typeOsd || sockType == typeMon {
|
||||
path := filepath.Join(c.SocketDir, f)
|
||||
sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path})
|
||||
}
|
||||
}
|
||||
return sockets, nil
|
||||
}
|
||||
|
||||
func parseSockId(fname, prefix, suffix string) string {
|
||||
s := fname
|
||||
s = strings.TrimPrefix(s, prefix)
|
||||
s = strings.TrimSuffix(s, suffix)
|
||||
s = strings.Trim(s, ".-_")
|
||||
return s
|
||||
}
|
||||
|
||||
type socket struct {
|
||||
sockId string
|
||||
sockType string
|
||||
socket string
|
||||
}
|
||||
|
||||
type metric struct {
|
||||
pathStack []string // lifo stack of name components
|
||||
value float64
|
||||
}
|
||||
|
||||
// Pops names of pathStack to build the flattened name for a metric
|
||||
func (m *metric) name() string {
|
||||
buf := bytes.Buffer{}
|
||||
for i := len(m.pathStack) - 1; i >= 0; i-- {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteString(".")
|
||||
}
|
||||
buf.WriteString(m.pathStack[i])
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type metricMap map[string]interface{}
|
||||
|
||||
type taggedMetricMap map[string]metricMap
|
||||
|
||||
// Parses a raw JSON string into a taggedMetricMap
|
||||
// Delegates the actual parsing to newTaggedMetricMap(..)
|
||||
func parseDump(dump string) (*taggedMetricMap, error) {
|
||||
data := make(map[string]interface{})
|
||||
err := json.Unmarshal([]byte(dump), &data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
|
||||
}
|
||||
|
||||
tmm := newTaggedMetricMap(data)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to tag dataset: '%v': %v", tmm, err)
|
||||
}
|
||||
|
||||
return tmm, nil
|
||||
}
|
||||
|
||||
// Builds a TaggedMetricMap out of a generic string map.
|
||||
// The top-level key is used as a tag and all sub-keys are flattened into metrics
|
||||
func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
|
||||
tmm := make(taggedMetricMap)
|
||||
for tag, datapoints := range data {
|
||||
mm := make(metricMap)
|
||||
for _, m := range flatten(datapoints) {
|
||||
mm[m.name()] = m.value
|
||||
}
|
||||
tmm[tag] = mm
|
||||
}
|
||||
return &tmm
|
||||
}
|
||||
|
||||
// Recursively flattens any k-v hierarchy present in data.
|
||||
// Nested keys are flattened into ordered slices associated with a metric value.
|
||||
// The key slices are treated as stacks, and are expected to be reversed and concatenated
|
||||
// when passed as metrics to the accumulator. (see (*metric).name())
|
||||
func flatten(data interface{}) []*metric {
|
||||
var metrics []*metric
|
||||
|
||||
switch val := data.(type) {
|
||||
case float64:
|
||||
metrics = []*metric{&metric{make([]string, 0, 1), val}}
|
||||
case map[string]interface{}:
|
||||
metrics = make([]*metric, 0, len(val))
|
||||
for k, v := range val {
|
||||
for _, m := range flatten(v) {
|
||||
m.pathStack = append(m.pathStack, k)
|
||||
metrics = append(metrics, m)
|
||||
}
|
||||
}
|
||||
default:
|
||||
log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
|
||||
}
|
||||
|
||||
return metrics
|
||||
}
|
||||
|
||||
func (c *Ceph) exec(command string) (string, error) {
|
||||
cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"}
|
||||
cmdArgs = append(cmdArgs, strings.Split(command, " ")...)
|
||||
|
||||
cmd := exec.Command(c.CephBinary, cmdArgs...)
|
||||
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error running ceph %v: %s", command, err)
|
||||
}
|
||||
|
||||
output := out.String()
|
||||
|
||||
// Ceph doesn't sanitize its output, and may return invalid JSON. Patch this
|
||||
// up for them, as having some inaccurate data is better than none.
|
||||
output = strings.Replace(output, "-inf", "0", -1)
|
||||
output = strings.Replace(output, "inf", "0", -1)
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func decodeStatus(acc telegraf.Accumulator, input string) error {
|
||||
data := make(map[string]interface{})
|
||||
err := json.Unmarshal([]byte(input), &data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
|
||||
}
|
||||
|
||||
err = decodeStatusOsdmap(acc, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = decodeStatusPgmap(acc, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = decodeStatusPgmapState(acc, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error {
|
||||
osdmap, ok := data["osdmap"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
|
||||
}
|
||||
fields, ok := osdmap["osdmap"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
|
||||
}
|
||||
acc.AddFields("ceph_osdmap", fields, map[string]string{})
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error {
|
||||
pgmap, ok := data["pgmap"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
|
||||
}
|
||||
fields := make(map[string]interface{})
|
||||
for key, value := range pgmap {
|
||||
switch value.(type) {
|
||||
case float64:
|
||||
fields[key] = value
|
||||
}
|
||||
}
|
||||
acc.AddFields("ceph_pgmap", fields, map[string]string{})
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
|
||||
pgmap, ok := data["pgmap"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
|
||||
}
|
||||
fields := make(map[string]interface{})
|
||||
for key, value := range pgmap {
|
||||
switch value.(type) {
|
||||
case []interface{}:
|
||||
if key != "pgs_by_state" {
|
||||
continue
|
||||
}
|
||||
for _, state := range value.([]interface{}) {
|
||||
state_map, ok := state.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
|
||||
}
|
||||
state_name, ok := state_map["state_name"].(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
|
||||
}
|
||||
state_count, ok := state_map["count"].(float64)
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
|
||||
}
|
||||
fields[state_name] = state_count
|
||||
}
|
||||
}
|
||||
}
|
||||
acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeDf(acc telegraf.Accumulator, input string) error {
|
||||
data := make(map[string]interface{})
|
||||
err := json.Unmarshal([]byte(input), &data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
|
||||
}
|
||||
|
||||
// ceph.usage: records global utilization and number of objects
|
||||
stats_fields, ok := data["stats"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode df stats", measurement)
|
||||
}
|
||||
acc.AddFields("ceph_usage", stats_fields, map[string]string{})
|
||||
|
||||
// ceph.pool.usage: records per pool utilization and number of objects
|
||||
pools, ok := data["pools"].([]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode df pools", measurement)
|
||||
}
|
||||
|
||||
for _, pool := range pools {
|
||||
pool_map, ok := pool.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode df pool", measurement)
|
||||
}
|
||||
pool_name, ok := pool_map["name"].(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode df pool name", measurement)
|
||||
}
|
||||
fields, ok := pool_map["stats"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement)
|
||||
}
|
||||
tags := map[string]string{
|
||||
"name": pool_name,
|
||||
}
|
||||
acc.AddFields("ceph_pool_usage", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error {
|
||||
data := make([]map[string]interface{}, 0)
|
||||
err := json.Unmarshal([]byte(input), &data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
|
||||
}
|
||||
|
||||
// ceph.pool.stats: records pre pool IO and recovery throughput
|
||||
for _, pool := range data {
|
||||
pool_name, ok := pool["pool_name"].(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement)
|
||||
}
|
||||
// Note: the 'recovery' object looks broken (in hammer), so it's omitted
|
||||
objects := []string{
|
||||
"client_io_rate",
|
||||
"recovery_rate",
|
||||
}
|
||||
fields := make(map[string]interface{})
|
||||
for _, object := range objects {
|
||||
perfdata, ok := pool[object].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement)
|
||||
}
|
||||
for key, value := range perfdata {
|
||||
fields[key] = value
|
||||
}
|
||||
}
|
||||
tags := map[string]string{
|
||||
"name": pool_name,
|
||||
}
|
||||
acc.AddFields("ceph_pool_stats", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
687
plugins/inputs/ceph/ceph_test.go
Normal file
687
plugins/inputs/ceph/ceph_test.go
Normal file
@@ -0,0 +1,687 @@
|
||||
package ceph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
epsilon = float64(0.00000001)
|
||||
)
|
||||
|
||||
func TestParseSockId(t *testing.T) {
|
||||
s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix)
|
||||
assert.Equal(t, s, "1")
|
||||
}
|
||||
|
||||
func TestParseMonDump(t *testing.T) {
|
||||
dump, err := parseDump(monPerfDump)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
|
||||
assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
|
||||
}
|
||||
|
||||
func TestParseOsdDump(t *testing.T) {
|
||||
dump, err := parseDump(osdPerfDump)
|
||||
assert.NoError(t, err)
|
||||
assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
|
||||
assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
|
||||
}
|
||||
|
||||
func TestGather(t *testing.T) {
|
||||
saveFind := findSockets
|
||||
saveDump := perfDump
|
||||
defer func() {
|
||||
findSockets = saveFind
|
||||
perfDump = saveDump
|
||||
}()
|
||||
|
||||
findSockets = func(c *Ceph) ([]*socket, error) {
|
||||
return []*socket{&socket{"osd.1", typeOsd, ""}}, nil
|
||||
}
|
||||
|
||||
perfDump = func(binary string, s *socket) (string, error) {
|
||||
return osdPerfDump, nil
|
||||
}
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
c := &Ceph{}
|
||||
c.Gather(acc)
|
||||
|
||||
}
|
||||
|
||||
func TestFindSockets(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "socktest")
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.Remove(tmpdir)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
c := &Ceph{
|
||||
CephBinary: "foo",
|
||||
OsdPrefix: "ceph-osd",
|
||||
MonPrefix: "ceph-mon",
|
||||
SocketDir: tmpdir,
|
||||
SocketSuffix: "asok",
|
||||
CephUser: "client.admin",
|
||||
CephConfig: "/etc/ceph/ceph.conf",
|
||||
GatherAdminSocketStats: true,
|
||||
GatherClusterStats: false,
|
||||
}
|
||||
|
||||
for _, st := range sockTestParams {
|
||||
createTestFiles(tmpdir, st)
|
||||
|
||||
sockets, err := findSockets(c)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 1; i <= st.osds; i++ {
|
||||
assertFoundSocket(t, tmpdir, typeOsd, i, sockets)
|
||||
}
|
||||
|
||||
for i := 1; i <= st.mons; i++ {
|
||||
assertFoundSocket(t, tmpdir, typeMon, i, sockets)
|
||||
}
|
||||
cleanupTestFiles(tmpdir, st)
|
||||
}
|
||||
}
|
||||
|
||||
func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*socket) {
|
||||
var prefix string
|
||||
if sockType == typeOsd {
|
||||
prefix = osdPrefix
|
||||
} else {
|
||||
prefix = monPrefix
|
||||
}
|
||||
expected := path.Join(dir, sockFile(prefix, i))
|
||||
found := false
|
||||
for _, s := range sockets {
|
||||
fmt.Printf("Checking %s\n", s.socket)
|
||||
if s.socket == expected {
|
||||
found = true
|
||||
assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s)
|
||||
assert.Equal(t, s.sockId, strconv.Itoa(i))
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Did not find socket: %s", expected)
|
||||
}
|
||||
|
||||
func sockFile(prefix string, i int) string {
|
||||
return strings.Join([]string{prefix, strconv.Itoa(i), sockSuffix}, ".")
|
||||
}
|
||||
|
||||
func createTestFiles(dir string, st *SockTest) {
|
||||
writeFile := func(prefix string, i int) {
|
||||
f := sockFile(prefix, i)
|
||||
fpath := path.Join(dir, f)
|
||||
ioutil.WriteFile(fpath, []byte(""), 0777)
|
||||
}
|
||||
tstFileApply(st, writeFile)
|
||||
}
|
||||
|
||||
func cleanupTestFiles(dir string, st *SockTest) {
|
||||
rmFile := func(prefix string, i int) {
|
||||
f := sockFile(prefix, i)
|
||||
fpath := path.Join(dir, f)
|
||||
err := os.Remove(fpath)
|
||||
if err != nil {
|
||||
fmt.Printf("Error removing test file %s: %v\n", fpath, err)
|
||||
}
|
||||
}
|
||||
tstFileApply(st, rmFile)
|
||||
}
|
||||
|
||||
func tstFileApply(st *SockTest, fn func(prefix string, i int)) {
|
||||
for i := 1; i <= st.osds; i++ {
|
||||
fn(osdPrefix, i)
|
||||
}
|
||||
for i := 1; i <= st.mons; i++ {
|
||||
fn(monPrefix, i)
|
||||
}
|
||||
}
|
||||
|
||||
type SockTest struct {
|
||||
osds int
|
||||
mons int
|
||||
}
|
||||
|
||||
var sockTestParams = []*SockTest{
|
||||
&SockTest{
|
||||
osds: 2,
|
||||
mons: 2,
|
||||
},
|
||||
&SockTest{
|
||||
mons: 1,
|
||||
},
|
||||
&SockTest{
|
||||
osds: 1,
|
||||
},
|
||||
&SockTest{},
|
||||
}
|
||||
|
||||
var monPerfDump = `
|
||||
{ "cluster": { "num_mon": 2,
|
||||
"num_mon_quorum": 2,
|
||||
"num_osd": 26,
|
||||
"num_osd_up": 26,
|
||||
"num_osd_in": 26,
|
||||
"osd_epoch": 3306,
|
||||
"osd_kb": 11487846448,
|
||||
"osd_kb_used": 5678670180,
|
||||
"osd_kb_avail": 5809176268,
|
||||
"num_pool": 12,
|
||||
"num_pg": 768,
|
||||
"num_pg_active_clean": 768,
|
||||
"num_pg_active": 768,
|
||||
"num_pg_peering": 0,
|
||||
"num_object": 397616,
|
||||
"num_object_degraded": 0,
|
||||
"num_object_unfound": 0,
|
||||
"num_bytes": 2917848227467,
|
||||
"num_mds_up": 0,
|
||||
"num_mds_in": 0,
|
||||
"num_mds_failed": 0,
|
||||
"mds_epoch": 1},
|
||||
"leveldb": { "leveldb_get": 321950312,
|
||||
"leveldb_transaction": 18729922,
|
||||
"leveldb_compact": 0,
|
||||
"leveldb_compact_range": 74141,
|
||||
"leveldb_compact_queue_merge": 0,
|
||||
"leveldb_compact_queue_len": 0},
|
||||
"mon": {},
|
||||
"paxos": { "start_leader": 0,
|
||||
"start_peon": 1,
|
||||
"restart": 4,
|
||||
"refresh": 9363435,
|
||||
"refresh_latency": { "avgcount": 9363435,
|
||||
"sum": 5378.794002000},
|
||||
"begin": 9363435,
|
||||
"begin_keys": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"begin_bytes": { "avgcount": 9363435,
|
||||
"sum": 110468605489},
|
||||
"begin_latency": { "avgcount": 9363435,
|
||||
"sum": 5850.060682000},
|
||||
"commit": 9363435,
|
||||
"commit_keys": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"commit_bytes": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"commit_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"collect": 1,
|
||||
"collect_keys": { "avgcount": 1,
|
||||
"sum": 1},
|
||||
"collect_bytes": { "avgcount": 1,
|
||||
"sum": 24},
|
||||
"collect_latency": { "avgcount": 1,
|
||||
"sum": 0.000280000},
|
||||
"collect_uncommitted": 0,
|
||||
"collect_timeout": 0,
|
||||
"accept_timeout": 0,
|
||||
"lease_ack_timeout": 0,
|
||||
"lease_timeout": 0,
|
||||
"store_state": 9363435,
|
||||
"store_state_keys": { "avgcount": 9363435,
|
||||
"sum": 176572789},
|
||||
"store_state_bytes": { "avgcount": 9363435,
|
||||
"sum": 216355887217},
|
||||
"store_state_latency": { "avgcount": 9363435,
|
||||
"sum": 6866.540527000},
|
||||
"share_state": 0,
|
||||
"share_state_keys": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"share_state_bytes": { "avgcount": 0,
|
||||
"sum": 0},
|
||||
"new_pn": 0,
|
||||
"new_pn_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-mon_client_bytes": { "val": 246,
|
||||
"max": 104857600,
|
||||
"get": 896030,
|
||||
"get_sum": 45854374,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 896026,
|
||||
"put_sum": 45854128,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-mon_daemon_bytes": { "val": 0,
|
||||
"max": 419430400,
|
||||
"get": 2773768,
|
||||
"get_sum": 3627676976,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 2773768,
|
||||
"put_sum": 3627676976,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-mon": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 34504949,
|
||||
"get_sum": 226860281124,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 34504949,
|
||||
"put_sum": 226860281124,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}}}
|
||||
`
|
||||
|
||||
var osdPerfDump = `
|
||||
{ "WBThrottle": { "bytes_dirtied": 28405539,
|
||||
"bytes_wb": 0,
|
||||
"ios_dirtied": 93,
|
||||
"ios_wb": 0,
|
||||
"inodes_dirtied": 86,
|
||||
"inodes_wb": 0},
|
||||
"filestore": { "journal_queue_max_ops": 0,
|
||||
"journal_queue_ops": 0,
|
||||
"journal_ops": 1108008,
|
||||
"journal_queue_max_bytes": 0,
|
||||
"journal_queue_bytes": 0,
|
||||
"journal_bytes": 73233416196,
|
||||
"journal_latency": { "avgcount": 1108008,
|
||||
"sum": 290.981036000},
|
||||
"journal_wr": 1091866,
|
||||
"journal_wr_bytes": { "avgcount": 1091866,
|
||||
"sum": 74925682688},
|
||||
"journal_full": 0,
|
||||
"committing": 0,
|
||||
"commitcycle": 110389,
|
||||
"commitcycle_interval": { "avgcount": 110389,
|
||||
"sum": 552132.109360000},
|
||||
"commitcycle_latency": { "avgcount": 110389,
|
||||
"sum": 178.657804000},
|
||||
"op_queue_max_ops": 50,
|
||||
"op_queue_ops": 0,
|
||||
"ops": 1108008,
|
||||
"op_queue_max_bytes": 104857600,
|
||||
"op_queue_bytes": 0,
|
||||
"bytes": 73226768148,
|
||||
"apply_latency": { "avgcount": 1108008,
|
||||
"sum": 947.742722000},
|
||||
"queue_transaction_latency_avg": { "avgcount": 1108008,
|
||||
"sum": 0.511327000}},
|
||||
"leveldb": { "leveldb_get": 4361221,
|
||||
"leveldb_transaction": 4351276,
|
||||
"leveldb_compact": 0,
|
||||
"leveldb_compact_range": 0,
|
||||
"leveldb_compact_queue_merge": 0,
|
||||
"leveldb_compact_queue_len": 0},
|
||||
"mutex-FileJournal::completions_lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"mutex-FileJournal::finisher_lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"mutex-FileJournal::write_lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"mutex-FileJournal::writeq_lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"mutex-JOS::ApplyManager::apply_lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"mutex-JOS::ApplyManager::com_lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"mutex-JOS::SubmitManager::lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"mutex-WBThrottle::lock": { "wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"objecter": { "op_active": 0,
|
||||
"op_laggy": 0,
|
||||
"op_send": 0,
|
||||
"op_send_bytes": 0,
|
||||
"op_resend": 0,
|
||||
"op_ack": 0,
|
||||
"op_commit": 0,
|
||||
"op": 0,
|
||||
"op_r": 0,
|
||||
"op_w": 0,
|
||||
"op_rmw": 0,
|
||||
"op_pg": 0,
|
||||
"osdop_stat": 0,
|
||||
"osdop_create": 0,
|
||||
"osdop_read": 0,
|
||||
"osdop_write": 0,
|
||||
"osdop_writefull": 0,
|
||||
"osdop_append": 0,
|
||||
"osdop_zero": 0,
|
||||
"osdop_truncate": 0,
|
||||
"osdop_delete": 0,
|
||||
"osdop_mapext": 0,
|
||||
"osdop_sparse_read": 0,
|
||||
"osdop_clonerange": 0,
|
||||
"osdop_getxattr": 0,
|
||||
"osdop_setxattr": 0,
|
||||
"osdop_cmpxattr": 0,
|
||||
"osdop_rmxattr": 0,
|
||||
"osdop_resetxattrs": 0,
|
||||
"osdop_tmap_up": 0,
|
||||
"osdop_tmap_put": 0,
|
||||
"osdop_tmap_get": 0,
|
||||
"osdop_call": 0,
|
||||
"osdop_watch": 0,
|
||||
"osdop_notify": 0,
|
||||
"osdop_src_cmpxattr": 0,
|
||||
"osdop_pgls": 0,
|
||||
"osdop_pgls_filter": 0,
|
||||
"osdop_other": 0,
|
||||
"linger_active": 0,
|
||||
"linger_send": 0,
|
||||
"linger_resend": 0,
|
||||
"poolop_active": 0,
|
||||
"poolop_send": 0,
|
||||
"poolop_resend": 0,
|
||||
"poolstat_active": 0,
|
||||
"poolstat_send": 0,
|
||||
"poolstat_resend": 0,
|
||||
"statfs_active": 0,
|
||||
"statfs_send": 0,
|
||||
"statfs_resend": 0,
|
||||
"command_active": 0,
|
||||
"command_send": 0,
|
||||
"command_resend": 0,
|
||||
"map_epoch": 3300,
|
||||
"map_full": 0,
|
||||
"map_inc": 3293,
|
||||
"osd_sessions": 0,
|
||||
"osd_session_open": 0,
|
||||
"osd_session_close": 0,
|
||||
"osd_laggy": 0},
|
||||
"osd": { "opq": 0,
|
||||
"op_wip": 0,
|
||||
"op": 23939,
|
||||
"op_in_bytes": 1245903961,
|
||||
"op_out_bytes": 29103083856,
|
||||
"op_latency": { "avgcount": 23939,
|
||||
"sum": 440.192015000},
|
||||
"op_process_latency": { "avgcount": 23939,
|
||||
"sum": 30.170685000},
|
||||
"op_r": 23112,
|
||||
"op_r_out_bytes": 29103056146,
|
||||
"op_r_latency": { "avgcount": 23112,
|
||||
"sum": 19.373526000},
|
||||
"op_r_process_latency": { "avgcount": 23112,
|
||||
"sum": 14.625928000},
|
||||
"op_w": 549,
|
||||
"op_w_in_bytes": 1245804358,
|
||||
"op_w_rlat": { "avgcount": 549,
|
||||
"sum": 17.022299000},
|
||||
"op_w_latency": { "avgcount": 549,
|
||||
"sum": 418.494610000},
|
||||
"op_w_process_latency": { "avgcount": 549,
|
||||
"sum": 13.316555000},
|
||||
"op_rw": 278,
|
||||
"op_rw_in_bytes": 99603,
|
||||
"op_rw_out_bytes": 27710,
|
||||
"op_rw_rlat": { "avgcount": 278,
|
||||
"sum": 2.213785000},
|
||||
"op_rw_latency": { "avgcount": 278,
|
||||
"sum": 2.323879000},
|
||||
"op_rw_process_latency": { "avgcount": 278,
|
||||
"sum": 2.228202000},
|
||||
"subop": 1074774,
|
||||
"subop_in_bytes": 26841811636,
|
||||
"subop_latency": { "avgcount": 1074774,
|
||||
"sum": 745.509160000},
|
||||
"subop_w": 0,
|
||||
"subop_w_in_bytes": 26841811636,
|
||||
"subop_w_latency": { "avgcount": 1074774,
|
||||
"sum": 745.509160000},
|
||||
"subop_pull": 0,
|
||||
"subop_pull_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"subop_push": 0,
|
||||
"subop_push_in_bytes": 0,
|
||||
"subop_push_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"pull": 0,
|
||||
"push": 28,
|
||||
"push_out_bytes": 103483392,
|
||||
"push_in": 0,
|
||||
"push_in_bytes": 0,
|
||||
"recovery_ops": 15,
|
||||
"loadavg": 202,
|
||||
"buffer_bytes": 0,
|
||||
"numpg": 18,
|
||||
"numpg_primary": 8,
|
||||
"numpg_replica": 10,
|
||||
"numpg_stray": 0,
|
||||
"heartbeat_to_peers": 10,
|
||||
"heartbeat_from_peers": 0,
|
||||
"map_messages": 7413,
|
||||
"map_message_epochs": 9792,
|
||||
"map_message_epoch_dups": 10105,
|
||||
"messages_delayed_for_map": 83,
|
||||
"stat_bytes": 102123175936,
|
||||
"stat_bytes_used": 49961820160,
|
||||
"stat_bytes_avail": 52161355776,
|
||||
"copyfrom": 0,
|
||||
"tier_promote": 0,
|
||||
"tier_flush": 0,
|
||||
"tier_flush_fail": 0,
|
||||
"tier_try_flush": 0,
|
||||
"tier_try_flush_fail": 0,
|
||||
"tier_evict": 0,
|
||||
"tier_whiteout": 0,
|
||||
"tier_dirty": 230,
|
||||
"tier_clean": 0,
|
||||
"tier_delay": 0,
|
||||
"agent_wake": 0,
|
||||
"agent_skip": 0,
|
||||
"agent_flush": 0,
|
||||
"agent_evict": 0},
|
||||
"recoverystate_perf": { "initial_latency": { "avgcount": 473,
|
||||
"sum": 0.027207000},
|
||||
"started_latency": { "avgcount": 1480,
|
||||
"sum": 9854902.397648000},
|
||||
"reset_latency": { "avgcount": 1953,
|
||||
"sum": 0.096206000},
|
||||
"start_latency": { "avgcount": 1953,
|
||||
"sum": 0.059947000},
|
||||
"primary_latency": { "avgcount": 765,
|
||||
"sum": 4688922.186935000},
|
||||
"peering_latency": { "avgcount": 704,
|
||||
"sum": 1668.652135000},
|
||||
"backfilling_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"waitremotebackfillreserved_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"waitlocalbackfillreserved_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"notbackfilling_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"repnotrecovering_latency": { "avgcount": 462,
|
||||
"sum": 5158922.114600000},
|
||||
"repwaitrecoveryreserved_latency": { "avgcount": 15,
|
||||
"sum": 0.008275000},
|
||||
"repwaitbackfillreserved_latency": { "avgcount": 1,
|
||||
"sum": 0.000095000},
|
||||
"RepRecovering_latency": { "avgcount": 16,
|
||||
"sum": 2274.944727000},
|
||||
"activating_latency": { "avgcount": 514,
|
||||
"sum": 261.008520000},
|
||||
"waitlocalrecoveryreserved_latency": { "avgcount": 20,
|
||||
"sum": 0.175422000},
|
||||
"waitremoterecoveryreserved_latency": { "avgcount": 20,
|
||||
"sum": 0.682778000},
|
||||
"recovering_latency": { "avgcount": 20,
|
||||
"sum": 0.697551000},
|
||||
"recovered_latency": { "avgcount": 511,
|
||||
"sum": 0.011038000},
|
||||
"clean_latency": { "avgcount": 503,
|
||||
"sum": 4686961.154278000},
|
||||
"active_latency": { "avgcount": 506,
|
||||
"sum": 4687223.640464000},
|
||||
"replicaactive_latency": { "avgcount": 446,
|
||||
"sum": 5161197.078966000},
|
||||
"stray_latency": { "avgcount": 794,
|
||||
"sum": 4805.105128000},
|
||||
"getinfo_latency": { "avgcount": 704,
|
||||
"sum": 1138.477937000},
|
||||
"getlog_latency": { "avgcount": 678,
|
||||
"sum": 0.036393000},
|
||||
"waitactingchange_latency": { "avgcount": 69,
|
||||
"sum": 59.172893000},
|
||||
"incomplete_latency": { "avgcount": 0,
|
||||
"sum": 0.000000000},
|
||||
"getmissing_latency": { "avgcount": 609,
|
||||
"sum": 0.012288000},
|
||||
"waitupthru_latency": { "avgcount": 576,
|
||||
"sum": 530.106999000}},
|
||||
"throttle-filestore_bytes": { "val": 0,
|
||||
"max": 0,
|
||||
"get": 0,
|
||||
"get_sum": 0,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 0,
|
||||
"put_sum": 0,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-filestore_ops": { "val": 0,
|
||||
"max": 0,
|
||||
"get": 0,
|
||||
"get_sum": 0,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 0,
|
||||
"put_sum": 0,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-client": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 130730,
|
||||
"get_sum": 1246039872,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 130730,
|
||||
"put_sum": 1246039872,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-cluster": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 1108033,
|
||||
"get_sum": 71277949992,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 1108033,
|
||||
"put_sum": 71277949992,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-hb_back_server": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 18320575,
|
||||
"get_sum": 861067025,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 18320575,
|
||||
"put_sum": 861067025,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-hb_front_server": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 18320575,
|
||||
"get_sum": 861067025,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 18320575,
|
||||
"put_sum": 861067025,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-hbclient": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 40479394,
|
||||
"get_sum": 1902531518,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 40479394,
|
||||
"put_sum": 1902531518,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-msgr_dispatch_throttler-ms_objecter": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 0,
|
||||
"get_sum": 0,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 0,
|
||||
"put_sum": 0,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-objecter_bytes": { "val": 0,
|
||||
"max": 104857600,
|
||||
"get": 0,
|
||||
"get_sum": 0,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 0,
|
||||
"put_sum": 0,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-objecter_ops": { "val": 0,
|
||||
"max": 1024,
|
||||
"get": 0,
|
||||
"get_sum": 0,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 0,
|
||||
"put_sum": 0,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-osd_client_bytes": { "val": 0,
|
||||
"max": 524288000,
|
||||
"get": 24241,
|
||||
"get_sum": 1241992581,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 25958,
|
||||
"put_sum": 1241992581,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}},
|
||||
"throttle-osd_client_messages": { "val": 0,
|
||||
"max": 100,
|
||||
"get": 49214,
|
||||
"get_sum": 49214,
|
||||
"get_or_fail_fail": 0,
|
||||
"get_or_fail_success": 0,
|
||||
"take": 0,
|
||||
"take_sum": 0,
|
||||
"put": 49214,
|
||||
"put_sum": 49214,
|
||||
"wait": { "avgcount": 0,
|
||||
"sum": 0.000000000}}}
|
||||
`
|
||||
59
plugins/inputs/cgroup/README.md
Normal file
59
plugins/inputs/cgroup/README.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# CGroup Input Plugin For Telegraf Agent
|
||||
|
||||
This input plugin will capture specific statistics per cgroup.
|
||||
|
||||
Following file formats are supported:
|
||||
|
||||
* Single value
|
||||
|
||||
```
|
||||
VAL\n
|
||||
```
|
||||
|
||||
* New line separated values
|
||||
|
||||
```
|
||||
VAL0\n
|
||||
VAL1\n
|
||||
```
|
||||
|
||||
* Space separated values
|
||||
|
||||
```
|
||||
VAL0 VAL1 ...\n
|
||||
```
|
||||
|
||||
* New line separated key-space-value's
|
||||
|
||||
```
|
||||
KEY0 VAL0\n
|
||||
KEY1 VAL1\n
|
||||
```
|
||||
|
||||
|
||||
### Tags:
|
||||
|
||||
Measurements don't have any specific tags unless you define them at the telegraf level (defaults). We
|
||||
used to have the path listed as a tag, but to keep cardinality in check it's easier to move this
|
||||
value to a field. Thanks @sebito91!
|
||||
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
# [[inputs.cgroup]]
|
||||
# paths = [
|
||||
# "/cgroup/memory", # root cgroup
|
||||
# "/cgroup/memory/child1", # container cgroup
|
||||
# "/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
|
||||
# ]
|
||||
# files = ["memory.*usage*", "memory.limit_in_bytes"]
|
||||
|
||||
# [[inputs.cgroup]]
|
||||
# paths = [
|
||||
# "/cgroup/cpu", # root cgroup
|
||||
# "/cgroup/cpu/*", # all container cgroups
|
||||
# "/cgroup/cpu/*/*", # all children cgroups under each container cgroup
|
||||
# ]
|
||||
# files = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
|
||||
```
|
||||
35
plugins/inputs/cgroup/cgroup.go
Normal file
35
plugins/inputs/cgroup/cgroup.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package cgroup
|
||||
|
||||
import (
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type CGroup struct {
|
||||
Paths []string `toml:"paths"`
|
||||
Files []string `toml:"files"`
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## Directories in which to look for files, globs are supported.
|
||||
# paths = [
|
||||
# "/cgroup/memory",
|
||||
# "/cgroup/memory/child1",
|
||||
# "/cgroup/memory/child2/*",
|
||||
# ]
|
||||
## cgroup stat fields, as file names, globs are supported.
|
||||
## these file names are appended to each path from above.
|
||||
# files = ["memory.*usage*", "memory.limit_in_bytes"]
|
||||
`
|
||||
|
||||
func (g *CGroup) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (g *CGroup) Description() string {
|
||||
return "Read specific statistics per cgroup"
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
|
||||
}
|
||||
243
plugins/inputs/cgroup/cgroup_linux.go
Normal file
243
plugins/inputs/cgroup/cgroup_linux.go
Normal file
@@ -0,0 +1,243 @@
|
||||
// +build linux
|
||||
|
||||
package cgroup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
const metricName = "cgroup"
|
||||
|
||||
func (g *CGroup) Gather(acc telegraf.Accumulator) error {
|
||||
list := make(chan pathInfo)
|
||||
go g.generateDirs(list)
|
||||
|
||||
for dir := range list {
|
||||
if dir.err != nil {
|
||||
return dir.err
|
||||
}
|
||||
if err := g.gatherDir(dir.path, acc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
|
||||
fields := make(map[string]interface{})
|
||||
|
||||
list := make(chan pathInfo)
|
||||
go g.generateFiles(dir, list)
|
||||
|
||||
for file := range list {
|
||||
if file.err != nil {
|
||||
return file.err
|
||||
}
|
||||
|
||||
raw, err := ioutil.ReadFile(file.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(raw) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
fd := fileData{data: raw, path: file.path}
|
||||
if err := fd.parse(fields); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
fields["path"] = dir
|
||||
|
||||
acc.AddFields(metricName, fields, nil)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ======================================================================
|
||||
|
||||
type pathInfo struct {
|
||||
path string
|
||||
err error
|
||||
}
|
||||
|
||||
func isDir(path string) (bool, error) {
|
||||
result, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return result.IsDir(), nil
|
||||
}
|
||||
|
||||
func (g *CGroup) generateDirs(list chan<- pathInfo) {
|
||||
for _, dir := range g.Paths {
|
||||
// getting all dirs that match the pattern 'dir'
|
||||
items, err := filepath.Glob(dir)
|
||||
if err != nil {
|
||||
list <- pathInfo{err: err}
|
||||
return
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
ok, err := isDir(item)
|
||||
if err != nil {
|
||||
list <- pathInfo{err: err}
|
||||
return
|
||||
}
|
||||
// supply only dirs
|
||||
if ok {
|
||||
list <- pathInfo{path: item}
|
||||
}
|
||||
}
|
||||
}
|
||||
close(list)
|
||||
}
|
||||
|
||||
func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) {
|
||||
for _, file := range g.Files {
|
||||
// getting all file paths that match the pattern 'dir + file'
|
||||
// path.Base make sure that file variable does not contains part of path
|
||||
items, err := filepath.Glob(path.Join(dir, path.Base(file)))
|
||||
if err != nil {
|
||||
list <- pathInfo{err: err}
|
||||
return
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
ok, err := isDir(item)
|
||||
if err != nil {
|
||||
list <- pathInfo{err: err}
|
||||
return
|
||||
}
|
||||
// supply only files not dirs
|
||||
if !ok {
|
||||
list <- pathInfo{path: item}
|
||||
}
|
||||
}
|
||||
}
|
||||
close(list)
|
||||
}
|
||||
|
||||
// ======================================================================
|
||||
|
||||
type fileData struct {
|
||||
data []byte
|
||||
path string
|
||||
}
|
||||
|
||||
func (fd *fileData) format() (*fileFormat, error) {
|
||||
for _, ff := range fileFormats {
|
||||
ok, err := ff.match(fd.data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
return &ff, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("%v: unknown file format", fd.path)
|
||||
}
|
||||
|
||||
func (fd *fileData) parse(fields map[string]interface{}) error {
|
||||
format, err := fd.format()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
format.parser(filepath.Base(fd.path), fields, fd.data)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ======================================================================
|
||||
|
||||
type fileFormat struct {
|
||||
name string
|
||||
pattern string
|
||||
parser func(measurement string, fields map[string]interface{}, b []byte)
|
||||
}
|
||||
|
||||
const keyPattern = "[[:alpha:]_]+"
|
||||
const valuePattern = "[\\d-]+"
|
||||
|
||||
var fileFormats = [...]fileFormat{
|
||||
// VAL\n
|
||||
fileFormat{
|
||||
name: "Single value",
|
||||
pattern: "^" + valuePattern + "\n$",
|
||||
parser: func(measurement string, fields map[string]interface{}, b []byte) {
|
||||
re := regexp.MustCompile("^(" + valuePattern + ")\n$")
|
||||
matches := re.FindAllStringSubmatch(string(b), -1)
|
||||
fields[measurement] = numberOrString(matches[0][1])
|
||||
},
|
||||
},
|
||||
// VAL0\n
|
||||
// VAL1\n
|
||||
// ...
|
||||
fileFormat{
|
||||
name: "New line separated values",
|
||||
pattern: "^(" + valuePattern + "\n){2,}$",
|
||||
parser: func(measurement string, fields map[string]interface{}, b []byte) {
|
||||
re := regexp.MustCompile("(" + valuePattern + ")\n")
|
||||
matches := re.FindAllStringSubmatch(string(b), -1)
|
||||
for i, v := range matches {
|
||||
fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
|
||||
}
|
||||
},
|
||||
},
|
||||
// VAL0 VAL1 ...\n
|
||||
fileFormat{
|
||||
name: "Space separated values",
|
||||
pattern: "^(" + valuePattern + " )+\n$",
|
||||
parser: func(measurement string, fields map[string]interface{}, b []byte) {
|
||||
re := regexp.MustCompile("(" + valuePattern + ") ")
|
||||
matches := re.FindAllStringSubmatch(string(b), -1)
|
||||
for i, v := range matches {
|
||||
fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
|
||||
}
|
||||
},
|
||||
},
|
||||
// KEY0 VAL0\n
|
||||
// KEY1 VAL1\n
|
||||
// ...
|
||||
fileFormat{
|
||||
name: "New line separated key-space-value's",
|
||||
pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$",
|
||||
parser: func(measurement string, fields map[string]interface{}, b []byte) {
|
||||
re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n")
|
||||
matches := re.FindAllStringSubmatch(string(b), -1)
|
||||
for _, v := range matches {
|
||||
fields[measurement+"."+v[1]] = numberOrString(v[2])
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func numberOrString(s string) interface{} {
|
||||
i, err := strconv.Atoi(s)
|
||||
if err == nil {
|
||||
return i
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (f fileFormat) match(b []byte) (bool, error) {
|
||||
ok, err := regexp.Match(f.pattern, b)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if ok {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
11
plugins/inputs/cgroup/cgroup_notlinux.go
Normal file
11
plugins/inputs/cgroup/cgroup_notlinux.go
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build !linux
|
||||
|
||||
package cgroup
|
||||
|
||||
import (
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
func (g *CGroup) Gather(acc telegraf.Accumulator) error {
|
||||
return nil
|
||||
}
|
||||
194  plugins/inputs/cgroup/cgroup_test.go  Normal file
@@ -0,0 +1,194 @@
// +build linux

package cgroup

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var cg1 = &CGroup{
	Paths: []string{"testdata/memory"},
	Files: []string{
		"memory.empty",
		"memory.max_usage_in_bytes",
		"memory.limit_in_bytes",
		"memory.stat",
		"memory.use_hierarchy",
		"notify_on_release",
	},
}

func assertContainsFields(a *testutil.Accumulator, t *testing.T, measurement string, fieldSet []map[string]interface{}) {
	a.Lock()
	defer a.Unlock()

	numEquals := 0
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for _, fields := range fieldSet {
				if reflect.DeepEqual(fields, p.Fields) {
					numEquals++
				}
			}
		}
	}

	if numEquals != len(fieldSet) {
		assert.Fail(t, fmt.Sprintf("only %d of %d are equal", numEquals, len(fieldSet)))
	}
}

func TestCgroupStatistics_1(t *testing.T) {
	var acc testutil.Accumulator

	err := cg1.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"memory.stat.cache":           1739362304123123123,
		"memory.stat.rss":             1775325184,
		"memory.stat.rss_huge":        778043392,
		"memory.stat.mapped_file":     421036032,
		"memory.stat.dirty":           -307200,
		"memory.max_usage_in_bytes.0": 0,
		"memory.max_usage_in_bytes.1": -1,
		"memory.max_usage_in_bytes.2": 2,
		"memory.limit_in_bytes":       223372036854771712,
		"memory.use_hierarchy":        "12-781",
		"notify_on_release":           0,
		"path":                        "testdata/memory",
	}
	assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
}

// ======================================================================

var cg2 = &CGroup{
	Paths: []string{"testdata/cpu"},
	Files: []string{"cpuacct.usage_percpu"},
}

func TestCgroupStatistics_2(t *testing.T) {
	var acc testutil.Accumulator

	err := cg2.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"cpuacct.usage_percpu.0": -1452543795404,
		"cpuacct.usage_percpu.1": 1376681271659,
		"cpuacct.usage_percpu.2": 1450950799997,
		"cpuacct.usage_percpu.3": -1473113374257,
		"path":                   "testdata/cpu",
	}
	assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
}

// ======================================================================

var cg3 = &CGroup{
	Paths: []string{"testdata/memory/*"},
	Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_3(t *testing.T) {
	var acc testutil.Accumulator

	err := cg3.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
		"path":                  "testdata/memory/group_1",
	}

	fieldsTwo := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
		"path":                  "testdata/memory/group_2",
	}
	assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
}

// ======================================================================

var cg4 = &CGroup{
	Paths: []string{"testdata/memory/*/*", "testdata/memory/group_2"},
	Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_4(t *testing.T) {
	var acc testutil.Accumulator

	err := cg4.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
		"path":                  "testdata/memory/group_1/group_1_1",
	}

	fieldsTwo := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
		"path":                  "testdata/memory/group_1/group_1_2",
	}

	fieldsThree := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
		"path":                  "testdata/memory/group_2",
	}

	assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo, fieldsThree})
}

// ======================================================================

var cg5 = &CGroup{
	Paths: []string{"testdata/memory/*/group_1_1"},
	Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_5(t *testing.T) {
	var acc testutil.Accumulator

	err := cg5.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
		"path":                  "testdata/memory/group_1/group_1_1",
	}

	fieldsTwo := map[string]interface{}{
		"memory.limit_in_bytes": 223372036854771712,
		"path":                  "testdata/memory/group_2/group_1_1",
	}
	assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
}

// ======================================================================

var cg6 = &CGroup{
	Paths: []string{"testdata/memory"},
	Files: []string{"memory.us*", "*/memory.kmem.*"},
}

func TestCgroupStatistics_6(t *testing.T) {
	var acc testutil.Accumulator

	err := cg6.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"memory.usage_in_bytes":      3513667584,
		"memory.use_hierarchy":       "12-781",
		"memory.kmem.limit_in_bytes": 9223372036854771712,
		"path":                       "testdata/memory",
	}
	assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
}
1  plugins/inputs/cgroup/testdata/blkio/blkio.io_serviced  vendored  Normal file
@@ -0,0 +1 @@
Total 0
131  plugins/inputs/cgroup/testdata/blkio/blkio.throttle.io_serviced  vendored  Normal file
@@ -0,0 +1,131 @@
11:0 Read 0
11:0 Write 0
11:0 Sync 0
11:0 Async 0
11:0 Total 0
8:0 Read 49134
8:0 Write 216703
8:0 Sync 177906
8:0 Async 87931
8:0 Total 265837
7:7 Read 0
7:7 Write 0
7:7 Sync 0
7:7 Async 0
7:7 Total 0
7:6 Read 0
7:6 Write 0
7:6 Sync 0
7:6 Async 0
7:6 Total 0
7:5 Read 0
7:5 Write 0
7:5 Sync 0
7:5 Async 0
7:5 Total 0
7:4 Read 0
7:4 Write 0
7:4 Sync 0
7:4 Async 0
7:4 Total 0
7:3 Read 0
7:3 Write 0
7:3 Sync 0
7:3 Async 0
7:3 Total 0
7:2 Read 0
7:2 Write 0
7:2 Sync 0
7:2 Async 0
7:2 Total 0
7:1 Read 0
7:1 Write 0
7:1 Sync 0
7:1 Async 0
7:1 Total 0
7:0 Read 0
7:0 Write 0
7:0 Sync 0
7:0 Async 0
7:0 Total 0
1:15 Read 3
1:15 Write 0
1:15 Sync 0
1:15 Async 3
1:15 Total 3
1:14 Read 3
1:14 Write 0
1:14 Sync 0
1:14 Async 3
1:14 Total 3
1:13 Read 3
1:13 Write 0
1:13 Sync 0
1:13 Async 3
1:13 Total 3
1:12 Read 3
1:12 Write 0
1:12 Sync 0
1:12 Async 3
1:12 Total 3
1:11 Read 3
1:11 Write 0
1:11 Sync 0
1:11 Async 3
1:11 Total 3
1:10 Read 3
1:10 Write 0
1:10 Sync 0
1:10 Async 3
1:10 Total 3
1:9 Read 3
1:9 Write 0
1:9 Sync 0
1:9 Async 3
1:9 Total 3
1:8 Read 3
1:8 Write 0
1:8 Sync 0
1:8 Async 3
1:8 Total 3
1:7 Read 3
1:7 Write 0
1:7 Sync 0
1:7 Async 3
1:7 Total 3
1:6 Read 3
1:6 Write 0
1:6 Sync 0
1:6 Async 3
1:6 Total 3
1:5 Read 3
1:5 Write 0
1:5 Sync 0
1:5 Async 3
1:5 Total 3
1:4 Read 3
1:4 Write 0
1:4 Sync 0
1:4 Async 3
1:4 Total 3
1:3 Read 3
1:3 Write 0
1:3 Sync 0
1:3 Async 3
1:3 Total 3
1:2 Read 3
1:2 Write 0
1:2 Sync 0
1:2 Async 3
1:2 Total 3
1:1 Read 3
1:1 Write 0
1:1 Sync 0
1:1 Async 3
1:1 Total 3
1:0 Read 3
1:0 Write 0
1:0 Sync 0
1:0 Async 3
1:0 Total 3
Total 265885
1  plugins/inputs/cgroup/testdata/cpu/cpu.cfs_quota_us  vendored  Normal file
@@ -0,0 +1 @@
-1
1  plugins/inputs/cgroup/testdata/cpu/cpuacct.usage_percpu  vendored  Normal file
@@ -0,0 +1 @@
-1452543795404 1376681271659 1450950799997 -1473113374257
1  plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
223372036854771712
5  plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.stat  vendored  Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
1  plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
223372036854771712
5  plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.stat  vendored  Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
1  plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
9223372036854771712
1  plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.max_usage_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
0
1  plugins/inputs/cgroup/testdata/memory/group_1/memory.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
223372036854771712
5  plugins/inputs/cgroup/testdata/memory/group_1/memory.stat  vendored  Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
1  plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
223372036854771712
5  plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.stat  vendored  Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
1  plugins/inputs/cgroup/testdata/memory/group_2/memory.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
223372036854771712
5  plugins/inputs/cgroup/testdata/memory/group_2/memory.stat  vendored  Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
0  plugins/inputs/cgroup/testdata/memory/memory.empty  vendored  Normal file
1  plugins/inputs/cgroup/testdata/memory/memory.kmem.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
9223372036854771712
1  plugins/inputs/cgroup/testdata/memory/memory.limit_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
223372036854771712
3  plugins/inputs/cgroup/testdata/memory/memory.max_usage_in_bytes  vendored  Normal file
@@ -0,0 +1,3 @@
0
-1
2
8  plugins/inputs/cgroup/testdata/memory/memory.numa_stat  vendored  Normal file
@@ -0,0 +1,8 @@
total=858067 N0=858067
file=406254 N0=406254
anon=451792 N0=451792
unevictable=21 N0=21
hierarchical_total=858067 N0=858067
hierarchical_file=406254 N0=406254
hierarchical_anon=451792 N0=451792
hierarchical_unevictable=21 N0=21
5  plugins/inputs/cgroup/testdata/memory/memory.stat  vendored  Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200
1  plugins/inputs/cgroup/testdata/memory/memory.usage_in_bytes  vendored  Normal file
@@ -0,0 +1 @@
3513667584
1  plugins/inputs/cgroup/testdata/memory/memory.use_hierarchy  vendored  Normal file
@@ -0,0 +1 @@
12-781
1  plugins/inputs/cgroup/testdata/memory/notify_on_release  vendored  Normal file
@@ -0,0 +1 @@
0
92  plugins/inputs/chrony/README.md  Normal file
@@ -0,0 +1,92 @@
# chrony Input Plugin

Get standard chrony metrics; requires the chronyc executable.

Below is the documentation of the various headers returned by `chronyc tracking`.

- Reference ID - This is the refid and name (or IP address), if available, of the
server to which the computer is currently synchronised. If this is 127.127.1.1
it means the computer is not synchronised to any external source and that you
have the ‘local’ mode operating (via the local command in chronyc (see section local),
or the local directive in the ‘/etc/chrony.conf’ file (see section local)).
- Stratum - The stratum indicates how many hops away from a computer with an attached
reference clock we are. Such a computer is a stratum-1 computer, so the computer in the
example is two hops away (i.e. a.b.c is a stratum-2 and is synchronised from a stratum-1).
- Ref time - This is the time (UTC) at which the last measurement from the reference
source was processed.
- System time - In normal operation, chronyd never steps the system clock, because any
jump in the timescale can have adverse consequences for certain application programs.
Instead, any error in the system clock is corrected by slightly speeding up or slowing
down the system clock until the error has been removed, and then returning to the system
clock’s normal speed. A consequence of this is that there will be a period when the
system clock (as read by other programs using the gettimeofday() system call, or by the
date command in the shell) will be different from chronyd's estimate of the current true
time (which it reports to NTP clients when it is operating in server mode). The value
reported on this line is the difference due to this effect.
- Last offset - This is the estimated local offset on the last clock update.
- RMS offset - This is a long-term average of the offset value.
- Frequency - The ‘frequency’ is the rate by which the system’s clock would be
wrong if chronyd was not correcting it. It is expressed in ppm (parts per million).
For example, a value of 1ppm would mean that when the system’s clock thinks it has
advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.
- Residual freq - This shows the ‘residual frequency’ for the currently selected
reference source. This reflects any difference between what the measurements from the
reference source indicate the frequency should be and the frequency currently being used.
The reason this is not always zero is that a smoothing procedure is applied to the
frequency. Each time a measurement from the reference source is obtained and a new
residual frequency computed, the estimated accuracy of this residual is compared with the
estimated accuracy (see ‘skew’ next) of the existing frequency value. A weighted average
is computed for the new frequency, with weights depending on these accuracies. If the
measurements from the reference source follow a consistent trend, the residual will be
driven to zero over time.
- Skew - This is the estimated error bound on the frequency.
- Root delay - This is the total of the network path delays to the stratum-1 computer
from which the computer is ultimately synchronised. In certain extreme situations, this
value can be negative. (This can arise in a symmetric peer arrangement where the computers’
frequencies are not tracking each other and the network delay is very short relative to the
turn-around time at each computer.)
- Root dispersion - This is the total dispersion accumulated through all the computers
back to the stratum-1 computer from which the computer is ultimately synchronised.
Dispersion is due to system clock resolution, statistical measurement variations etc.
- Leap status - This is the leap status, which can be Normal, Insert second,
Delete second or Not synchronised.

### Configuration:

```toml
# Get standard chrony metrics, requires chronyc executable.
[[inputs.chrony]]
  ## If true, chronyc tries to perform a DNS lookup for the time server.
  # dns_lookup = false
```

### Measurements & Fields:

- chrony
    - last_offset (float, seconds)
    - rms_offset (float, seconds)
    - frequency (float, ppm)
    - residual_freq (float, ppm)
    - skew (float, ppm)
    - root_delay (float, seconds)
    - root_dispersion (float, seconds)
    - update_interval (float, seconds)

### Tags:

- All measurements have the following tags:
    - reference_id
    - stratum
    - leap_status

### Example Output:

```
$ telegraf -config telegraf.conf -input-filter chrony -test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```
129  plugins/inputs/chrony/chrony.go  Normal file
@@ -0,0 +1,129 @@
// +build linux

package chrony

import (
	"errors"
	"fmt"
	"os/exec"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

var (
	execCommand = exec.Command // execCommand is used to mock commands in tests.
)

type Chrony struct {
	DNSLookup bool `toml:"dns_lookup"`
	path      string
}

func (*Chrony) Description() string {
	return "Get standard chrony metrics, requires chronyc executable."
}

func (*Chrony) SampleConfig() string {
	return `
  ## If true, chronyc tries to perform a DNS lookup for the time server.
  # dns_lookup = false
`
}

func (c *Chrony) Gather(acc telegraf.Accumulator) error {
	if len(c.path) == 0 {
		return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH")
	}

	flags := []string{}
	if !c.DNSLookup {
		flags = append(flags, "-n")
	}
	flags = append(flags, "tracking")

	cmd := execCommand(c.path, flags...)
	out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
	if err != nil {
		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
	}
	fields, tags, err := processChronycOutput(string(out))
	if err != nil {
		return err
	}
	acc.AddFields("chrony", fields, tags)
	return nil
}

// processChronycOutput takes in a string output from the chronyc command, like:
//
//   Reference ID    : 192.168.1.22 (ntp.example.com)
//   Stratum         : 3
//   Ref time (UTC)  : Thu May 12 14:27:07 2016
//   System time     : 0.000020390 seconds fast of NTP time
//   Last offset     : +0.000012651 seconds
//   RMS offset      : 0.000025577 seconds
//   Frequency       : 16.001 ppm slow
//   Residual freq   : -0.000 ppm
//   Skew            : 0.006 ppm
//   Root delay      : 0.001655 seconds
//   Root dispersion : 0.003307 seconds
//   Update interval : 507.2 seconds
//   Leap status     : Normal
//
// The name on the left side of the colon becomes the field or tag key. If the first
// token on the right side can be parsed as a float, the pair is emitted as a field;
// otherwise the (lowercased) token is emitted as a tag value. Stratum is always a tag.
//
// Lines whose name contains "time" (Ref time, System time) are ignored, and all
// names are converted to snake case.
//
// It returns (<fields>, <tags>).
func processChronycOutput(out string) (map[string]interface{}, map[string]string, error) {
	tags := map[string]string{}
	fields := map[string]interface{}{}
	lines := strings.Split(strings.TrimSpace(out), "\n")
	for _, line := range lines {
		stats := strings.Split(line, ":")
		if len(stats) < 2 {
			return nil, nil, fmt.Errorf("unexpected output from chronyc, expected ':' in %s", out)
		}
		name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
		// ignore reference time
		if strings.Contains(name, "time") {
			continue
		}
		valueFields := strings.Fields(stats[1])
		if len(valueFields) == 0 {
			return nil, nil, fmt.Errorf("unexpected output from chronyc: %s", out)
		}
		if strings.Contains(strings.ToLower(name), "stratum") {
			tags["stratum"] = valueFields[0]
			continue
		}
		value, err := strconv.ParseFloat(valueFields[0], 64)
		if err != nil {
			tags[name] = strings.ToLower(valueFields[0])
			continue
		}
		if strings.Contains(stats[1], "slow") {
			value = -value
		}
		fields[name] = value
	}

	return fields, tags, nil
}

func init() {
	c := Chrony{}
	path, _ := exec.LookPath("chronyc")
	if len(path) > 0 {
		c.path = path
	}
	inputs.Add("chrony", func() telegraf.Input {
		return &c
	})
}
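Gather above bounds the chronyc run at 5 seconds via internal.CombinedOutputTimeout, which is not part of this diff. For context, a minimal sketch of what such a helper can look like; this is an assumption about the general pattern, not Telegraf's actual implementation:

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"time"
)

// combinedOutputTimeout runs cmd, returning its combined stdout/stderr,
// and kills the process if it does not exit within timeout.
// (Hypothetical reconstruction of an internal helper.)
func combinedOutputTimeout(cmd *exec.Cmd, timeout time.Duration) ([]byte, error) {
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-time.After(timeout):
		cmd.Process.Kill()
		return buf.Bytes(), fmt.Errorf("command timed out after %s", timeout)
	case err := <-done:
		return buf.Bytes(), err
	}
}

func main() {
	out, err := combinedOutputTimeout(exec.Command("echo", "hello"), 5*time.Second)
	fmt.Printf("%s err=%v\n", out, err)
}
```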
3  plugins/inputs/chrony/chrony_notlinux.go  Normal file
@@ -0,0 +1,3 @@
// +build !linux

package chrony
109  plugins/inputs/chrony/chrony_test.go  Normal file
@@ -0,0 +1,109 @@
// +build linux

package chrony

import (
	"fmt"
	"os"
	"os/exec"
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

func TestGather(t *testing.T) {
	c := Chrony{
		path: "chronyc",
	}
	// overwrite exec commands with mock commands
	execCommand = fakeExecCommand
	defer func() { execCommand = exec.Command }()
	var acc testutil.Accumulator

	err := c.Gather(&acc)
	if err != nil {
		t.Fatal(err)
	}

	tags := map[string]string{
		"reference_id": "192.168.1.22",
		"leap_status":  "normal",
		"stratum":      "3",
	}
	fields := map[string]interface{}{
		"last_offset":     0.000012651,
		"rms_offset":      0.000025577,
		"frequency":       -16.001,
		"residual_freq":   0.0,
		"skew":            0.006,
		"root_delay":      0.001655,
		"root_dispersion": 0.003307,
		"update_interval": 507.2,
	}

	acc.AssertContainsTaggedFields(t, "chrony", fields, tags)

	// test with dns lookup
	c.DNSLookup = true
	err = c.Gather(&acc)
	if err != nil {
		t.Fatal(err)
	}
	acc.AssertContainsTaggedFields(t, "chrony", fields, tags)
}

// fakeExecCommand is a helper function that mocks
// the exec.Command call (and calls the test binary)
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := []string{"-test.run=TestHelperProcess", "--", command}
	cs = append(cs, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}

// TestHelperProcess isn't a real test. It's used to mock exec.Command.
// For example, if you run:
// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking
// it returns the mockData below.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}

	lookup := "Reference ID    : 192.168.1.22 (ntp.example.com)\n"
	noLookup := "Reference ID    : 192.168.1.22 (192.168.1.22)\n"
	mockData := `Stratum         : 3
Ref time (UTC)  : Thu May 12 14:27:07 2016
System time     : 0.000020390 seconds fast of NTP time
Last offset     : +0.000012651 seconds
RMS offset      : 0.000025577 seconds
Frequency       : 16.001 ppm slow
Residual freq   : -0.000 ppm
Skew            : 0.006 ppm
Root delay      : 0.001655 seconds
Root dispersion : 0.003307 seconds
Update interval : 507.2 seconds
Leap status     : Normal
`

	args := os.Args

	// The preceding arguments are test-harness arguments, which look like:
	// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
	cmd, args := args[3], args[4:]

	if cmd == "chronyc" {
		if args[0] == "tracking" {
			fmt.Fprint(os.Stdout, lookup+mockData)
		} else {
			fmt.Fprint(os.Stdout, noLookup+mockData)
		}
	} else {
		fmt.Fprint(os.Stdout, "command not found")
		os.Exit(1)
	}
	os.Exit(0)
}
@@ -6,9 +6,12 @@ This plugin will pull Metric Statistics from Amazon CloudWatch.
 
 This plugin uses a credential chain for Authentication with the CloudWatch
 API endpoint. In the following order the plugin will attempt to authenticate.
-1. [IAMS Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
-2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
-3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
+1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
+2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
+3. Shared profile from `profile` attribute
+4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
+5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
+6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
 
 ### Configuration:
 
@@ -24,23 +27,32 @@ API endpoint. In the following order the plugin will attempt to authenticate.
   delay = '1m'
 
   ## Override global run interval (optional - defaults to global interval)
   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
   ## gaps or overlap in pulled data
   interval = '1m'
 
   ## Metric Statistic Namespace (required)
   namespace = 'AWS/ELB'
 
+  ## Maximum requests per second. Note that the global default AWS rate limit is
+  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
+  ## maximum of 10. Optional - default value is 10.
+  ratelimit = 10
+
   ## Metrics to Pull (optional)
   ## Defaults to all Metrics in Namespace if nothing is provided
   ## Refreshes Namespace available metrics every 1h
   [[inputs.cloudwatch.metrics]]
     names = ['Latency', 'RequestCount']
 
     ## Dimension filters for Metric (optional)
     [[inputs.cloudwatch.metrics.dimensions]]
       name = 'LoadBalancerName'
       value = 'p-example'
 
     [[inputs.cloudwatch.metrics.dimensions]]
       name = 'AvailabilityZone'
       value = '*'
 ```
 #### Requirements and Terminology
 
@@ -52,6 +64,39 @@ Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/A
 - `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names
 - `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs
 
+Omitting or specifying a value of `'*'` for a dimension value configures all available metrics that contain a dimension with the specified name
+to be retrieved. If specifying >1 dimension, then the metric must contain *all* the configured dimensions, where the value of the
+wildcard dimension is ignored.
+
+Example:
+```
+[[inputs.cloudwatch.metrics]]
+  names = ['Latency']
+
+  ## Dimension filters for Metric (optional)
+  [[inputs.cloudwatch.metrics.dimensions]]
+    name = 'LoadBalancerName'
+    value = 'p-example'
+
+  [[inputs.cloudwatch.metrics.dimensions]]
+    name = 'AvailabilityZone'
+    value = '*'
+```
+
+If the following ELBs are available:
+- name: `p-example`, availabilityZone: `us-east-1a`
+- name: `p-example`, availabilityZone: `us-east-1b`
+- name: `q-example`, availabilityZone: `us-east-1a`
+- name: `q-example`, availabilityZone: `us-east-1b`
+
+Then 2 metrics will be output:
+- name: `p-example`, availabilityZone: `us-east-1a`
+- name: `p-example`, availabilityZone: `us-east-1b`
+
+If the `AvailabilityZone` wildcard dimension was omitted, then a single metric (name: `p-example`)
+would be exported containing the aggregate values of the ELB across availability zones.
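The selection rule described above is implemented by the isSelected helper added later in this diff. For illustration, a minimal standalone sketch of the same rule, using a simplified Dimension type and hypothetical sample data (names here are illustrative):

```go
package main

import "fmt"

// Dimension mirrors the plugin's config type: an empty or "*" Value acts as a wildcard.
type Dimension struct {
	Name  string
	Value string
}

// selected reports whether a metric's dimensions match the configured filters:
// the counts must match, and every configured dimension must be present by name,
// with the value matching exactly unless the filter value is "" or "*".
func selected(metricDims, configured []Dimension) bool {
	if len(metricDims) != len(configured) {
		return false
	}
	for _, c := range configured {
		ok := false
		for _, m := range metricDims {
			if c.Name == m.Name && (c.Value == "" || c.Value == "*" || c.Value == m.Value) {
				ok = true
			}
		}
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	cfg := []Dimension{{"LoadBalancerName", "p-example"}, {"AvailabilityZone", "*"}}
	m := []Dimension{{"LoadBalancerName", "p-example"}, {"AvailabilityZone", "us-east-1a"}}
	fmt.Println(selected(m, cfg)) // true: the name matches, the zone matches the wildcard
}
```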
 #### Restrictions and Limitations
 - CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html)
 - CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/)
 
@@ -3,28 +3,37 @@ package cloudwatch
 
 import (
 	"fmt"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/session"
-
 	"github.com/aws/aws-sdk-go/service/cloudwatch"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
+	internalaws "github.com/influxdata/telegraf/internal/config/aws"
+	"github.com/influxdata/telegraf/internal/errchan"
+	"github.com/influxdata/telegraf/internal/limiter"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
 type (
 	CloudWatch struct {
-		Region    string `toml:"region"`
-		AccessKey string `toml:"access_key"`
-		SecretKey string `toml:"secret_key"`
+		Region      string `toml:"region"`
+		AccessKey   string `toml:"access_key"`
+		SecretKey   string `toml:"secret_key"`
+		RoleARN     string `toml:"role_arn"`
+		Profile     string `toml:"profile"`
+		Filename    string `toml:"shared_credential_file"`
+		Token       string `toml:"token"`
 
 		Period    internal.Duration `toml:"period"`
 		Delay     internal.Duration `toml:"delay"`
 		Namespace string            `toml:"namespace"`
 		Metrics   []*Metric         `toml:"metrics"`
+		CacheTTL  internal.Duration `toml:"cache_ttl"`
+		RateLimit int               `toml:"ratelimit"`
 		client      cloudwatchClient
 		metricCache *MetricCache
 	}
@@ -58,12 +67,18 @@ func (c *CloudWatch) SampleConfig() string {
 
   ## Amazon Credentials
   ## Credentials are loaded in the following order
-  ## 1) explicit credentials from 'access_key' and 'secret_key'
-  ## 2) environment variables
-  ## 3) shared credentials file
-  ## 4) EC2 Instance Profile
+  ## 1) Assumed credentials via STS if role_arn is specified
+  ## 2) explicit credentials from 'access_key' and 'secret_key'
+  ## 3) shared profile from 'profile'
+  ## 4) environment variables
+  ## 5) shared credentials file
+  ## 6) EC2 Instance Profile
   #access_key = ""
   #secret_key = ""
+  #token = ""
+  #role_arn = ""
+  #profile = ""
+  #shared_credential_file = ""
 
   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
   period = '1m'
@@ -75,9 +90,18 @@ func (c *CloudWatch) SampleConfig() string {
   ## gaps or overlap in pulled data
   interval = '1m'
 
+  ## Configure the TTL for the internal cache of metrics.
+  ## Defaults to 1 hr if not specified
+  #cache_ttl = '10m'
+
   ## Metric Statistic Namespace (required)
   namespace = 'AWS/ELB'
 
+  ## Maximum requests per second. Note that the global default AWS rate limit is
+  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
+  ## maximum of 10. Optional - default value is 10.
+  ratelimit = 10
+
   ## Metrics to Pull (optional)
   ## Defaults to all Metrics in Namespace if nothing is provided
   ## Refreshes Namespace available metrics every 1h
@@ -106,20 +130,40 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
 	if c.Metrics != nil {
 		metrics = []*cloudwatch.Metric{}
 		for _, m := range c.Metrics {
-			dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
-			for k, d := range m.Dimensions {
-				dimensions[k] = &cloudwatch.Dimension{
-					Name:  aws.String(d.Name),
-					Value: aws.String(d.Value),
+			if !hasWildcard(m.Dimensions) {
+				dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
+				for k, d := range m.Dimensions {
+					fmt.Printf("Dimension [%s]:[%s]\n", d.Name, d.Value)
+					dimensions[k] = &cloudwatch.Dimension{
+						Name:  aws.String(d.Name),
+						Value: aws.String(d.Value),
+					}
+				}
+				for _, name := range m.MetricNames {
+					metrics = append(metrics, &cloudwatch.Metric{
+						Namespace:  aws.String(c.Namespace),
+						MetricName: aws.String(name),
+						Dimensions: dimensions,
+					})
+				}
+			} else {
+				allMetrics, err := c.fetchNamespaceMetrics()
+				if err != nil {
+					return err
+				}
+				for _, name := range m.MetricNames {
+					for _, metric := range allMetrics {
+						if isSelected(metric, m.Dimensions) {
+							metrics = append(metrics, &cloudwatch.Metric{
+								Namespace:  aws.String(c.Namespace),
+								MetricName: aws.String(name),
+								Dimensions: metric.Dimensions,
+							})
+						}
+					}
 				}
 			}
-			for _, name := range m.MetricNames {
-				metrics = append(metrics, &cloudwatch.Metric{
-					Namespace:  aws.String(c.Namespace),
-					MetricName: aws.String(name),
-					Dimensions: dimensions,
-				})
-			}
+
 		}
 	} else {
 		var err error
@@ -130,30 +174,36 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
 	}
 
 	metricCount := len(metrics)
-	var errChan = make(chan error, metricCount)
+	errChan := errchan.New(metricCount)
 
 	now := time.Now()
 
 	// limit concurrency or we can easily exhaust user connection limit
-	semaphore := make(chan byte, 64)
-
+	// see cloudwatch API request limits:
+	// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
+	lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
+	defer lmtr.Stop()
+	var wg sync.WaitGroup
+	wg.Add(len(metrics))
 	for _, m := range metrics {
-		semaphore <- 0x1
-		go c.gatherMetric(acc, m, now, semaphore, errChan)
+		<-lmtr.C
+		go func(inm *cloudwatch.Metric) {
+			defer wg.Done()
+			c.gatherMetric(acc, inm, now, errChan.C)
+		}(m)
 	}
+	wg.Wait()
 
-	for i := 1; i <= metricCount; i++ {
-		err := <-errChan
-		if err != nil {
-			return err
-		}
-	}
-	return nil
+	return errChan.Error()
 }
 
 func init() {
 	inputs.Add("cloudwatch", func() telegraf.Input {
-		return &CloudWatch{}
+		ttl, _ := time.ParseDuration("1h")
+		return &CloudWatch{
+			CacheTTL:  internal.Duration{Duration: ttl},
+			RateLimit: 10,
+		}
 	})
 }
 
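The internal/limiter package consumed above via lmtr.C is not included in this diff. A plausible sketch of such a ticker-driven rate limiter, inferred from how the loop blocks on <-lmtr.C before each request; this is an assumption, not the actual Telegraf source:

```go
package limiter // hypothetical reconstruction, not the Telegraf source

import "time"

// RateLimiter exposes a channel C that yields at most `rate` tokens per
// `per` interval; callers block on <-r.C before doing rate-limited work.
type RateLimiter struct {
	C    chan bool
	done chan struct{}
}

func NewRateLimiter(rate int, per time.Duration) *RateLimiter {
	r := &RateLimiter{
		C:    make(chan bool),
		done: make(chan struct{}),
	}
	go r.limit(rate, per)
	return r
}

func (r *RateLimiter) limit(rate int, per time.Duration) {
	ticker := time.NewTicker(per)
	defer ticker.Stop()
	for {
		// Hand out up to `rate` tokens, then wait for the next tick.
		for i := 0; i < rate; i++ {
			select {
			case r.C <- true:
			case <-r.done:
				return
			}
		}
		select {
		case <-ticker.C:
		case <-r.done:
			return
		}
	}
}

// Stop terminates the limiter goroutine.
func (r *RateLimiter) Stop() { close(r.done) }
```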
@@ -161,14 +211,18 @@ func init() {
 /*
  * Initialize CloudWatch client
  */
 func (c *CloudWatch) initializeCloudWatch() error {
-	config := &aws.Config{
-		Region: aws.String(c.Region),
-	}
-	if c.AccessKey != "" || c.SecretKey != "" {
-		config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, "")
+	credentialConfig := &internalaws.CredentialConfig{
+		Region:    c.Region,
+		AccessKey: c.AccessKey,
+		SecretKey: c.SecretKey,
+		RoleARN:   c.RoleARN,
+		Profile:   c.Profile,
+		Filename:  c.Filename,
+		Token:     c.Token,
 	}
+	configProvider := credentialConfig.Credentials()
 
-	c.client = cloudwatch.New(session.New(config))
+	c.client = cloudwatch.New(configProvider)
 	return nil
 }
 
@@ -203,11 +257,10 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
 		more = token != nil
 	}
 
-	cacheTTL, _ := time.ParseDuration("1hr")
 	c.metricCache = &MetricCache{
 		Metrics: metrics,
 		Fetched: time.Now(),
-		TTL:     cacheTTL,
+		TTL:     c.CacheTTL.Duration,
 	}
 
 	return
@@ -216,12 +269,16 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
 /*
  * Gather given Metric and emit any error
  */
-func (c *CloudWatch) gatherMetric(acc telegraf.Accumulator, metric *cloudwatch.Metric, now time.Time, semaphore chan byte, errChan chan error) {
+func (c *CloudWatch) gatherMetric(
+	acc telegraf.Accumulator,
+	metric *cloudwatch.Metric,
+	now time.Time,
+	errChan chan error,
+) {
 	params := c.getStatisticsInput(metric, now)
 	resp, err := c.client.GetMetricStatistics(params)
 	if err != nil {
 		errChan <- err
-		<-semaphore
 		return
 	}
 
@@ -258,7 +315,6 @@ func (c *CloudWatch) gatherMetric(acc telegraf.Accumulator, metric *cloudwatch.M
 	}
 
 	errChan <- nil
-	<-semaphore
 }
 
 /*
@@ -309,3 +365,32 @@ func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric, now time.Time
 func (c *MetricCache) IsValid() bool {
 	return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
 }
+
+func hasWildcard(dimensions []*Dimension) bool {
+	for _, d := range dimensions {
+		if d.Value == "" || d.Value == "*" {
+			return true
+		}
+	}
+	return false
+}
+
+func isSelected(metric *cloudwatch.Metric, dimensions []*Dimension) bool {
+	if len(metric.Dimensions) != len(dimensions) {
+		return false
+	}
+	for _, d := range dimensions {
+		selected := false
+		for _, d2 := range metric.Dimensions {
+			if d.Name == *d2.Name {
+				if d.Value == "" || d.Value == "*" || d.Value == *d2.Value {
+					selected = true
+				}
+			}
+		}
+		if !selected {
+			return false
+		}
+	}
+	return true
+}
@@ -58,6 +58,7 @@ func TestGather(t *testing.T) {
 		Namespace: "AWS/ELB",
 		Delay:     internalDuration,
 		Period:    internalDuration,
+		RateLimit: 10,
 	}
 
 	var acc testutil.Accumulator
 
56  plugins/inputs/conntrack/README.md  Normal file
@@ -0,0 +1,56 @@
# Conntrack Plugin

Collects stats from Netfilter's conntrack-tools.

The conntrack-tools provide a mechanism for tracking various aspects of
network connections as they are processed by netfilter. At runtime,
conntrack exposes many of those connection statistics within /proc/sys/net.
Depending on your kernel version, these files can be found in either
/proc/sys/net/ipv4/netfilter or /proc/sys/net/netfilter and will be
prefixed with either ip_ or nf_. This plugin reads the files specified
in its configuration and publishes each one as a field, with the prefix
normalized to ip_.

In order to simplify configuration in a heterogeneous environment, a superset
of directory and filenames can be specified. Any locations that don't exist
will be ignored.

For more information on conntrack-tools, see the
[Netfilter Documentation](http://conntrack-tools.netfilter.org/).

### Configuration:

```toml
# Collects conntrack stats from the configured directories and files.
[[inputs.conntrack]]
   ## The following defaults would work with multiple versions of conntrack.
   ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
   ## kernel versions, as are the directory locations.

   ## Superset of filenames to look for within the conntrack dirs.
   ## Missing files will be ignored.
   files = ["ip_conntrack_count","ip_conntrack_max",
            "nf_conntrack_count","nf_conntrack_max"]

   ## Directories to search within for the conntrack files above.
   ## Missing directories will be ignored.
   dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
```

### Measurements & Fields:

- conntrack
    - ip_conntrack_count (int, count): the number of entries in the conntrack table
    - ip_conntrack_max (int, size): the max capacity of the conntrack table

### Tags:

This input does not use tags.

### Example Output:

```
$ ./telegraf -config telegraf.conf -input-filter conntrack -test
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
```
120  plugins/inputs/conntrack/conntrack.go  Normal file
@@ -0,0 +1,120 @@
// +build linux

package conntrack

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Conntrack struct {
	Path  string
	Dirs  []string
	Files []string
}

const (
	inputName = "conntrack"
)

var dfltDirs = []string{
	"/proc/sys/net/ipv4/netfilter",
	"/proc/sys/net/netfilter",
}

var dfltFiles = []string{
	"ip_conntrack_count",
	"ip_conntrack_max",
	"nf_conntrack_count",
	"nf_conntrack_max",
}

func (c *Conntrack) setDefaults() {
	if len(c.Dirs) == 0 {
		c.Dirs = dfltDirs
	}

	if len(c.Files) == 0 {
		c.Files = dfltFiles
	}
}

func (c *Conntrack) Description() string {
	return "Collects conntrack stats from the configured directories and files."
}

var sampleConfig = `
   ## The following defaults would work with multiple versions of conntrack.
   ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
   ## kernel versions, as are the directory locations.

   ## Superset of filenames to look for within the conntrack dirs.
   ## Missing files will be ignored.
   files = ["ip_conntrack_count","ip_conntrack_max",
            "nf_conntrack_count","nf_conntrack_max"]

   ## Directories to search within for the conntrack files above.
   ## Missing directories will be ignored.
   dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
`

func (c *Conntrack) SampleConfig() string {
	return sampleConfig
}

func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
	c.setDefaults()

	var metricKey string
	fields := make(map[string]interface{})

	for _, dir := range c.Dirs {
		for _, file := range c.Files {
			// NOTE: no system will have both nf_ and ip_ prefixes,
			// so we're safe to branch on suffix only.
			parts := strings.SplitN(file, "_", 2)
			if len(parts) < 2 {
				continue
			}
			metricKey = "ip_" + parts[1]

			fName := filepath.Join(dir, file)
			if _, err := os.Stat(fName); err != nil {
				continue
			}

			contents, err := ioutil.ReadFile(fName)
			if err != nil {
				log.Printf("E! failed to read file '%s': %v", fName, err)
				continue
			}

			v := strings.TrimSpace(string(contents))
			fields[metricKey], err = strconv.ParseFloat(v, 64)
			if err != nil {
				log.Printf("E! failed to parse metric, expected number but "+
					"found '%s': %v", v, err)
			}
		}
	}

	if len(fields) == 0 {
		return fmt.Errorf("Conntrack input failed to collect metrics. " +
			"Is the conntrack kernel module loaded?")
	}

	acc.AddFields(inputName, fields, nil)
	return nil
}

func init() {
	inputs.Add(inputName, func() telegraf.Input { return &Conntrack{} })
}
3  plugins/inputs/conntrack/conntrack_notlinux.go  Normal file
@@ -0,0 +1,3 @@
// +build !linux

package conntrack
90  plugins/inputs/conntrack/conntrack_test.go  Normal file
@@ -0,0 +1,90 @@
// +build linux

package conntrack

import (
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
)

func restoreDflts(savedFiles, savedDirs []string) {
	dfltFiles = savedFiles
	dfltDirs = savedDirs
}

func TestNoFilesFound(t *testing.T) {
	defer restoreDflts(dfltFiles, dfltDirs)

	dfltFiles = []string{"baz.txt"}
	dfltDirs = []string{"./foo/bar"}
	c := &Conntrack{}
	acc := &testutil.Accumulator{}
	err := c.Gather(acc)

	assert.EqualError(t, err, "Conntrack input failed to collect metrics. "+
		"Is the conntrack kernel module loaded?")
}

func TestDefaultsUsed(t *testing.T) {
	defer restoreDflts(dfltFiles, dfltDirs)
	tmpdir, err := ioutil.TempDir("", "tmp1")
	assert.NoError(t, err)
	defer os.Remove(tmpdir)

	tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count")
	assert.NoError(t, err)

	dfltDirs = []string{tmpdir}
	fname := path.Base(tmpFile.Name())
	dfltFiles = []string{fname}

	count := 1234321
	ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)
	c := &Conntrack{}
	acc := &testutil.Accumulator{}

	c.Gather(acc)
	acc.AssertContainsFields(t, inputName, map[string]interface{}{
		fname: float64(count)})
}

func TestConfigsUsed(t *testing.T) {
	defer restoreDflts(dfltFiles, dfltDirs)
	tmpdir, err := ioutil.TempDir("", "tmp1")
	assert.NoError(t, err)
	defer os.Remove(tmpdir)

	cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count")
	maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max")
	assert.NoError(t, err)

	dfltDirs = []string{tmpdir}
	cntFname := path.Base(cntFile.Name())
	maxFname := path.Base(maxFile.Name())
	dfltFiles = []string{cntFname, maxFname}

	count := 1234321
	max := 9999999
	ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)
	ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)
	c := &Conntrack{}
	acc := &testutil.Accumulator{}

	c.Gather(acc)

	fix := func(s string) string {
		return strings.Replace(s, "nf_", "ip_", 1)
	}

	acc.AssertContainsFields(t, inputName,
		map[string]interface{}{
			fix(cntFname): float64(count),
			fix(maxFname): float64(max),
		})
}
46  plugins/inputs/consul/README.md  Normal file
@@ -0,0 +1,46 @@
# Telegraf Input Plugin: Consul

This plugin will collect statistics about all health checks registered with Consul. It uses the [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html), but Consul can already report those stats via the StatsD protocol if needed.

## Configuration:

```
# Gather health check statuses from services registered in Consul
[[inputs.consul]]
  ## Most of these values default to those configured at the Consul agent level.
  ## Optional Consul server address (default: "")
  # address = ""
  ## Optional URI scheme for the Consul server (default: "")
  # scheme = ""
  ## Optional ACL token used in every request (default: "")
  # token = ""
  ## Optional username used for request HTTP Basic Authentication (default: "")
  # username = ""
  ## Optional password used for HTTP Basic Authentication (default: "")
  # password = ""
  ## Optional data centre to query the health checks from (default: "")
  # datacentre = ""
```

## Measurements:

### Consul:
Tags:
- node: the node the check/service is registered on
- service_name: name of the service (this is the service name, not the service ID)

Fields:
- check_id
- check_name
- service_id
- status

## Example output

```
$ telegraf --config ./telegraf.conf -input-filter consul -test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node check_id="serfHealth",check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com check_id="service:www-example-com.test01",check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
```
136  plugins/inputs/consul/consul.go  Normal file
@@ -0,0 +1,136 @@
package consul

import (
	"net/http"

	"github.com/hashicorp/consul/api"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Consul struct {
	Address    string
	Scheme     string
	Token      string
	Username   string
	Password   string
	Datacentre string

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	// client used to connect to the Consul agent
	client *api.Client
}

var sampleConfig = `
  ## Most of these values default to those configured at the Consul agent level.
  ## Optional Consul server address (default: "localhost")
  # address = "localhost"
  ## Optional URI scheme for the Consul server (default: "http")
  # scheme = "http"
  ## Optional ACL token used in every request (default: "")
  # token = ""
  ## Optional username used for request HTTP Basic Authentication (default: "")
  # username = ""
  ## Optional password used for HTTP Basic Authentication (default: "")
  # password = ""
  ## Optional data centre to query the health checks from (default: "")
  # datacentre = ""
`

func (c *Consul) Description() string {
	return "Gather health check statuses from services registered in Consul"
}

func (c *Consul) SampleConfig() string {
	return sampleConfig
}

func (c *Consul) createAPIClient() (*api.Client, error) {
	config := api.DefaultConfig()

	if c.Address != "" {
		config.Address = c.Address
	}

	if c.Scheme != "" {
		config.Scheme = c.Scheme
	}

	if c.Datacentre != "" {
		config.Datacenter = c.Datacentre
	}

	if c.Username != "" {
		config.HttpAuth = &api.HttpBasicAuth{
			Username: c.Username,
			Password: c.Password,
		}
	}

	tlsCfg, err := internal.GetTLSConfig(
		c.SSLCert, c.SSLKey, c.SSLCA, c.InsecureSkipVerify)

	if err != nil {
		return nil, err
	}

	config.HttpClient.Transport = &http.Transport{
		TLSClientConfig: tlsCfg,
	}

	return api.NewClient(config)
}

func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.HealthCheck) {
	for _, check := range checks {
		record := make(map[string]interface{})
		tags := make(map[string]string)

		record["check_id"] = check.CheckID
		record["check_name"] = check.Name
		record["service_id"] = check.ServiceID
		record["status"] = check.Status

		tags["node"] = check.Node
		tags["service_name"] = check.ServiceName

		acc.AddFields("consul_health_checks", record, tags)
	}
}

func (c *Consul) Gather(acc telegraf.Accumulator) error {
	if c.client == nil {
		newClient, err := c.createAPIClient()

		if err != nil {
			return err
		}

		c.client = newClient
	}

	checks, _, err := c.client.Health().State("any", nil)

	if err != nil {
		return err
	}

	c.GatherHealthCheck(acc, checks)

	return nil
}

func init() {
	inputs.Add("consul", func() telegraf.Input {
		return &Consul{}
	})
}
42  plugins/inputs/consul/consul_test.go  Normal file
@@ -0,0 +1,42 @@
package consul

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/influxdata/telegraf/testutil"
)

var sampleChecks = []*api.HealthCheck{
	&api.HealthCheck{
		Node:        "localhost",
		CheckID:     "foo.health123",
		Name:        "foo.health",
		Status:      "passing",
		Notes:       "lorem ipsum",
		Output:      "OK",
		ServiceID:   "foo.123",
		ServiceName: "foo",
	},
}

func TestGatherHealthCheck(t *testing.T) {
	expectedFields := map[string]interface{}{
		"check_id":   "foo.health123",
		"check_name": "foo.health",
		"status":     "passing",
		"service_id": "foo.123",
	}

	expectedTags := map[string]string{
		"node":         "localhost",
		"service_name": "foo",
	}

	var acc testutil.Accumulator

	consul := &Consul{}
	consul.GatherHealthCheck(&acc, sampleChecks)

	acc.AssertContainsTaggedFields(t, "consul_health_checks", expectedFields, expectedTags)
}
File diff suppressed because one or more lines are too long
@@ -3,12 +3,14 @@ package dns_query
 import (
 	"errors"
 	"fmt"
-	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/miekg/dns"
 	"net"
 	"strconv"
 	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal/errchan"
+	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
 type DnsQuery struct {
@@ -55,12 +57,12 @@ func (d *DnsQuery) Description() string {
 }
 func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
 	d.setDefaultValues()
+
+	errChan := errchan.New(len(d.Domains) * len(d.Servers))
 	for _, domain := range d.Domains {
 		for _, server := range d.Servers {
 			dnsQueryTime, err := d.getDnsQueryTime(domain, server)
-			if err != nil {
-				return err
-			}
+			errChan.C <- err
 			tags := map[string]string{
 				"server": server,
 				"domain": domain,
@@ -72,7 +74,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
 		}
 	}
 
-	return nil
+	return errChan.Error()
 }
 
 func (d *DnsQuery) setDefaultValues() {
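The internal/errchan package used here (and by the CloudWatch changes above) is not shown in this diff. A minimal sketch of the pattern implied by its call sites — errchan.New(n), errChan.C <- err, and errChan.Error() — as an assumption, not the actual Telegraf package:

```go
package errchan // hypothetical reconstruction, not the Telegraf source

import (
	"fmt"
	"strings"
)

// ErrChan collects up to n errors (nil included) from concurrent or
// looped work and folds the non-nil ones into a single error.
type ErrChan struct {
	C chan error
}

// New returns an ErrChan with a buffer of size n, so n sends never block.
func New(n int) *ErrChan {
	return &ErrChan{C: make(chan error, n)}
}

// Error drains whatever has been buffered and joins the non-nil errors.
// It returns nil if no real errors were sent. Call it at most once: it
// closes the channel.
func (e *ErrChan) Error() error {
	close(e.C)
	var msgs []string
	for err := range e.C {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return fmt.Errorf("%s", strings.Join(msgs, ", "))
}
```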
Some files were not shown because too many files have changed in this diff.