Compare commits

209 commits: ShubhamDX-... ga-azure-m
@@ -1,48 +1,105 @@
---
defaults: &defaults
docker:
- image: 'circleci/golang:1.9.2'
working_directory: '/go/src/github.com/influxdata/telegraf'
defaults:
defaults: &defaults
working_directory: '/go/src/github.com/influxdata/telegraf'
go-1_8: &go-1_8
docker:
- image: 'circleci/golang:1.8.7'
go-1_9: &go-1_9
docker:
- image: 'circleci/golang:1.9.5'
go-1_10: &go-1_10
docker:
- image: 'circleci/golang:1.10.1'

version: 2
jobs:
build:
<<: *defaults
deps:
<<: [ *defaults, *go-1_10 ]
steps:
- checkout
- run: 'make ci-test'
- run: 'make deps'
- persist_to_workspace:
root: '/go/src'
paths:
- '*'
test-go-1.8:
<<: [ *defaults, *go-1_8 ]
steps:
- attach_workspace:
at: '/go/src'
- run: 'make test-ci'
test-go-1.9:
<<: [ *defaults, *go-1_9 ]
steps:
- attach_workspace:
at: '/go/src'
- run: 'make test-ci'
test-go-1.10:
<<: [ *defaults, *go-1_10 ]
steps:
- attach_workspace:
at: '/go/src'
- run: 'make test-ci'
- run: 'GOARCH=386 make test-ci'
release:
<<: *defaults
<<: [ *defaults, *go-1_10 ]
steps:
- checkout
- attach_workspace:
at: '/go/src'
- run: './scripts/release.sh'
- store_artifacts:
path: './artifacts'
destination: '.'
nightly:
<<: *defaults
<<: [ *defaults, *go-1_10 ]
steps:
- checkout
- attach_workspace:
at: '/go/src'
- run: './scripts/release.sh'
- store_artifacts:
path: './artifacts'
destination: '.'

workflows:
version: 2
build_and_release:
jobs:
- 'build'
- 'deps'
- 'test-go-1.8':
requires:
- 'deps'
- 'test-go-1.9':
requires:
- 'deps'
- 'test-go-1.10':
requires:
- 'deps'
- 'release':
requires:
- 'build'
- 'test-go-1.8'
- 'test-go-1.9'
- 'test-go-1.10'
nightly:
jobs:
- 'build'
- 'deps'
- 'test-go-1.8':
requires:
- 'deps'
- 'test-go-1.9':
requires:
- 'deps'
- 'test-go-1.10':
requires:
- 'deps'
- 'nightly':
requires:
- 'build'
- 'test-go-1.8'
- 'test-go-1.9'
- 'test-go-1.10'
triggers:
- schedule:
cron: "0 0 * * *"
cron: "0 7 * * *"
filters:
branches:
only:
.gitignore (vendored): 5 changes
@@ -1,5 +1,4 @@
build
/build
/telegraf
/telegraf.exe
/telegraf.gz
*~
*#
CHANGELOG.md: 131 changes
@@ -1,16 +1,71 @@
## v1.6 [unreleased]
## v1.7 [unreleased]

### Release Notes

- The `mysql` input plugin has been updated to convert values to the
  correct data type. This may cause a `field type error` when inserting into
  InfluxDB due to the change of types. It is recommended to drop the `mysql`,
  `mysql_variables`, and `mysql_innodb`:
  ```
  DROP MEASUREMENT mysql
  DROP MEASUREMENT mysql_variables
  DROP MEASUREMENT mysql_innodb
  ```
- The `cassandra` input plugin has been deprecated in favor of the `jolokia2`
  input plugin, which is much more configurable and more performant. There is
  an [example configuration](./plugins/inputs/jolokia2/examples) to help you
  get started.
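Purely as an illustrative sketch (the URL, metric name, and MBean below are placeholders and are not taken from the linked examples), a `jolokia2` input of the kind referred to above might look roughly like this:

```toml
# Hypothetical minimal jolokia2 agent input; adjust the URL and MBeans
# for your environment (see plugins/inputs/jolokia2/examples).
[[inputs.jolokia2_agent]]
  urls = ["http://localhost:8778/jolokia"]

  [[inputs.jolokia2_agent.metric]]
    name  = "java_runtime"
    mbean = "java.lang:type=Runtime"
    paths = ["Uptime"]
```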
### New Inputs

- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
- [mcrouter](./plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin

### Features

- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non fixed length index suffixes.
- [#4035](https://github.com/influxdata/telegraf/pull/4035): Add server version tag to docker input.
- [#4044](https://github.com/influxdata/telegraf/pull/4044): Add support for LeoFS 1.4 to leofs input.
- [#4068](https://github.com/influxdata/telegraf/pull/4068): Add parameter to force the interval of gather for sysstat.
- [#3877](https://github.com/influxdata/telegraf/pull/3877): Support busybox ping in the ping input.
- [#4077](https://github.com/influxdata/telegraf/pull/4077): Add input plugin for McRouter.

### Bugfixes

- [#4018](https://github.com/influxdata/telegraf/pull/4018): Write to working file outputs if any files are not writeable.
- [#4036](https://github.com/influxdata/telegraf/pull/4036): Add all win_perf_counters fields for a series in a single metric.

## v1.6.2 [unreleased]

### Bugfixes

- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.

## v1.6.1 [2018-04-23]

### Bugfixes

- [#3835](https://github.com/influxdata/telegraf/issues/3835): Report mem input fields as gauges instead of counters.
- [#4030](https://github.com/influxdata/telegraf/issues/4030): Fix graphite outputs unsigned integers in wrong format.
- [#4043](https://github.com/influxdata/telegraf/issues/4043): Report available fields if utmp is unreadable.
- [#4039](https://github.com/influxdata/telegraf/issues/4039): Fix potential "no fields" error writing to outputs.
- [#4037](https://github.com/influxdata/telegraf/issues/4037): Fix uptime reporting in system input when run inside docker.
- [#3750](https://github.com/influxdata/telegraf/issues/3750): Fix mem input "cannot allocate memory" error on FreeBSD based systems.
- [#4056](https://github.com/influxdata/telegraf/pull/4056): Fix duplicate tags when overriding an existing tag.
- [#4062](https://github.com/influxdata/telegraf/pull/4062): Add server argument as first argument in unbound input.
- [#4063](https://github.com/influxdata/telegraf/issues/4063): Fix handling of floats with multiple leading zeroes.
- [#4064](https://github.com/influxdata/telegraf/issues/4064): Return errors in mongodb SSL/TLS configuration.

## v1.6 [2018-04-16]

### Release Notes

- The `mysql` input plugin has been updated to fix a number of type conversion
  issues. This may cause a `field type error` when inserting into InfluxDB due
  to the change of types.

  To address this we have introduced a new `metric_version` option to control
  enabling the new format. For in-depth recommendations on upgrading please
  reference the [mysql plugin documentation](./plugins/inputs/mysql/README.md#metric-version).

  It is encouraged to migrate to the new model when possible as the old version
  is deprecated and will be removed in a future version.
- The `postgresql` plugins now default to using a persistent connection to the database.
  In environments where TCP connections are terminated the `max_lifetime`
@@ -26,12 +81,24 @@
  is set. It is encouraged to enable this option when possible as the old
  ordering is deprecated.

- The new `http` input configured with `data_format = "json"` can perform the
  same task as the, now deprecated, `httpjson` input.
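As a purely hypothetical sketch of the `metric_version` upgrade path mentioned in the mysql release note above (the server DSN is a placeholder; the linked mysql plugin documentation is the authoritative reference):

```toml
# Hypothetical example only: opt a mysql input into the updated data model.
[[inputs.mysql]]
  servers = ["user:password@tcp(127.0.0.1:3306)/"]
  # Selects the new, type-converted metric format described in the release notes.
  metric_version = 2
```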
### New Plugins

### New Inputs

- [http](./plugins/inputs/http/README.md) - Thanks to @grange74
- [ipset](./plugins/inputs/ipset/README.md) - Thanks to @sajoupa
- [nats](./plugins/inputs/nats/README.md) - Thanks to @mjs & @levex

### New Processors

- [override](./plugins/processors/override/README.md) - Thanks to @KarstenSchnitter

### New Parsers

- [dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard) - Thanks to @atzoum

### Features

- [#3551](https://github.com/influxdata/telegraf/pull/3551): Add health status mapping from string to int in elasticsearch input.
@@ -56,17 +123,57 @@
- [#3618](https://github.com/influxdata/telegraf/pull/3618): Add new sqlserver output data model.
- [#3559](https://github.com/influxdata/telegraf/pull/3559): Add native Go method for finding pids to procstat.
- [#3722](https://github.com/influxdata/telegraf/pull/3722): Add additional metrics and reverse metric names option to openldap.
- [#3769](https://github.com/influxdata/telegraf/pull/3769): Add TLS support to the mesos input plugin.
- [#3546](https://github.com/influxdata/telegraf/pull/3546): Add http input plugin.
- [#3781](https://github.com/influxdata/telegraf/pull/3781): Add keep alive support to the TCP mode of statsd.
- [#3783](https://github.com/influxdata/telegraf/pull/3783): Support deadline in ping plugin.
- [#3765](https://github.com/influxdata/telegraf/pull/3765): Add option to disable labels in prometheus output for string fields.
- [#3808](https://github.com/influxdata/telegraf/pull/3808): Add shard server stats to the mongodb input plugin.
- [#3713](https://github.com/influxdata/telegraf/pull/3713): Add server option to unbound plugin.
- [#3804](https://github.com/influxdata/telegraf/pull/3804): Convert boolean metric values to float in datadog output.
- [#3799](https://github.com/influxdata/telegraf/pull/3799): Add Solr 3 compatibility.
- [#3797](https://github.com/influxdata/telegraf/pull/3797): Add sum stat to basicstats aggregator.
- [#3626](https://github.com/influxdata/telegraf/pull/3626): Add ability to override proxy from environment in http response.
- [#3853](https://github.com/influxdata/telegraf/pull/3853): Add host to ping timeout log message.
- [#3773](https://github.com/influxdata/telegraf/pull/3773): Add override processor.
- [#3814](https://github.com/influxdata/telegraf/pull/3814): Add status_code and result tags and result_type field to http_response input.
- [#3880](https://github.com/influxdata/telegraf/pull/3880): Added config flag to skip collection of network protocol metrics.
- [#3927](https://github.com/influxdata/telegraf/pull/3927): Add TLS support to kapacitor input.
- [#3496](https://github.com/influxdata/telegraf/pull/3496): Add HTTP basic auth support to the http_listener input.
- [#3452](https://github.com/influxdata/telegraf/issues/3452): Tags in output InfluxDB Line Protocol are now sorted.
- [#3631](https://github.com/influxdata/telegraf/issues/3631): InfluxDB Line Protocol parser now accepts DOS line endings.
- [#2496](https://github.com/influxdata/telegraf/issues/2496): An option has been added to skip database creation in the InfluxDB output.
- [#3366](https://github.com/influxdata/telegraf/issues/3366): Add support for connecting to InfluxDB over a unix domain socket.
- [#3946](https://github.com/influxdata/telegraf/pull/3946): Add optional unsigned integer support to the influx data format.
- [#3811](https://github.com/influxdata/telegraf/pull/3811): Add TLS support to zookeeper input.
- [#2737](https://github.com/influxdata/telegraf/issues/2737): Add filters for container state to docker input.

### Bugfixes

- [#1896](https://github.com/influxdata/telegraf/issues/1896): Fix various mysql data type conversions.
- [#3810](https://github.com/influxdata/telegraf/issues/3810): Fix metric buffer limit in internal plugin after reload.
- [#3801](https://github.com/influxdata/telegraf/issues/3801): Fix panic in http_response on invalid regex.
- [#3973](https://github.com/influxdata/telegraf/issues/3873): Fix socket_listener setting ReadBufferSize on tcp sockets.
- [#1575](https://github.com/influxdata/telegraf/issues/1575): Add tag for target url to phpfpm input.
- [#3868](https://github.com/influxdata/telegraf/issues/3868): Fix cannot unmarshal object error in DC/OS input.
- [#3648](https://github.com/influxdata/telegraf/issues/3648): Fix InfluxDB output not able to reconnect when server address changes.
- [#3957](https://github.com/influxdata/telegraf/issues/3957): Fix parsing of dos line endings in the smart input.
- [#3754](https://github.com/influxdata/telegraf/issues/3754): Fix precision truncation when no timestamp included.
- [#3655](https://github.com/influxdata/telegraf/issues/3655): Fix SNMPv3 connection with Cisco ASA 5515 in snmp input.
- [#3981](https://github.com/influxdata/telegraf/pull/3981): Export all vars defined in /etc/default/telegraf.
- [#4004](https://github.com/influxdata/telegraf/issues/4004): Allow grok pattern to contain newlines.

## v1.5.3 [unreleased]
## v1.5.3 [2018-03-14]

### Bugfixes

- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
- [#3778](https://github.com/influxdata/telegraf/issues/3778): Fix ping plugin not reporting zero durations.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Disable keepalive in mqtt output to prevent deadlock.
- [#3786](https://github.com/influxdata/telegraf/pull/3786): Fix collation difference in sqlserver input.
- [#3871](https://github.com/influxdata/telegraf/pull/3871): Fix uptime metric in passenger input plugin.
- [#3851](https://github.com/influxdata/telegraf/issues/3851): Add output of stderr in case of error to exec log message.

## v1.5.2 [2018-01-30]
@@ -170,7 +170,7 @@ and `Stop()` methods.
### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
  `inputs.ServiceInput` interface.
  [`telegraf.ServiceInput`](https://godoc.org/github.com/influxdata/telegraf#ServiceInput) interface.

## Output Plugins
Godeps: 18 changes
@@ -1,4 +1,7 @@
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/Azure/go-autorest 9ad9326b278af8fa5cc67c30c0ce9a58cc0862b2
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
@@ -18,6 +21,7 @@ github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
@@ -26,14 +30,15 @@ github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/fsnotify/fsnotify c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
@@ -65,15 +70,15 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/gopsutil a5c2888e464b14fa882c2a059e0f95716bd45cf1
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/soniah/gosnmp f15472a4cd6f6ea7929e4c7d9f163c49f059924f
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
github.com/stretchr/testify 12b6f73e6084dad08a7c6e575284b177ecafbc71
github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
@@ -87,7 +92,6 @@ golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
Makefile: 36 changes
@@ -3,7 +3,8 @@ VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l $(GOFILES))
GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
BUILDFLAGS ?=

ifdef GOBIN
PATH := $(GOBIN):$(PATH)
@@ -11,31 +12,22 @@ else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)

LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif

all:
$(MAKE) fmtcheck
$(MAKE) deps
$(MAKE) telegraf

ci-test:
$(MAKE) deps
$(MAKE) fmtcheck
$(MAKE) vet
$(MAKE) test

deps:
go get -u github.com/golang/lint/golint
go get github.com/sparrc/gdm
gdm restore

telegraf:
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go build -i -ldflags "$(LDFLAGS)" ./cmd/telegraf

go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
@@ -48,11 +40,11 @@ test:
go test -short ./...

fmt:
@gofmt -w $(GOFILES)
@gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))

fmtcheck:
@echo '[INFO] running gofmt to identify incorrectly formatted code...'
@if [ ! -z $(GOFMT) ]; then \
@if [ ! -z "$(GOFMT)" ]; then \
echo "[ERROR] gofmt has found errors in the following files:" ; \
echo "$(GOFMT)" ; \
echo "" ;\
@@ -61,27 +53,28 @@ fmtcheck:
fi
@echo '[INFO] done.'

lint:
golint ./...

test-windows:
go test ./plugins/inputs/ping/...
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
go test ./plugins/inputs/procstat/...
go test ./plugins/inputs/ntpq/...

# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
@echo 'go vet $$(go list ./...)'
@go vet $$(go list ./...) ; if [ $$? -eq 1 ]; then \
@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
echo ""; \
echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
echo "to fix them before submitting code for review."; \
exit 1; \
fi

test-all: vet
test-ci: fmtcheck vet
go test -short ./...

test-all: fmtcheck vet
go test ./...

package:
@@ -96,4 +89,7 @@ docker-image:
cp build/telegraf*$(COMMIT)*.deb .
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .

.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
ragel -Z -G2 $^ -o $@

.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64
README.md: 13 changes
@@ -5,7 +5,7 @@ and writing metrics.

Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from local or remote services.
. For an example configuration referencet from local or remote services.

Telegraf is plugin-driven and has the concept of 4 distinct plugins:

@@ -130,7 +130,7 @@ configuration options.
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
@@ -147,11 +147,13 @@ configuration options.
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [fibaro](./plugins/inputs/fibaro)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
@@ -161,12 +163,13 @@ configuration options.
* [iptables](./plugins/inputs/iptables)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [mcrouter](./plugins/inputs/mcrouter)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
@@ -179,6 +182,7 @@ configuration options.
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
@@ -207,7 +211,7 @@ configuration options.
* [teamspeak](./plugins/inputs/teamspeak)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/input/unbound)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
@@ -263,6 +267,7 @@ formats may be used with input plugins supporting the `data_format` option:
## Processor Plugins

* [printer](./plugins/processors/printer)
* [override](./plugins/processors/override)

## Aggregator Plugins
@@ -15,63 +15,36 @@ import (
"github.com/stretchr/testify/require"
)

func TestAdd(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)

a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)

testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")

testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")

testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
}

func TestAddFields(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)

tags := map[string]string{"foo": "bar"}
fields := map[string]interface{}{
"usage": float64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
now := time.Now()
a.AddCounter("acctest", fields, tags, now)

testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99")

testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99")
require.Equal(t, "acctest", testm.Name())
actual, ok := testm.GetField("usage")

testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
actual)
require.True(t, ok)
require.Equal(t, float64(99), actual)

actual, ok = testm.GetTag("foo")
require.True(t, ok)
require.Equal(t, "bar", actual)

tm := testm.Time()
// okay if monotonic clock differs
require.True(t, now.Equal(tm))

tp := testm.Type()
require.Equal(t, telegraf.Counter, tp)
}

func TestAccAddError(t *testing.T) {
@@ -98,215 +71,61 @@ func TestAccAddError(t *testing.T) {
assert.Contains(t, string(errs[2]), "baz")
}

func TestAddNoIntervalWithPrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
func TestSetPrecision(t *testing.T) {
tests := []struct {
name string
unset bool
precision time.Duration
interval time.Duration
timestamp time.Time
expected time.Time
}{
{
name: "default precision is nanosecond",
unset: true,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
},
{
name: "second interval",
interval: time.Second,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC),
},
{
name: "microsecond interval",
interval: time.Microsecond,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC),
},
{
name: "2 second precision",
precision: 2 * time.Second,
timestamp: time.Date(2006, time.February, 10, 12, 0, 2, 4, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 2, 0, time.UTC),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metrics := make(chan telegraf.Metric, 10)

a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
a := NewAccumulator(&TestMetricMaker{}, metrics)
if !tt.unset {
a.SetPrecision(tt.precision, tt.interval)
}

testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{},
tt.timestamp,
)

testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm := <-metrics
require.Equal(t, tt.expected, testm.Time())

testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}

func TestAddDisablePrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)

a.SetPrecision(time.Nanosecond, 0)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)

testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")

testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")

testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}

func TestAddNoPrecisionWithInterval(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)

a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)

testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")

testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")

testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}

func TestDifferentPrecisions(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)

a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)

a.SetPrecision(0, time.Millisecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
actual)

a.SetPrecision(0, time.Microsecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
actual)

a.SetPrecision(0, time.Nanosecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}

func TestAddGauge(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)

a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)

testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)

testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)

testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Gauge)
}

func TestAddCounter(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)

a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)

testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)

testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)

testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Counter)
close(metrics)
})
}
}

type TestMetricMaker struct {
@@ -203,11 +203,6 @@ func (a *Agent) Test() error {
input.SetTrace(true)
input.SetDefaultTags(a.Config.Tags)

fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
if input.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", input.Config.Interval)
}

if err := input.Input.Gather(acc); err != nil {
return err
}
@@ -217,7 +212,6 @@ func (a *Agent) Test() error {
switch input.Name() {
case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
if err := input.Input.Gather(acc); err != nil {
return err
}
@@ -271,11 +265,9 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
// if dropOriginal is set to true, then we will only send this
// metric to the aggregators, not the outputs.
var dropOriginal bool
if !m.IsAggregate() {
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(m.Copy()); ok {
dropOriginal = true
}
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(m.Copy()); ok {
dropOriginal = true
}
}
if !dropOriginal {
@@ -1,3 +1,4 @@
image: Previous Visual Studio 2015
version: "{build}"

cache:
@@ -12,11 +13,11 @@ platform: x64

install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
- IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
- msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
@@ -57,7 +57,7 @@ var fService = flag.String("service", "",
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")

var (
nextVersion = "1.6.0"
nextVersion = "1.7.0"
version string
commit string
branch string
@@ -73,48 +73,6 @@ func init() {
}
}

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

telegraf [commands|flags]

The commands & flags are:

config print out full sample configuration to stdout
version print the version to stdout

--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode

Examples:

# generate a telegraf config file:
telegraf config > telegraf.conf

# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config

# run a single telegraf collection, outputing metrics to stdout
telegraf --config telegraf.conf --test

# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf

# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}

func reloadLoop(
@@ -365,7 +323,7 @@ func main() {
DisplayName: "Telegraf Data Collector Service",
Description: "Collects data using a series of plugins and publishes it to" +
"another series of plugins.",
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
}

prg := &program{
@@ -378,14 +336,14 @@ func main() {
if err != nil {
log.Fatal("E! " + err.Error())
}
// Handle the -service flag here to prevent any issues with tooling that
// Handle the --service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
(*svcConfig).Arguments = []string{"-config", *fConfig}
(*svcConfig).Arguments = []string{"--config", *fConfig}
}
if *fConfigDirectory != "" {
(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
}
err := service.Control(s, *fService)
if err != nil {
cmd/telegraf/usage.go (new file): 45 lines
@@ -0,0 +1,45 @@
// +build !windows

package main

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

telegraf [commands|flags]

The commands & flags are:

config print out full sample configuration to stdout
version print the version to stdout

--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode

Examples:

# generate a telegraf config file:
telegraf config > telegraf.conf

# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config

# run a single telegraf collection, outputing metrics to stdout
telegraf --config telegraf.conf --test

# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf

# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
`
cmd/telegraf/usage_windows.go (new file): 54 lines
@@ -0,0 +1,54 @@
// +build windows

package main

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

telegraf [commands|flags]

The commands & flags are:

config print out full sample configuration to stdout
version print the version to stdout

--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode

--console run as console application
--service operate on service, one of: install, uninstall, start, stop

Examples:

# generate a telegraf config file:
telegraf config > telegraf.conf

# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config

# run a single telegraf collection, outputing metrics to stdout
telegraf --config telegraf.conf --test

# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf

# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060

# run telegraf without service controller
telegraf --console install --config "C:\Program Files\Telegraf\telegraf.conf"

# install telegraf service
telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf"
`
@@ -153,11 +153,11 @@ The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
  An array of glob pattern strings. Only fields whose field key matches a
  pattern in this list are emitted. Not available for outputs.
  pattern in this list are emitted.
* **fielddrop**:
  The inverse of `fieldpass`. Fields with a field key matching one of the
  patterns will be discarded from the point. This is tested on points after
  they have passed the `fieldpass` test. Not available for outputs.
  they have passed the `fieldpass` test.
* **tagpass**:
  A table mapping tag keys to arrays of glob pattern strings. Only points
  that contain a tag key in the table and a tag value matching one of its
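A minimal sketch of how the filter options described above might be combined in a plugin section (the plugin, field patterns, and tag values below are illustrative placeholders, not taken from the diff):

```toml
# Hypothetical filtering example: keep only selected fields and tag values.
[[inputs.cpu]]
  # Emit only fields whose key matches one of these glob patterns.
  fieldpass = ["usage_user", "usage_system"]

  # Of the points that pass the field filter, keep only those whose
  # "cpu" tag matches one of these values.
  [inputs.cpu.tagpass]
    cpu = ["cpu0", "cpu-total"]
```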
@@ -2,12 +2,12 @@

Telegraf is able to serialize metrics into the following output data formats:

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)
1. [InfluxDB Line Protocol](#influx)
1. [JSON](#json)
1. [Graphite](#graphite)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
[points](https://docs.influxdata.com/influxdb/latest/concepts/glossary/#point),
are a combination of four basic parts:

1. Measurement Name
@@ -49,8 +49,10 @@ I'll go over below.

# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.
The `influx` format outputs data as
[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
This is the recommended format to use unless another format is required for
interoperability.

### Influx Configuration:

@@ -64,6 +66,20 @@ metrics are serialized directly into InfluxDB line-protocol.
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Maximum line length in bytes. Useful only for debugging.
  # influx_max_line_bytes = 0

  ## When true, fields will be output in ascending lexical order. Enabling
  ## this option will result in decreased performance and is only recommended
  ## when you need predictable ordering while debugging.
  # influx_sort_fields = false

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
  ## integer values. Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false
```

# Graphite:
@@ -24,6 +24,7 @@ following works:
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsnotify/fsnotify [BSD](https://github.com/fsnotify/fsnotify/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
@@ -99,7 +100,6 @@ following works:
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
@@ -5,7 +5,7 @@ the general steps to set it up.
|
||||
|
||||
1. Obtain the telegraf windows distribution
|
||||
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
|
||||
location simply specify the `-config` parameter with the desired location)
|
||||
location simply specify the `--config` parameter with the desired location)
|
||||
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
|
||||
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (if necessary, wrap any spaces in the file paths in double quotes ""):
|
||||
|
||||
@@ -26,6 +26,15 @@ the general steps to set it up.
|
||||
> net start telegraf
|
||||
```
|
||||
|
||||
## Config Directory
|
||||
|
||||
You can also specify a `--config-directory` for the service to use:
|
||||
1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d`
|
||||
2. Include the `--config-directory` option when registering the service:
|
||||
```
|
||||
> C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d
|
||||
```
|
||||
|
||||
## Other supported operations
|
||||
|
||||
Telegraf can manage its own service through the --service flag:
|
||||
@@ -37,7 +46,6 @@ Telegraf can manage its own service through the --service flag:
|
||||
| `telegraf.exe --service start` | Start the telegraf service |
|
||||
| `telegraf.exe --service stop` | Stop the telegraf service |
|
||||
|
||||
|
||||
## Troubleshooting common error #1067
|
||||
|
||||
When installing Telegraf as a Windows service, always double-check that the full path to the config file is specified; otherwise the Windows service will fail to start.
|
||||
|
||||
@@ -82,31 +82,42 @@
|
||||
# OUTPUT PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
# Configuration for sending metrics to InfluxDB
|
||||
[[outputs.influxdb]]
|
||||
## The full HTTP or UDP URL for your InfluxDB instance.
|
||||
##
|
||||
## Multiple urls can be specified as part of the same cluster,
|
||||
## this means that only ONE of the urls will be written to each interval.
|
||||
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
|
||||
urls = ["http://127.0.0.1:8086"] # required
|
||||
## The target database for metrics (telegraf will create it if not exists).
|
||||
database = "telegraf" # required
|
||||
## Multiple URLs can be specified for a single cluster, only ONE of the
|
||||
## urls will be written to each interval.
|
||||
# urls = ["unix:///var/run/influxdb.sock"]
|
||||
# urls = ["udp://127.0.0.1:8089"]
|
||||
# urls = ["http://127.0.0.1:8086"]
|
||||
|
||||
## The target database for metrics; will be created as needed.
|
||||
# database = "telegraf"
|
||||
|
||||
## If true, no CREATE DATABASE queries will be sent. Set to true when using
|
||||
## Telegraf with a user without permissions to create databases or when the
|
||||
## database already exists.
|
||||
# skip_database_creation = false
|
||||
|
||||
## Name of existing retention policy to write to. Empty string writes to
|
||||
## the default retention policy.
|
||||
retention_policy = ""
|
||||
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
|
||||
write_consistency = "any"
|
||||
# retention_policy = ""
|
||||
|
||||
## Write timeout (for the InfluxDB client), formatted as a string.
|
||||
## If not provided, will default to 5s. 0s means no timeout (not recommended).
|
||||
timeout = "5s"
|
||||
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
|
||||
# write_consistency = "any"
|
||||
|
||||
## Timeout for HTTP messages.
|
||||
# timeout = "5s"
|
||||
|
||||
## HTTP Basic Auth
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
|
||||
|
||||
## HTTP User-Agent
|
||||
# user_agent = "telegraf"
|
||||
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
|
||||
|
||||
## UDP payload size is the maximum packet size to send.
|
||||
# udp_payload = 512
|
||||
|
||||
## Optional SSL Config
|
||||
@@ -116,14 +127,22 @@
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## HTTP Proxy Config
|
||||
## HTTP Proxy override. If unset, the standard proxy environment
|
||||
## variables are consulted to determine which proxy, if any, should be used.
|
||||
# http_proxy = "http://corporate.proxy:3128"
|
||||
|
||||
## Optional HTTP headers
|
||||
## Additional HTTP headers
|
||||
# http_headers = {"X-Special-Header" = "Special-Value"}
|
||||
|
||||
## Compress each HTTP request payload using GZIP.
|
||||
# content_encoding = "gzip"
|
||||
## HTTP Content-Encoding for write request body, can be set to "gzip" to
|
||||
## compress body or "identity" to apply no encoding.
|
||||
# content_encoding = "identity"
|
||||
|
||||
## When true, Telegraf will output unsigned integers as unsigned values,
|
||||
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
|
||||
## integer values. Enabling this option will result in field type errors if
|
||||
## existing data has been written.
|
||||
# influx_uint_support = false
|
||||
|
||||
|
||||
# # Configuration for Amon Server to send metrics to.
|
||||
@@ -510,6 +529,9 @@
|
||||
# # username = "telegraf"
|
||||
# # password = "metricsmetricsmetricsmetrics"
|
||||
#
|
||||
# ## Timeout for write operations. default: 5s
|
||||
# # timeout = "5s"
|
||||
#
|
||||
# ## client ID, if not set a random ID is generated
|
||||
# # client_id = ""
|
||||
#
|
||||
@@ -594,12 +616,24 @@
|
||||
# ## Address to listen on
|
||||
# # listen = ":9273"
|
||||
#
|
||||
# ## Use TLS
|
||||
# #tls_cert = "/etc/ssl/telegraf.crt"
|
||||
# #tls_key = "/etc/ssl/telegraf.key"
|
||||
#
|
||||
# ## Use http basic authentication
|
||||
# #basic_username = "Foo"
|
||||
# #basic_password = "Bar"
|
||||
#
|
||||
# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
|
||||
# # expiration_interval = "60s"
|
||||
#
|
||||
# ## Collectors to enable, valid entries are "gocollector" and "process".
|
||||
# ## If unset, both are enabled.
|
||||
# collectors_exclude = ["gocollector", "process"]
|
||||
#
|
||||
# # Send string metrics as Prometheus labels.
|
||||
# # Unless set to false all string metrics will be sent as labels.
|
||||
# string_as_label = true
|
||||
|
||||
|
||||
# # Configuration for the Riemann server to send metrics to
|
||||
@@ -699,7 +733,7 @@
|
||||
# #use_regex = false
|
||||
#
|
||||
# ## point tags to use as the source name for Wavefront (if none found, host will be used)
|
||||
# #source_override = ["hostname", "snmp_host", "node_host"]
|
||||
# #source_override = ["hostname", "agent_host", "node_host"]
|
||||
#
|
||||
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
|
||||
# #convert_bool = true
|
||||
@@ -718,6 +752,18 @@
|
||||
# PROCESSOR PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
# # Apply metric modifications using override semantics.
|
||||
# [[processors.override]]
|
||||
# ## All modifications on inputs and aggregators can be overridden:
|
||||
# # name_override = "new_name"
|
||||
# # name_prefix = "new_name_prefix"
|
||||
# # name_suffix = "new_name_suffix"
|
||||
#
|
||||
# ## Tags to be added (all values must be strings)
|
||||
# # [processors.override.tags]
|
||||
# # additional_tag = "tag_value"
|
||||
|
||||
|
||||
# # Print all metrics that pass through this filter.
|
||||
# [[processors.printer]]
|
||||
|
||||
@@ -792,12 +838,11 @@
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
[[inputs.disk]]
|
||||
## By default, telegraf gather stats for all mountpoints.
|
||||
## Setting mountpoints will restrict the stats to the specified mountpoints.
|
||||
## By default stats will be gathered for all mount points.
|
||||
## Set mount_points will restrict the stats to only the specified mount points.
|
||||
# mount_points = ["/"]
|
||||
|
||||
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
||||
## present on /run, /var/run, /dev/shm or /dev).
|
||||
## Ignore mount points by filesystem type.
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
||||
|
||||
|
||||
@@ -806,7 +851,7 @@
|
||||
## By default, telegraf will gather stats for all devices including
|
||||
## disk partitions.
|
||||
## Setting devices will restrict the stats to the specified devices.
|
||||
# devices = ["sda", "sdb"]
|
||||
# devices = ["sda", "sdb", "vd*"]
|
||||
## Uncomment the following line if you need disk serial numbers.
|
||||
# skip_serial_number = false
|
||||
#
|
||||
@@ -1061,19 +1106,28 @@
|
||||
|
||||
# # Gather health check statuses from services registered in Consul
|
||||
# [[inputs.consul]]
|
||||
# ## Most of these values defaults to the one configured on a Consul's agent level.
|
||||
# ## Optional Consul server address (default: "localhost")
|
||||
# ## Consul server address
|
||||
# # address = "localhost"
|
||||
# ## Optional URI scheme for the Consul server (default: "http")
|
||||
#
|
||||
# ## URI scheme for the Consul server, one of "http", "https"
|
||||
# # scheme = "http"
|
||||
# ## Optional ACL token used in every request (default: "")
|
||||
#
|
||||
# ## ACL token used in every request
|
||||
# # token = ""
|
||||
# ## Optional username used for request HTTP Basic Authentication (default: "")
|
||||
#
|
||||
# ## HTTP Basic Authentication username and password.
|
||||
# # username = ""
|
||||
# ## Optional password used for HTTP Basic Authentication (default: "")
|
||||
# # password = ""
|
||||
# ## Optional data centre to query the health checks from (default: "")
|
||||
#
|
||||
# ## Data centre to query the health checks from
|
||||
# # datacentre = ""
|
||||
#
|
||||
# ## SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## If false, skip chain & host verification
|
||||
# # insecure_skip_verify = true
|
||||
|
||||
|
||||
# # Read metrics from one or many couchbase clusters
|
||||
@@ -1196,6 +1250,11 @@
|
||||
# container_name_include = []
|
||||
# container_name_exclude = []
|
||||
#
|
||||
# ## Container states to include and exclude. Globs accepted.
|
||||
# ## When empty only containers in the "running" state will be captured.
|
||||
# # container_state_include = []
|
||||
# # container_state_exclude = []
|
||||
#
|
||||
# ## Timeout for docker list, info, and stats commands
|
||||
# timeout = "5s"
|
||||
#
|
||||
@@ -1265,7 +1324,7 @@
|
||||
#
|
||||
# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
|
||||
# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
|
||||
# ## "breakers". Per default, all stats are gathered.
|
||||
# ## "breaker". Per default, all stats are gathered.
|
||||
# # node_stats = ["jvm", "http"]
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
@@ -1414,11 +1473,51 @@
|
||||
# # devices = ["sda", "*"]
|
||||
|
||||
|
||||
# # Read formatted metrics from one or more HTTP endpoints
|
||||
# [[inputs.http]]
|
||||
# ## One or more URLs from which to read formatted metrics
|
||||
# urls = [
|
||||
# "http://localhost/metrics"
|
||||
# ]
|
||||
#
|
||||
# ## HTTP method
|
||||
# # method = "GET"
|
||||
#
|
||||
# ## Optional HTTP headers
|
||||
# # headers = {"X-Special-Header" = "Special-Value"}
|
||||
#
|
||||
# ## Optional HTTP Basic Auth Credentials
|
||||
# # username = "username"
|
||||
# # password = "pa$$word"
|
||||
#
|
||||
# ## Tag all metrics with the url
|
||||
# # tag_url = true
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
#
|
||||
# ## Amount of time allowed to complete the HTTP request
|
||||
# # timeout = "5s"
|
||||
#
|
||||
# ## Data format to consume.
|
||||
# ## Each data format has its own unique set of configuration options, read
|
||||
# ## more about them here:
|
||||
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
# # data_format = "influx"
|
||||
|
||||
|
||||
# # HTTP/HTTPS request given an address a method and a timeout
|
||||
# [[inputs.http_response]]
|
||||
# ## Server address (default http://localhost)
|
||||
# # address = "http://localhost"
|
||||
#
|
||||
# ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
|
||||
# # http_proxy = "http://localhost:8888"
|
||||
#
|
||||
# ## Set response_timeout (default 5 seconds)
|
||||
# # response_timeout = "5s"
|
||||
#
|
||||
@@ -1478,6 +1577,13 @@
|
||||
# # "my_tag_2"
|
||||
# # ]
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
#
|
||||
# ## HTTP parameters (all values must be strings). For "GET" requests, data
|
||||
# ## will be included in the query. For "POST" requests, data will be included
|
||||
# ## in the request body as "x-www-form-urlencoded".
|
||||
@@ -1489,13 +1595,6 @@
|
||||
# # [inputs.httpjson.headers]
|
||||
# # X-Auth-Token = "my-xauth-token"
|
||||
# # apiVersion = "v1"
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
|
||||
@@ -1538,7 +1637,10 @@
|
||||
# [[inputs.ipmi_sensor]]
|
||||
# ## optionally specify the path to the ipmitool executable
|
||||
# # path = "/usr/bin/ipmitool"
|
||||
# #
|
||||
# ##
|
||||
# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
|
||||
# # privilege = "ADMINISTRATOR"
|
||||
# ##
|
||||
# ## optionally specify one or more servers via a url matching
|
||||
# ## [username[:password]@][protocol[(address)]]
|
||||
# ## e.g.
|
||||
@@ -1556,6 +1658,17 @@
|
||||
# timeout = "20s"
|
||||
|
||||
|
||||
# # Gather packets and bytes counters from Linux ipsets
|
||||
# [[inputs.ipset]]
|
||||
# ## By default, we only show sets which have already matched at least 1 packet.
|
||||
# ## set include_unmatched_sets = true to gather them all.
|
||||
# include_unmatched_sets = false
|
||||
# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
|
||||
# use_sudo = false
|
||||
# ## The default timeout of 1s for ipset execution can be overridden here:
|
||||
# # timeout = "1s"
|
||||
|
||||
|
||||
# # Gather packets and bytes throughput from iptables
|
||||
# [[inputs.iptables]]
|
||||
# ## iptables require root access on most systems.
|
||||
@@ -1710,6 +1823,13 @@
|
||||
#
|
||||
# ## Time limit for http requests
|
||||
# timeout = "5s"
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Get kernel statistics from /proc/vmstat
|
||||
@@ -1789,7 +1909,7 @@
|
||||
# ## Timeout, in ms.
|
||||
# timeout = 100
|
||||
# ## A list of Mesos masters.
|
||||
# masters = ["localhost:5050"]
|
||||
# masters = ["http://localhost:5050"]
|
||||
# ## Master metrics groups to be collected, by default, all enabled.
|
||||
# master_collections = [
|
||||
# "resources",
|
||||
@@ -1813,6 +1933,13 @@
|
||||
# # "tasks",
|
||||
# # "messages",
|
||||
# # ]
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Collects scores from a minecraft server's scoreboard using the RCON protocol
|
||||
@@ -1854,6 +1981,20 @@
|
||||
# #
|
||||
# ## If no servers are specified, then localhost is used as the host.
|
||||
# servers = ["tcp(127.0.0.1:3306)/"]
|
||||
#
|
||||
# ## Selects the metric output format.
|
||||
# ##
|
||||
# ## This option exists to maintain backwards compatibility, if you have
|
||||
# ## existing metrics do not set or change this value until you are ready to
|
||||
# ## migrate to the new format.
|
||||
# ##
|
||||
# ## If you do not have existing metrics from this plugin set to the latest
|
||||
# ## version.
|
||||
# ##
|
||||
# ## Telegraf >=1.6: metric_version = 2
|
||||
# ## <1.6: metric_version = 1 (or unset)
|
||||
# metric_version = 2
|
||||
#
|
||||
# ## the limits for metrics form perf_events_statements
|
||||
# perf_events_statements_digest_text_limit = 120
|
||||
# perf_events_statements_limit = 250
|
||||
@@ -1910,6 +2051,15 @@
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
|
||||
|
||||
# # Provides metrics about the state of a NATS server
|
||||
# [[inputs.nats]]
|
||||
# ## The address of the monitoring endpoint of the NATS server
|
||||
# server = "http://localhost:8222"
|
||||
#
|
||||
# ## Maximum time to receive response
|
||||
# # response_timeout = "5s"
|
||||
|
||||
|
||||
# # Read metrics about network interface usage
|
||||
# [[inputs.net]]
|
||||
# ## By default, telegraf gathers stats from any up interface (excluding loopback)
|
||||
@@ -1917,6 +2067,12 @@
|
||||
# ## regardless of status.
|
||||
# ##
|
||||
# # interfaces = ["eth0"]
|
||||
# ##
|
||||
# ## On linux systems telegraf also collects protocol stats.
|
||||
# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
|
||||
# ##
|
||||
# # ignore_protocol_stats = false
|
||||
# ##
|
||||
|
||||
|
||||
# # TCP or UDP 'ping' given url and collect response time in seconds
|
||||
@@ -2014,6 +2170,10 @@
|
||||
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
|
||||
# bind_dn = ""
|
||||
# bind_password = ""
|
||||
#
|
||||
# # Reverse metric names so they sort more naturally. Recommended.
|
||||
# # This defaults to false if unset, but is set to true when generating a new config
|
||||
# reverse_metric_names = true
|
||||
|
||||
|
||||
# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver
|
||||
@@ -2087,7 +2247,10 @@
|
||||
# # ping_interval = 1.0
|
||||
# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
|
||||
# # timeout = 1.0
|
||||
# ## interface to send ping from (ping -I <INTERFACE>)
|
||||
# ## total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
|
||||
# # deadline = 10
|
||||
# ## interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
|
||||
# ## on Darwin and Freebsd only source address possible: (ping -S <SRC_ADDR>)
|
||||
# # interface = ""
|
||||
|
||||
|
||||
@@ -2098,90 +2261,6 @@
|
||||
# # queue_directory = "/var/spool/postfix"
|
||||
|
||||
|
||||
# # Read metrics from one or many postgresql servers
|
||||
# [[inputs.postgresql]]
|
||||
# ## specify address via a url matching:
|
||||
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
|
||||
# ## ?sslmode=[disable|verify-ca|verify-full]
|
||||
# ## or a simple string:
|
||||
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
|
||||
# ##
|
||||
# ## All connection parameters are optional.
|
||||
# ##
|
||||
# ## Without the dbname parameter, the driver will default to a database
|
||||
# ## with the same name as the user. This dbname is just for instantiating a
|
||||
# ## connection with the server and doesn't restrict the databases we are trying
|
||||
# ## to grab metrics for.
|
||||
# ##
|
||||
# address = "host=localhost user=postgres sslmode=disable"
|
||||
#
|
||||
# ## A list of databases to explicitly ignore. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'databases' option.
|
||||
# # ignored_databases = ["postgres", "template0", "template1"]
|
||||
#
|
||||
# ## A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
|
||||
# # databases = ["app_production", "testing"]
|
||||
|
||||
|
||||
# # Read metrics from one or many postgresql servers
|
||||
# [[inputs.postgresql_extensible]]
|
||||
# ## specify address via a url matching:
|
||||
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
|
||||
# ## ?sslmode=[disable|verify-ca|verify-full]
|
||||
# ## or a simple string:
|
||||
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
|
||||
# #
|
||||
# ## All connection parameters are optional. #
|
||||
# ## Without the dbname parameter, the driver will default to a database
|
||||
# ## with the same name as the user. This dbname is just for instantiating a
|
||||
# ## connection with the server and doesn't restrict the databases we are trying
|
||||
# ## to grab metrics for.
|
||||
# #
|
||||
# address = "host=localhost user=postgres sslmode=disable"
|
||||
# ## A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# ## databases are gathered.
|
||||
# ## databases = ["app_production", "testing"]
|
||||
# #
|
||||
# # outputaddress = "db01"
|
||||
# ## A custom name for the database that will be used as the "server" tag in the
|
||||
# ## measurement output. If not specified, a default one generated from
|
||||
# ## the connection address is used.
|
||||
# #
|
||||
# ## Define the toml config where the sql queries are stored
|
||||
# ## New queries can be added, if the withdbname is set to true and there is no
|
||||
# ## databases defined in the 'databases field', the sql query is ended by a
|
||||
# ## 'is not null' in order to make the query succeed.
|
||||
# ## Example :
|
||||
# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
|
||||
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
|
||||
# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
|
||||
# ## withdbname was true. Be careful that if the withdbname is set to false you
|
||||
# ## don't have to define the where clause (aka with the dbname) the tagvalue
|
||||
# ## field is used to define custom tags (separated by commas)
|
||||
# ## The optional "measurement" value can be used to override the default
|
||||
# ## output measurement name ("postgresql").
|
||||
# #
|
||||
# ## Structure :
|
||||
# ## [[inputs.postgresql_extensible.query]]
|
||||
# ## sqlquery string
|
||||
# ## version string
|
||||
# ## withdbname boolean
|
||||
# ## tagvalue string (comma separated)
|
||||
# ## measurement string
|
||||
# [[inputs.postgresql_extensible.query]]
|
||||
# sqlquery="SELECT * FROM pg_stat_database"
|
||||
# version=901
|
||||
# withdbname=false
|
||||
# tagvalue=""
|
||||
# measurement=""
|
||||
# [[inputs.postgresql_extensible.query]]
|
||||
# sqlquery="SELECT * FROM pg_stat_bgwriter"
|
||||
# version=901
|
||||
# withdbname=false
|
||||
# tagvalue="postgresql.stats"
|
||||
|
||||
|
||||
# # Read metrics from one or many PowerDNS servers
|
||||
# [[inputs.powerdns]]
|
||||
# ## An array of sockets to gather stats about.
|
||||
@@ -2191,7 +2270,6 @@
|
||||
|
||||
# # Monitor process cpu and memory usage
|
||||
# [[inputs.procstat]]
|
||||
# ## Must specify one of: pid_file, exe, or pattern
|
||||
# ## PID file to monitor process
|
||||
# pid_file = "/var/run/nginx.pid"
|
||||
# ## executable name (ie, pgrep <exe>)
|
||||
@@ -2208,12 +2286,20 @@
|
||||
# ## override for process_name
|
||||
# ## This is optional; default is sourced from /proc/<pid>/status
|
||||
# # process_name = "bar"
|
||||
#
|
||||
# ## Field name prefix
|
||||
# prefix = ""
|
||||
# ## comment this out if you want raw cpu_time stats
|
||||
# fielddrop = ["cpu_time_*"]
|
||||
# ## This is optional; moves pid into a tag instead of a field
|
||||
# pid_tag = false
|
||||
# # prefix = ""
|
||||
#
|
||||
# ## Add PID as a tag instead of a field; useful to differentiate between
|
||||
# ## processes whose tags are otherwise the same. Can create a large number
|
||||
# ## of series, use judiciously.
|
||||
# # pid_tag = false
|
||||
#
|
||||
# ## Method to use when finding process IDs. Can be one of 'pgrep', or
|
||||
# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
|
||||
# ## the native finder performs the search directly in a manner dependent on the
|
||||
# ## platform. Default is 'pgrep'
|
||||
# # pid_finder = "pgrep"
|
||||
|
||||
|
||||
# # Read metrics from one or many prometheus clients
|
||||
@@ -2278,6 +2364,15 @@
|
||||
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
|
||||
# ## specified, metrics for all queues are gathered.
|
||||
# # queues = ["telegraf"]
|
||||
#
|
||||
# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
|
||||
# ## specified, metrics for all exchanges are gathered.
|
||||
# # exchanges = ["telegraf"]
|
||||
#
|
||||
# ## Queues to include and exclude. Globs accepted.
|
||||
# ## Note that an empty array for both will include all queues
|
||||
# queue_name_include = []
|
||||
# queue_name_exclude = []
|
||||
|
||||
|
||||
# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
|
||||
@@ -2568,6 +2663,28 @@
|
||||
# # servers = [
|
||||
# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
|
||||
# # ]
|
||||
#
|
||||
# ## Optional parameter, setting this to 2 will use a new version
|
||||
# ## of the collection queries that break compatibility with the original
|
||||
# ## dashboards.
|
||||
# query_version = 2
|
||||
#
|
||||
# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
|
||||
# # azuredb = false
|
||||
#
|
||||
# ## If you would like to exclude some of the metrics queries, list them here
|
||||
# ## Possible choices:
|
||||
# ## - PerformanceCounters
|
||||
# ## - WaitStatsCategorized
|
||||
# ## - DatabaseIO
|
||||
# ## - DatabaseProperties
|
||||
# ## - CPUHistory
|
||||
# ## - DatabaseSize
|
||||
# ## - DatabaseStats
|
||||
# ## - MemoryClerk
|
||||
# ## - VolumeSpace
|
||||
# ## - PerformanceMetrics
|
||||
# # exclude_query = [ 'DatabaseIO' ]
|
||||
|
||||
|
||||
# # Sysstat metrics collector
|
||||
@@ -2691,6 +2808,10 @@
|
||||
#
|
||||
# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields
|
||||
# fieldpass = ["total_*", "num_*","time_up", "mem_*"]
|
||||
#
|
||||
# ## IP of server to connect to, read from unbound conf default, optionally ':port'
|
||||
# ## Will lookup IP if given a hostname
|
||||
# server = "127.0.0.1:8953"
|
||||
|
||||
|
||||
# # A plugin to collect stats from Varnish HTTP Cache
|
||||
@@ -2721,7 +2842,9 @@
|
||||
# ## By default, telegraf gather all zfs stats
|
||||
# ## If not specified, then default is:
|
||||
# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
|
||||
#
|
||||
# ## For Linux, the default is:
|
||||
# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
|
||||
# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
|
||||
# ## By default, don't gather zpool stats
|
||||
# # poolMetrics = false
|
||||
|
||||
@@ -2734,6 +2857,17 @@
|
||||
# ## If no servers are specified, then localhost is used as the host.
|
||||
# ## If no port is specified, 2181 is used
|
||||
# servers = [":2181"]
|
||||
#
|
||||
# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
|
||||
# # timeout = "5s"
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # enable_ssl = true
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## If false, skip chain & host verification
|
||||
# # insecure_skip_verify = true
|
||||
|
||||
|
||||
|
||||
@@ -2799,6 +2933,11 @@
|
||||
# ## Add service certificate and key
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
#
|
||||
# ## Optional username and password to accept for HTTP basic authentication.
|
||||
# ## You probably want to make sure you have TLS configured above for this.
|
||||
# # basic_username = "foobar"
|
||||
# # basic_password = "barfoo"
|
||||
|
||||
|
||||
# # Read metrics from Kafka topic(s)
|
||||
@@ -2995,6 +3134,105 @@
|
||||
# data_format = "influx"
|
||||
|
||||
|
||||
# # Read metrics from one or many postgresql servers
|
||||
# [[inputs.postgresql]]
|
||||
# ## specify address via a url matching:
|
||||
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
|
||||
# ## ?sslmode=[disable|verify-ca|verify-full]
|
||||
# ## or a simple string:
|
||||
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
|
||||
# ##
|
||||
# ## All connection parameters are optional.
|
||||
# ##
|
||||
# ## Without the dbname parameter, the driver will default to a database
|
||||
# ## with the same name as the user. This dbname is just for instantiating a
|
||||
# ## connection with the server and doesn't restrict the databases we are trying
|
||||
# ## to grab metrics for.
|
||||
# ##
|
||||
# address = "host=localhost user=postgres sslmode=disable"
|
||||
# ## A custom name for the database that will be used as the "server" tag in the
|
||||
# ## measurement output. If not specified, a default one generated from
|
||||
# ## the connection address is used.
|
||||
# # outputaddress = "db01"
|
||||
#
|
||||
# ## connection configuration.
|
||||
# ## maxlifetime - specify the maximum lifetime of a connection.
|
||||
# ## default is forever (0s)
|
||||
# max_lifetime = "0s"
|
||||
#
|
||||
# ## A list of databases to explicitly ignore. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'databases' option.
|
||||
# # ignored_databases = ["postgres", "template0", "template1"]
|
||||
#
|
||||
# ## A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
|
||||
# # databases = ["app_production", "testing"]
|
||||
|
||||
|
||||
# # Read metrics from one or many postgresql servers
|
||||
# [[inputs.postgresql_extensible]]
|
||||
# ## specify address via a url matching:
|
||||
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
|
||||
# ## ?sslmode=[disable|verify-ca|verify-full]
|
||||
# ## or a simple string:
|
||||
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
|
||||
# #
|
||||
# ## All connection parameters are optional. #
|
||||
# ## Without the dbname parameter, the driver will default to a database
|
||||
# ## with the same name as the user. This dbname is just for instantiating a
|
||||
# ## connection with the server and doesn't restrict the databases we are trying
|
||||
# ## to grab metrics for.
|
||||
# #
|
||||
# address = "host=localhost user=postgres sslmode=disable"
|
||||
#
|
||||
# ## connection configuration.
|
||||
# ## maxlifetime - specify the maximum lifetime of a connection.
|
||||
# ## default is forever (0s)
|
||||
# max_lifetime = "0s"
|
||||
#
|
||||
# ## A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# ## databases are gathered.
|
||||
# ## databases = ["app_production", "testing"]
|
||||
# #
|
||||
# ## A custom name for the database that will be used as the "server" tag in the
|
||||
# ## measurement output. If not specified, a default one generated from
|
||||
# ## the connection address is used.
|
||||
# # outputaddress = "db01"
|
||||
# #
|
||||
# ## Define the toml config where the sql queries are stored
|
||||
# ## New queries can be added, if the withdbname is set to true and there is no
|
||||
# ## databases defined in the 'databases field', the sql query is ended by a
|
||||
# ## 'is not null' in order to make the query succeed.
|
||||
# ## Example :
|
||||
# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
|
||||
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
|
||||
# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
|
||||
# ## withdbname was true. Be careful that if the withdbname is set to false you
|
||||
# ## don't have to define the where clause (aka with the dbname) the tagvalue
|
||||
# ## field is used to define custom tags (separated by commas)
|
||||
# ## The optional "measurement" value can be used to override the default
|
||||
# ## output measurement name ("postgresql").
|
||||
# #
|
||||
# ## Structure :
|
||||
# ## [[inputs.postgresql_extensible.query]]
|
||||
# ## sqlquery string
|
||||
# ## version string
|
||||
# ## withdbname boolean
|
||||
# ## tagvalue string (comma separated)
|
||||
# ## measurement string
|
||||
# [[inputs.postgresql_extensible.query]]
|
||||
# sqlquery="SELECT * FROM pg_stat_database"
|
||||
# version=901
|
||||
# withdbname=false
|
||||
# tagvalue=""
|
||||
# measurement=""
|
||||
# [[inputs.postgresql_extensible.query]]
|
||||
# sqlquery="SELECT * FROM pg_stat_bgwriter"
|
||||
# version=901
|
||||
# withdbname=false
|
||||
# tagvalue="postgresql.stats"
|
||||
|
||||
|
||||
# # Generic socket listener capable of handling multiple socket types.
|
||||
# [[inputs.socket_listener]]
|
||||
# ## URL to listen on
|
||||
@@ -3046,6 +3284,14 @@
|
||||
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
|
||||
# max_tcp_connections = 250
|
||||
#
|
||||
# ## Enable TCP keep alive probes (default=false)
|
||||
# tcp_keep_alive = false
|
||||
#
|
||||
# ## Specifies the keep-alive period for an active network connection.
|
||||
# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
|
||||
# ## Defaults to the OS configuration.
|
||||
# # tcp_keep_alive_period = "2h"
|
||||
#
|
||||
# ## Address and port to host UDP listener on
|
||||
# service_address = ":8125"
|
||||
#
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
@@ -518,7 +519,13 @@ func (c *Config) LoadDirectory(path string) error {
|
||||
log.Printf("W! Telegraf is not permitted to read %s", thispath)
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
if strings.HasPrefix(info.Name(), "..") {
|
||||
// skip Kubernetes mounts, prevening loading the same config twice
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
name := info.Name()
|
||||
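The `..` prefix check above targets Kubernetes ConfigMap/Secret mounts, which expose their contents through `..data`-style symlinked directories, so the same file would otherwise be read twice. A standalone sketch of that skip rule follows; the directory path and output are illustrative, not taken from the changeset.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// Walk a config directory and skip any "..*" subdirectories, the same
	// rule LoadDirectory applies above. The path is illustrative.
	root := "/etc/telegraf/telegraf.d"
	filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				// Kubernetes mounts expose their contents twice via
				// "..data" symlink directories; skip the hidden copy.
				return filepath.SkipDir
			}
			return nil
		}
		fmt.Println("would load:", path)
		return nil
	})
}
```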
@@ -1366,6 +1373,42 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if integer, ok := kv.Value.(*ast.Integer); ok {
|
||||
v, err := integer.Int()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.InfluxMaxLineBytes = int(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["influx_sort_fields"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if b, ok := kv.Value.(*ast.Boolean); ok {
|
||||
var err error
|
||||
c.InfluxSortFields, err = b.Boolean()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["influx_uint_support"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if b, ok := kv.Value.(*ast.Boolean); ok {
|
||||
var err error
|
||||
c.InfluxUintSupport, err = b.Boolean()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if str, ok := kv.Value.(*ast.String); ok {
|
||||
@@ -1382,6 +1425,9 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
|
||||
}
|
||||
}
|
||||
|
||||
delete(tbl.Fields, "influx_max_line_bytes")
|
||||
delete(tbl.Fields, "influx_sort_fields")
|
||||
delete(tbl.Fields, "influx_uint_support")
|
||||
delete(tbl.Fields, "data_format")
|
||||
delete(tbl.Fields, "prefix")
|
||||
delete(tbl.Fields, "template")
|
||||
|
||||
4 internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf (vendored, new file)
@@ -0,0 +1,4 @@
|
||||
# This invalid config file should be skipped during testing
|
||||
# as it is an ..data folder
|
||||
|
||||
[[outputs.influxdb
|
||||
@@ -112,9 +112,10 @@ func RandomString(n int) string {
|
||||
return string(bytes)
|
||||
}
|
||||
|
||||
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
|
||||
// you must give the full path to the files.
|
||||
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
|
||||
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files
|
||||
// for use with a client.
|
||||
// The full path to each file must be provided.
|
||||
// Returns a nil pointer if all files are blank and InsecureSkipVerify=false.
|
||||
func GetTLSConfig(
|
||||
SSLCert, SSLKey, SSLCA string,
|
||||
InsecureSkipVerify bool,
|
||||
@@ -155,6 +156,50 @@ func GetTLSConfig(
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// GetServerTLSConfig gets a tls.Config object from the given certs, key, and one or more CA files
|
||||
// for use with a server.
|
||||
// The full path to each file must be provided.
|
||||
// Returns a nil pointer if all files are blank.
|
||||
func GetServerTLSConfig(
|
||||
TLSCert, TLSKey string,
|
||||
TLSAllowedCACerts []string,
|
||||
) (*tls.Config, error) {
|
||||
if TLSCert == "" && TLSKey == "" && len(TLSAllowedCACerts) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
t := &tls.Config{}
|
||||
|
||||
if len(TLSAllowedCACerts) != 0 {
|
||||
caCertPool := x509.NewCertPool()
|
||||
for _, cert := range TLSAllowedCACerts {
|
||||
c, err := ioutil.ReadFile(cert)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
|
||||
err))
|
||||
}
|
||||
caCertPool.AppendCertsFromPEM(c)
|
||||
}
|
||||
t.ClientCAs = caCertPool
|
||||
t.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
}
|
||||
|
||||
if TLSCert != "" && TLSKey != "" {
|
||||
cert, err := tls.LoadX509KeyPair(TLSCert, TLSKey)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf(
|
||||
"Could not load TLS client key/certificate from %s:%s: %s",
|
||||
TLSKey, TLSCert, err))
|
||||
}
|
||||
|
||||
t.Certificates = []tls.Certificate{cert}
|
||||
}
|
||||
|
||||
t.BuildNameToCertificate()
|
||||
|
||||
return t, nil
|
||||
}
|
||||
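A rough sketch of how a service plugin could consume the new helper; the certificate paths and listen address are illustrative, and because the function lives in the `internal` package the code would have to sit inside the telegraf module itself. Note that only this server-side variant adds `ClientCAs` and `RequireAndVerifyClientCert`; the client-side `GetTLSConfig` keeps its existing behaviour.

```
package main

import (
	"log"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Certificate paths are illustrative.
	tlsConfig, err := internal.GetServerTLSConfig(
		"/etc/telegraf/cert.pem",
		"/etc/telegraf/key.pem",
		[]string{"/etc/telegraf/clientca.pem"},
	)
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{
		Addr:      ":9273",
		TLSConfig: tlsConfig,
	}
	// The cert and key are already loaded into TLSConfig,
	// so empty file names are passed here.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```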
|
||||
// SnakeCase converts the given string to snake case following the Golang format:
|
||||
// acronyms are converted to lower-case and preceded by an underscore.
|
||||
func SnakeCase(in string) string {
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package limiter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRateLimiter(t *testing.T) {
|
||||
r := NewRateLimiter(5, time.Second)
|
||||
ticker := time.NewTicker(time.Millisecond * 75)
|
||||
|
||||
// test that we can only get 5 receives from the rate limiter
|
||||
counter := 0
|
||||
outer:
|
||||
for {
|
||||
select {
|
||||
case <-r.C:
|
||||
counter++
|
||||
case <-ticker.C:
|
||||
break outer
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, 5, counter)
|
||||
r.Stop()
|
||||
// verify that the Stop function closes the channel.
|
||||
_, ok := <-r.C
|
||||
assert.False(t, ok)
|
||||
}
|
||||
|
||||
func TestRateLimiterMultipleIterations(t *testing.T) {
|
||||
r := NewRateLimiter(5, time.Millisecond*50)
|
||||
ticker := time.NewTicker(time.Millisecond * 250)
|
||||
|
||||
// test that we can get 15 receives from the rate limiter
|
||||
counter := 0
|
||||
outer:
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
break outer
|
||||
case <-r.C:
|
||||
counter++
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, counter > 10)
|
||||
r.Stop()
|
||||
// verify that the Stop function closes the channel.
|
||||
_, ok := <-r.C
|
||||
assert.False(t, ok)
|
||||
}
|
||||
@@ -2,8 +2,6 @@ package models
|
||||
|
||||
import (
|
||||
"log"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
@@ -78,84 +76,6 @@ func makemetric(
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range tags {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
log.Printf("D! Measurement [%s] tag [%s] "+
|
||||
"ends with a backslash, skipping", measurement, k)
|
||||
delete(tags, k)
|
||||
continue
|
||||
} else if strings.HasSuffix(v, `\`) {
|
||||
log.Printf("D! Measurement [%s] tag [%s] has a value "+
|
||||
"ending with a backslash, skipping", measurement, k)
|
||||
delete(tags, k)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range fields {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
log.Printf("D! Measurement [%s] field [%s] "+
|
||||
"ends with a backslash, skipping", measurement, k)
|
||||
delete(fields, k)
|
||||
continue
|
||||
}
|
||||
// Validate uint64 and float64 fields
|
||||
// convert all int & uint types to int64
|
||||
switch val := v.(type) {
|
||||
case nil:
|
||||
// delete nil fields
|
||||
delete(fields, k)
|
||||
case uint:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint8:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint16:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint32:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int8:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int16:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case int32:
|
||||
fields[k] = int64(val)
|
||||
continue
|
||||
case uint64:
|
||||
// InfluxDB does not support writing uint64
|
||||
if val < uint64(9223372036854775808) {
|
||||
fields[k] = int64(val)
|
||||
} else {
|
||||
fields[k] = int64(9223372036854775807)
|
||||
}
|
||||
continue
|
||||
case float32:
|
||||
fields[k] = float64(val)
|
||||
continue
|
||||
case float64:
|
||||
// NaNs are invalid values in influxdb, skip measurement
|
||||
if math.IsNaN(val) || math.IsInf(val, 0) {
|
||||
log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
|
||||
"field, skipping",
|
||||
measurement, k)
|
||||
delete(fields, k)
|
||||
continue
|
||||
}
|
||||
case string:
|
||||
fields[k] = v
|
||||
default:
|
||||
fields[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
m, err := metric.New(measurement, tags, fields, t, mType)
|
||||
if err != nil {
|
||||
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
@@ -167,69 +166,6 @@ func TestAddDropOriginal(t *testing.T) {
|
||||
assert.False(t, ra.Add(m2))
|
||||
}
|
||||
|
||||
// make an untyped, counter, & gauge metric
|
||||
func TestMakeMetricA(t *testing.T) {
|
||||
now := time.Now()
|
||||
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
|
||||
Name: "TestRunningAggregator",
|
||||
})
|
||||
assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())
|
||||
|
||||
m := ra.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{},
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.Type(),
|
||||
telegraf.Untyped,
|
||||
)
|
||||
|
||||
m = ra.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{},
|
||||
telegraf.Counter,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.Type(),
|
||||
telegraf.Counter,
|
||||
)
|
||||
|
||||
m = ra.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{},
|
||||
telegraf.Gauge,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.Type(),
|
||||
telegraf.Gauge,
|
||||
)
|
||||
}
|
||||
|
||||
type TestAggregator struct {
|
||||
sum int64
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/serializers/influx"
|
||||
"github.com/influxdata/telegraf/selfstat"
|
||||
)
|
||||
|
||||
@@ -75,7 +76,12 @@ func (r *RunningInput) MakeMetric(
|
||||
)
|
||||
|
||||
if r.trace && m != nil {
|
||||
fmt.Print("> " + m.String())
|
||||
s := influx.NewSerializer()
|
||||
s.SetFieldSortOrder(influx.SortFields)
|
||||
octets, err := s.Serialize(m)
|
||||
if err == nil {
|
||||
fmt.Print("> " + string(octets))
|
||||
}
|
||||
}
|
||||
|
||||
r.MetricsGathered.Incr(1)
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -45,77 +44,17 @@ func TestMakeMetricNilFields(t *testing.T) {
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
}
|
||||
|
||||
// make an untyped, counter, & gauge metric
|
||||
func TestMakeMetric(t *testing.T) {
|
||||
now := time.Now()
|
||||
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||
Name: "TestRunningInput",
|
||||
})
|
||||
|
||||
ri.SetTrace(true)
|
||||
assert.Equal(t, true, ri.Trace())
|
||||
assert.Equal(t, "inputs.TestRunningInput", ri.Name())
|
||||
|
||||
m := ri.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
expected, err := metric.New("RITest",
|
||||
map[string]string{},
|
||||
telegraf.Untyped,
|
||||
map[string]interface{}{
|
||||
"value": int(101),
|
||||
},
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.Type(),
|
||||
telegraf.Untyped,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
m = ri.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{},
|
||||
telegraf.Counter,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.Type(),
|
||||
telegraf.Counter,
|
||||
)
|
||||
|
||||
m = ri.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{},
|
||||
telegraf.Gauge,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.Type(),
|
||||
telegraf.Gauge,
|
||||
)
|
||||
require.Equal(t, expected, m)
|
||||
}
|
||||
|
||||
func TestMakeMetricWithPluginTags(t *testing.T) {
|
||||
@@ -137,11 +76,18 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
|
||||
expected, err := metric.New("RITest",
|
||||
map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 101,
|
||||
},
|
||||
now,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, m)
|
||||
}
|
||||
|
||||
func TestMakeMetricFilteredOut(t *testing.T) {
|
||||
@@ -187,87 +133,17 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
}
|
||||
|
||||
// make an untyped, counter, & gauge metric
|
||||
func TestMakeMetricInfFields(t *testing.T) {
|
||||
inf := math.Inf(1)
|
||||
ninf := math.Inf(-1)
|
||||
now := time.Now()
|
||||
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||
Name: "TestRunningInput",
|
||||
})
|
||||
|
||||
ri.SetTrace(true)
|
||||
assert.Equal(t, true, ri.Trace())
|
||||
|
||||
m := ri.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{
|
||||
"value": int(101),
|
||||
"inf": inf,
|
||||
"ninf": ninf,
|
||||
expected, err := metric.New("RITest",
|
||||
map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 101,
|
||||
},
|
||||
map[string]string{},
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
)
|
||||
}
|
||||
|
||||
func TestMakeMetricAllFieldTypes(t *testing.T) {
|
||||
now := time.Now()
|
||||
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||
Name: "TestRunningInput",
|
||||
})
|
||||
|
||||
ri.SetTrace(true)
|
||||
assert.Equal(t, true, ri.Trace())
|
||||
|
||||
m := ri.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{
|
||||
"a": int(10),
|
||||
"b": int8(10),
|
||||
"c": int16(10),
|
||||
"d": int32(10),
|
||||
"e": uint(10),
|
||||
"f": uint8(10),
|
||||
"g": uint16(10),
|
||||
"h": uint32(10),
|
||||
"i": uint64(10),
|
||||
"j": float32(10),
|
||||
"k": uint64(9223372036854775810),
|
||||
"l": "foobar",
|
||||
"m": true,
|
||||
},
|
||||
map[string]string{},
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Contains(t, m.String(), "a=10i")
|
||||
assert.Contains(t, m.String(), "b=10i")
|
||||
assert.Contains(t, m.String(), "c=10i")
|
||||
assert.Contains(t, m.String(), "d=10i")
|
||||
assert.Contains(t, m.String(), "e=10i")
|
||||
assert.Contains(t, m.String(), "f=10i")
|
||||
assert.Contains(t, m.String(), "g=10i")
|
||||
assert.Contains(t, m.String(), "h=10i")
|
||||
assert.Contains(t, m.String(), "i=10i")
|
||||
assert.Contains(t, m.String(), "j=10")
|
||||
assert.NotContains(t, m.String(), "j=10i")
|
||||
assert.Contains(t, m.String(), "k=9223372036854775807i")
|
||||
assert.Contains(t, m.String(), "l=\"foobar\"")
|
||||
assert.Contains(t, m.String(), "m=true")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, m)
|
||||
}
|
||||
|
||||
func TestMakeMetricNameOverride(t *testing.T) {
|
||||
@@ -284,11 +160,15 @@ func TestMakeMetricNameOverride(t *testing.T) {
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
expected, err := metric.New("foobar",
|
||||
nil,
|
||||
map[string]interface{}{
|
||||
"value": 101,
|
||||
},
|
||||
now,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, m)
|
||||
}
|
||||
|
||||
func TestMakeMetricNamePrefix(t *testing.T) {
|
||||
@@ -305,11 +185,15 @@ func TestMakeMetricNamePrefix(t *testing.T) {
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
|
||||
m.String(),
|
||||
expected, err := metric.New("foobar_RITest",
|
||||
nil,
|
||||
map[string]interface{}{
|
||||
"value": 101,
|
||||
},
|
||||
now,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, m)
|
||||
}
|
||||
|
||||
func TestMakeMetricNameSuffix(t *testing.T) {
|
||||
@@ -326,134 +210,15 @@ func TestMakeMetricNameSuffix(t *testing.T) {
		telegraf.Untyped,
		now,
	)
	assert.Equal(
		t,
		fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
		m.String(),
	expected, err := metric.New("RITest_foobar",
		nil,
		map[string]interface{}{
			"value": 101,
		},
		now,
	)
}

func TestMakeMetric_TrailingSlash(t *testing.T) {
	now := time.Now()

	tests := []struct {
		name                string
		measurement         string
		fields              map[string]interface{}
		tags                map[string]string
		expectedNil         bool
		expectedMeasurement string
		expectedFields      map[string]interface{}
		expectedTags        map[string]string
	}{
		{
			name:        "Measurement cannot have trailing slash",
			measurement: `cpu\`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Field key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				`bad\`:  `xyzzy`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Field value with trailing slash okay",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				"ok":    `xyzzy\`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
				"ok":    `xyzzy\`,
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Must have one field after dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"bad": math.NaN(),
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Tag key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host\`: "localhost",
				"a":     "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
		{
			name:        "Tag value with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host`: `localhost\`,
				"a":    "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
	}

	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			m := ri.MakeMetric(
				tc.measurement,
				tc.fields,
				tc.tags,
				telegraf.Untyped,
				now)

			if tc.expectedNil {
				require.Nil(t, m)
			} else {
				require.NotNil(t, m)
				require.Equal(t, tc.expectedMeasurement, m.Name())
				require.Equal(t, tc.expectedFields, m.Fields())
				require.Equal(t, tc.expectedTags, m.Tags())
			}
		})
	}
	require.NoError(t, err)
	require.Equal(t, expected, m)
}

type testInput struct{}
@@ -87,7 +87,7 @@ func NewRunningOutput(
			map[string]string{"output": name},
		),
	}
	ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
	ro.BufferLimit.Set(int64(ro.MetricBufferLimit))
	return ro
}

@@ -113,6 +113,11 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
		m, _ = metric.New(name, tags, fields, t)
	}

	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
		output.Add(m)
		return
	}

	ro.metrics.Add(m)
	if ro.metrics.Len() == ro.MetricBatchSize {
		batch := ro.metrics.Batch(ro.MetricBatchSize)
@@ -125,6 +130,12 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
	if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
		metrics := output.Push()
		ro.metrics.Add(metrics...)
		output.Reset()
	}

	nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
	ro.BufferSize.Set(int64(nFails + nMetrics))
	log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
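The AddMetric and Write hunks above special-case outputs that implement telegraf.AggregatingOutput: metrics are handed to the aggregator instead of the buffer, and Write drains the aggregator back into the buffer before flushing. Below is a minimal, self-contained sketch of that type-assertion flow; the interfaces here are simplified stand-ins for illustration, not the real telegraf definitions.

package main

import "fmt"

// Simplified stand-ins for the telegraf interfaces; the names and shapes are
// assumptions for this sketch only.
type Metric interface{ Name() string }

type Output interface{ Write(metrics []Metric) error }

type AggregatingOutput interface {
	Output
	Add(m Metric)
	Push() []Metric
	Reset()
}

// addMetric mirrors the AddMetric hunk: an aggregating output consumes the
// metric immediately, every other output gets it buffered.
func addMetric(out Output, buf *[]Metric, m Metric) {
	if agg, ok := out.(AggregatingOutput); ok {
		agg.Add(m)
		return
	}
	*buf = append(*buf, m)
}

// write mirrors the Write hunk: drain the aggregator into the buffer first,
// then flush the buffer to the output.
func write(out Output, buf *[]Metric) error {
	if agg, ok := out.(AggregatingOutput); ok {
		*buf = append(*buf, agg.Push()...)
		agg.Reset()
	}
	err := out.Write(*buf)
	if err == nil {
		*buf = (*buf)[:0]
	}
	return err
}

func main() {
	var buf []Metric
	fmt.Println(len(buf)) // placeholder so the sketch builds as a program
}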
60  metric.go
@@ -17,48 +17,50 @@ const (
	Histogram
)

type Tag struct {
	Key   string
	Value string
}

type Field struct {
	Key   string
	Value interface{}
}

type Metric interface {
	// Serialize serializes the metric into a line-protocol byte buffer,
	// including a newline at the end.
	Serialize() []byte
	// same as Serialize, but avoids an allocation.
	// returns number of bytes copied into dst.
	SerializeTo(dst []byte) int
	// String is the same as Serialize, but returns a string.
	String() string
	// Copy deep-copies the metric.
	Copy() Metric
	// Split will attempt to return multiple metrics with the same timestamp
	// whose string representations are no longer than maxSize.
	// Metrics with a single field may exceed the requested size.
	Split(maxSize int) []Metric
	// Getting data structure functions
	Name() string
	Tags() map[string]string
	TagList() []*Tag
	Fields() map[string]interface{}
	FieldList() []*Field
	Time() time.Time
	Type() ValueType

	// Name functions
	SetName(name string)
	AddPrefix(prefix string)
	AddSuffix(suffix string)

	// Tag functions
	GetTag(key string) (string, bool)
	HasTag(key string) bool
	AddTag(key, value string)
	RemoveTag(key string)

	// Field functions
	GetField(key string) (interface{}, bool)
	HasField(key string) bool
	AddField(key string, value interface{})
	RemoveField(key string) error
	RemoveField(key string)

	// Name functions
	SetName(name string)
	SetPrefix(prefix string)
	SetSuffix(suffix string)

	// Getting data structure functions
	Name() string
	Tags() map[string]string
	Fields() map[string]interface{}
	Time() time.Time
	UnixNano() int64
	Type() ValueType
	Len() int // returns the length of the serialized metric, including newline
	// HashID returns a unique identifier for the series.
	HashID() uint64

	// aggregator things:
	// Copy returns a deep copy of the Metric.
	Copy() Metric

	// Mark Metric as an aggregate
	SetAggregate(bool)
	IsAggregate() bool
}
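For orientation, here is a small sketch of how a consumer could walk the TagList and FieldList accessors added to the interface above. It builds the metric with metric.New as shown later in this diff and assumes the import path remains github.com/influxdata/telegraf/metric.

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	m, err := metric.New(
		"cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"usage_idle": 99.0},
		time.Now(),
	)
	if err != nil {
		panic(err)
	}

	// TagList and FieldList return ordered slices of tag/field structs,
	// unlike Tags() and Fields(), which rebuild maps on every call.
	for _, tag := range m.TagList() {
		fmt.Printf("tag %s=%s\n", tag.Key, tag.Value)
	}
	for _, field := range m.FieldList() {
		fmt.Printf("field %s=%v\n", field.Key, field.Value)
	}
}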
53  metric/builder.go  Normal file
@@ -0,0 +1,53 @@
package metric

import (
	"time"

	"github.com/influxdata/telegraf"
)

type TimeFunc func() time.Time

type Builder struct {
	TimeFunc
	TimePrecision time.Duration

	*metric
}

func NewBuilder() *Builder {
	b := &Builder{
		TimeFunc:      time.Now,
		TimePrecision: 1 * time.Nanosecond,
	}
	b.Reset()
	return b
}

func (b *Builder) SetName(name string) {
	b.name = name
}

func (b *Builder) AddTag(key string, value string) {
	b.metric.AddTag(key, value)
}

func (b *Builder) AddField(key string, value interface{}) {
	b.metric.AddField(key, value)
}

func (b *Builder) SetTime(tm time.Time) {
	b.tm = tm
}

func (b *Builder) Reset() {
	b.metric = &metric{}
}

func (b *Builder) Metric() (telegraf.Metric, error) {
	if b.tm.IsZero() {
		b.tm = b.TimeFunc().Truncate(b.TimePrecision)
	}

	return b.metric, nil
}
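A short usage sketch for the Builder added in metric/builder.go above, using only the methods defined in that file; actual call sites inside telegraf parsers may differ.

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	b := metric.NewBuilder()
	b.SetName("cpu")
	b.AddTag("host", "localhost")
	b.AddField("usage_idle", 99.0)

	// When SetTime was not called, Metric() stamps the metric with
	// TimeFunc() truncated to TimePrecision (time.Now and 1ns by default).
	m, err := b.Metric()
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Name(), m.Fields())

	// Reset clears the embedded metric so the builder can be reused.
	b.Reset()
}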
@@ -1,55 +0,0 @@
package metric

import (
	"strings"
)

var (
	// escaper is for escaping:
	//   - tag keys
	//   - tag values
	//   - field keys
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
	unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

	// nameEscaper is for escaping measurement names only.
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	nameEscaper   = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
	nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)

	// stringFieldEscaper is for escaping string field values only.
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	stringFieldEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)
	stringFieldUnEscaper = strings.NewReplacer(
		`\"`, `"`,
		`\\`, `\`,
	)
)

func escape(s string, t string) string {
	switch t {
	case "fieldkey", "tagkey", "tagval":
		return escaper.Replace(s)
	case "name":
		return nameEscaper.Replace(s)
	case "fieldval":
		return stringFieldEscaper.Replace(s)
	}
	return s
}

func unescape(s string, t string) string {
	switch t {
	case "fieldkey", "tagkey", "tagval":
		return unEscaper.Replace(s)
	case "name":
		return nameUnEscaper.Replace(s)
	case "fieldval":
		return stringFieldUnEscaper.Replace(s)
	}
	return s
}
@@ -1,38 +0,0 @@
package metric

import (
	"reflect"
	"strconv"
	"unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
	s := unsafeBytesToString(b)
	return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
	s := unsafeBytesToString(b)
	return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
	return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
	dst := reflect.StringHeader{
		Data: src.Data,
		Len:  src.Len,
	}
	s := *(*string)(unsafe.Pointer(&dst))
	return s
}
@@ -1,103 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
|
||||
f := func(b []byte, base int, bitSize int) bool {
|
||||
exp, expErr := strconv.ParseInt(string(b), base, bitSize)
|
||||
got, gotErr := parseIntBytes(b, base, bitSize)
|
||||
|
||||
return exp == got && checkErrs(expErr, gotErr)
|
||||
}
|
||||
|
||||
cfg := &quick.Config{
|
||||
MaxCount: 10000,
|
||||
}
|
||||
|
||||
if err := quick.Check(f, cfg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
|
||||
buf := []byte{}
|
||||
f := func(n int64) bool {
|
||||
buf = strconv.AppendInt(buf[:0], n, 10)
|
||||
|
||||
exp, expErr := strconv.ParseInt(string(buf), 10, 64)
|
||||
got, gotErr := parseIntBytes(buf, 10, 64)
|
||||
|
||||
return exp == got && checkErrs(expErr, gotErr)
|
||||
}
|
||||
|
||||
cfg := &quick.Config{
|
||||
MaxCount: 10000,
|
||||
}
|
||||
|
||||
if err := quick.Check(f, cfg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
|
||||
f := func(b []byte, bitSize int) bool {
|
||||
exp, expErr := strconv.ParseFloat(string(b), bitSize)
|
||||
got, gotErr := parseFloatBytes(b, bitSize)
|
||||
|
||||
return exp == got && checkErrs(expErr, gotErr)
|
||||
}
|
||||
|
||||
cfg := &quick.Config{
|
||||
MaxCount: 10000,
|
||||
}
|
||||
|
||||
if err := quick.Check(f, cfg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
|
||||
buf := []byte{}
|
||||
f := func(n float64) bool {
|
||||
buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)
|
||||
|
||||
exp, expErr := strconv.ParseFloat(string(buf), 64)
|
||||
got, gotErr := parseFloatBytes(buf, 64)
|
||||
|
||||
return exp == got && checkErrs(expErr, gotErr)
|
||||
}
|
||||
|
||||
cfg := &quick.Config{
|
||||
MaxCount: 10000,
|
||||
}
|
||||
|
||||
if err := quick.Check(f, cfg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseBoolBytesEquivalence(t *testing.T) {
|
||||
var buf []byte
|
||||
for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
|
||||
buf = append(buf[:0], s...)
|
||||
|
||||
exp, expErr := strconv.ParseBool(s)
|
||||
got, gotErr := parseBoolBytes(buf)
|
||||
|
||||
if got != exp || !checkErrs(expErr, gotErr) {
|
||||
t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkErrs(a, b error) bool {
|
||||
if (a == nil) != (b == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
return a == nil || a.Error() == b.Error()
|
||||
}
|
||||
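The deleted tests above use testing/quick to fuzz the byte-slice parsers against their strconv counterparts. Below is a minimal, self-contained sketch of that property-check pattern, with a trivial stand-in parser (an assumption for illustration, not code from this diff).

package main

import (
	"fmt"
	"strconv"
	"testing/quick"
)

// parseInt is a stand-in for the function under test; the removed file
// compared parseIntBytes against strconv.ParseInt in the same way.
func parseInt(s string) (int64, error) {
	return strconv.ParseInt(s, 10, 64)
}

func main() {
	// Property: the parser agrees with the reference implementation for
	// every generated int64, both on the value and on error/no-error.
	f := func(n int64) bool {
		s := strconv.FormatInt(n, 10)
		exp, expErr := strconv.ParseInt(s, 10, 64)
		got, gotErr := parseInt(s)
		return exp == got && (expErr == nil) == (gotErr == nil)
	}
	if err := quick.Check(f, &quick.Config{MaxCount: 10000}); err != nil {
		fmt.Println("property failed:", err)
	} else {
		fmt.Println("property held for 10000 cases")
	}
}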
789  metric/metric.go
@@ -1,623 +1,282 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
const MaxInt = int(^uint(0) >> 1)
|
||||
type metric struct {
|
||||
name string
|
||||
tags []*telegraf.Tag
|
||||
fields []*telegraf.Field
|
||||
tm time.Time
|
||||
|
||||
tp telegraf.ValueType
|
||||
aggregate bool
|
||||
}
|
||||
|
||||
func New(
|
||||
name string,
|
||||
tags map[string]string,
|
||||
fields map[string]interface{},
|
||||
t time.Time,
|
||||
mType ...telegraf.ValueType,
|
||||
tm time.Time,
|
||||
tp ...telegraf.ValueType,
|
||||
) (telegraf.Metric, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, fmt.Errorf("missing measurement name")
|
||||
}
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("%s: must have one or more fields", name)
|
||||
}
|
||||
if strings.HasSuffix(name, `\`) {
|
||||
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
|
||||
}
|
||||
|
||||
var thisType telegraf.ValueType
|
||||
if len(mType) > 0 {
|
||||
thisType = mType[0]
|
||||
var vtype telegraf.ValueType
|
||||
if len(tp) > 0 {
|
||||
vtype = tp[0]
|
||||
} else {
|
||||
thisType = telegraf.Untyped
|
||||
vtype = telegraf.Untyped
|
||||
}
|
||||
|
||||
m := &metric{
|
||||
name: []byte(escape(name, "name")),
|
||||
t: []byte(fmt.Sprint(t.UnixNano())),
|
||||
nsec: t.UnixNano(),
|
||||
mType: thisType,
|
||||
name: name,
|
||||
tags: nil,
|
||||
fields: nil,
|
||||
tm: tm,
|
||||
tp: vtype,
|
||||
}
|
||||
|
||||
// pre-allocate exact size of the tags slice
|
||||
taglen := 0
|
||||
for k, v := range tags {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
|
||||
if len(tags) > 0 {
|
||||
m.tags = make([]*telegraf.Tag, 0, len(tags))
|
||||
for k, v := range tags {
|
||||
m.tags = append(m.tags,
|
||||
&telegraf.Tag{Key: k, Value: v})
|
||||
}
|
||||
if strings.HasSuffix(v, `\`) {
|
||||
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
|
||||
}
|
||||
|
||||
if len(k) == 0 || len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
|
||||
}
|
||||
m.tags = make([]byte, taglen)
|
||||
|
||||
i := 0
|
||||
for k, v := range tags {
|
||||
if len(k) == 0 || len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
m.tags[i] = ','
|
||||
i++
|
||||
i += copy(m.tags[i:], escape(k, "tagkey"))
|
||||
m.tags[i] = '='
|
||||
i++
|
||||
i += copy(m.tags[i:], escape(v, "tagval"))
|
||||
sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
|
||||
}
|
||||
|
||||
// pre-allocate capacity of the fields slice
|
||||
fieldlen := 0
|
||||
for k, _ := range fields {
|
||||
if strings.HasSuffix(k, `\`) {
|
||||
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
|
||||
}
|
||||
|
||||
// 10 bytes is completely arbitrary, but will at least prevent some
|
||||
// amount of allocations. There's a small possibility this will create
|
||||
// slightly more allocations for a metric that has many short fields.
|
||||
fieldlen += len(k) + 10
|
||||
}
|
||||
m.fields = make([]byte, 0, fieldlen)
|
||||
|
||||
i = 0
|
||||
m.fields = make([]*telegraf.Field, 0, len(fields))
|
||||
for k, v := range fields {
|
||||
if i != 0 {
|
||||
m.fields = append(m.fields, ',')
|
||||
v := convertField(v)
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
m.fields = appendField(m.fields, k, v)
|
||||
i++
|
||||
m.AddField(k, v)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// indexUnescapedByte finds the index of the first byte equal to b in buf that
|
||||
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
|
||||
// not found.
|
||||
func indexUnescapedByte(buf []byte, b byte) int {
|
||||
var keyi int
|
||||
for {
|
||||
i := bytes.IndexByte(buf[keyi:], b)
|
||||
if i == -1 {
|
||||
return -1
|
||||
} else if i == 0 {
|
||||
break
|
||||
}
|
||||
keyi += i
|
||||
if buf[keyi-1] != '\\' {
|
||||
break
|
||||
} else {
|
||||
keyi++
|
||||
}
|
||||
}
|
||||
return keyi
|
||||
}
|
||||
|
||||
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
|
||||
// to b in buf that is not escaped. Allows for the escape char `\` to be
|
||||
// escaped. Returns -1 if not found.
|
||||
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
|
||||
var keyi int
|
||||
for {
|
||||
i := bytes.IndexByte(buf[keyi:], b)
|
||||
if i == -1 {
|
||||
return -1
|
||||
} else if i == 0 {
|
||||
break
|
||||
}
|
||||
keyi += i
|
||||
if countBackslashes(buf, keyi-1)%2 == 0 {
|
||||
break
|
||||
} else {
|
||||
keyi++
|
||||
}
|
||||
}
|
||||
return keyi
|
||||
}
|
||||
|
||||
// countBackslashes counts the number of preceding backslashes starting at
|
||||
// the 'start' index.
|
||||
func countBackslashes(buf []byte, index int) int {
|
||||
var count int
|
||||
for {
|
||||
if index < 0 {
|
||||
return count
|
||||
}
|
||||
if buf[index] == '\\' {
|
||||
count++
|
||||
index--
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
type metric struct {
|
||||
name []byte
|
||||
tags []byte
|
||||
fields []byte
|
||||
t []byte
|
||||
|
||||
mType telegraf.ValueType
|
||||
aggregate bool
|
||||
|
||||
// cached values for reuse in "get" functions
|
||||
hashID uint64
|
||||
nsec int64
|
||||
}
|
||||
|
||||
func (m *metric) String() string {
|
||||
return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
|
||||
return fmt.Sprintf("%s %v %v %d", m.name, m.Tags(), m.Fields(), m.tm.UnixNano())
|
||||
}
|
||||
|
||||
func (m *metric) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
func (m *metric) Tags() map[string]string {
|
||||
tags := make(map[string]string, len(m.tags))
|
||||
for _, tag := range m.tags {
|
||||
tags[tag.Key] = tag.Value
|
||||
}
|
||||
return tags
|
||||
}
|
||||
|
||||
func (m *metric) TagList() []*telegraf.Tag {
|
||||
return m.tags
|
||||
}
|
||||
|
||||
func (m *metric) Fields() map[string]interface{} {
|
||||
fields := make(map[string]interface{}, len(m.fields))
|
||||
for _, field := range m.fields {
|
||||
fields[field.Key] = field.Value
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
func (m *metric) FieldList() []*telegraf.Field {
|
||||
return m.fields
|
||||
}
|
||||
|
||||
func (m *metric) Time() time.Time {
|
||||
return m.tm
|
||||
}
|
||||
|
||||
func (m *metric) Type() telegraf.ValueType {
|
||||
return m.tp
|
||||
}
|
||||
|
||||
func (m *metric) SetName(name string) {
|
||||
m.name = name
|
||||
}
|
||||
|
||||
func (m *metric) AddPrefix(prefix string) {
|
||||
m.name = prefix + m.name
|
||||
}
|
||||
|
||||
func (m *metric) AddSuffix(suffix string) {
|
||||
m.name = m.name + suffix
|
||||
}
|
||||
|
||||
func (m *metric) AddTag(key, value string) {
|
||||
for i, tag := range m.tags {
|
||||
if key > tag.Key {
|
||||
continue
|
||||
}
|
||||
|
||||
if key == tag.Key {
|
||||
tag.Value = value
|
||||
return
|
||||
}
|
||||
|
||||
m.tags = append(m.tags, nil)
|
||||
copy(m.tags[i+1:], m.tags[i:])
|
||||
m.tags[i] = &telegraf.Tag{Key: key, Value: value}
|
||||
return
|
||||
}
|
||||
|
||||
m.tags = append(m.tags, &telegraf.Tag{Key: key, Value: value})
|
||||
}
|
||||
|
||||
func (m *metric) HasTag(key string) bool {
|
||||
for _, tag := range m.tags {
|
||||
if tag.Key == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *metric) GetTag(key string) (string, bool) {
|
||||
for _, tag := range m.tags {
|
||||
if tag.Key == key {
|
||||
return tag.Value, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (m *metric) RemoveTag(key string) {
|
||||
for i, tag := range m.tags {
|
||||
if tag.Key == key {
|
||||
copy(m.tags[i:], m.tags[i+1:])
|
||||
m.tags[len(m.tags)-1] = nil
|
||||
m.tags = m.tags[:len(m.tags)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metric) AddField(key string, value interface{}) {
|
||||
for i, field := range m.fields {
|
||||
if key == field.Key {
|
||||
m.fields[i] = &telegraf.Field{Key: key, Value: convertField(value)}
|
||||
}
|
||||
}
|
||||
m.fields = append(m.fields, &telegraf.Field{Key: key, Value: convertField(value)})
|
||||
}
|
||||
|
||||
func (m *metric) HasField(key string) bool {
|
||||
for _, field := range m.fields {
|
||||
if field.Key == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *metric) GetField(key string) (interface{}, bool) {
|
||||
for _, field := range m.fields {
|
||||
if field.Key == key {
|
||||
return field.Value, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (m *metric) RemoveField(key string) {
|
||||
for i, field := range m.fields {
|
||||
if field.Key == key {
|
||||
copy(m.fields[i:], m.fields[i+1:])
|
||||
m.fields[len(m.fields)-1] = nil
|
||||
m.fields = m.fields[:len(m.fields)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metric) Copy() telegraf.Metric {
|
||||
m2 := &metric{
|
||||
name: m.name,
|
||||
tags: make([]*telegraf.Tag, len(m.tags)),
|
||||
fields: make([]*telegraf.Field, len(m.fields)),
|
||||
tm: m.tm,
|
||||
tp: m.tp,
|
||||
aggregate: m.aggregate,
|
||||
}
|
||||
|
||||
for i, tag := range m.tags {
|
||||
m2.tags[i] = tag
|
||||
}
|
||||
|
||||
for i, field := range m.fields {
|
||||
m2.fields[i] = field
|
||||
}
|
||||
return m2
|
||||
}
|
||||
|
||||
func (m *metric) SetAggregate(b bool) {
|
||||
m.aggregate = b
|
||||
m.aggregate = true
|
||||
}
|
||||
|
||||
func (m *metric) IsAggregate() bool {
|
||||
return m.aggregate
|
||||
}
|
||||
|
||||
func (m *metric) Type() telegraf.ValueType {
|
||||
return m.mType
|
||||
}
|
||||
|
||||
func (m *metric) Len() int {
|
||||
// 3 is for 2 spaces surrounding the fields array + newline at the end.
|
||||
return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
|
||||
}
|
||||
|
||||
func (m *metric) Serialize() []byte {
|
||||
tmp := make([]byte, m.Len())
|
||||
i := 0
|
||||
i += copy(tmp[i:], m.name)
|
||||
i += copy(tmp[i:], m.tags)
|
||||
tmp[i] = ' '
|
||||
i++
|
||||
i += copy(tmp[i:], m.fields)
|
||||
tmp[i] = ' '
|
||||
i++
|
||||
i += copy(tmp[i:], m.t)
|
||||
tmp[i] = '\n'
|
||||
return tmp
|
||||
}
|
||||
|
||||
func (m *metric) SerializeTo(dst []byte) int {
|
||||
i := 0
|
||||
if i >= len(dst) {
|
||||
return i
|
||||
}
|
||||
|
||||
i += copy(dst[i:], m.name)
|
||||
if i >= len(dst) {
|
||||
return i
|
||||
}
|
||||
|
||||
i += copy(dst[i:], m.tags)
|
||||
if i >= len(dst) {
|
||||
return i
|
||||
}
|
||||
|
||||
dst[i] = ' '
|
||||
i++
|
||||
if i >= len(dst) {
|
||||
return i
|
||||
}
|
||||
|
||||
i += copy(dst[i:], m.fields)
|
||||
if i >= len(dst) {
|
||||
return i
|
||||
}
|
||||
|
||||
dst[i] = ' '
|
||||
i++
|
||||
if i >= len(dst) {
|
||||
return i
|
||||
}
|
||||
|
||||
i += copy(dst[i:], m.t)
|
||||
if i >= len(dst) {
|
||||
return i
|
||||
}
|
||||
dst[i] = '\n'
|
||||
|
||||
return i + 1
|
||||
}
|
||||
|
||||
func (m *metric) Split(maxSize int) []telegraf.Metric {
|
||||
if m.Len() <= maxSize {
|
||||
return []telegraf.Metric{m}
|
||||
}
|
||||
var out []telegraf.Metric
|
||||
|
||||
// constant number of bytes for each metric (in addition to field bytes)
|
||||
constant := len(m.name) + len(m.tags) + len(m.t) + 3
|
||||
// currently selected fields
|
||||
fields := make([]byte, 0, maxSize)
|
||||
|
||||
i := 0
|
||||
for {
|
||||
if i >= len(m.fields) {
|
||||
// hit the end of the field byte slice
|
||||
if len(fields) > 0 {
|
||||
out = append(out, copyWith(m.name, m.tags, fields, m.t))
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// find the end of the next field
|
||||
j := indexUnescapedByte(m.fields[i:], ',')
|
||||
if j == -1 {
|
||||
j = len(m.fields)
|
||||
} else {
|
||||
j += i
|
||||
}
|
||||
|
||||
// if true, then we need to create a metric _not_ including the currently
|
||||
// selected field
|
||||
if len(m.fields[i:j])+len(fields)+constant >= maxSize {
|
||||
// if false, then we'll create a metric including the currently
|
||||
// selected field anyways. This means that the given maxSize is too
|
||||
// small for a single field to fit.
|
||||
if len(fields) > 0 {
|
||||
out = append(out, copyWith(m.name, m.tags, fields, m.t))
|
||||
}
|
||||
|
||||
fields = make([]byte, 0, maxSize)
|
||||
}
|
||||
if len(fields) > 0 {
|
||||
fields = append(fields, ',')
|
||||
}
|
||||
fields = append(fields, m.fields[i:j]...)
|
||||
|
||||
i = j + 1
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (m *metric) Fields() map[string]interface{} {
|
||||
fieldMap := map[string]interface{}{}
|
||||
i := 0
|
||||
for {
|
||||
if i >= len(m.fields) {
|
||||
break
|
||||
}
|
||||
// end index of field key
|
||||
i1 := indexUnescapedByte(m.fields[i:], '=')
|
||||
if i1 == -1 {
|
||||
break
|
||||
}
|
||||
// start index of field value
|
||||
i2 := i1 + 1
|
||||
|
||||
// end index of field value
|
||||
var i3 int
|
||||
if m.fields[i:][i2] == '"' {
|
||||
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
|
||||
if i3 == -1 {
|
||||
i3 = len(m.fields[i:])
|
||||
}
|
||||
i3 += i2 + 2 // increment index to the comma
|
||||
} else {
|
||||
i3 = indexUnescapedByte(m.fields[i:], ',')
|
||||
if i3 == -1 {
|
||||
i3 = len(m.fields[i:])
|
||||
}
|
||||
}
|
||||
|
||||
switch m.fields[i:][i2] {
|
||||
case '"':
|
||||
// string field
|
||||
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
|
||||
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
// number field
|
||||
switch m.fields[i:][i3-1] {
|
||||
case 'i':
|
||||
// integer field
|
||||
n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
|
||||
if err == nil {
|
||||
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
|
||||
} else {
|
||||
// TODO handle error or just ignore field silently?
|
||||
}
|
||||
default:
|
||||
// float field
|
||||
n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
|
||||
if err == nil {
|
||||
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
|
||||
} else {
|
||||
// TODO handle error or just ignore field silently?
|
||||
}
|
||||
}
|
||||
case 'T', 't':
|
||||
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
|
||||
case 'F', 'f':
|
||||
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
|
||||
default:
|
||||
// TODO handle unsupported field type
|
||||
}
|
||||
|
||||
i += i3 + 1
|
||||
}
|
||||
|
||||
return fieldMap
|
||||
}
|
||||
|
||||
func (m *metric) Tags() map[string]string {
|
||||
tagMap := map[string]string{}
|
||||
if len(m.tags) == 0 {
|
||||
return tagMap
|
||||
}
|
||||
|
||||
i := 0
|
||||
for {
|
||||
// start index of tag key
|
||||
i0 := indexUnescapedByte(m.tags[i:], ',') + 1
|
||||
if i0 == 0 {
|
||||
// didn't find a tag start
|
||||
break
|
||||
}
|
||||
// end index of tag key
|
||||
i1 := indexUnescapedByte(m.tags[i:], '=')
|
||||
// start index of tag value
|
||||
i2 := i1 + 1
|
||||
// end index of tag value (starting from i2)
|
||||
i3 := indexUnescapedByte(m.tags[i+i2:], ',')
|
||||
if i3 == -1 {
|
||||
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
|
||||
break
|
||||
}
|
||||
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
|
||||
// increment start index for the next tag
|
||||
i += i2 + i3
|
||||
}
|
||||
|
||||
return tagMap
|
||||
}
|
||||
|
||||
func (m *metric) Name() string {
|
||||
return unescape(string(m.name), "name")
|
||||
}
|
||||
|
||||
func (m *metric) Time() time.Time {
|
||||
// assume metric has been verified already and ignore error:
|
||||
if m.nsec == 0 {
|
||||
m.nsec, _ = parseIntBytes(m.t, 10, 64)
|
||||
}
|
||||
return time.Unix(0, m.nsec)
|
||||
}
|
||||
|
||||
func (m *metric) UnixNano() int64 {
|
||||
// assume metric has been verified already and ignore error:
|
||||
if m.nsec == 0 {
|
||||
m.nsec, _ = parseIntBytes(m.t, 10, 64)
|
||||
}
|
||||
return m.nsec
|
||||
}
|
||||
|
||||
func (m *metric) SetName(name string) {
|
||||
m.hashID = 0
|
||||
m.name = []byte(nameEscaper.Replace(name))
|
||||
}
|
||||
|
||||
func (m *metric) SetPrefix(prefix string) {
|
||||
m.hashID = 0
|
||||
m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
|
||||
}
|
||||
|
||||
func (m *metric) SetSuffix(suffix string) {
|
||||
m.hashID = 0
|
||||
m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
|
||||
}
|
||||
|
||||
func (m *metric) AddTag(key, value string) {
|
||||
m.RemoveTag(key)
|
||||
m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
|
||||
}
|
||||
|
||||
func (m *metric) HasTag(key string) bool {
|
||||
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
|
||||
if i == -1 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *metric) RemoveTag(key string) {
|
||||
m.hashID = 0
|
||||
|
||||
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
|
||||
if i == -1 {
|
||||
return
|
||||
}
|
||||
|
||||
tmp := m.tags[0 : i-1]
|
||||
j := indexUnescapedByte(m.tags[i:], ',')
|
||||
if j != -1 {
|
||||
tmp = append(tmp, m.tags[i+j:]...)
|
||||
}
|
||||
m.tags = tmp
|
||||
return
|
||||
}
|
||||
|
||||
func (m *metric) AddField(key string, value interface{}) {
|
||||
m.fields = append(m.fields, ',')
|
||||
m.fields = appendField(m.fields, key, value)
|
||||
}
|
||||
|
||||
func (m *metric) HasField(key string) bool {
|
||||
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
|
||||
if i == -1 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *metric) RemoveField(key string) error {
|
||||
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
|
||||
if i == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var tmp []byte
|
||||
if i != 0 {
|
||||
tmp = m.fields[0 : i-1]
|
||||
}
|
||||
j := indexUnescapedByte(m.fields[i:], ',')
|
||||
if j != -1 {
|
||||
tmp = append(tmp, m.fields[i+j:]...)
|
||||
}
|
||||
|
||||
if len(tmp) == 0 {
|
||||
return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
|
||||
}
|
||||
|
||||
m.fields = tmp
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *metric) Copy() telegraf.Metric {
|
||||
return copyWith(m.name, m.tags, m.fields, m.t)
|
||||
}
|
||||
|
||||
func copyWith(name, tags, fields, t []byte) telegraf.Metric {
|
||||
out := metric{
|
||||
name: make([]byte, len(name)),
|
||||
tags: make([]byte, len(tags)),
|
||||
fields: make([]byte, len(fields)),
|
||||
t: make([]byte, len(t)),
|
||||
}
|
||||
copy(out.name, name)
|
||||
copy(out.tags, tags)
|
||||
copy(out.fields, fields)
|
||||
copy(out.t, t)
|
||||
return &out
|
||||
}
|
||||
|
||||
func (m *metric) HashID() uint64 {
|
||||
if m.hashID == 0 {
|
||||
h := fnv.New64a()
|
||||
h.Write(m.name)
|
||||
|
||||
tags := m.Tags()
|
||||
tmp := make([]string, len(tags))
|
||||
i := 0
|
||||
for k, v := range tags {
|
||||
tmp[i] = k + v
|
||||
i++
|
||||
}
|
||||
sort.Strings(tmp)
|
||||
|
||||
for _, s := range tmp {
|
||||
h.Write([]byte(s))
|
||||
}
|
||||
|
||||
m.hashID = h.Sum64()
|
||||
h := fnv.New64a()
|
||||
h.Write([]byte(m.name))
|
||||
h.Write([]byte("\n"))
|
||||
for _, tag := range m.tags {
|
||||
h.Write([]byte(tag.Key))
|
||||
h.Write([]byte("\n"))
|
||||
h.Write([]byte(tag.Value))
|
||||
h.Write([]byte("\n"))
|
||||
}
|
||||
return m.hashID
|
||||
return h.Sum64()
|
||||
}
|
||||
|
||||
func appendField(b []byte, k string, v interface{}) []byte {
|
||||
if v == nil {
|
||||
return b
|
||||
}
|
||||
b = append(b, []byte(escape(k, "tagkey")+"=")...)
|
||||
|
||||
// check popular types first
|
||||
// Convert field to a supported type or nil if unconvertible
|
||||
func convertField(v interface{}) interface{} {
|
||||
switch v := v.(type) {
|
||||
case float64:
|
||||
b = strconv.AppendFloat(b, v, 'f', -1, 64)
|
||||
return v
|
||||
case int64:
|
||||
b = strconv.AppendInt(b, v, 10)
|
||||
b = append(b, 'i')
|
||||
return v
|
||||
case string:
|
||||
b = append(b, '"')
|
||||
b = append(b, []byte(escape(v, "fieldval"))...)
|
||||
b = append(b, '"')
|
||||
return v
|
||||
case bool:
|
||||
b = strconv.AppendBool(b, v)
|
||||
case int32:
|
||||
b = strconv.AppendInt(b, int64(v), 10)
|
||||
b = append(b, 'i')
|
||||
case int16:
|
||||
b = strconv.AppendInt(b, int64(v), 10)
|
||||
b = append(b, 'i')
|
||||
case int8:
|
||||
b = strconv.AppendInt(b, int64(v), 10)
|
||||
b = append(b, 'i')
|
||||
return v
|
||||
case int:
|
||||
b = strconv.AppendInt(b, int64(v), 10)
|
||||
b = append(b, 'i')
|
||||
case uint64:
|
||||
// Cap uints above the maximum int value
|
||||
var intv int64
|
||||
if v <= uint64(MaxInt) {
|
||||
intv = int64(v)
|
||||
} else {
|
||||
intv = int64(MaxInt)
|
||||
}
|
||||
b = strconv.AppendInt(b, intv, 10)
|
||||
b = append(b, 'i')
|
||||
case uint32:
|
||||
b = strconv.AppendInt(b, int64(v), 10)
|
||||
b = append(b, 'i')
|
||||
case uint16:
|
||||
b = strconv.AppendInt(b, int64(v), 10)
|
||||
b = append(b, 'i')
|
||||
case uint8:
|
||||
b = strconv.AppendInt(b, int64(v), 10)
|
||||
b = append(b, 'i')
|
||||
return int64(v)
|
||||
case uint:
|
||||
// Cap uints above the maximum int value
|
||||
var intv int64
|
||||
if v <= uint(MaxInt) {
|
||||
intv = int64(v)
|
||||
} else {
|
||||
intv = int64(MaxInt)
|
||||
}
|
||||
b = strconv.AppendInt(b, intv, 10)
|
||||
b = append(b, 'i')
|
||||
case float32:
|
||||
b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
|
||||
return uint64(v)
|
||||
case uint64:
|
||||
return uint64(v)
|
||||
case []byte:
|
||||
b = append(b, v...)
|
||||
return string(v)
|
||||
case int32:
|
||||
return int64(v)
|
||||
case int16:
|
||||
return int64(v)
|
||||
case int8:
|
||||
return int64(v)
|
||||
case uint32:
|
||||
return uint64(v)
|
||||
case uint16:
|
||||
return uint64(v)
|
||||
case uint8:
|
||||
return uint64(v)
|
||||
case float32:
|
||||
return float64(v)
|
||||
default:
|
||||
// Can't determine the type, so convert to string
|
||||
b = append(b, '"')
|
||||
b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
|
||||
b = append(b, '"')
|
||||
return nil
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
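The convertField switch in the hunk above normalizes incoming field values: narrow integer and float types are widened, []byte becomes string, and unsupported types are dropped by returning nil. Here is a standalone sketch of that normalization idea; it is not the exact telegraf function.

package main

import "fmt"

// normalizeField widens numeric types and converts []byte, returning nil for
// anything it does not recognise, mirroring the convertField behaviour shown
// in the diff above.
func normalizeField(v interface{}) interface{} {
	switch v := v.(type) {
	case float64, int64, uint64, string, bool:
		return v
	case int:
		return int64(v)
	case int32:
		return int64(v)
	case int16:
		return int64(v)
	case int8:
		return int64(v)
	case uint:
		return uint64(v)
	case uint32:
		return uint64(v)
	case uint16:
		return uint64(v)
	case uint8:
		return uint64(v)
	case float32:
		return float64(v)
	case []byte:
		return string(v)
	default:
		return nil
	}
}

func main() {
	fmt.Println(normalizeField(int8(3)))     // widened to int64
	fmt.Println(normalizeField([]byte("x"))) // converted to "x"
	fmt.Println(normalizeField([]int{1, 2})) // unsupported -> <nil>
}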
@@ -1,148 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
// vars for making sure that the compiler doesn't optimize out the benchmarks:
|
||||
var (
|
||||
s string
|
||||
I interface{}
|
||||
tags map[string]string
|
||||
fields map[string]interface{}
|
||||
)
|
||||
|
||||
func BenchmarkNewMetric(b *testing.B) {
|
||||
var mt telegraf.Metric
|
||||
for n := 0; n < b.N; n++ {
|
||||
mt, _ = New("test_metric",
|
||||
map[string]string{
|
||||
"test_tag_1": "tag_value_1",
|
||||
"test_tag_2": "tag_value_2",
|
||||
"test_tag_3": "tag_value_3",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"string_field": "string",
|
||||
"int_field": int64(1000),
|
||||
"float_field": float64(2.1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
}
|
||||
s = string(mt.String())
|
||||
}
|
||||
|
||||
func BenchmarkAddTag(b *testing.B) {
|
||||
var mt telegraf.Metric
|
||||
mt = &metric{
|
||||
name: []byte("cpu"),
|
||||
tags: []byte(",host=localhost"),
|
||||
fields: []byte("a=101"),
|
||||
t: []byte("1480614053000000000"),
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
mt.AddTag("foo", "bar")
|
||||
}
|
||||
s = string(mt.String())
|
||||
}
|
||||
|
||||
func BenchmarkSplit(b *testing.B) {
|
||||
var mt telegraf.Metric
|
||||
mt = &metric{
|
||||
name: []byte("cpu"),
|
||||
tags: []byte(",host=localhost"),
|
||||
fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
|
||||
t: []byte("1480614053000000000"),
|
||||
}
|
||||
var metrics []telegraf.Metric
|
||||
for n := 0; n < b.N; n++ {
|
||||
metrics = mt.Split(60)
|
||||
}
|
||||
s = string(metrics[0].String())
|
||||
}
|
||||
|
||||
func BenchmarkTags(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
var mt, _ = New("test_metric",
|
||||
map[string]string{
|
||||
"test_tag_1": "tag_value_1",
|
||||
"test_tag_2": "tag_value_2",
|
||||
"test_tag_3": "tag_value_3",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"string_field": "string",
|
||||
"int_field": int64(1000),
|
||||
"float_field": float64(2.1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
tags = mt.Tags()
|
||||
}
|
||||
s = fmt.Sprint(tags)
|
||||
}
|
||||
|
||||
func BenchmarkFields(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
var mt, _ = New("test_metric",
|
||||
map[string]string{
|
||||
"test_tag_1": "tag_value_1",
|
||||
"test_tag_2": "tag_value_2",
|
||||
"test_tag_3": "tag_value_3",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"string_field": "string",
|
||||
"int_field": int64(1000),
|
||||
"float_field": float64(2.1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
fields = mt.Fields()
|
||||
}
|
||||
s = fmt.Sprint(fields)
|
||||
}
|
||||
|
||||
func BenchmarkString(b *testing.B) {
|
||||
mt, _ := New("test_metric",
|
||||
map[string]string{
|
||||
"test_tag_1": "tag_value_1",
|
||||
"test_tag_2": "tag_value_2",
|
||||
"test_tag_3": "tag_value_3",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"string_field": "string",
|
||||
"int_field": int64(1000),
|
||||
"float_field": float64(2.1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
var S string
|
||||
for n := 0; n < b.N; n++ {
|
||||
S = mt.String()
|
||||
}
|
||||
s = S
|
||||
}
|
||||
|
||||
func BenchmarkSerialize(b *testing.B) {
|
||||
mt, _ := New("test_metric",
|
||||
map[string]string{
|
||||
"test_tag_1": "tag_value_1",
|
||||
"test_tag_2": "tag_value_2",
|
||||
"test_tag_3": "tag_value_3",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"string_field": "string",
|
||||
"int_field": int64(1000),
|
||||
"float_field": float64(2.1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
var B []byte
|
||||
for n := 0; n < b.N; n++ {
|
||||
B = mt.Serialize()
|
||||
}
|
||||
s = string(B)
|
||||
}
|
||||
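The removed benchmark file drives New, AddTag, Split, Tags, Fields, String and Serialize through testing.B loops, assigning results to package-level variables so the compiler cannot optimize the work away. A compact sketch of that benchmark pattern for the constructor follows; it is a hypothetical file written for illustration, not part of this diff.

package metric_test

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf/metric"
)

// sink keeps the result alive across iterations, mirroring the package-level
// vars the removed file used to defeat dead-code elimination.
var sink string

func BenchmarkNewMetricSketch(b *testing.B) {
	for n := 0; n < b.N; n++ {
		m, _ := metric.New(
			"test_metric",
			map[string]string{"test_tag_1": "tag_value_1"},
			map[string]interface{}{"int_field": int64(1000)},
			time.Now(),
		)
		sink = m.Name()
	}
}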
@@ -1,14 +1,10 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -25,102 +21,185 @@ func TestNewMetric(t *testing.T) {
|
||||
"usage_busy": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, telegraf.Untyped, m.Type())
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
require.Equal(t, "cpu", m.Name())
|
||||
require.Equal(t, tags, m.Tags())
|
||||
require.Equal(t, fields, m.Fields())
|
||||
require.Equal(t, 2, len(m.FieldList()))
|
||||
require.Equal(t, now, m.Time())
|
||||
}
|
||||
|
||||
func TestNewErrors(t *testing.T) {
|
||||
// creating a metric with an empty name produces an error:
|
||||
m, err := New(
|
||||
"",
|
||||
map[string]string{
|
||||
"datacenter": "us-east-1",
|
||||
"mytag": "foo",
|
||||
"another": "tag",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, m)
|
||||
|
||||
// creating a metric with empty fields produces an error:
|
||||
m, err = New(
|
||||
"foobar",
|
||||
map[string]string{
|
||||
"datacenter": "us-east-1",
|
||||
"mytag": "foo",
|
||||
"another": "tag",
|
||||
},
|
||||
map[string]interface{}{},
|
||||
time.Now(),
|
||||
)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, m)
|
||||
}
|
||||
|
||||
func TestNewMetric_Tags(t *testing.T) {
|
||||
now := time.Now()
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"datacenter": "us-east-1",
|
||||
}
|
||||
func baseMetric() telegraf.Metric {
|
||||
tags := map[string]string{}
|
||||
fields := map[string]interface{}{
|
||||
"value": float64(1),
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
assert.True(t, m.HasTag("host"))
|
||||
assert.True(t, m.HasTag("datacenter"))
|
||||
func TestHasTag(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
m.AddTag("newtag", "foo")
|
||||
assert.True(t, m.HasTag("newtag"))
|
||||
require.False(t, m.HasTag("host"))
|
||||
m.AddTag("host", "localhost")
|
||||
require.True(t, m.HasTag("host"))
|
||||
m.RemoveTag("host")
|
||||
require.False(t, m.HasTag("host"))
|
||||
}
|
||||
|
||||
func TestAddTagOverwrites(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
m.AddTag("host", "localhost")
|
||||
m.AddTag("host", "example.org")
|
||||
|
||||
value, ok := m.GetTag("host")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "example.org", value)
|
||||
require.Equal(t, 1, len(m.TagList()))
|
||||
}
|
||||
|
||||
func TestRemoveTagNoEffectOnMissingTags(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
m.RemoveTag("foo")
|
||||
m.AddTag("a", "x")
|
||||
m.RemoveTag("foo")
|
||||
m.RemoveTag("bar")
|
||||
value, ok := m.GetTag("a")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "x", value)
|
||||
}
|
||||
|
||||
func TestGetTag(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
value, ok := m.GetTag("host")
|
||||
require.False(t, ok)
|
||||
|
||||
m.AddTag("host", "localhost")
|
||||
|
||||
value, ok = m.GetTag("host")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "localhost", value)
|
||||
|
||||
m.RemoveTag("host")
|
||||
assert.False(t, m.HasTag("host"))
|
||||
assert.True(t, m.HasTag("newtag"))
|
||||
assert.True(t, m.HasTag("datacenter"))
|
||||
|
||||
m.RemoveTag("datacenter")
|
||||
assert.False(t, m.HasTag("datacenter"))
|
||||
assert.True(t, m.HasTag("newtag"))
|
||||
assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())
|
||||
|
||||
m.RemoveTag("newtag")
|
||||
assert.False(t, m.HasTag("newtag"))
|
||||
assert.Equal(t, map[string]string{}, m.Tags())
|
||||
|
||||
assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
|
||||
value, ok = m.GetTag("host")
|
||||
require.False(t, ok)
|
||||
}
|
||||
|
||||
func TestSerialize(t *testing.T) {
|
||||
func TestHasField(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
require.False(t, m.HasField("x"))
|
||||
m.AddField("x", 42.0)
|
||||
require.True(t, m.HasField("x"))
|
||||
m.RemoveTag("x")
|
||||
require.False(t, m.HasTag("x"))
|
||||
}
|
||||
|
||||
func TestAddFieldOverwrites(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
m.AddField("value", 1.0)
|
||||
m.AddField("value", 42.0)
|
||||
|
||||
value, ok := m.GetField("value")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, 42.0, value)
|
||||
}
|
||||
|
||||
func TestAddFieldChangesType(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
m.AddField("value", 1.0)
|
||||
m.AddField("value", "xyzzy")
|
||||
|
||||
value, ok := m.GetField("value")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "xyzzy", value)
|
||||
}
|
||||
|
||||
func TestRemoveFieldNoEffectOnMissingFields(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
m.RemoveField("foo")
|
||||
m.AddField("a", "x")
|
||||
m.RemoveField("foo")
|
||||
m.RemoveField("bar")
|
||||
value, ok := m.GetField("a")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "x", value)
|
||||
}
|
||||
|
||||
func TestGetField(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
value, ok := m.GetField("foo")
|
||||
require.False(t, ok)
|
||||
|
||||
m.AddField("foo", "bar")
|
||||
|
||||
value, ok = m.GetField("foo")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "bar", value)
|
||||
|
||||
m.RemoveTag("foo")
|
||||
value, ok = m.GetTag("foo")
|
||||
require.False(t, ok)
|
||||
}
|
||||
|
||||
func TestTagList_Sorted(t *testing.T) {
|
||||
m := baseMetric()
|
||||
|
||||
m.AddTag("b", "y")
|
||||
m.AddTag("c", "z")
|
||||
m.AddTag("a", "x")
|
||||
|
||||
taglist := m.TagList()
|
||||
require.Equal(t, "a", taglist[0].Key)
|
||||
require.Equal(t, "b", taglist[1].Key)
|
||||
require.Equal(t, "c", taglist[2].Key)
|
||||
}
|
||||
|
||||
func TestEquals(t *testing.T) {
|
||||
now := time.Now()
|
||||
tags := map[string]string{
|
||||
"datacenter": "us-east-1",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"value": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
m1, err := New("cpu",
|
||||
map[string]string{
|
||||
"host": "localhost",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 42.0,
|
||||
},
|
||||
now,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t,
|
||||
[]byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
|
||||
m.Serialize())
|
||||
m2, err := New("cpu",
|
||||
map[string]string{
|
||||
"host": "localhost",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": 42.0,
|
||||
},
|
||||
now,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
m.RemoveTag("datacenter")
|
||||
assert.Equal(t,
|
||||
[]byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
|
||||
m.Serialize())
|
||||
lhs := m1.(*metric)
|
||||
require.Equal(t, lhs, m2)
|
||||
|
||||
m3 := m2.Copy()
|
||||
require.Equal(t, lhs, m3)
|
||||
m3.AddTag("a", "x")
|
||||
require.NotEqual(t, lhs, m3)
|
||||
}
|
||||
|
||||
func TestHashID(t *testing.T) {
|
||||
@@ -171,567 +250,88 @@ func TestHashID_Consistency(t *testing.T) {
|
||||
)
|
||||
hash := m.HashID()
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
m2, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"datacenter": "us-east-1",
|
||||
"mytag": "foo",
|
||||
"another": "tag",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
assert.Equal(t, hash, m2.HashID())
|
||||
}
|
||||
m2, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"datacenter": "us-east-1",
|
||||
"mytag": "foo",
|
||||
"another": "tag",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
assert.Equal(t, hash, m2.HashID())
|
||||
|
||||
m3 := m.Copy()
|
||||
assert.Equal(t, m2.HashID(), m3.HashID())
|
||||
}
|
||||
|
||||
func TestNewMetric_NameModifiers(t *testing.T) {
|
||||
func TestHashID_Delimiting(t *testing.T) {
|
||||
m1, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"a": "x",
|
||||
"b": "y",
|
||||
"c": "z",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
m2, _ := New(
|
||||
"cpu",
|
||||
map[string]string{
|
||||
"a": "xbycz",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"value": float64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
assert.NotEqual(t, m1.HashID(), m2.HashID())
|
||||
}
|
||||
|
||||
func TestSetName(t *testing.T) {
|
||||
m := baseMetric()
|
||||
m.SetName("foo")
|
||||
require.Equal(t, "foo", m.Name())
|
||||
}
|
||||
|
||||
func TestAddPrefix(t *testing.T) {
|
||||
m := baseMetric()
|
||||
m.AddPrefix("foo_")
|
||||
require.Equal(t, "foo_cpu", m.Name())
|
||||
m.AddPrefix("foo_")
|
||||
require.Equal(t, "foo_foo_cpu", m.Name())
|
||||
}
|
||||
|
||||
func TestAddSuffix(t *testing.T) {
|
||||
m := baseMetric()
|
||||
m.AddSuffix("_foo")
|
||||
require.Equal(t, "cpu_foo", m.Name())
|
||||
m.AddSuffix("_foo")
|
||||
require.Equal(t, "cpu_foo_foo", m.Name())
|
||||
}
|
||||
|
||||
func TestValueType(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{}
|
||||
fields := map[string]interface{}{
|
||||
"value": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
hash := m.HashID()
|
||||
suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
|
||||
assert.Equal(t, "cpu"+suffix, m.String())
|
||||
|
||||
m.SetPrefix("pre_")
|
||||
assert.NotEqual(t, hash, m.HashID())
|
||||
hash = m.HashID()
|
||||
assert.Equal(t, "pre_cpu"+suffix, m.String())
|
||||
|
||||
m.SetSuffix("_post")
|
||||
assert.NotEqual(t, hash, m.HashID())
|
||||
hash = m.HashID()
|
||||
assert.Equal(t, "pre_cpu_post"+suffix, m.String())
|
||||
|
||||
m.SetName("mem")
|
||||
assert.NotEqual(t, hash, m.HashID())
|
||||
assert.Equal(t, "mem"+suffix, m.String())
|
||||
}
|
||||
|
||||
func TestNewMetric_FieldModifiers(t *testing.T) {
|
||||
now := time.Now()
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"value": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, m.HasField("value"))
|
||||
assert.False(t, m.HasField("foo"))
|
||||
|
||||
m.AddField("newfield", "foo")
|
||||
assert.True(t, m.HasField("newfield"))
|
||||
|
||||
assert.NoError(t, m.RemoveField("newfield"))
|
||||
assert.False(t, m.HasField("newfield"))
|
||||
|
||||
// don't allow user to remove all fields:
|
||||
assert.Error(t, m.RemoveField("value"))
|
||||
|
||||
m.AddField("value2", int64(101))
|
||||
assert.NoError(t, m.RemoveField("value"))
|
||||
assert.False(t, m.HasField("value"))
|
||||
}
|
||||
|
||||
func TestNewMetric_Fields(t *testing.T) {
|
||||
now := time.Now()
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(1),
|
||||
"int": int64(1),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
"quote_string": `x"y`,
|
||||
"backslash_quote_string": `x\"y`,
|
||||
"backslash": `x\y`,
|
||||
"ends_with_backslash": `x\`,
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
}
|
||||
|
||||
func TestNewMetric_Time(t *testing.T) {
|
||||
now := time.Now()
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(1),
|
||||
"int": int64(1),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
m = m.Copy()
|
||||
m2 := m.Copy()
|
||||
|
||||
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||
assert.Equal(t, now.UnixNano(), m2.UnixNano())
|
||||
}
|
||||
|
||||
func TestNewMetric_Copy(t *testing.T) {
|
||||
now := time.Now()
|
||||
tags := map[string]string{}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
m2 := m.Copy()
|
||||
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
|
||||
m.String())
|
||||
m.AddTag("host", "localhost")
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
|
||||
m.String())
|
||||
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
|
||||
m2.String())
|
||||
}
|
||||
|
||||
func TestNewMetric_AllTypes(t *testing.T) {
|
||||
now := time.Now()
|
||||
tags := map[string]string{}
|
||||
fields := map[string]interface{}{
|
||||
"float64": float64(1),
|
||||
"float32": float32(1),
|
||||
"int64": int64(1),
|
||||
"int32": int32(1),
|
||||
"int16": int16(1),
|
||||
"int8": int8(1),
|
||||
"int": int(1),
|
||||
"uint64": uint64(1),
|
||||
"uint32": uint32(1),
|
||||
"uint16": uint16(1),
|
||||
"uint8": uint8(1),
|
||||
"uint": uint(1),
|
||||
"bytes": []byte("foo"),
|
||||
"nil": nil,
|
||||
"maxuint64": uint64(MaxInt) + 10,
|
||||
"maxuint": uint(MaxInt) + 10,
|
||||
"unsupported": []int{1, 2},
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Contains(t, m.String(), "float64=1")
|
||||
assert.Contains(t, m.String(), "float32=1")
|
||||
assert.Contains(t, m.String(), "int64=1i")
|
||||
assert.Contains(t, m.String(), "int32=1i")
|
||||
assert.Contains(t, m.String(), "int16=1i")
|
||||
assert.Contains(t, m.String(), "int8=1i")
|
||||
assert.Contains(t, m.String(), "int=1i")
|
||||
assert.Contains(t, m.String(), "uint64=1i")
|
||||
assert.Contains(t, m.String(), "uint32=1i")
|
||||
assert.Contains(t, m.String(), "uint16=1i")
|
||||
assert.Contains(t, m.String(), "uint8=1i")
|
||||
assert.Contains(t, m.String(), "uint=1i")
|
||||
assert.NotContains(t, m.String(), "nil")
|
||||
assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
|
||||
assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
|
||||
}
|
||||
|
||||
func TestIndexUnescapedByte(t *testing.T) {
|
||||
tests := []struct {
|
||||
in []byte
|
||||
b byte
|
||||
expected int
|
||||
}{
|
||||
{
|
||||
in: []byte(`foobar`),
|
||||
b: 'b',
|
||||
expected: 3,
|
||||
},
|
||||
{
|
||||
in: []byte(`foo\bar`),
|
||||
b: 'b',
|
||||
expected: -1,
|
||||
},
|
||||
{
|
||||
in: []byte(`foo\\bar`),
|
||||
b: 'b',
|
||||
expected: -1,
|
||||
},
|
||||
{
|
||||
in: []byte(`foobar`),
|
||||
b: 'f',
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
in: []byte(`foobar`),
|
||||
b: 'r',
|
||||
expected: 5,
|
||||
},
|
||||
{
|
||||
in: []byte(`\foobar`),
|
||||
b: 'f',
|
||||
expected: -1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := indexUnescapedByte(test.in, test.b)
|
||||
assert.Equal(t, test.expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewGaugeMetric(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"datacenter": "us-east-1",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
"value": float64(42),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now, telegraf.Gauge)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, telegraf.Gauge, m.Type())
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
func TestNewCounterMetric(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"datacenter": "us-east-1",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now, telegraf.Counter)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, telegraf.Counter, m.Type())
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
// test splitting metric into various max lengths
|
||||
func TestSplitMetric(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
split80 := m.Split(80)
|
||||
assert.Len(t, split80, 2)
|
||||
|
||||
split70 := m.Split(70)
|
||||
assert.Len(t, split70, 3)
|
||||
|
||||
split60 := m.Split(60)
|
||||
assert.Len(t, split60, 5)
|
||||
}
|
||||
|
||||
// test splitting metric into various max lengths
|
||||
// use a simple regex check to verify that the split metrics are valid
|
||||
func TestSplitMetric_RegexVerify(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"foo": float64(98934259085),
|
||||
"bar": float64(19385292),
|
||||
"number": float64(19385292),
|
||||
"another": float64(19385292),
|
||||
"n": float64(19385292),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// verification regex
|
||||
re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)
|
||||
|
||||
split90 := m.Split(90)
|
||||
assert.Len(t, split90, 2)
|
||||
for _, splitM := range split90 {
|
||||
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
|
||||
}
|
||||
|
||||
split70 := m.Split(70)
|
||||
assert.Len(t, split70, 3)
|
||||
for _, splitM := range split70 {
|
||||
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
|
||||
}
|
||||
|
||||
split20 := m.Split(20)
|
||||
assert.Len(t, split20, 5)
|
||||
for _, splitM := range split20 {
|
||||
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
|
||||
}
|
||||
}
|
||||
|
||||
// test splitting metric even when given length is shorter than
|
||||
// shortest possible length
|
||||
// Split should split metric as short as possible, ie, 1 field per metric
|
||||
func TestSplitMetric_TooShort(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
split := m.Split(10)
|
||||
assert.Len(t, split, 5)
|
||||
strings := make([]string, 5)
|
||||
for i, splitM := range split {
|
||||
strings[i] = splitM.String()
|
||||
}
|
||||
|
||||
assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
|
||||
assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
|
||||
assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
|
||||
assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
|
||||
assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
|
||||
}
|
||||
|
||||
func TestSplitMetric_NoOp(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
split := m.Split(1000)
|
||||
assert.Len(t, split, 1)
|
||||
assert.Equal(t, m, split[0])
|
||||
}
|
||||
|
||||
func TestSplitMetric_OneField(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())
|
||||
|
||||
split := m.Split(1000)
|
||||
assert.Len(t, split, 1)
|
||||
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
|
||||
|
||||
split = m.Split(1)
|
||||
assert.Len(t, split, 1)
|
||||
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
|
||||
|
||||
split = m.Split(40)
|
||||
assert.Len(t, split, 1)
|
||||
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
|
||||
}
|
||||
|
||||
func TestSplitMetric_ExactSize(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
actual := m.Split(m.Len())
|
||||
// check that no copy was made
|
||||
require.Equal(t, &m, &actual[0])
|
||||
}
|
||||
|
||||
func TestSplitMetric_NoRoomForNewline(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
actual := m.Split(m.Len() - 1)
|
||||
require.Equal(t, 2, len(actual))
|
||||
}
|
||||
|
||||
func TestNewMetricAggregate(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.False(t, m.IsAggregate())
|
||||
m.SetAggregate(true)
|
||||
assert.True(t, m.IsAggregate())
|
||||
}
|
||||
|
||||
func TestNewMetricString(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
|
||||
now.UnixNano())
|
||||
assert.Equal(t, lineProto, m.String())
|
||||
}
|
||||
|
||||
func TestNewMetricFailNaN(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": math.NaN(),
|
||||
}
|
||||
|
||||
_, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestEmptyTagValueOrKey(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"emptytag": "",
|
||||
"": "valuewithoutkey",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
|
||||
assert.True(t, m.HasTag("host"))
|
||||
assert.False(t, m.HasTag("emptytag"))
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
|
||||
m.String())
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestNewMetric_TrailingSlash(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
fields map[string]interface{}
|
||||
}{
|
||||
{
|
||||
name: `cpu\`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
fields: map[string]interface{}{
|
||||
`value\`: "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
tags: map[string]string{
|
||||
`host\`: "localhost",
|
||||
},
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
tags: map[string]string{
|
||||
"host": `localhost\`,
|
||||
},
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
_, err := New(tc.name, tc.tags, tc.fields, now)
|
||||
assert.Error(t, err)
|
||||
}
}

func TestCopyAggreate(t *testing.T) {
|
||||
m1 := baseMetric()
|
||||
m1.SetAggregate(true)
|
||||
m2 := m1.Copy()
|
||||
assert.True(t, m2.IsAggregate())
|
||||
}
|
||||
|
||||
metric/parse.go (680 lines removed)
@@ -1,680 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrInvalidNumber = errors.New("invalid number")
|
||||
)
|
||||
|
||||
const (
|
||||
// the number of characters for the largest possible int64 (9223372036854775807)
|
||||
maxInt64Digits = 19
|
||||
|
||||
// the number of characters for the smallest possible int64 (-9223372036854775808)
|
||||
minInt64Digits = 20
|
||||
|
||||
// the number of characters required for the largest float64 before a range check
|
||||
// would occur during parsing
|
||||
maxFloat64Digits = 25
|
||||
|
||||
// the number of characters required for the smallest float64 before a range check
|
||||
// would occur during parsing
|
||||
minFloat64Digits = 27
|
||||
|
||||
MaxKeyLength = 65535
|
||||
)
|
||||
|
||||
// The following constants allow us to specify which state to move to
|
||||
// next, when scanning sections of a Point.
|
||||
const (
|
||||
tagKeyState = iota
|
||||
tagValueState
|
||||
fieldsState
|
||||
)
|
||||
|
||||
func Parse(buf []byte) ([]telegraf.Metric, error) {
|
||||
return ParseWithDefaultTimePrecision(buf, time.Now(), "")
|
||||
}
|
||||
|
||||
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
|
||||
return ParseWithDefaultTimePrecision(buf, t, "")
|
||||
}
|
||||
|
||||
func ParseWithDefaultTimePrecision(
|
||||
buf []byte,
|
||||
t time.Time,
|
||||
precision string,
|
||||
) ([]telegraf.Metric, error) {
|
||||
if len(buf) == 0 {
|
||||
return []telegraf.Metric{}, nil
|
||||
}
|
||||
if len(buf) <= 6 {
|
||||
return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
|
||||
}
|
||||
metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
|
||||
var errStr string
|
||||
i := 0
|
||||
for {
|
||||
j := bytes.IndexByte(buf[i:], '\n')
|
||||
if j == -1 {
|
||||
break
|
||||
}
|
||||
if len(buf[i:i+j]) < 2 {
|
||||
i += j + 1 // increment i past the previous newline
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := parseMetric(buf[i:i+j], t, precision)
|
||||
if err != nil {
|
||||
i += j + 1 // increment i past the previous newline
|
||||
errStr += " " + err.Error()
|
||||
continue
|
||||
}
|
||||
i += j + 1 // increment i past the previous newline
|
||||
|
||||
metrics = append(metrics, m)
|
||||
}
|
||||
|
||||
if len(errStr) > 0 {
|
||||
return metrics, fmt.Errorf(errStr)
|
||||
}
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
func parseMetric(buf []byte,
|
||||
defaultTime time.Time,
|
||||
precision string,
|
||||
) (telegraf.Metric, error) {
|
||||
var dTime string
|
||||
// scan the first block which is measurement[,tag1=value1,tag2=value2...]
|
||||
pos, key, err := scanKey(buf, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// measurement name is required
|
||||
if len(key) == 0 {
|
||||
return nil, fmt.Errorf("missing measurement")
|
||||
}
|
||||
|
||||
if len(key) > MaxKeyLength {
|
||||
return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
|
||||
}
|
||||
|
||||
// scan the second block which is field1=value1[,field2=value2,...]
|
||||
pos, fields, err := scanFields(buf, pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// at least one field is required
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("missing fields")
|
||||
}
|
||||
|
||||
// scan the last block which is an optional integer timestamp
|
||||
pos, ts, err := scanTime(buf, pos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// apply precision multiplier
|
||||
var nsec int64
|
||||
multiplier := getPrecisionMultiplier(precision)
|
||||
if len(ts) > 0 && multiplier > 1 {
|
||||
tsint, err := parseIntBytes(ts, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nsec = multiplier * tsint // assign the outer nsec rather than shadowing it
|
||||
ts = []byte(strconv.FormatInt(nsec, 10))
|
||||
}
|
||||
|
||||
m := &metric{
|
||||
fields: fields,
|
||||
t: ts,
|
||||
nsec: nsec,
|
||||
}
|
||||
|
||||
// parse out the measurement name
|
||||
// namei is the index at which the "name" ends
|
||||
namei := indexUnescapedByte(key, ',')
|
||||
if namei < 1 {
|
||||
// no tags
|
||||
m.name = key
|
||||
} else {
|
||||
m.name = key[0:namei]
|
||||
m.tags = key[namei:]
|
||||
}
|
||||
|
||||
if len(m.t) == 0 {
|
||||
if len(dTime) == 0 {
|
||||
dTime = fmt.Sprint(defaultTime.UnixNano())
|
||||
}
|
||||
// use default time
|
||||
m.t = []byte(dTime)
|
||||
}
|
||||
|
||||
// here we copy on return because this allows us to later call
|
||||
// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
|
||||
// modifying 'tag' bytes having an effect on 'field' bytes, for example.
|
||||
return m.Copy(), nil
|
||||
}
|
||||
|
||||
// scanKey scans buf starting at i for the measurement and tag portion of the point.
|
||||
// It returns the ending position and the byte slice of key within buf. If there
|
||||
// are tags, they will be sorted if they are not already.
|
||||
func scanKey(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
|
||||
// First scan the Point's measurement.
|
||||
state, i, err := scanMeasurement(buf, i)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
|
||||
// Optionally scan tags if needed.
|
||||
if state == tagKeyState {
|
||||
i, err = scanTags(buf, i)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
}
|
||||
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
// scanMeasurement examines the measurement part of a Point, returning
|
||||
// the next state to move to, and the current location in the buffer.
|
||||
func scanMeasurement(buf []byte, i int) (int, int, error) {
|
||||
// Check first byte of measurement, anything except a comma is fine.
|
||||
// It can't be a space, since whitespace is stripped prior to this
|
||||
// function call.
|
||||
if i >= len(buf) || buf[i] == ',' {
|
||||
return -1, i, makeError("missing measurement", buf, i)
|
||||
}
|
||||
|
||||
for {
|
||||
i++
|
||||
if i >= len(buf) {
|
||||
// cpu
|
||||
return -1, i, makeError("missing fields", buf, i)
|
||||
}
|
||||
|
||||
if buf[i-1] == '\\' {
|
||||
// Skip character (it's escaped).
|
||||
continue
|
||||
}
|
||||
|
||||
// Unescaped comma; move onto scanning the tags.
|
||||
if buf[i] == ',' {
|
||||
return tagKeyState, i + 1, nil
|
||||
}
|
||||
|
||||
// Unescaped space; move onto scanning the fields.
|
||||
if buf[i] == ' ' {
|
||||
// cpu value=1.0
|
||||
return fieldsState, i, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTags examines all the tags in a Point, keeping track of and
|
||||
// returning the updated indices slice, number of commas and location
|
||||
// in buf where to start examining the Point fields.
|
||||
func scanTags(buf []byte, i int) (int, error) {
|
||||
var (
|
||||
err error
|
||||
state = tagKeyState
|
||||
)
|
||||
|
||||
for {
|
||||
switch state {
|
||||
case tagKeyState:
|
||||
i, err = scanTagsKey(buf, i)
|
||||
state = tagValueState // tag value always follows a tag key
|
||||
case tagValueState:
|
||||
state, i, err = scanTagsValue(buf, i)
|
||||
case fieldsState:
|
||||
return i, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTagsKey scans each character in a tag key.
|
||||
func scanTagsKey(buf []byte, i int) (int, error) {
|
||||
// First character of the key.
|
||||
if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
|
||||
// cpu,{'', ' ', ',', '='}
|
||||
return i, makeError("missing tag key", buf, i)
|
||||
}
|
||||
|
||||
// Examine each character in the tag key until we hit an unescaped
|
||||
// equals (the tag value), or we hit an error (i.e., unescaped
|
||||
// space or comma).
|
||||
for {
|
||||
i++
|
||||
|
||||
// Either we reached the end of the buffer or we hit an
|
||||
// unescaped comma or space.
|
||||
if i >= len(buf) ||
|
||||
((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
|
||||
// cpu,tag{'', ' ', ','}
|
||||
return i, makeError("missing tag value", buf, i)
|
||||
}
|
||||
|
||||
if buf[i] == '=' && buf[i-1] != '\\' {
|
||||
// cpu,tag=
|
||||
return i + 1, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanTagsValue scans each character in a tag value.
|
||||
func scanTagsValue(buf []byte, i int) (int, int, error) {
|
||||
// Tag value cannot be empty.
|
||||
if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
|
||||
// cpu,tag={',', ' '}
|
||||
return -1, i, makeError("missing tag value", buf, i)
|
||||
}
|
||||
|
||||
// Examine each character in the tag value until we hit an unescaped
|
||||
// comma (move onto next tag key), an unescaped space (move onto
|
||||
// fields), or we error out.
|
||||
for {
|
||||
i++
|
||||
if i >= len(buf) {
|
||||
// cpu,tag=value
|
||||
return -1, i, makeError("missing fields", buf, i)
|
||||
}
|
||||
|
||||
// An unescaped equals sign is an invalid tag value.
|
||||
if buf[i] == '=' && buf[i-1] != '\\' {
|
||||
// cpu,tag={'=', 'fo=o'}
|
||||
return -1, i, makeError("invalid tag format", buf, i)
|
||||
}
|
||||
|
||||
if buf[i] == ',' && buf[i-1] != '\\' {
|
||||
// cpu,tag=foo,
|
||||
return tagKeyState, i + 1, nil
|
||||
}
|
||||
|
||||
// cpu,tag=foo value=1.0
|
||||
// cpu, tag=foo\= value=1.0
|
||||
if buf[i] == ' ' && buf[i-1] != '\\' {
|
||||
return fieldsState, i, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanFields scans buf, starting at i for the fields section of a point. It returns
|
||||
// the ending position and the byte slice of the fields within buf
|
||||
func scanFields(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
|
||||
// track how many '"" we've seen since last '='
|
||||
quotes := 0
|
||||
|
||||
// tracks how many '=' we've seen
|
||||
equals := 0
|
||||
|
||||
// tracks how many commas we've seen
|
||||
commas := 0
|
||||
|
||||
for {
|
||||
// reached the end of buf?
|
||||
if i >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
// escaped characters?
|
||||
if buf[i] == '\\' && i+1 < len(buf) {
|
||||
i += 2
|
||||
continue
|
||||
}
|
||||
|
||||
// If the value is quoted, scan until we get to the end quote
|
||||
// Only quote values in the field value since quotes are not significant
|
||||
// in the field key
|
||||
if buf[i] == '"' && equals > commas {
|
||||
i++
|
||||
quotes++
|
||||
if quotes > 2 {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// If we see an =, ensure that there is at least one char before and after it
|
||||
if buf[i] == '=' && quotes != 1 {
|
||||
quotes = 0
|
||||
equals++
|
||||
|
||||
// check for "... =123" but allow "a\ =123"
|
||||
if buf[i-1] == ' ' && buf[i-2] != '\\' {
|
||||
return i, buf[start:i], makeError("missing field key", buf, i)
|
||||
}
|
||||
|
||||
// check for "...a=123,=456" but allow "a=123,a\,=456"
|
||||
if buf[i-1] == ',' && buf[i-2] != '\\' {
|
||||
return i, buf[start:i], makeError("missing field key", buf, i)
|
||||
}
|
||||
|
||||
// check for "... value="
|
||||
if i+1 >= len(buf) {
|
||||
return i, buf[start:i], makeError("missing field value", buf, i)
|
||||
}
|
||||
|
||||
// check for "... value=,value2=..."
|
||||
if buf[i+1] == ',' || buf[i+1] == ' ' {
|
||||
return i, buf[start:i], makeError("missing field value", buf, i)
|
||||
}
|
||||
|
||||
if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
|
||||
var err error
|
||||
i, err = scanNumber(buf, i+1)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// If next byte is not a double-quote, the value must be a boolean
|
||||
if buf[i+1] != '"' {
|
||||
var err error
|
||||
i, _, err = scanBoolean(buf, i+1)
|
||||
if err != nil {
|
||||
return i, buf[start:i], err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if buf[i] == ',' && quotes != 1 {
|
||||
commas++
|
||||
}
|
||||
|
||||
// reached end of block?
|
||||
if buf[i] == ' ' && quotes != 1 {
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if quotes != 0 && quotes != 2 {
|
||||
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
|
||||
}
|
||||
|
||||
// check that all field sections had key and values (e.g. prevent "a=1,b")
|
||||
if equals == 0 || commas != equals-1 {
|
||||
return i, buf[start:i], makeError("invalid field format", buf, i)
|
||||
}
|
||||
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
// scanTime scans buf, starting at i for the time section of a point. It
|
||||
// returns the ending position and the byte slice of the timestamp within buf
|
||||
// and an error if the timestamp is not in the correct numeric format.
|
||||
func scanTime(buf []byte, i int) (int, []byte, error) {
|
||||
start := skipWhitespace(buf, i)
|
||||
i = start
|
||||
|
||||
for {
|
||||
// reached the end of buf?
|
||||
if i >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
// Reached end of block or trailing whitespace?
|
||||
if buf[i] == '\n' || buf[i] == ' ' {
|
||||
break
|
||||
}
|
||||
|
||||
// Handle negative timestamps
|
||||
if i == start && buf[i] == '-' {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// Timestamps should be integers, make sure they are so we don't need
|
||||
// to actually parse the timestamp until needed.
|
||||
if buf[i] < '0' || buf[i] > '9' {
|
||||
return i, buf[start:i], makeError("invalid timestamp", buf, i)
|
||||
}
|
||||
i++
|
||||
}
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
func isNumeric(b byte) bool {
|
||||
return (b >= '0' && b <= '9') || b == '.'
|
||||
}
|
||||
|
||||
// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
|
||||
func scanNumber(buf []byte, i int) (int, error) {
|
||||
start := i
|
||||
var isInt bool
|
||||
|
||||
// Is negative number?
|
||||
if i < len(buf) && buf[i] == '-' {
|
||||
i++
|
||||
// There must be more characters now, as just '-' is illegal.
|
||||
if i == len(buf) {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
}
|
||||
|
||||
// tracks whether we've seen a decimal point
|
||||
decimal := false
|
||||
|
||||
// indicates the number is float in scientific notation
|
||||
scientific := false
|
||||
|
||||
for {
|
||||
if i >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
if buf[i] == ',' || buf[i] == ' ' {
|
||||
break
|
||||
}
|
||||
|
||||
if buf[i] == 'i' && i > start && !isInt {
|
||||
isInt = true
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
if buf[i] == '.' {
|
||||
// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
|
||||
if decimal {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
decimal = true
|
||||
}
|
||||
|
||||
// `e` is valid for floats but not as the first char
|
||||
if i > start && (buf[i] == 'e' || buf[i] == 'E') {
|
||||
scientific = true
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// + and - are only valid at this point if they follow an e (scientific notation)
|
||||
if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// NaN is an unsupported value
|
||||
if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
|
||||
if !isNumeric(buf[i]) {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if isInt && (decimal || scientific) {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
|
||||
numericDigits := i - start
|
||||
if isInt {
|
||||
numericDigits--
|
||||
}
|
||||
if decimal {
|
||||
numericDigits--
|
||||
}
|
||||
if buf[start] == '-' {
|
||||
numericDigits--
|
||||
}
|
||||
|
||||
if numericDigits == 0 {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
|
||||
// It's more common that numbers will be within min/max range for their type but we need to prevent
|
||||
// out of range numbers from being parsed successfully. This uses some simple heuristics to decide
|
||||
// if we should parse the number to the actual type. It does not do it all the time because it incurs
|
||||
// extra allocations and we end up converting the type again when writing points to disk.
|
||||
if isInt {
|
||||
// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
|
||||
if buf[i-1] != 'i' {
|
||||
return i, ErrInvalidNumber
|
||||
}
|
||||
// Parse the int to check bounds if the number of digits could be larger than the max range
|
||||
// We subtract 1 from the index to remove the `i` from our tests
|
||||
if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
|
||||
if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
|
||||
return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
|
||||
if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
|
||||
if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
|
||||
return i, makeError("invalid float", buf, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid boolean
|
||||
// is scanned.
|
||||
func scanBoolean(buf []byte, i int) (int, []byte, error) {
|
||||
start := i
|
||||
|
||||
if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
|
||||
return i, buf[start:i], makeError("invalid value", buf, i)
|
||||
}
|
||||
|
||||
i++
|
||||
for {
|
||||
if i >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
if buf[i] == ',' || buf[i] == ' ' {
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
// Single char bool (t, T, f, F) is ok
|
||||
if i-start == 1 {
|
||||
return i, buf[start:i], nil
|
||||
}
|
||||
|
||||
// length must be 4 for true or TRUE
|
||||
if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
|
||||
return i, buf[start:i], makeError("invalid boolean", buf, i)
|
||||
}
|
||||
|
||||
// length must be 5 for false or FALSE
|
||||
if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
|
||||
return i, buf[start:i], makeError("invalid boolean", buf, i)
|
||||
}
|
||||
|
||||
// Otherwise
|
||||
valid := false
|
||||
switch buf[start] {
|
||||
case 't':
|
||||
valid = bytes.Equal(buf[start:i], []byte("true"))
|
||||
case 'f':
|
||||
valid = bytes.Equal(buf[start:i], []byte("false"))
|
||||
case 'T':
|
||||
valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
|
||||
case 'F':
|
||||
valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
|
||||
}
|
||||
|
||||
if !valid {
|
||||
return i, buf[start:i], makeError("invalid boolean", buf, i)
|
||||
}
|
||||
|
||||
return i, buf[start:i], nil
|
||||
|
||||
}
|
||||
|
||||
// skipWhitespace returns the end position within buf, starting at i after
|
||||
// scanning over spaces in tags
|
||||
func skipWhitespace(buf []byte, i int) int {
|
||||
for i < len(buf) {
|
||||
if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// makeError is a helper function for making a metric parsing error.
|
||||
// reason is the reason why the error occurred.
|
||||
// buf should be the current buffer we are parsing.
|
||||
// i is the current index, to give some context on where in the buffer we are.
|
||||
func makeError(reason string, buf []byte, i int) error {
|
||||
return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
|
||||
reason, buf, i)
|
||||
}
|
||||
|
||||
// getPrecisionMultiplier will return a multiplier for the precision specified.
|
||||
func getPrecisionMultiplier(precision string) int64 {
|
||||
d := time.Nanosecond
|
||||
switch precision {
|
||||
case "u":
|
||||
d = time.Microsecond
|
||||
case "ms":
|
||||
d = time.Millisecond
|
||||
case "s":
|
||||
d = time.Second
|
||||
case "m":
|
||||
d = time.Minute
|
||||
case "h":
|
||||
d = time.Hour
|
||||
}
|
||||
return int64(d)
|
||||
}
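
For reference, a minimal sketch of how the parser above would typically be driven from outside this package (assuming it is importable as github.com/influxdata/telegraf/metric, which the import paths in this diff suggest; the sample points are illustrative only):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// Two line-protocol points; the second has no timestamp, so Parse
	// falls back to time.Now() for it.
	buf := []byte("cpu,host=localhost usage_idle=99 1480940990034083306\n" +
		"mem,host=localhost used_percent=31.5\n")

	metrics, err := metric.Parse(buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Print(m.String()) // each metric re-serializes as one line-protocol line
	}
}
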
|
||||
@@ -1,413 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const trues = `booltest b=T
|
||||
booltest b=t
|
||||
booltest b=True
|
||||
booltest b=TRUE
|
||||
booltest b=true
|
||||
`
|
||||
|
||||
const falses = `booltest b=F
|
||||
booltest b=f
|
||||
booltest b=False
|
||||
booltest b=FALSE
|
||||
booltest b=false
|
||||
`
|
||||
|
||||
const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
|
||||
w\,eather,host=local temp=99 1465839830100400200
|
||||
weather,location=us\,midwest temperature=82 1465839830100400200
|
||||
weather,location=us-midwest temp\=rature=82 1465839830100400200
|
||||
weather,location\ place=us-midwest temperature=82 1465839830100400200
|
||||
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
|
||||
`
|
||||
|
||||
const withTimestamps = `cpu usage=99 1480595849000000000
|
||||
cpu usage=99 1480595850000000000
|
||||
cpu usage=99 1480595851700030000
|
||||
cpu usage=99 1480595852000000300
|
||||
`
|
||||
|
||||
const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
|
||||
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
|
||||
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
|
||||
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
|
||||
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
|
||||
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
|
||||
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
|
||||
`
|
||||
|
||||
const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200
|
||||
`
|
||||
|
||||
// some metrics are invalid
|
||||
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
|
||||
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
|
||||
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
|
||||
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
|
||||
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
|
||||
cpu 1480595852000000300
|
||||
cpu usage=99 1480595852foobar300
|
||||
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
|
||||
`
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
start := time.Now()
|
||||
metrics, err := Parse([]byte(sevenMetrics))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 7)
|
||||
|
||||
// all metrics parsed together w/o a timestamp should have the same time.
|
||||
firstTime := metrics[0].Time()
|
||||
for _, m := range metrics {
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"idle": float64(99),
|
||||
"busy": int64(1),
|
||||
"b": true,
|
||||
"s": "string",
|
||||
},
|
||||
m.Fields(),
|
||||
)
|
||||
assert.Equal(t,
|
||||
map[string]string{
|
||||
"host": "foo",
|
||||
"datacenter": "us-east",
|
||||
},
|
||||
m.Tags(),
|
||||
)
|
||||
assert.True(t, m.Time().After(start))
|
||||
assert.True(t, m.Time().Equal(firstTime))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNegNumbers(t *testing.T) {
|
||||
metrics, err := Parse([]byte(negMetrics))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 1)
|
||||
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"temp": int64(-99),
|
||||
"temp_float": float64(-99.4),
|
||||
},
|
||||
metrics[0].Fields(),
|
||||
)
|
||||
assert.Equal(t,
|
||||
map[string]string{
|
||||
"host": "local",
|
||||
},
|
||||
metrics[0].Tags(),
|
||||
)
|
||||
}
|
||||
|
||||
func TestParseErrors(t *testing.T) {
|
||||
start := time.Now()
|
||||
metrics, err := Parse([]byte(someInvalid))
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, metrics, 4)
|
||||
|
||||
// all metrics parsed together w/o a timestamp should have the same time.
|
||||
firstTime := metrics[0].Time()
|
||||
for _, m := range metrics {
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
},
|
||||
m.Fields(),
|
||||
)
|
||||
assert.Equal(t,
|
||||
map[string]string{
|
||||
"host": "foo",
|
||||
"datacenter": "us-east",
|
||||
},
|
||||
m.Tags(),
|
||||
)
|
||||
assert.True(t, m.Time().After(start))
|
||||
assert.True(t, m.Time().Equal(firstTime))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseWithTimestamps(t *testing.T) {
|
||||
metrics, err := Parse([]byte(withTimestamps))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 4)
|
||||
|
||||
expectedTimestamps := []time.Time{
|
||||
time.Unix(0, 1480595849000000000),
|
||||
time.Unix(0, 1480595850000000000),
|
||||
time.Unix(0, 1480595851700030000),
|
||||
time.Unix(0, 1480595852000000300),
|
||||
}
|
||||
|
||||
// all metrics parsed together w/o a timestamp should have the same time.
|
||||
for i, m := range metrics {
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"usage": float64(99),
|
||||
},
|
||||
m.Fields(),
|
||||
)
|
||||
assert.True(t, m.Time().Equal(expectedTimestamps[i]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEscapes(t *testing.T) {
|
||||
metrics, err := Parse([]byte(withEscapes))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 6)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields map[string]interface{}
|
||||
tags map[string]string
|
||||
}{
|
||||
{
|
||||
name: `w, eather`,
|
||||
fields: map[string]interface{}{"temp": float64(99)},
|
||||
tags: map[string]string{"host": "local"},
|
||||
},
|
||||
{
|
||||
name: `w,eather`,
|
||||
fields: map[string]interface{}{"temp": float64(99)},
|
||||
tags: map[string]string{"host": "local"},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{"temperature": float64(82)},
|
||||
tags: map[string]string{"location": `us,midwest`},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{`temp=rature`: float64(82)},
|
||||
tags: map[string]string{"location": `us-midwest`},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{"temperature": float64(82)},
|
||||
tags: map[string]string{`location place`: `us-midwest`},
|
||||
},
|
||||
{
|
||||
name: `weather`,
|
||||
fields: map[string]interface{}{`temperature`: `too"hot"`},
|
||||
tags: map[string]string{"location": `us-midwest`},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
assert.Equal(t, test.name, metrics[i].Name())
|
||||
assert.Equal(t, test.fields, metrics[i].Fields())
|
||||
assert.Equal(t, test.tags, metrics[i].Tags())
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTrueBooleans(t *testing.T) {
|
||||
metrics, err := Parse([]byte(trues))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 5)
|
||||
|
||||
for _, metric := range metrics {
|
||||
assert.Equal(t, "booltest", metric.Name())
|
||||
assert.Equal(t, true, metric.Fields()["b"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFalseBooleans(t *testing.T) {
|
||||
metrics, err := Parse([]byte(falses))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, metrics, 5)
|
||||
|
||||
for _, metric := range metrics {
|
||||
assert.Equal(t, "booltest", metric.Name())
|
||||
assert.Equal(t, false, metric.Fields()["b"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointBadNumber(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"cpu v=- ",
|
||||
"cpu v=-i ",
|
||||
"cpu v=-. ",
|
||||
"cpu v=. ",
|
||||
"cpu v=1.0i ",
|
||||
"cpu v=1ii ",
|
||||
"cpu v=1a ",
|
||||
"cpu v=-e-e-e ",
|
||||
"cpu v=42+3 ",
|
||||
"cpu v= ",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTagsMissingParts(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
`cpu,host`,
|
||||
`cpu,host,`,
|
||||
`cpu,host=`,
|
||||
`cpu,f=oo=bar value=1`,
|
||||
`cpu,host value=1i`,
|
||||
`cpu,host=serverA,region value=1i`,
|
||||
`cpu,host=serverA,region= value=1i`,
|
||||
`cpu,host=serverA,region=,zone=us-west value=1i`,
|
||||
`cpu, value=1`,
|
||||
`cpu, ,,`,
|
||||
`cpu,,,`,
|
||||
`cpu,host=serverA,=us-east value=1i`,
|
||||
`cpu,host=serverAa\,,=us-east value=1i`,
|
||||
`cpu,host=serverA\,,=us-east value=1i`,
|
||||
`cpu, =serverA value=1i`,
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointWhitespace(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
`cpu value=1.0 1257894000000000000`,
|
||||
`cpu value=1.0 1257894000000000000`,
|
||||
`cpu value=1.0 1257894000000000000`,
|
||||
`cpu value=1.0 1257894000000000000 `,
|
||||
} {
|
||||
m, err := Parse([]byte(tt + "\n"))
|
||||
assert.NoError(t, err, tt)
|
||||
assert.Equal(t, "cpu", m[0].Name())
|
||||
assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointInvalidFields(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test,foo=bar a=101,=value",
|
||||
"test,foo=bar =value",
|
||||
"test,foo=bar a=101,key=",
|
||||
"test,foo=bar key=",
|
||||
`test,foo=bar a=101,b="foo`,
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePointNoFields(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"cpu_load_short,host=server01,region=us-west",
|
||||
"very_long_measurement_name",
|
||||
"cpu,host==",
|
||||
"============",
|
||||
"cpu",
|
||||
"cpu\n\n\n\n\n\n\n",
|
||||
" ",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
// a b=1 << this is the shortest possible metric
|
||||
// any shorter is just ignored
|
||||
func TestParseBufTooShort(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"",
|
||||
"a",
|
||||
"a ",
|
||||
"a b=",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidBooleans(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test b=tru",
|
||||
"test b=fals",
|
||||
"test b=faLse",
|
||||
"test q=foo",
|
||||
"test b=lambchops",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidNumbers(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test b=-",
|
||||
"test b=1.1.1",
|
||||
"test b=nan",
|
||||
"test b=9i10",
|
||||
"test b=9999999999999999999i",
|
||||
} {
|
||||
_, err := Parse([]byte(tt + "\n"))
|
||||
assert.Error(t, err, tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNegativeTimestamps(t *testing.T) {
|
||||
for _, tt := range []string{
|
||||
"test foo=101 -1257894000000000000",
|
||||
} {
|
||||
metrics, err := Parse([]byte(tt + "\n"))
|
||||
assert.NoError(t, err, tt)
|
||||
assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePrecision(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
line string
|
||||
precision string
|
||||
expected int64
|
||||
}{
|
||||
{"test v=42 1491847420", "s", 1491847420000000000},
|
||||
{"test v=42 1491847420123", "ms", 1491847420123000000},
|
||||
{"test v=42 1491847420123456", "u", 1491847420123456000},
|
||||
{"test v=42 1491847420123456789", "ns", 1491847420123456789},
|
||||
|
||||
{"test v=42 1491847420123456789", "1s", 1491847420123456789},
|
||||
{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
|
||||
} {
|
||||
metrics, err := ParseWithDefaultTimePrecision(
|
||||
[]byte(tt.line+"\n"), time.Now(), tt.precision)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, metrics[0].UnixNano())
|
||||
}
|
||||
}
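
As a quick arithmetic check of the precision handling exercised by the table above (this simply restates its first row): with precision "s", getPrecisionMultiplier returns int64(time.Second) == 1e9, so a timestamp of 1491847420 becomes 1491847420000000000 ns.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the "s" row of TestParsePrecision above.
	seconds := int64(1491847420)
	nanos := seconds * int64(time.Second) // getPrecisionMultiplier("s") == 1e9
	fmt.Println(nanos)                    // 1491847420000000000
}
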
|
||||
|
||||
func TestParsePrecisionUnsetTime(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
line string
|
||||
precision string
|
||||
}{
|
||||
{"test v=42", "s"},
|
||||
{"test v=42", "ns"},
|
||||
} {
|
||||
_, err := ParseWithDefaultTimePrecision(
|
||||
[]byte(tt.line+"\n"), time.Now(), tt.precision)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMaxKeyLength(t *testing.T) {
|
||||
key := ""
|
||||
for {
|
||||
if len(key) > MaxKeyLength {
|
||||
break
|
||||
}
|
||||
key += "test"
|
||||
}
|
||||
|
||||
_, err := Parse([]byte(key + " value=1\n"))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
metric/reader.go (159 lines removed)
@@ -1,159 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
type state int
|
||||
|
||||
const (
|
||||
_ state = iota
|
||||
// normal state copies whole metrics into the given buffer until we can't
|
||||
// fit the next metric.
|
||||
normal
|
||||
// split state means that we have a metric that we were able to split, so
|
||||
// that we can fit it into multiple metrics (and calls to Read)
|
||||
split
|
||||
// overflow state means that we have a metric that didn't fit into a single
|
||||
// buffer, and needs to be split across multiple calls to Read.
|
||||
overflow
|
||||
// splitOverflow state means that a split metric didn't fit into a single
|
||||
// buffer, and needs to be split across multiple calls to Read.
|
||||
splitOverflow
|
||||
// done means we're done reading metrics, and now always return (0, io.EOF)
|
||||
done
|
||||
)
|
||||
|
||||
type reader struct {
|
||||
metrics []telegraf.Metric
|
||||
splitMetrics []telegraf.Metric
|
||||
buf []byte
|
||||
state state
|
||||
|
||||
// metric index
|
||||
iM int
|
||||
// split metric index
|
||||
iSM int
|
||||
// buffer index
|
||||
iB int
|
||||
}
|
||||
|
||||
func NewReader(metrics []telegraf.Metric) io.Reader {
|
||||
return &reader{
|
||||
metrics: metrics,
|
||||
state: normal,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reader) Read(p []byte) (n int, err error) {
|
||||
var i int
|
||||
switch r.state {
|
||||
case done:
|
||||
return 0, io.EOF
|
||||
case normal:
|
||||
for {
|
||||
// this for-loop is the sunny-day scenario, where we are given a
|
||||
// buffer that is large enough to hold at least a single metric.
|
||||
// all of the cases below it are edge-cases.
|
||||
if r.metrics[r.iM].Len() <= len(p[i:]) {
|
||||
i += r.metrics[r.iM].SerializeTo(p[i:])
|
||||
} else {
|
||||
break
|
||||
}
|
||||
r.iM++
|
||||
if r.iM == len(r.metrics) {
|
||||
r.state = done
|
||||
return i, io.EOF
|
||||
}
|
||||
}
|
||||
|
||||
// if we haven't written any bytes, check if we can split the current
|
||||
// metric into multiple full metrics at a smaller size.
|
||||
if i == 0 {
|
||||
tmp := r.metrics[r.iM].Split(len(p))
|
||||
if len(tmp) > 1 {
|
||||
r.splitMetrics = tmp
|
||||
r.state = split
|
||||
if r.splitMetrics[0].Len() <= len(p) {
|
||||
i += r.splitMetrics[0].SerializeTo(p)
|
||||
r.iSM = 1
|
||||
} else {
|
||||
// splitting didn't quite work, so we'll drop down and
|
||||
// overflow the metric.
|
||||
r.state = normal
|
||||
r.iSM = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we haven't written any bytes and we're not at the end of the metrics
|
||||
// slice, then it means we have a single metric that is larger than the
|
||||
// provided buffer.
|
||||
if i == 0 {
|
||||
r.buf = r.metrics[r.iM].Serialize()
|
||||
i += copy(p, r.buf[r.iB:])
|
||||
r.iB += i
|
||||
r.state = overflow
|
||||
}
|
||||
|
||||
case split:
|
||||
if r.splitMetrics[r.iSM].Len() <= len(p) {
|
||||
// write the current split metric
|
||||
i += r.splitMetrics[r.iSM].SerializeTo(p)
|
||||
r.iSM++
|
||||
if r.iSM >= len(r.splitMetrics) {
|
||||
// done writing the current split metrics
|
||||
r.iSM = 0
|
||||
r.iM++
|
||||
if r.iM == len(r.metrics) {
|
||||
r.state = done
|
||||
return i, io.EOF
|
||||
}
|
||||
r.state = normal
|
||||
}
|
||||
} else {
|
||||
// This would only happen if we split the metric, and then a
|
||||
// subsequent buffer was smaller than the initial one given,
|
||||
// so that our split metric no longer fits.
|
||||
r.buf = r.splitMetrics[r.iSM].Serialize()
|
||||
i += copy(p, r.buf[r.iB:])
|
||||
r.iB += i
|
||||
r.state = splitOverflow
|
||||
}
|
||||
|
||||
case splitOverflow:
|
||||
i = copy(p, r.buf[r.iB:])
|
||||
r.iB += i
|
||||
if r.iB >= len(r.buf) {
|
||||
r.iB = 0
|
||||
r.iSM++
|
||||
if r.iSM == len(r.splitMetrics) {
|
||||
r.iM++
|
||||
if r.iM == len(r.metrics) {
|
||||
r.state = done
|
||||
return i, io.EOF
|
||||
}
|
||||
r.state = normal
|
||||
} else {
|
||||
r.state = split
|
||||
}
|
||||
}
|
||||
|
||||
case overflow:
|
||||
i = copy(p, r.buf[r.iB:])
|
||||
r.iB += i
|
||||
if r.iB >= len(r.buf) {
|
||||
r.iB = 0
|
||||
r.iM++
|
||||
if r.iM == len(r.metrics) {
|
||||
r.state = done
|
||||
return i, io.EOF
|
||||
}
|
||||
r.state = normal
|
||||
}
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
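
A minimal sketch of how this reader is intended to be consumed (the field values and the deliberately tiny buffer are illustrative; it assumes New and NewReader as defined in this package):

package main

import (
	"fmt"
	"io"
	"log"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

func main() {
	m, err := metric.New("cpu", map[string]string{"host": "localhost"},
		map[string]interface{}{"value": int64(42)}, time.Unix(0, 1481032190000000000))
	if err != nil {
		log.Fatal(err)
	}

	r := metric.NewReader([]telegraf.Metric{m})
	buf := make([]byte, 16) // smaller than one serialized metric, forcing the overflow path
	for {
		n, err := r.Read(buf)
		fmt.Print(string(buf[:n]))
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}
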
|
||||
@@ -1,713 +0,0 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func BenchmarkMetricReader(b *testing.B) {
|
||||
metrics := make([]telegraf.Metric, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
metrics[i], _ = New("foo", map[string]string{},
|
||||
map[string]interface{}{"value": int64(1)}, time.Now())
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
r := NewReader(metrics)
|
||||
io.Copy(ioutil.Discard, r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricReader(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
metrics := make([]telegraf.Metric, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
metrics[i], _ = New("foo", map[string]string{},
|
||||
map[string]interface{}{"value": int64(1)}, ts)
|
||||
}
|
||||
|
||||
r := NewReader(metrics)
|
||||
|
||||
buf := make([]byte, 35)
|
||||
for i := 0; i < 10; i++ {
|
||||
n, err := r.Read(buf)
|
||||
if err != nil {
|
||||
assert.True(t, err == io.EOF, err.Error())
|
||||
}
|
||||
assert.Equal(t, 33, n)
|
||||
assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n]))
|
||||
}
|
||||
|
||||
// reader should now be done, and always return 0, io.EOF
|
||||
for i := 0; i < 10; i++ {
|
||||
n, err := r.Read(buf)
|
||||
assert.True(t, err == io.EOF, err.Error())
|
||||
assert.Equal(t, 0, n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricReader_OverflowMetric(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"value": int64(10)}, ts)
|
||||
metrics := []telegraf.Metric{m}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 5)
|
||||
|
||||
tests := []struct {
|
||||
exp string
|
||||
err error
|
||||
n int
|
||||
}{
|
||||
{
|
||||
"foo v",
|
||||
nil,
|
||||
5,
|
||||
},
|
||||
{
|
||||
"alue=",
|
||||
nil,
|
||||
5,
|
||||
},
|
||||
{
|
||||
"10i 1",
|
||||
nil,
|
||||
5,
|
||||
},
|
||||
{
|
||||
"48103",
|
||||
nil,
|
||||
5,
|
||||
},
|
||||
{
|
||||
"21900",
|
||||
nil,
|
||||
5,
|
||||
},
|
||||
{
|
||||
"00000",
|
||||
nil,
|
||||
5,
|
||||
},
|
||||
{
|
||||
"000\n",
|
||||
io.EOF,
|
||||
4,
|
||||
},
|
||||
{
|
||||
"",
|
||||
io.EOF,
|
||||
0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
n, err := r.Read(buf)
|
||||
assert.Equal(t, test.n, n)
|
||||
assert.Equal(t, test.exp, string(buf[0:n]))
|
||||
assert.Equal(t, test.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a metric is the same size as the buffer.
|
||||
//
|
||||
// Previously EOF would not be set until the next call to Read.
|
||||
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"a": int64(1)}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, m1.Len())
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF (or the input buffer is 0 length)
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a metric requires to be split and one of the
|
||||
// split metrics is exactly the size of the buffer.
|
||||
//
|
||||
// Previously an empty string would be returned on the next Read without error,
|
||||
// and then next Read call would panic.
|
||||
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 30)
|
||||
|
||||
// foo a=1i,bb=2i 1481032190000000000\n // len 35
|
||||
//
|
||||
// Requires this specific split order:
|
||||
// foo a=1i 1481032190000000000\n // len 29
|
||||
// foo bb=2i 1481032190000000000\n // len 30
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF (or the input buffer is 0 length)
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a metric requires to be split and one of the
|
||||
// split metrics is larger than the buffer.
|
||||
//
|
||||
// Previously the metric index would be set incorrectly causing a panic.
|
||||
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{
|
||||
"a": int64(1),
|
||||
"bbb": int64(2),
|
||||
}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 30)
|
||||
|
||||
// foo a=1i,bbb=2i 1481032190000000000\n // len 36
|
||||
//
|
||||
// foo a=1i 1481032190000000000\n // len 29
|
||||
// foo bbb=2i 1481032190000000000\n // len 31
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF (or the input buffer is 0 length)
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for when a split metric exactly fits in the buffer.
|
||||
//
|
||||
// Previously the metric would be overflow split when not required.
|
||||
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 29)
|
||||
|
||||
// foo a=1i,b=2i 1481032190000000000\n // len 34
|
||||
//
|
||||
// foo a=1i 1481032190000000000\n // len 29
|
||||
// foo b=2i 1481032190000000000\n // len 29
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
// Should never read 0 bytes unless at EOF (or the input buffer is 0 length)
|
||||
if n == 0 {
|
||||
require.Equal(t, io.EOF, err)
|
||||
break
|
||||
}
|
||||
// Lines should be terminated with a LF
|
||||
if err == io.EOF {
|
||||
require.Equal(t, uint8('\n'), buf[n-1])
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{"value": int64(10)}, ts)
|
||||
metrics := []telegraf.Metric{m, m.Copy()}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 10)
|
||||
|
||||
tests := []struct {
|
||||
exp string
|
||||
err error
|
||||
n int
|
||||
}{
|
||||
{
|
||||
"foo value=",
|
||||
nil,
|
||||
10,
|
||||
},
|
||||
{
|
||||
"10i 148103",
|
||||
nil,
|
||||
10,
|
||||
},
|
||||
{
|
||||
"2190000000",
|
||||
nil,
|
||||
10,
|
||||
},
|
||||
{
|
||||
"000\n",
|
||||
nil,
|
||||
4,
|
||||
},
|
||||
{
|
||||
"foo value=",
|
||||
nil,
|
||||
10,
|
||||
},
|
||||
{
|
||||
"10i 148103",
|
||||
nil,
|
||||
10,
|
||||
},
|
||||
{
|
||||
"2190000000",
|
||||
nil,
|
||||
10,
|
||||
},
|
||||
{
|
||||
"000\n",
|
||||
io.EOF,
|
||||
4,
|
||||
},
|
||||
{
|
||||
"",
|
||||
io.EOF,
|
||||
0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
n, err := r.Read(buf)
|
||||
assert.Equal(t, test.n, n)
|
||||
assert.Equal(t, test.exp, string(buf[0:n]))
|
||||
assert.Equal(t, test.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// test splitting a metric
|
||||
func TestMetricReader_SplitMetric(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value1": int64(10),
|
||||
"value2": int64(10),
|
||||
"value3": int64(10),
|
||||
"value4": int64(10),
|
||||
"value5": int64(10),
|
||||
"value6": int64(10),
|
||||
},
|
||||
ts,
|
||||
)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 60)
|
||||
|
||||
tests := []struct {
|
||||
expRegex string
|
||||
err error
|
||||
n int
|
||||
}{
|
||||
{
|
||||
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
|
||||
nil,
|
||||
57,
|
||||
},
|
||||
{
|
||||
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
|
||||
io.EOF,
|
||||
57,
|
||||
},
|
||||
{
|
||||
"",
|
||||
io.EOF,
|
||||
0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
n, err := r.Read(buf)
|
||||
assert.Equal(t, test.n, n)
|
||||
re := regexp.MustCompile(test.expRegex)
|
||||
assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
|
||||
assert.Equal(t, test.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// test an array with one split metric and one unsplit
|
||||
func TestMetricReader_SplitMetric2(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value1": int64(10),
|
||||
"value2": int64(10),
|
||||
"value3": int64(10),
|
||||
"value4": int64(10),
|
||||
"value5": int64(10),
|
||||
"value6": int64(10),
|
||||
},
|
||||
ts,
|
||||
)
|
||||
m2, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value1": int64(10),
|
||||
},
|
||||
ts,
|
||||
)
|
||||
metrics := []telegraf.Metric{m1, m2}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 60)
|
||||
|
||||
tests := []struct {
|
||||
expRegex string
|
||||
err error
|
||||
n int
|
||||
}{
|
||||
{
|
||||
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
|
||||
nil,
|
||||
57,
|
||||
},
|
||||
{
|
||||
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
|
||||
nil,
|
||||
57,
|
||||
},
|
||||
{
|
||||
`foo value1=10i 1481032190000000000\n`,
|
||||
io.EOF,
|
||||
35,
|
||||
},
|
||||
{
|
||||
"",
|
||||
io.EOF,
|
||||
0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
n, err := r.Read(buf)
|
||||
assert.Equal(t, test.n, n)
|
||||
re := regexp.MustCompile(test.expRegex)
|
||||
assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
|
||||
assert.Equal(t, test.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// test split that results in metrics that are still too long, which results in
|
||||
// the reader falling back to regular overflow.
|
||||
func TestMetricReader_SplitMetricTooLong(t *testing.T) {
|
||||
ts := time.Unix(1481032190, 0)
|
||||
m1, _ := New("foo", map[string]string{},
|
||||
map[string]interface{}{
|
||||
"value1": int64(10),
|
||||
"value2": int64(10),
|
||||
},
|
||||
ts,
|
||||
)
|
||||
metrics := []telegraf.Metric{m1}
|
||||
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 30)
|
||||
|
||||
tests := []struct {
|
||||
expRegex string
|
||||
err error
|
||||
n int
|
||||
}{
|
||||
{
|
||||
`foo value\d=10i,value\d=10i 1481`,
|
||||
nil,
|
||||
30,
|
||||
},
|
||||
{
|
||||
`032190000000000\n`,
|
||||
io.EOF,
|
||||
16,
|
||||
},
|
||||
{
|
||||
"",
|
||||
io.EOF,
|
||||
0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
n, err := r.Read(buf)
|
||||
assert.Equal(t, test.n, n)
|
||||
re := regexp.MustCompile(test.expRegex)
|
||||
assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
|
||||
assert.Equal(t, test.err, err)
|
||||
}
|
||||
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{
			`foo value\d=10i 1481032190000000000\n`,
			nil,
			35,
			make([]byte, 36),
		},
		{
			`foo value\d=10i 148103219000000`,
			nil,
			30,
			make([]byte, 30),
		},
		{
			`0000\n`,
			nil,
			5,
			make([]byte, 30),
		},
		{
			`foo value\d=10i 1481032190000000000\n`,
			nil,
			35,
			make([]byte, 36),
		},
		{
			`foo value1=10i 1481032190000000000\n`,
			io.EOF,
			35,
			make([]byte, 36),
		},
		{
			"",
			io.EOF,
			0,
			make([]byte, 36),
		},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{
			`foo value\d=10i 1481032190000000000\n`,
			nil,
			35,
			make([]byte, 36),
		},
		{
			`foo value\d=10i 148103219000000`,
			nil,
			30,
			make([]byte, 30),
		},
		{
			`0000\n`,
			nil,
			5,
			make([]byte, 30),
		},
		{
			`foo value1=10i 1481032190000000000\n`,
			io.EOF,
			35,
			make([]byte, 36),
		},
		{
			"",
			io.EOF,
			0,
			make([]byte, 36),
		},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}

func TestReader_Read(t *testing.T) {
	epoch := time.Unix(0, 0)

	type args struct {
		name   string
		tags   map[string]string
		fields map[string]interface{}
		t      time.Time
		mType  []telegraf.ValueType
	}
	tests := []struct {
		name     string
		args     args
		expected []byte
	}{
		{
			name: "escape backslashes in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\" 0`),
		},
		{
			name: "escape quote in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test"`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\"" 0`),
		},
		{
			name: "escape quote and backslash in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\"`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\\"" 0`),
		},
		{
			name: "escape multiple backslash in string field",
			args: args{
				name:   "cpu",
				tags:   map[string]string{},
				fields: map[string]interface{}{"value": `test\\`},
				t:      epoch,
			},
			expected: []byte(`cpu value="test\\\\" 0`),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf := make([]byte, 512)
			m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
			require.NoError(t, err)

			r := NewReader([]telegraf.Metric{m})
			num, err := r.Read(buf)
			if err != io.EOF {
				require.NoError(t, err)
			}
			line := string(buf[:num])
			// This is done so that we can use raw strings in the test spec
			noeol := strings.TrimRight(line, "\n")
			require.Equal(t, string(tt.expected), noeol)
			require.Equal(t, len(tt.expected)+1, num)
		})
	}
}
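
The tests above drive the reader with deliberately small, changing buffers. For orientation, here is a minimal sketch of how a caller might drain the same API; only the NewReader/Read behaviour exercised by these tests is assumed (partial writes are possible, and io.EOF can arrive together with the final chunk), and the helper name and buffer size are illustrative:

```go
// drain is a sketch, not part of the package under test: it collects all
// bytes produced by a metric Reader, handling partial reads and the fact
// that io.EOF may be returned alongside the last chunk of data.
func drain(metrics []telegraf.Metric) ([]byte, error) {
	r := NewReader(metrics)
	var out []byte
	buf := make([]byte, 64) // deliberately small, as in the tests above
	for {
		n, err := r.Read(buf)
		out = append(out, buf[:n]...)
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return out, err
		}
	}
}
```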
|
||||
|
||||
func TestMetricRoundtrip(t *testing.T) {
|
||||
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=
24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
|
||||
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
|
||||
`
|
||||
metrics, err := Parse([]byte(lp))
|
||||
require.NoError(t, err)
|
||||
r := NewReader(metrics)
|
||||
buf := make([]byte, 128)
|
||||
_, err = r.Read(buf)
|
||||
require.NoError(t, err)
|
||||
metrics, err = Parse(buf)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
7	metric/uint_support.go	Normal file
@@ -0,0 +1,7 @@
// +build uint64

package metric

func init() {
	EnableUintSupport()
}
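
This file is only compiled when the `uint64` build tag is set (for example, a build invoked with `-tags uint64`), so unsigned-integer support stays opt-in for default builds.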

@@ -13,6 +13,12 @@ type Output interface {
	Write(metrics []Metric) error
}

type AggregatingOutput interface {
	Add(in Metric)
	Push() []Metric
	Reset()
}

type ServiceOutput interface {
	// Connect to the Output
	Connect() error
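
The new AggregatingOutput interface lets an output plugin receive raw metrics, aggregate them internally, and emit the aggregated set on each flush. A rough plugin-side sketch, assuming only the three methods added above; the `countingOutput` type and its behaviour are hypothetical, not an existing plugin:

```go
package example

import "github.com/influxdata/telegraf"

// countingOutput is a hypothetical aggregating output that just counts
// metrics between flushes; only the AggregatingOutput methods introduced
// above are shown here.
type countingOutput struct {
	count int
}

// Add accumulates state instead of writing each metric immediately.
func (c *countingOutput) Add(in telegraf.Metric) { c.count++ }

// Push returns the aggregated metrics to be written; a real plugin would
// build telegraf.Metric values here (omitted in this sketch).
func (c *countingOutput) Push() []telegraf.Metric { return nil }

// Reset clears state for the next aggregation window.
func (c *countingOutput) Reset() { c.count = 0 }
```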

@@ -1,6 +1,6 @@
# BasicStats Aggregator Plugin

The BasicStats aggregator plugin give us count,max,min,mean,s2(variance), stdev for a set of values,
The BasicStats aggregator plugin give us count,max,min,mean,sum,s2(variance), stdev for a set of values,
emitting the aggregate every `period` seconds.

### Configuration:
@@ -21,11 +21,11 @@ emitting the aggregate every `period` seconds.
## BasicStats Arguments:

## Configures which basic stats to push as fields
stats = ["count","min","max","mean","stdev","s2"]
stats = ["count","min","max","mean","stdev","s2","sum"]
```

- stats
- If not specified, all stats are aggregated and pushed as fields
- If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum` is not aggregated by default to maintain backwards compatibility.
- If empty array, no stats are aggregated

### Measurements & Fields:
@@ -35,6 +35,7 @@ emitting the aggregate every `period` seconds.
- field1_max
- field1_min
- field1_mean
- field1_sum
- field1_s2 (variance)
- field1_stdev (standard deviation)

@@ -48,8 +49,8 @@ No tags are applied by this aggregator.
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414162 1475584010000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000
```
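
To read the second window of that example: it aggregates the samples load1=1 and load1=3, so count=2, min=1, max=3, mean=(1+3)/2=2, sum=1+3=4, sample variance s2=((1-2)^2+(3-2)^2)/(2-1)=2 and stdev=sqrt(2)≈1.414, which matches the new `load1_sum=4` field shown on the second line of each pair.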
|
||||
|
||||

@@ -22,6 +22,7 @@ type configuredStats struct {
	mean     bool
	variance bool
	stdev    bool
	sum      bool
}

func NewBasicStats() *BasicStats {
@@ -40,6 +41,7 @@ type basicstats struct {
	count float64
	min   float64
	max   float64
	sum   float64
	mean  float64
	M2    float64 //intermediate value for variance/stdev
}
@@ -77,6 +79,7 @@ func (m *BasicStats) Add(in telegraf.Metric) {
				min:  fv,
				max:  fv,
				mean: fv,
				sum:  fv,
				M2:   0.0,
			}
		}
@@ -92,6 +95,7 @@ func (m *BasicStats) Add(in telegraf.Metric) {
					min:  fv,
					max:  fv,
					mean: fv,
					sum:  fv,
					M2:   0.0,
				}
				continue
@@ -119,6 +123,8 @@ func (m *BasicStats) Add(in telegraf.Metric) {
			} else if fv > tmp.max {
				tmp.max = fv
			}
			//sum compute
			tmp.sum += fv
			//store final data
			m.cache[id].fields[k] = tmp
		}
@@ -146,6 +152,9 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) {
			if config.mean {
				fields[k+"_mean"] = v.mean
			}
			if config.sum {
				fields[k+"_sum"] = v.sum
			}

			//v.count always >=1
			if v.count > 1 {
@@ -187,6 +196,8 @@ func parseStats(names []string) *configuredStats {
			parsed.variance = true
		case "stdev":
			parsed.stdev = true
		case "sum":
			parsed.sum = true

		default:
			log.Printf("W! Unrecognized basic stat '%s', ignoring", name)
@@ -206,6 +217,7 @@ func defaultStats() *configuredStats {
	defaults.mean = true
	defaults.variance = true
	defaults.stdev = true
	defaults.sum = false

	return defaults
}
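
The per-field state above is updated incrementally: min and max by comparison, sum by direct accumulation, and mean/M2 with the usual running (Welford-style) update, with s2 derived from M2 only when more than one value has been seen (the `v.count > 1` guard shown earlier). A self-contained sketch of that update follows; it assumes s2 takes the sample-variance form M2/(count-1), which is consistent with the README example (values 1 and 3 give s2=2 and stdev≈1.414), but it is an illustration rather than the plugin's exact code:

```go
package main

import (
	"fmt"
	"math"
)

// runningStats mirrors the basicstats per-field state (sketch only).
type runningStats struct {
	count, min, max, sum, mean, m2 float64
}

func (s *runningStats) add(v float64) {
	if s.count == 0 {
		*s = runningStats{count: 1, min: v, max: v, sum: v, mean: v}
		return
	}
	s.count++
	if v < s.min {
		s.min = v
	} else if v > s.max {
		s.max = v
	}
	s.sum += v // direct accumulation, independent of mean and count
	delta := v - s.mean
	s.mean += delta / s.count
	s.m2 += delta * (v - s.mean) // Welford update for the variance numerator
}

func main() {
	var s runningStats
	for _, v := range []float64{1, 3} {
		s.add(v)
	}
	s2 := s.m2 / (s.count - 1) // assumed sample-variance form; guarded by count > 1 in the plugin
	fmt.Println(s.count, s.min, s.max, s.mean, s.sum, s2, math.Sqrt(s2))
	// Output: 2 1 3 2 4 2 1.4142135623730951
}
```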
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var m1, _ = metric.New("m1",
|
||||
@@ -250,6 +251,83 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating sum
|
||||
func TestBasicStatsWithOnlySum(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"sum"}
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_sum": float64(2),
|
||||
"b_sum": float64(4),
|
||||
"c_sum": float64(6),
|
||||
"d_sum": float64(8),
|
||||
"e_sum": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Verify that sum doesn't suffer from floating point errors. Early
|
||||
// implementations calculated sum from mean and count, which
|
||||
// e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8.
|
||||
func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
|
||||
|
||||
var sum1, _ = metric.New("m1",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"a": int64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
var sum2, _ = metric.New("m1",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"a": int64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
var sum3, _ = metric.New("m1",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"a": int64(5),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
var sum4, _ = metric.New("m1",
|
||||
map[string]string{},
|
||||
map[string]interface{}{
|
||||
"a": int64(1),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
aggregator.Stats = []string{"sum"}
|
||||
|
||||
aggregator.Add(sum1)
|
||||
aggregator.Add(sum2)
|
||||
aggregator.Add(sum3)
|
||||
aggregator.Add(sum4)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_sum": float64(8),
|
||||
}
|
||||
expectedTags := map[string]string{}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test only aggregating variance
|
||||
func TestBasicStatsWithOnlyVariance(t *testing.T) {
|
||||
|
||||
@@ -328,6 +406,57 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test aggregating with all stats
|
||||
func TestBasicStatsWithAllStats(t *testing.T) {
|
||||
acc := testutil.Accumulator{}
|
||||
minmax := NewBasicStats()
|
||||
minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"}
|
||||
|
||||
minmax.Add(m1)
|
||||
minmax.Add(m2)
|
||||
minmax.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"a_count": float64(2), //a
|
||||
"a_max": float64(1),
|
||||
"a_min": float64(1),
|
||||
"a_mean": float64(1),
|
||||
"a_stdev": float64(0),
|
||||
"a_s2": float64(0),
|
||||
"a_sum": float64(2),
|
||||
"b_count": float64(2), //b
|
||||
"b_max": float64(3),
|
||||
"b_min": float64(1),
|
||||
"b_mean": float64(2),
|
||||
"b_s2": float64(2),
|
||||
"b_sum": float64(4),
|
||||
"b_stdev": math.Sqrt(2),
|
||||
"c_count": float64(2), //c
|
||||
"c_max": float64(4),
|
||||
"c_min": float64(2),
|
||||
"c_mean": float64(3),
|
||||
"c_s2": float64(2),
|
||||
"c_stdev": math.Sqrt(2),
|
||||
"c_sum": float64(6),
|
||||
"d_count": float64(2), //d
|
||||
"d_max": float64(6),
|
||||
"d_min": float64(2),
|
||||
"d_mean": float64(4),
|
||||
"d_s2": float64(8),
|
||||
"d_stdev": math.Sqrt(8),
|
||||
"d_sum": float64(8),
|
||||
"e_count": float64(1), //e
|
||||
"e_max": float64(200),
|
||||
"e_min": float64(200),
|
||||
"e_mean": float64(200),
|
||||
"e_sum": float64(200),
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test that if an empty array is passed, no points are pushed
|
||||
func TestBasicStatsWithNoStats(t *testing.T) {
|
||||
|
||||
@@ -357,3 +486,26 @@ func TestBasicStatsWithUnknownStat(t *testing.T) {
|
||||
|
||||
acc.AssertDoesNotContainMeasurement(t, "m1")
|
||||
}
|
||||
|
||||
// Test that if Stats isn't supplied, then we only do count, min, max, mean,
|
||||
// stdev, and s2. We purposely exclude sum for backwards compatibility,
|
||||
// otherwise user's working systems will suddenly (and surprisingly) start
|
||||
// capturing sum without their input.
|
||||
func TestBasicStatsWithDefaultStats(t *testing.T) {
|
||||
|
||||
aggregator := NewBasicStats()
|
||||
|
||||
aggregator.Add(m1)
|
||||
aggregator.Add(m2)
|
||||
|
||||
acc := testutil.Accumulator{}
|
||||
aggregator.Push(&acc)
|
||||
|
||||
assert.True(t, acc.HasField("m1", "a_count"))
|
||||
assert.True(t, acc.HasField("m1", "a_min"))
|
||||
assert.True(t, acc.HasField("m1", "a_max"))
|
||||
assert.True(t, acc.HasField("m1", "a_mean"))
|
||||
assert.True(t, acc.HasField("m1", "a_stdev"))
|
||||
assert.True(t, acc.HasField("m1", "a_s2"))
|
||||
assert.False(t, acc.HasField("m1", "a_sum"))
|
||||
}
|
||||
|
||||
@@ -24,11 +24,13 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fibaro"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/http"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
|
||||
@@ -48,6 +50,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
|
||||
@@ -63,6 +66,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
|
||||
|
||||
@@ -145,23 +145,25 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
|
||||
go a.process(msgs, acc)
|
||||
|
||||
go func() {
|
||||
err := <-a.conn.NotifyClose(make(chan *amqp.Error))
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
|
||||
for {
|
||||
msgs, err := a.connect(amqpConf)
|
||||
if err != nil {
|
||||
log.Printf("E! AMQP connection failed: %s", err)
|
||||
time.Sleep(10 * time.Second)
|
||||
continue
|
||||
err := <-a.conn.NotifyClose(make(chan *amqp.Error))
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
a.wg.Add(1)
|
||||
go a.process(msgs, acc)
|
||||
break
|
||||
log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
|
||||
for {
|
||||
msgs, err := a.connect(amqpConf)
|
||||
if err != nil {
|
||||
log.Printf("E! AMQP connection failed: %s", err)
|
||||
time.Sleep(10 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
a.wg.Add(1)
|
||||
go a.process(msgs, acc)
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
|
||||
# Telegraf plugin: Cassandra
|
||||
|
||||
### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.
|
||||
|
||||
#### Plugin arguments:
|
||||
- **context** string: Context root used for jolokia url
|
||||
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type JolokiaClient interface {
|
||||
@@ -60,7 +62,8 @@ func newCassandraMetric(host string, metric string,
|
||||
func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
|
||||
mname string) {
|
||||
for k, v := range values {
|
||||
if v != nil {
|
||||
switch v.(type) {
|
||||
case int64, float64, string, bool:
|
||||
fields[mname+"_"+k] = v
|
||||
}
|
||||
}
|
||||
@@ -117,7 +120,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
|
||||
switch t := values.(type) {
|
||||
case map[string]interface{}:
|
||||
addValuesAsFields(values.(map[string]interface{}), fields, attribute)
|
||||
case interface{}:
|
||||
case int64, float64, string, bool:
|
||||
fields[attribute] = t
|
||||
}
|
||||
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
|
||||
@@ -172,7 +175,11 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
|
||||
|
||||
func (j *Cassandra) SampleConfig() string {
|
||||
return `
|
||||
# This is the context root used to compose the jolokia url
|
||||
## DEPRECATED: The cassandra plugin has been deprecated. Please use the
|
||||
## jolokia2 plugin instead.
|
||||
##
|
||||
## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
|
||||
|
||||
context = "/jolokia/read"
|
||||
## List of cassandra servers exposing jolokia read service
|
||||
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
|
||||
@@ -256,6 +263,16 @@ func parseServerTokens(server string) map[string]string {
|
||||
return serverTokens
|
||||
}
|
||||
|
||||
func (c *Cassandra) Start(acc telegraf.Accumulator) error {
|
||||
log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " +
|
||||
"Please use the jolokia2 plugin instead. " +
|
||||
"https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cassandra) Stop() {
|
||||
}
|
||||
|
||||
func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
|
||||
context := c.Context
|
||||
servers := c.Servers
|
||||
|
||||
@@ -1,43 +1,54 @@
|
||||
# Telegraf Input Plugin: Consul
|
||||
# Consul Input Plugin
|
||||
|
||||
This plugin will collect statistics about all health checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
|
||||
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed.
|
||||
This plugin will collect statistics about all health checks registered in the
|
||||
Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
|
||||
to query the data. It will not report the
|
||||
[telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can
|
||||
report those stats already using StatsD protocol if needed.
|
||||
|
||||
## Configuration:
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
```toml
|
||||
# Gather health check statuses from services registered in Consul
|
||||
[[inputs.consul]]
|
||||
## Most of these values defaults to the one configured on a Consul's agent level.
|
||||
## Optional Consul server address (default: "")
|
||||
# address = ""
|
||||
## Optional URI scheme for the Consul server (default: "")
|
||||
# scheme = ""
|
||||
## Optional ACL token used in every request (default: "")
|
||||
## Consul server address
|
||||
# address = "localhost"
|
||||
|
||||
## URI scheme for the Consul server, one of "http", "https"
|
||||
# scheme = "http"
|
||||
|
||||
## ACL token used in every request
|
||||
# token = ""
|
||||
## Optional username used for request HTTP Basic Authentication (default: "")
|
||||
|
||||
## HTTP Basic Authentication username and password.
|
||||
# username = ""
|
||||
## Optional password used for HTTP Basic Authentication (default: "")
|
||||
# password = ""
|
||||
## Optional data centre to query the health checks from (default: "")
|
||||
|
||||
## Data centre to query the health checks from
|
||||
# datacentre = ""
|
||||
|
||||
## SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
```
|
||||
|
||||
## Measurements:
|
||||
### Metrics:
|
||||
|
||||
### Consul:
|
||||
Tags:
|
||||
- node: on which node check/service is registered on
|
||||
- service_name: name of the service (this is the service name not the service ID)
|
||||
- check_id
|
||||
|
||||
Fields:
|
||||
- check_name
|
||||
- service_id
|
||||
- status
|
||||
- passing
|
||||
- critical
|
||||
- warning
|
||||
- consul_health_checks
|
||||
- tags:
|
||||
- node (node that check/service is registered on)
|
||||
- service_name
|
||||
- check_id
|
||||
- fields:
|
||||
- check_name
|
||||
- service_id
|
||||
- status
|
||||
- passing (integer)
|
||||
- critical (integer)
|
||||
- warning (integer)
|
||||
|
||||
`passing`, `critical`, and `warning` are integer representations of the health
|
||||
check state. A value of `1` represents that the status was the state of the
|
||||
@@ -46,8 +57,6 @@ the health check at this sample.
|
||||
## Example output
|
||||
|
||||
```
|
||||
$ telegraf --config ./telegraf.conf --input-filter consul --test
|
||||
* Plugin: consul, Collection 1
|
||||
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
|
||||
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
|
||||
consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
|
||||
consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
|
||||
```
|
||||
|
||||
@@ -31,19 +31,28 @@ type Consul struct {
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## Most of these values defaults to the one configured on a Consul's agent level.
|
||||
## Optional Consul server address (default: "localhost")
|
||||
## Consul server address
|
||||
# address = "localhost"
|
||||
## Optional URI scheme for the Consul server (default: "http")
|
||||
|
||||
## URI scheme for the Consul server, one of "http", "https"
|
||||
# scheme = "http"
|
||||
## Optional ACL token used in every request (default: "")
|
||||
|
||||
## ACL token used in every request
|
||||
# token = ""
|
||||
## Optional username used for request HTTP Basic Authentication (default: "")
|
||||
|
||||
## HTTP Basic Authentication username and password.
|
||||
# username = ""
|
||||
## Optional password used for HTTP Basic Authentication (default: "")
|
||||
# password = ""
|
||||
## Optional data centre to query the health checks from (default: "")
|
||||
|
||||
## Data centre to query the health checks from
|
||||
# datacentre = ""
|
||||
|
||||
## SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## If false, skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
`
|
||||
|
||||
func (c *Consul) Description() string {
|
||||
|
||||
@@ -8,11 +8,18 @@ Depending on the work load of your DC/OS cluster, this plugin can quickly
|
||||
create a high number of series which, when unchecked, can cause high load on
|
||||
your database.
|
||||
|
||||
- Use [measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) liberally to exclude unneeded metrics as well as the node, container, and app inclue/exclude options.
|
||||
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#retention-policy-rp).
|
||||
- Limit the number of series allowed in your database using the `max-series-per-database` and `max-values-per-tag` settings.
|
||||
- Consider enabling the [TSI](https://docs.influxdata.com/influxdb/v1.3/about_the_project/releasenotes-changelog/#release-notes-8) engine.
|
||||
- Monitor your [series cardinality](https://docs.influxdata.com/influxdb/v1.3/troubleshooting/frequently-asked-questions/#how-can-i-query-for-series-cardinality).
|
||||
- Use the
|
||||
[measurement filtering](https://docs.influxdata.com/telegraf/latest/administration/configuration/#measurement-filtering)
|
||||
options to exclude unneeded tags.
|
||||
- Write to a database with an appropriate
|
||||
[retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/).
|
||||
- Limit series cardinality in your database using the
|
||||
[`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and
|
||||
[`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings.
|
||||
- Consider using the
|
||||
[Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/).
|
||||
- Monitor your databases
|
||||
[series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality).
|
||||
|
||||
### Configuration:
|
||||
```toml
|
||||
|
||||
@@ -31,6 +31,7 @@ type Client interface {
|
||||
}
|
||||
|
||||
type APIError struct {
|
||||
URL string
|
||||
StatusCode int
|
||||
Title string
|
||||
Description string
|
||||
@@ -105,9 +106,9 @@ type claims struct {
|
||||
|
||||
func (e APIError) Error() string {
|
||||
if e.Description != "" {
|
||||
return fmt.Sprintf("%s: %s", e.Title, e.Description)
|
||||
return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description)
|
||||
}
|
||||
return e.Title
|
||||
return fmt.Sprintf("[%s] %s", e.URL, e.Title)
|
||||
}
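
With the URL now carried on the struct, the rendered error text names the failing endpoint. A self-contained sketch of the new format; the URL value is hypothetical and the type is a copied-out stand-in for the client's APIError, purely for illustration:

```go
package main

import "fmt"

// Minimal copy of the APIError shape from the dcos client above, just to
// show the text produced by the new Error() format strings.
type APIError struct {
	URL         string
	StatusCode  int
	Title       string
	Description string
}

func (e APIError) Error() string {
	if e.Description != "" {
		return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description)
	}
	return fmt.Sprintf("[%s] %s", e.URL, e.Title)
}

func main() {
	err := APIError{
		URL:        "https://dcos.example.com/mesos/master/state-summary", // hypothetical URL
		StatusCode: 401,
		Title:      "401 Unauthorized",
	}
	fmt.Println(err.Error())
	// [https://dcos.example.com/mesos/master/state-summary] 401 Unauthorized
}
```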
|
||||
|
||||
func NewClusterClient(
|
||||
@@ -156,7 +157,8 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", c.url("/acs/api/v1/auth/login"), bytes.NewBuffer(octets))
|
||||
loc := c.url("/acs/api/v1/auth/login")
|
||||
req, err := http.NewRequest("POST", loc, bytes.NewBuffer(octets))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -189,6 +191,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
|
||||
err = dec.Decode(loginError)
|
||||
if err != nil {
|
||||
err := &APIError{
|
||||
URL: loc,
|
||||
StatusCode: resp.StatusCode,
|
||||
Title: resp.Status,
|
||||
}
|
||||
@@ -196,6 +199,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
|
||||
}
|
||||
|
||||
err = &APIError{
|
||||
URL: loc,
|
||||
StatusCode: resp.StatusCode,
|
||||
Title: loginError.Title,
|
||||
Description: loginError.Description,
|
||||
@@ -301,6 +305,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return &APIError{
|
||||
URL: url,
|
||||
StatusCode: resp.StatusCode,
|
||||
Title: resp.Status,
|
||||
}
|
||||
@@ -315,7 +320,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er
|
||||
}
|
||||
|
||||
func (c *ClusterClient) url(path string) string {
|
||||
url := c.clusterURL
|
||||
url := *c.clusterURL
|
||||
url.Path = path
|
||||
return url.String()
|
||||
}
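
The one-character change above (dereferencing to copy the URL value before setting Path) matters because the old code mutated the shared *url.URL held by the client. A standalone sketch of the difference, using a hypothetical base URL:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://dcos.example.com") // hypothetical cluster URL

	// Old behaviour: working through the shared pointer changes it for every caller.
	p := base
	p.Path = "/mesos/master/state-summary"
	fmt.Println(base.String()) // base now carries the path as a side effect

	// New behaviour: dereference to copy the value, leaving base untouched.
	base, _ = url.Parse("https://dcos.example.com")
	u := *base
	u.Path = "/acs/api/v1/auth/login"
	fmt.Println(u.String(), base.String()) // only the copy carries the path
}
```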
|
||||
|
||||
@@ -31,6 +31,9 @@ P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
|
||||
)
|
||||
|
||||
func TestLogin(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
defer ts.Close()
|
||||
|
||||
var tests = []struct {
|
||||
name string
|
||||
responseCode int
|
||||
@@ -40,16 +43,21 @@ func TestLogin(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "Login successful",
|
||||
responseCode: 200,
|
||||
responseCode: http.StatusOK,
|
||||
responseBody: `{"token": "XXX.YYY.ZZZ"}`,
|
||||
expectedError: nil,
|
||||
expectedToken: "XXX.YYY.ZZZ",
|
||||
},
|
||||
{
|
||||
name: "Unauthorized Error",
|
||||
responseCode: http.StatusUnauthorized,
|
||||
responseBody: `{"title": "x", "description": "y"}`,
|
||||
expectedError: &APIError{http.StatusUnauthorized, "x", "y"},
|
||||
name: "Unauthorized Error",
|
||||
responseCode: http.StatusUnauthorized,
|
||||
responseBody: `{"title": "x", "description": "y"}`,
|
||||
expectedError: &APIError{
|
||||
URL: ts.URL + "/acs/api/v1/auth/login",
|
||||
StatusCode: http.StatusUnauthorized,
|
||||
Title: "x",
|
||||
Description: "y",
|
||||
},
|
||||
expectedToken: "",
|
||||
},
|
||||
}
|
||||
@@ -59,11 +67,11 @@ func TestLogin(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(tt.responseCode)
|
||||
fmt.Fprintln(w, tt.responseBody)
|
||||
})
|
||||
ts := httptest.NewServer(handler)
|
||||
|
||||
u, err := url.Parse(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -82,13 +90,14 @@ func TestLogin(t *testing.T) {
|
||||
} else {
|
||||
require.Nil(t, auth)
|
||||
}
|
||||
|
||||
ts.Close()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSummary(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
defer ts.Close()
|
||||
|
||||
var tests = []struct {
|
||||
name string
|
||||
responseCode int
|
||||
@@ -98,7 +107,7 @@ func TestGetSummary(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "No nodes",
|
||||
responseCode: 200,
|
||||
responseCode: http.StatusOK,
|
||||
responseBody: `{"cluster": "a", "slaves": []}`,
|
||||
expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
|
||||
expectedError: nil,
|
||||
@@ -108,11 +117,15 @@ func TestGetSummary(t *testing.T) {
|
||||
responseCode: http.StatusUnauthorized,
|
||||
responseBody: `<html></html>`,
|
||||
expectedValue: nil,
|
||||
expectedError: &APIError{StatusCode: http.StatusUnauthorized, Title: "401 Unauthorized"},
|
||||
expectedError: &APIError{
|
||||
URL: ts.URL + "/mesos/master/state-summary",
|
||||
StatusCode: http.StatusUnauthorized,
|
||||
Title: "401 Unauthorized",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Has nodes",
|
||||
responseCode: 200,
|
||||
responseCode: http.StatusOK,
|
||||
responseBody: `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
|
||||
expectedValue: &Summary{
|
||||
Cluster: "a",
|
||||
@@ -127,12 +140,12 @@ func TestGetSummary(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// check the path
|
||||
w.WriteHeader(tt.responseCode)
|
||||
fmt.Fprintln(w, tt.responseBody)
|
||||
})
|
||||
ts := httptest.NewServer(handler)
|
||||
|
||||
u, err := url.Parse(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -142,14 +155,15 @@ func TestGetSummary(t *testing.T) {
|
||||
|
||||
require.Equal(t, tt.expectedError, err)
|
||||
require.Equal(t, tt.expectedValue, summary)
|
||||
|
||||
ts.Close()
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetNodeMetrics(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
defer ts.Close()
|
||||
|
||||
var tests = []struct {
|
||||
name string
|
||||
responseCode int
|
||||
@@ -159,7 +173,7 @@ func TestGetNodeMetrics(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "Empty Body",
|
||||
responseCode: 200,
|
||||
responseCode: http.StatusOK,
|
||||
responseBody: `{}`,
|
||||
expectedValue: &Metrics{},
|
||||
expectedError: nil,
|
||||
@@ -168,12 +182,12 @@ func TestGetNodeMetrics(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// check the path
|
||||
w.WriteHeader(tt.responseCode)
|
||||
fmt.Fprintln(w, tt.responseBody)
|
||||
})
|
||||
ts := httptest.NewServer(handler)
|
||||
|
||||
u, err := url.Parse(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -183,14 +197,15 @@ func TestGetNodeMetrics(t *testing.T) {
|
||||
|
||||
require.Equal(t, tt.expectedError, err)
|
||||
require.Equal(t, tt.expectedValue, m)
|
||||
|
||||
ts.Close()
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetContainerMetrics(t *testing.T) {
|
||||
ts := httptest.NewServer(http.NotFoundHandler())
|
||||
defer ts.Close()
|
||||
|
||||
var tests = []struct {
|
||||
name string
|
||||
responseCode int
|
||||
@@ -199,8 +214,8 @@ func TestGetContainerMetrics(t *testing.T) {
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "204 No Contents",
|
||||
responseCode: 204,
|
||||
name: "204 No Content",
|
||||
responseCode: http.StatusNoContent,
|
||||
responseBody: ``,
|
||||
expectedValue: &Metrics{},
|
||||
expectedError: nil,
|
||||
@@ -209,12 +224,12 @@ func TestGetContainerMetrics(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// check the path
|
||||
w.WriteHeader(tt.responseCode)
|
||||
fmt.Fprintln(w, tt.responseBody)
|
||||
})
|
||||
ts := httptest.NewServer(handler)
|
||||
|
||||
u, err := url.Parse(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -224,8 +239,6 @@ func TestGetContainerMetrics(t *testing.T) {
|
||||
|
||||
require.Equal(t, tt.expectedError, err)
|
||||
require.Equal(t, tt.expectedValue, m)
|
||||
|
||||
ts.Close()
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -4,12 +4,11 @@ The docker plugin uses the Docker Engine API to gather metrics on running
|
||||
docker containers.
|
||||
|
||||
The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
|
||||
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
|
||||
[Library Documentation](https://godoc.org/github.com/moby/moby/client)
|
||||
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/).
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
```toml
|
||||
# Read metrics about docker containers
|
||||
[[inputs.docker]]
|
||||
## Docker Endpoint
|
||||
@@ -31,6 +30,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
|
||||
container_name_include = []
|
||||
container_name_exclude = []
|
||||
|
||||
## Container states to include and exclude. Globs accepted.
|
||||
## When empty only containers in the "running" state will be captured.
|
||||
# container_state_include = []
|
||||
# container_state_exclude = []
|
||||
|
||||
## Timeout for docker list, info, and stats commands
|
||||
timeout = "5s"
|
||||
|
||||
@@ -71,15 +75,57 @@ may prefer to exclude them:
|
||||
```
|
||||
|
||||
|
||||
### Measurements & Fields:
|
||||
### Metrics:
|
||||
|
||||
Every effort was made to preserve the names based on the JSON response from the
|
||||
docker API.
|
||||
|
||||
Note that the docker_container_cpu metric may appear multiple times per collection,
|
||||
based on the availability of per-cpu stats on your system.
|
||||
- docker
|
||||
- tags:
|
||||
- unit
|
||||
- engine_host
|
||||
- server_version
|
||||
- fields:
|
||||
- n_used_file_descriptors
|
||||
- n_cpus
|
||||
- n_containers
|
||||
- n_containers_running
|
||||
- n_containers_stopped
|
||||
- n_containers_paused
|
||||
- n_images
|
||||
- n_goroutines
|
||||
- n_listener_events
|
||||
- memory_total
|
||||
- pool_blocksize
|
||||
|
||||
- docker_data
|
||||
- tags:
|
||||
- unit
|
||||
- engine_host
|
||||
- server_version
|
||||
- fields:
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
|
||||
- docker_metadata
|
||||
- tags:
|
||||
- unit
|
||||
- engine_host
|
||||
- server_version
|
||||
- fields:
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
|
||||
- docker_container_mem
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_version
|
||||
- fields:
|
||||
- total_pgmafault
|
||||
- cache
|
||||
- mapped_file
|
||||
@@ -114,7 +160,16 @@ based on the availability of per-cpu stats on your system.
|
||||
- failcnt
|
||||
- limit
|
||||
- container_id
|
||||
|
||||
- docker_container_cpu
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_version
|
||||
- cpu
|
||||
- fields:
|
||||
- throttling_periods
|
||||
- throttling_throttled_periods
|
||||
- throttling_throttled_time
|
||||
@@ -124,7 +179,16 @@ based on the availability of per-cpu stats on your system.
|
||||
- usage_total
|
||||
- usage_percent
|
||||
- container_id
|
||||
|
||||
- docker_container_net
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_version
|
||||
- network
|
||||
- fields:
|
||||
- rx_dropped
|
||||
- rx_bytes
|
||||
- rx_errors
|
||||
@@ -134,7 +198,16 @@ based on the availability of per-cpu stats on your system.
|
||||
- tx_errors
|
||||
- tx_bytes
|
||||
- container_id
|
||||
|
||||
- docker_container_blkio
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_version
|
||||
- device
|
||||
- fields:
|
||||
- io_service_bytes_recursive_async
|
||||
- io_service_bytes_recursive_read
|
||||
- io_service_bytes_recursive_sync
|
||||
@@ -146,118 +219,38 @@ based on the availability of per-cpu stats on your system.
|
||||
- io_serviced_recursive_total
|
||||
- io_serviced_recursive_write
|
||||
- container_id
|
||||
- docker_
|
||||
- n_used_file_descriptors
|
||||
- n_cpus
|
||||
- n_containers
|
||||
- n_containers_running
|
||||
- n_containers_stopped
|
||||
- n_containers_paused
|
||||
- n_images
|
||||
- n_goroutines
|
||||
- n_listener_events
|
||||
- memory_total
|
||||
- pool_blocksize
|
||||
- docker_data
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
- docker_metadata
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
- docker_swarm
|
||||
- tasks_desired
|
||||
- tasks_running
|
||||
|
||||
|
||||
### Tags:
|
||||
#### Docker Engine tags
|
||||
- docker (memory_total)
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker (pool_blocksize)
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker_data
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker_metadata
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
|
||||
#### Docker Container tags
|
||||
- Tags on all containers:
|
||||
- docker_container_health
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_version
|
||||
- docker_container_mem specific:
|
||||
- docker_container_cpu specific:
|
||||
- cpu
|
||||
- docker_container_net specific:
|
||||
- network
|
||||
- docker_container_blkio specific:
|
||||
- device
|
||||
- docker_container_health specific:
|
||||
- health_status
|
||||
- failing_streak
|
||||
- docker_swarm specific:
|
||||
- fields:
|
||||
- health_status (string)
|
||||
- failing_streak (integer)
|
||||
|
||||
- docker_swarm
|
||||
- tags:
|
||||
- service_id
|
||||
- service_name
|
||||
- service_mode
|
||||
- fields:
|
||||
- tasks_desired
|
||||
- tasks_running
|
||||
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
|
||||
* Plugin: docker, Collection 1
|
||||
> docker n_cpus=8i 1456926671065383978
|
||||
> docker n_used_file_descriptors=15i 1456926671065383978
|
||||
> docker n_containers=7i 1456926671065383978
|
||||
> docker n_containers_running=7i 1456926671065383978
|
||||
> docker n_containers_stopped=3i 1456926671065383978
|
||||
> docker n_containers_paused=0i 1456926671065383978
|
||||
> docker n_images=152i 1456926671065383978
|
||||
> docker n_goroutines=36i 1456926671065383978
|
||||
> docker n_listener_events=0i 1456926671065383978
|
||||
> docker,unit=bytes memory_total=18935443456i 1456926671065383978
|
||||
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
|
||||
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
|
||||
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
|
||||
> docker_container_mem,
|
||||
container_image=spotify/kafka,container_name=kafka \
|
||||
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
|
||||
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
|
||||
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
|
||||
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
|
||||
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
|
||||
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
|
||||
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
|
||||
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
|
||||
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
|
||||
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
|
||||
> docker_container_cpu,
|
||||
container_image=spotify/kafka,container_name=kafka,cpu=cpu-total \
|
||||
throttling_periods=0i,throttling_throttled_periods=0i,\
|
||||
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
|
||||
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
|
||||
usage_total=6628208865i 1453409536840126713
|
||||
> docker_container_cpu,
|
||||
container_image=spotify/kafka,container_name=kafka,cpu=cpu0 \
|
||||
usage_total=6628208865i 1453409536840126713
|
||||
> docker_container_net,\
|
||||
container_image=spotify/kafka,container_name=kafka,network=eth0 \
|
||||
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
|
||||
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
|
||||
> docker_container_blkio,
|
||||
container_image=spotify/kafka,container_name=kafka,device=8:0 \
|
||||
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
|
||||
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
|
||||
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
|
||||
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
|
||||
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
|
||||
>docker_swarm,
|
||||
service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
|
||||
tasks_desired=3,tasks_running=3 1508968160000000000
|
||||
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
|
||||
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
|
||||
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
|
||||
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
|
||||
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
|
||||
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
|
||||
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
|
||||
```
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
version string
|
||||
version = "1.24"
|
||||
defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
|
||||
)
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
@@ -25,7 +26,7 @@ import (
|
||||
// Docker object
|
||||
type Docker struct {
|
||||
Endpoint string
|
||||
ContainerNames []string
|
||||
ContainerNames []string // deprecated in 1.4; use container_name_include
|
||||
|
||||
GatherServices bool `toml:"gather_services"`
|
||||
|
||||
@@ -39,6 +40,9 @@ type Docker struct {
|
||||
ContainerInclude []string `toml:"container_name_include"`
|
||||
ContainerExclude []string `toml:"container_name_exclude"`
|
||||
|
||||
ContainerStateInclude []string `toml:"container_state_include"`
|
||||
ContainerStateExclude []string `toml:"container_state_exclude"`
|
||||
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
@@ -50,9 +54,11 @@ type Docker struct {
|
||||
client Client
|
||||
httpClient *http.Client
|
||||
engine_host string
|
||||
serverVersion string
|
||||
filtersCreated bool
|
||||
labelFilter filter.Filter
|
||||
containerFilter filter.Filter
|
||||
stateFilter filter.Filter
|
||||
}
|
||||
|
||||
// KB, MB, GB, TB, PB...human friendly
|
||||
@@ -67,7 +73,8 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
|
||||
sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
|
||||
containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}
|
||||
)
|
||||
|
||||
var sampleConfig = `
|
||||
@@ -87,6 +94,11 @@ var sampleConfig = `
|
||||
container_name_include = []
|
||||
container_name_exclude = []
|
||||
|
||||
## Container states to include and exclude. Globs accepted.
|
||||
## When empty, only containers in the "running" state will be captured.
|
||||
# container_state_include = []
|
||||
# container_state_exclude = []
|
||||
|
||||
## Timeout for docker list, info, and stats commands
|
||||
timeout = "5s"
|
||||
|
||||
@@ -148,6 +160,10 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = d.createContainerStateFilters()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.filtersCreated = true
|
||||
}
|
||||
|
||||
@@ -164,8 +180,22 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
}
|
||||
|
||||
filterArgs := filters.NewArgs()
|
||||
for _, state := range containerStates {
|
||||
if d.stateFilter.Match(state) {
|
||||
filterArgs.Add("status", state)
|
||||
}
|
||||
}
|
||||
|
||||
// All container states were excluded
|
||||
if filterArgs.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// List containers
|
||||
opts := types.ContainerListOptions{}
|
||||
opts := types.ContainerListOptions{
|
||||
Filters: filterArgs,
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||
defer cancel()
|
||||
containers, err := d.client.ContainerList(ctx, opts)
|
||||
@@ -272,7 +302,14 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.engine_host = info.Name
|
||||
d.serverVersion = info.ServerVersion
|
||||
|
||||
tags := map[string]string{
|
||||
"engine_host": d.engine_host,
|
||||
"server_version": d.serverVersion,
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"n_cpus": info.NCPU,
|
||||
@@ -286,15 +323,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
"n_listener_events": info.NEventsListener,
|
||||
}
|
||||
// Add metrics
|
||||
acc.AddFields("docker",
|
||||
fields,
|
||||
map[string]string{"engine_host": d.engine_host},
|
||||
now)
|
||||
acc.AddFields("docker", fields, tags, now)
|
||||
acc.AddFields("docker",
|
||||
map[string]interface{}{"memory_total": info.MemTotal},
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
tags,
|
||||
now)
|
||||
// Get storage metrics
|
||||
tags["unit"] = "bytes"
|
||||
for _, rawData := range info.DriverStatus {
|
||||
// Try to convert string to int (bytes)
|
||||
value, err := parseSize(rawData[1])
|
||||
@@ -306,7 +341,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
// pool blocksize
|
||||
acc.AddFields("docker",
|
||||
map[string]interface{}{"pool_blocksize": value},
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
tags,
|
||||
now)
|
||||
} else if strings.HasPrefix(name, "data_space_") {
|
||||
// data space
|
||||
@@ -319,16 +354,10 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
}
|
||||
}
|
||||
if len(dataFields) > 0 {
|
||||
acc.AddFields("docker_data",
|
||||
dataFields,
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
now)
|
||||
acc.AddFields("docker_data", dataFields, tags, now)
|
||||
}
|
||||
if len(metadataFields) > 0 {
|
||||
acc.AddFields("docker_metadata",
|
||||
metadataFields,
|
||||
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
|
||||
now)
|
||||
acc.AddFields("docker_metadata", metadataFields, tags, now)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -359,6 +388,7 @@ func (d *Docker) gatherContainer(
|
||||
|
||||
tags := map[string]string{
|
||||
"engine_host": d.engine_host,
|
||||
"server_version": d.serverVersion,
|
||||
"container_name": cname,
|
||||
"container_image": imageName,
|
||||
"container_version": imageVersion,
|
||||
@@ -768,6 +798,18 @@ func (d *Docker) createLabelFilters() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Docker) createContainerStateFilters() error {
|
||||
if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 {
|
||||
d.ContainerStateInclude = []string{"running"}
|
||||
}
|
||||
filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.stateFilter = filter
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("docker", func() telegraf.Input {
|
||||
return &Docker{
|
||||
|
||||
@@ -3,6 +3,7 @@ package docker
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
@@ -614,7 +615,10 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"n_images": int(199),
|
||||
"n_goroutines": int(39),
|
||||
},
|
||||
map[string]string{"engine_host": "absol"},
|
||||
map[string]string{
|
||||
"engine_host": "absol",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -625,8 +629,9 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"available": int64(36530000000),
|
||||
},
|
||||
map[string]string{
|
||||
"unit": "bytes",
|
||||
"engine_host": "absol",
|
||||
"unit": "bytes",
|
||||
"engine_host": "absol",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -647,6 +652,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"ENVVAR7": "ENVVAR8=ENVVAR9",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -669,6 +675,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"ENVVAR7": "ENVVAR8=ENVVAR9",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
},
|
||||
)
|
||||
}
|
||||
@@ -711,3 +718,85 @@ func TestDockerGatherSwarmInfo(t *testing.T) {
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestContainerStateFilter(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
include []string
|
||||
exclude []string
|
||||
expected map[string][]string
|
||||
}{
|
||||
{
|
||||
name: "default",
|
||||
expected: map[string][]string{
|
||||
"status": []string{"running"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include running",
|
||||
include: []string{"running"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"running"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include glob",
|
||||
include: []string{"r*"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"restarting", "running", "removing"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include all",
|
||||
include: []string{"*"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exclude all",
|
||||
exclude: []string{"*"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "exclude all",
|
||||
include: []string{"*"},
|
||||
exclude: []string{"exited"},
|
||||
expected: map[string][]string{
|
||||
"status": []string{"created", "restarting", "running", "removing", "paused", "dead"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
|
||||
client := baseClient
|
||||
client.ContainerListF = func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
|
||||
for k, v := range tt.expected {
|
||||
actual := options.Filters.Get(k)
|
||||
sort.Strings(actual)
|
||||
sort.Strings(v)
|
||||
require.Equal(t, v, actual)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
d := Docker{
|
||||
newClient: newClientFunc,
|
||||
ContainerStateInclude: tt.include,
|
||||
ContainerStateExclude: tt.exclude,
|
||||
}
|
||||
|
||||
err := d.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,6 +55,7 @@ var info = types.Info{
|
||||
DockerRootDir: "/var/lib/docker",
|
||||
NoProxy: "",
|
||||
BridgeNfIP6tables: true,
|
||||
ServerVersion: "17.09.0-ce",
|
||||
}
|
||||
|
||||
var containerList = []types.Container{
|
||||
|
||||
@@ -35,7 +35,7 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
|
||||
|
||||
## node_stats is a list of sub-stats that you want to have gathered. Valid options
|
||||
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
|
||||
## "breakers". Per default, all stats are gathered.
|
||||
## "breaker". Per default, all stats are gathered.
|
||||
# node_stats = ["jvm", "http"]
|
||||
|
||||
## Optional SSL Config
|
||||
|
||||
@@ -105,7 +105,7 @@ const sampleConfig = `
|
||||
|
||||
## node_stats is a list of sub-stats that you want to have gathered. Valid options
|
||||
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
|
||||
## "breakers". Per default, all stats are gathered.
|
||||
## "breaker". Per default, all stats are gathered.
|
||||
# node_stats = ["jvm", "http"]
|
||||
|
||||
## Optional SSL Config
|
||||
|
||||
@@ -41,6 +41,8 @@ const sampleConfig = `
|
||||
data_format = "influx"
|
||||
`
|
||||
|
||||
const MaxStderrBytes = 512
|
||||
|
||||
type Exec struct {
|
||||
Commands []string
|
||||
Command string
|
||||
@@ -96,15 +98,41 @@ func (c CommandRunner) Run(
|
||||
|
||||
cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
|
||||
|
||||
var out bytes.Buffer
|
||||
var (
|
||||
out bytes.Buffer
|
||||
stderr bytes.Buffer
|
||||
)
|
||||
cmd.Stdout = &out
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil {
|
||||
switch e.parser.(type) {
|
||||
case *nagios.NagiosParser:
|
||||
AddNagiosState(err, acc)
|
||||
default:
|
||||
return nil, fmt.Errorf("exec: %s for command '%s'", err, command)
|
||||
var errMessage = ""
|
||||
if stderr.Len() > 0 {
|
||||
stderr = removeCarriageReturns(stderr)
|
||||
// Limit the number of bytes.
|
||||
didTruncate := false
|
||||
if stderr.Len() > MaxStderrBytes {
|
||||
stderr.Truncate(MaxStderrBytes)
|
||||
didTruncate = true
|
||||
}
|
||||
if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 {
|
||||
// Only show truncation if the newline wasn't the last character.
|
||||
if i < stderr.Len()-1 {
|
||||
didTruncate = true
|
||||
}
|
||||
stderr.Truncate(i)
|
||||
}
|
||||
if didTruncate {
|
||||
stderr.WriteString("...")
|
||||
}
|
||||
|
||||
errMessage = fmt.Sprintf(": %s", stderr.String())
|
||||
}
|
||||
return nil, fmt.Errorf("exec: %s for command '%s'%s", err, command, errMessage)
|
||||
}
|
||||
} else {
|
||||
switch e.parser.(type) {
|
||||
|
||||
@@ -144,83 +144,6 @@ func TestCommandError(t *testing.T) {
|
||||
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
func TestLineProtocolParse(t *testing.T) {
|
||||
parser, _ := parsers.NewInfluxParser()
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(lineProtocol), nil),
|
||||
Commands: []string{"line-protocol"},
|
||||
parser: parser,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(e.Gather))
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"host": "foo",
|
||||
"datacenter": "us-east",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
|
||||
}
|
||||
|
||||
func TestLineProtocolEmptyParse(t *testing.T) {
|
||||
parser, _ := parsers.NewInfluxParser()
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(lineProtocolEmpty), nil),
|
||||
Commands: []string{"line-protocol"},
|
||||
parser: parser,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLineProtocolShortParse(t *testing.T) {
|
||||
parser, _ := parsers.NewInfluxParser()
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(lineProtocolShort), nil),
|
||||
Commands: []string{"line-protocol"},
|
||||
parser: parser,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected")
|
||||
}
|
||||
|
||||
func TestLineProtocolParseMultiple(t *testing.T) {
|
||||
parser, _ := parsers.NewInfluxParser()
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(lineProtocolMulti), nil),
|
||||
Commands: []string{"line-protocol"},
|
||||
parser: parser,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"host": "foo",
|
||||
"datacenter": "us-east",
|
||||
}
|
||||
cpuTags := []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"}
|
||||
|
||||
for _, cpu := range cpuTags {
|
||||
tags["cpu"] = cpu
|
||||
acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecCommandWithGlob(t *testing.T) {
|
||||
parser, _ := parsers.NewValueParser("metric", "string", nil)
|
||||
e := NewExec()
|
||||
|
||||
51
plugins/inputs/fibaro/README.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Fibaro Input Plugin
|
||||
|
||||
The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather values of connected devices.
|
||||
These values can be true (1) or false (0) for switches, a percentage for dimmers, a temperature, etc.
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
# Read devices value(s) from a Fibaro controller
|
||||
[[inputs.fibaro]]
|
||||
## Required Fibaro controller address/hostname.
|
||||
## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
|
||||
url = "http://<controller>:80"
|
||||
|
||||
  ## Required credentials to access the API (http://<controller>/api/<component>)
|
||||
username = "<username>"
|
||||
password = "<password>"
|
||||
|
||||
## Amount of time allowed to complete the HTTP request
|
||||
# timeout = "5s"
|
||||
```
|
||||
|
||||
### Metrics:
|
||||
|
||||
- fibaro
|
||||
- tags:
|
||||
- section (section name)
|
||||
- room (room name)
|
||||
- name (device name)
|
||||
- type (device type)
|
||||
- fields:
|
||||
- value (float)
|
||||
- value2 (float, when available from device)
|
||||
|
||||
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
fibaro,host=vm1,name=Escaliers,room=Dégagement,section=Pièces\ communes,type=com.fibaro.binarySwitch value=0 1523351010000000000
|
||||
fibaro,host=vm1,name=Porte\ fenêtre,room=Salon,section=Pièces\ communes,type=com.fibaro.FGRM222 value=99,value2=99 1523351010000000000
|
||||
fibaro,host=vm1,name=LED\ îlot\ central,room=Cuisine,section=Cuisine,type=com.fibaro.binarySwitch value=0 1523351010000000000
|
||||
fibaro,host=vm1,name=Détérioration,room=Entrée,section=Pièces\ communes,type=com.fibaro.heatDetector value=0 1523351010000000000
|
||||
fibaro,host=vm1,name=Température,room=Cave,section=Cave,type=com.fibaro.temperatureSensor value=17.87 1523351010000000000
|
||||
fibaro,host=vm1,name=Présence,room=Garde-manger,section=Cuisine,type=com.fibaro.FGMS001 value=1 1523351010000000000
|
||||
fibaro,host=vm1,name=Luminosité,room=Garde-manger,section=Cuisine,type=com.fibaro.lightSensor value=92 1523351010000000000
|
||||
fibaro,host=vm1,name=Etat,room=Garage,section=Extérieur,type=com.fibaro.doorSensor value=0 1523351010000000000
|
||||
fibaro,host=vm1,name=CO2\ (ppm),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=880 1523351010000000000
|
||||
fibaro,host=vm1,name=Humidité\ (%),room=Salon,section=Pièces\ communes,type=com.fibaro.humiditySensor value=53 1523351010000000000
|
||||
fibaro,host=vm1,name=Pression\ (mb),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=1006.9 1523351010000000000
|
||||
fibaro,host=vm1,name=Bruit\ (db),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=58 1523351010000000000
|
||||
```
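
The numeric `value` fields shown above come from string values returned by the controller API. A minimal sketch of that conversion, assuming the API returns `value` as a string as in the plugin's JSON structures (the helper name here is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
)

// toFloat converts a Fibaro device value string to a numeric field:
// switch states "true"/"false" become 1/0, and numeric strings such as
// "22.80" are parsed as floats. The second return value reports whether
// the conversion succeeded.
func toFloat(raw string) (float64, bool) {
	switch raw {
	case "true":
		raw = "1"
	case "false":
		raw = "0"
	}
	f, err := strconv.ParseFloat(raw, 64)
	return f, err == nil
}

func main() {
	for _, v := range []string{"true", "false", "22.80", "n/a"} {
		if f, ok := toFloat(v); ok {
			fmt.Printf("%q -> %v\n", v, f)
		} else {
			fmt.Printf("%q -> skipped\n", v)
		}
	}
}
```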
|
||||
202
plugins/inputs/fibaro/fibaro.go
Normal file
@@ -0,0 +1,202 @@
|
||||
package fibaro
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
const sampleConfig = `
|
||||
## Required Fibaro controller address/hostname.
|
||||
## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
|
||||
url = "http://<controller>:80"
|
||||
|
||||
  ## Required credentials to access the API (http://<controller>/api/<component>)
|
||||
username = "<username>"
|
||||
password = "<password>"
|
||||
|
||||
## Amount of time allowed to complete the HTTP request
|
||||
# timeout = "5s"
|
||||
`
|
||||
|
||||
const description = "Read devices value(s) from a Fibaro controller"
|
||||
|
||||
// Fibaro contains connection information
|
||||
type Fibaro struct {
|
||||
URL string
|
||||
|
||||
// HTTP Basic Auth Credentials
|
||||
Username string
|
||||
Password string
|
||||
|
||||
Timeout internal.Duration
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// LinkRoomsSections links rooms to sections
|
||||
type LinkRoomsSections struct {
|
||||
Name string
|
||||
SectionID uint16
|
||||
}
|
||||
|
||||
// Sections contains section information
|
||||
type Sections struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// Rooms contains room information
|
||||
type Rooms struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
SectionID uint16 `json:"sectionID"`
|
||||
}
|
||||
|
||||
// Devices contains device information
|
||||
type Devices struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
RoomID uint16 `json:"roomID"`
|
||||
Type string `json:"type"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Properties struct {
|
||||
Dead interface{} `json:"dead"`
|
||||
Value interface{} `json:"value"`
|
||||
Value2 interface{} `json:"value2"`
|
||||
} `json:"properties"`
|
||||
}
|
||||
|
||||
// Description returns a string explaining the purpose of this plugin
|
||||
func (f *Fibaro) Description() string { return description }
|
||||
|
||||
// SampleConfig returns text explaining how plugin should be configured
|
||||
func (f *Fibaro) SampleConfig() string { return sampleConfig }
|
||||
|
||||
// getJSON connects, authenticates and reads JSON payload returned by Fibaro box
|
||||
func (f *Fibaro) getJSON(path string, dataStruct interface{}) error {
|
||||
var requestURL = f.URL + path
|
||||
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.SetBasicAuth(f.Username, f.Password)
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||
requestURL,
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
err = dec.Decode(&dataStruct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gather fetches all required information to output metrics
|
||||
func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
if f.client == nil {
|
||||
f.client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
Timeout: f.Timeout.Duration,
|
||||
}
|
||||
}
|
||||
|
||||
var tmpSections []Sections
|
||||
err := f.getJSON("/api/sections", &tmpSections)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sections := map[uint16]string{}
|
||||
for _, v := range tmpSections {
|
||||
sections[v.ID] = v.Name
|
||||
}
|
||||
|
||||
var tmpRooms []Rooms
|
||||
err = f.getJSON("/api/rooms", &tmpRooms)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rooms := map[uint16]LinkRoomsSections{}
|
||||
for _, v := range tmpRooms {
|
||||
rooms[v.ID] = LinkRoomsSections{Name: v.Name, SectionID: v.SectionID}
|
||||
}
|
||||
|
||||
var devices []Devices
|
||||
err = f.getJSON("/api/devices", &devices)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, device := range devices {
|
||||
		// skip devices that have no room, are disabled or dead, or are bare zwaveDevice entries
|
||||
if device.RoomID == 0 ||
|
||||
device.Enabled == false ||
|
||||
device.Properties.Dead == "true" ||
|
||||
device.Type == "com.fibaro.zwaveDevice" {
|
||||
continue
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"section": sections[rooms[device.RoomID].SectionID],
|
||||
"room": rooms[device.RoomID].Name,
|
||||
"name": device.Name,
|
||||
"type": device.Type,
|
||||
}
|
||||
fields := make(map[string]interface{})
|
||||
|
||||
if device.Properties.Value != nil {
|
||||
value := device.Properties.Value
|
||||
switch value {
|
||||
case "true":
|
||||
value = "1"
|
||||
case "false":
|
||||
value = "0"
|
||||
}
|
||||
|
||||
if fValue, err := strconv.ParseFloat(value.(string), 64); err == nil {
|
||||
fields["value"] = fValue
|
||||
}
|
||||
}
|
||||
|
||||
if device.Properties.Value2 != nil {
|
||||
if fValue, err := strconv.ParseFloat(device.Properties.Value2.(string), 64); err == nil {
|
||||
fields["value2"] = fValue
|
||||
}
|
||||
}
|
||||
|
||||
acc.AddFields("fibaro", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("fibaro", func() telegraf.Input {
|
||||
return &Fibaro{}
|
||||
})
|
||||
}
|
||||
204
plugins/inputs/fibaro/fibaro_test.go
Normal file
@@ -0,0 +1,204 @@
|
||||
package fibaro
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const sectionsJSON = `
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Section 1",
|
||||
"sortOrder": 1
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Section 2",
|
||||
"sortOrder": 2
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Section 3",
|
||||
"sortOrder": 3
|
||||
}
|
||||
]`
|
||||
|
||||
const roomsJSON = `
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Room 1",
|
||||
"sectionID": 1,
|
||||
"icon": "room_1",
|
||||
"sortOrder": 1
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Room 2",
|
||||
"sectionID": 2,
|
||||
"icon": "room_2",
|
||||
"sortOrder": 2
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Room 3",
|
||||
"sectionID": 3,
|
||||
"icon": "room_3",
|
||||
"sortOrder": 3
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"name": "Room 4",
|
||||
"sectionID": 3,
|
||||
"icon": "room_4",
|
||||
"sortOrder": 4
|
||||
}
|
||||
]`
|
||||
|
||||
const devicesJSON = `
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Device 1",
|
||||
"roomID": 1,
|
||||
"type": "com.fibaro.binarySwitch",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "false"
|
||||
},
|
||||
"sortOrder": 1
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Device 2",
|
||||
"roomID": 2,
|
||||
"type": "com.fibaro.binarySwitch",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "true"
|
||||
},
|
||||
"sortOrder": 2
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Device 3",
|
||||
"roomID": 3,
|
||||
"type": "com.fibaro.multilevelSwitch",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "67"
|
||||
},
|
||||
"sortOrder": 3
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"name": "Device 4",
|
||||
"roomID": 4,
|
||||
"type": "com.fibaro.temperatureSensor",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "22.80"
|
||||
},
|
||||
"sortOrder": 4
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"name": "Device 5",
|
||||
"roomID": 4,
|
||||
"type": "com.fibaro.FGRM222",
|
||||
"enabled": true,
|
||||
"properties": {
|
||||
"dead": "false",
|
||||
"value": "50",
|
||||
"value2": "75"
|
||||
},
|
||||
"sortOrder": 5
|
||||
}
|
||||
]`
|
||||
|
||||
// TestUnauthorized validates that 401 (wrong credentials) is managed properly
|
||||
func TestUnauthorized(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := Fibaro{
|
||||
URL: ts.URL,
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
client: &http.Client{},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(a.Gather)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// TestJSONSuccess validates that module works OK with valid JSON payloads
|
||||
func TestJSONSuccess(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
payload := ""
|
||||
switch r.URL.Path {
|
||||
case "/api/sections":
|
||||
payload = sectionsJSON
|
||||
case "/api/rooms":
|
||||
payload = roomsJSON
|
||||
case "/api/devices":
|
||||
payload = devicesJSON
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, payload)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := Fibaro{
|
||||
URL: ts.URL,
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
client: &http.Client{},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(a.Gather)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Gather should add 5 metrics
|
||||
assert.Equal(t, uint64(5), acc.NMetrics())
|
||||
|
||||
// Ensure fields / values are correct - Device 1
|
||||
tags := map[string]string{"section": "Section 1", "room": "Room 1", "name": "Device 1", "type": "com.fibaro.binarySwitch"}
|
||||
fields := map[string]interface{}{"value": float64(0)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 2
|
||||
tags = map[string]string{"section": "Section 2", "room": "Room 2", "name": "Device 2", "type": "com.fibaro.binarySwitch"}
|
||||
fields = map[string]interface{}{"value": float64(1)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 3
|
||||
tags = map[string]string{"section": "Section 3", "room": "Room 3", "name": "Device 3", "type": "com.fibaro.multilevelSwitch"}
|
||||
fields = map[string]interface{}{"value": float64(67)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 4
|
||||
tags = map[string]string{"section": "Section 3", "room": "Room 4", "name": "Device 4", "type": "com.fibaro.temperatureSensor"}
|
||||
fields = map[string]interface{}{"value": float64(22.8)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
|
||||
// Ensure fields / values are correct - Device 5
|
||||
tags = map[string]string{"section": "Section 3", "room": "Room 4", "name": "Device 5", "type": "com.fibaro.FGRM222"}
|
||||
fields = map[string]interface{}{"value": float64(50), "value2": float64(75)}
|
||||
acc.AssertContainsTaggedFields(t, "fibaro", fields, tags)
|
||||
}
|
||||
52
plugins/inputs/http/README.md
Normal file
@@ -0,0 +1,52 @@
|
||||
# HTTP Input Plugin
|
||||
|
||||
The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The endpoint should have metrics formatted in one of the supported [input data formats](../../../docs/DATA_FORMATS_INPUT.md). Each data format has its own unique set of configuration options which can be added to the input configuration.
|
||||
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
# Read formatted metrics from one or more HTTP endpoints
|
||||
[[inputs.http]]
|
||||
## One or more URLs from which to read formatted metrics
|
||||
urls = [
|
||||
"http://localhost/metrics"
|
||||
]
|
||||
|
||||
## HTTP method
|
||||
# method = "GET"
|
||||
|
||||
## Optional HTTP headers
|
||||
# headers = {"X-Special-Header" = "Special-Value"}
|
||||
|
||||
## Optional HTTP Basic Auth Credentials
|
||||
# username = "username"
|
||||
# password = "pa$$word"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Amount of time allowed to complete the HTTP request
|
||||
# timeout = "5s"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
# data_format = "influx"
|
||||
|
||||
```
|
||||
|
||||
### Metrics:
|
||||
|
||||
The metrics collected by this input plugin will depend on the configured `data_format` and the payload returned by the HTTP endpoint(s).
|
||||
|
||||
The default values below are added if the input format does not specify a value:
|
||||
|
||||
- http
|
||||
- tags:
|
||||
- url
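
A minimal sketch of how that default `url` tag is applied to parsed metrics, mirroring the behavior this plugin adds (the metric type shown here is a simplified, illustrative stand-in for a Telegraf metric):

```go
package main

import "fmt"

// metric is a simplified stand-in for a parsed Telegraf metric.
type metric struct {
	name   string
	fields map[string]interface{}
	tags   map[string]string
}

// tagWithURL adds the source URL as a tag unless the parsed payload
// already carried one, which is the defaulting described above.
func tagWithURL(ms []metric, url string) {
	for i := range ms {
		if _, ok := ms[i].tags["url"]; !ok {
			ms[i].tags["url"] = url
		}
	}
}

func main() {
	ms := []metric{{name: "http", fields: map[string]interface{}{"a": 1.2}, tags: map[string]string{}}}
	tagWithURL(ms, "http://localhost/metrics")
	fmt.Println(ms[0].tags["url"]) // http://localhost/metrics
}
```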
|
||||
204
plugins/inputs/http/http.go
Normal file
@@ -0,0 +1,204 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
)
|
||||
|
||||
type HTTP struct {
|
||||
URLs []string `toml:"urls"`
|
||||
Method string
|
||||
|
||||
Headers map[string]string
|
||||
|
||||
// HTTP Basic Auth Credentials
|
||||
Username string
|
||||
Password string
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
|
||||
Timeout internal.Duration
|
||||
|
||||
client *http.Client
|
||||
|
||||
// The parser will automatically be set by Telegraf core code because
|
||||
// this plugin implements the ParserInput interface (i.e. the SetParser method)
|
||||
parser parsers.Parser
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## One or more URLs from which to read formatted metrics
|
||||
urls = [
|
||||
"http://localhost/metrics"
|
||||
]
|
||||
|
||||
## HTTP method
|
||||
# method = "GET"
|
||||
|
||||
## Optional HTTP headers
|
||||
# headers = {"X-Special-Header" = "Special-Value"}
|
||||
|
||||
## Optional HTTP Basic Auth Credentials
|
||||
# username = "username"
|
||||
# password = "pa$$word"
|
||||
|
||||
## Tag all metrics with the url
|
||||
# tag_url = true
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
|
||||
## Amount of time allowed to complete the HTTP request
|
||||
# timeout = "5s"
|
||||
|
||||
## Data format to consume.
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
# data_format = "influx"
|
||||
`
|
||||
|
||||
// SampleConfig returns the default configuration of the Input
|
||||
func (*HTTP) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Description returns a one-sentence description on the Input
|
||||
func (*HTTP) Description() string {
|
||||
return "Read formatted metrics from one or more HTTP endpoints"
|
||||
}
|
||||
|
||||
// Gather takes in an accumulator and adds the metrics that the Input
|
||||
// gathers. This is called every "interval"
|
||||
func (h *HTTP) Gather(acc telegraf.Accumulator) error {
|
||||
if h.parser == nil {
|
||||
return errors.New("Parser is not set")
|
||||
}
|
||||
|
||||
if h.client == nil {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
Timeout: h.Timeout.Duration,
|
||||
}
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, u := range h.URLs {
|
||||
wg.Add(1)
|
||||
go func(url string) {
|
||||
defer wg.Done()
|
||||
if err := h.gatherURL(acc, url); err != nil {
|
||||
acc.AddError(fmt.Errorf("[url=%s]: %s", url, err))
|
||||
}
|
||||
}(u)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetParser takes the data_format from the config and finds the right parser for that format
|
||||
func (h *HTTP) SetParser(parser parsers.Parser) {
|
||||
h.parser = parser
|
||||
}
|
||||
|
||||
// Gathers data from a particular URL
|
||||
// Parameters:
|
||||
// acc : The telegraf Accumulator to use
|
||||
// url : endpoint to send request to
|
||||
//
|
||||
// Returns:
|
||||
// error: Any error that may have occurred
|
||||
func (h *HTTP) gatherURL(
|
||||
acc telegraf.Accumulator,
|
||||
url string,
|
||||
) error {
|
||||
request, err := http.NewRequest(h.Method, url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range h.Headers {
|
||||
if strings.ToLower(k) == "host" {
|
||||
request.Host = v
|
||||
} else {
|
||||
request.Header.Add(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
if h.Username != "" || h.Password != "" {
|
||||
request.SetBasicAuth(h.Username, h.Password)
|
||||
}
|
||||
|
||||
resp, err := h.client.Do(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("Received status code %d (%s), expected %d (%s)",
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
metrics, err := h.parser.Parse(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, metric := range metrics {
|
||||
if !metric.HasTag("url") {
|
||||
metric.AddTag("url", url)
|
||||
}
|
||||
acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("http", func() telegraf.Input {
|
||||
return &HTTP{
|
||||
Timeout: internal.Duration{Duration: time.Second * 5},
|
||||
Method: "GET",
|
||||
}
|
||||
})
|
||||
}
|
||||
139
plugins/inputs/http/http_test.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package http_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
plugin "github.com/influxdata/telegraf/plugins/inputs/http"
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHTTPwithJSONFormat(t *testing.T) {
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/endpoint" {
|
||||
_, _ = w.Write([]byte(simpleJSON))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
url := fakeServer.URL + "/endpoint"
|
||||
plugin := &plugin.HTTP{
|
||||
URLs: []string{url},
|
||||
}
|
||||
metricName := "metricName"
|
||||
p, _ := parsers.NewJSONParser(metricName, nil, nil)
|
||||
plugin.SetParser(p)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(plugin.Gather))
|
||||
|
||||
require.Len(t, acc.Metrics, 1)
|
||||
|
||||
// basic check to see if we got the right field, value and tag
|
||||
var metric = acc.Metrics[0]
|
||||
require.Equal(t, metric.Measurement, metricName)
|
||||
require.Len(t, acc.Metrics[0].Fields, 1)
|
||||
require.Equal(t, acc.Metrics[0].Fields["a"], 1.2)
|
||||
require.Equal(t, acc.Metrics[0].Tags["url"], url)
|
||||
}
|
||||
|
||||
func TestHTTPHeaders(t *testing.T) {
|
||||
header := "X-Special-Header"
|
||||
headerValue := "Special-Value"
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/endpoint" {
|
||||
if r.Header.Get(header) == headerValue {
|
||||
_, _ = w.Write([]byte(simpleJSON))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
}
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
url := fakeServer.URL + "/endpoint"
|
||||
plugin := &plugin.HTTP{
|
||||
URLs: []string{url},
|
||||
Headers: map[string]string{header: headerValue},
|
||||
}
|
||||
metricName := "metricName"
|
||||
p, _ := parsers.NewJSONParser(metricName, nil, nil)
|
||||
plugin.SetParser(p)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(plugin.Gather))
|
||||
}
|
||||
|
||||
func TestInvalidStatusCode(t *testing.T) {
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
url := fakeServer.URL + "/endpoint"
|
||||
plugin := &plugin.HTTP{
|
||||
URLs: []string{url},
|
||||
}
|
||||
|
||||
metricName := "metricName"
|
||||
p, _ := parsers.NewJSONParser(metricName, nil, nil)
|
||||
plugin.SetParser(p)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.Error(t, acc.GatherError(plugin.Gather))
|
||||
}
|
||||
|
||||
func TestMethod(t *testing.T) {
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "POST" {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
plugin := &plugin.HTTP{
|
||||
URLs: []string{fakeServer.URL},
|
||||
Method: "POST",
|
||||
}
|
||||
|
||||
metricName := "metricName"
|
||||
p, _ := parsers.NewJSONParser(metricName, nil, nil)
|
||||
plugin.SetParser(p)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(plugin.Gather))
|
||||
}
|
||||
|
||||
func TestParserNotSet(t *testing.T) {
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/endpoint" {
|
||||
_, _ = w.Write([]byte(simpleJSON))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
url := fakeServer.URL + "/endpoint"
|
||||
plugin := &plugin.HTTP{
|
||||
URLs: []string{url},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.Error(t, acc.GatherError(plugin.Gather))
|
||||
}
|
||||
|
||||
const simpleJSON = `
|
||||
{
|
||||
"a": 1.2
|
||||
}
|
||||
`
|
||||
@@ -12,6 +12,8 @@ Enable TLS by specifying the file names of a service TLS certificate and key.
|
||||
|
||||
Enable mutually authenticated TLS and authorize client connections by their signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`.
|
||||
|
||||
Enable basic HTTP authentication of clients by specifying a username and password to check for. These credentials will be received from the client _as plain text_ if TLS is not configured.
|
||||
|
||||
See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
|
||||
|
||||
**Example:**
|
||||
@@ -39,4 +41,8 @@ This is a sample configuration for the plugin.
|
||||
|
||||
## MTLS
|
||||
tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
|
||||
|
||||
## Basic authentication
|
||||
basic_username = "foobar"
|
||||
basic_password = "barfoo"
|
||||
```
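
A minimal sketch of the credential check the listener performs when `basic_username`/`basic_password` are set; it uses constant-time comparison so response timing does not leak how much of the secret matched. The handler and function names here are illustrative, not the plugin's exact API:

```go
package main

import (
	"crypto/subtle"
	"log"
	"net/http"
)

// basicAuth wraps a handler and rejects requests whose Basic Auth
// credentials do not match the configured username and password.
// If either credential is unset, authentication is skipped.
func basicAuth(username, password string, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if username == "" || password == "" {
			next(w, r) // authentication not configured
			return
		}
		u, p, ok := r.BasicAuth()
		if !ok ||
			subtle.ConstantTimeCompare([]byte(u), []byte(username)) != 1 ||
			subtle.ConstantTimeCompare([]byte(p), []byte(password)) != 1 {
			http.Error(w, "Unauthorized.", http.StatusUnauthorized)
			return
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/write", basicAuth("foobar", "barfoo", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	log.Fatal(http.ListenAndServe(":8186", nil))
}
```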
|
||||
|
||||
@@ -3,6 +3,7 @@ package http_listener
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/subtle"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io"
|
||||
@@ -32,6 +33,8 @@ const (
|
||||
DEFAULT_MAX_LINE_SIZE = 64 * 1024
|
||||
)
|
||||
|
||||
type TimeFunc func() time.Time
|
||||
|
||||
type HTTPListener struct {
|
||||
ServiceAddress string
|
||||
ReadTimeout internal.Duration
|
||||
@@ -44,14 +47,20 @@ type HTTPListener struct {
|
||||
TlsCert string
|
||||
TlsKey string
|
||||
|
||||
BasicUsername string
|
||||
BasicPassword string
|
||||
|
||||
TimeFunc
|
||||
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
|
||||
listener net.Listener
|
||||
|
||||
parser influx.InfluxParser
|
||||
acc telegraf.Accumulator
|
||||
pool *pool
|
||||
handler *influx.MetricHandler
|
||||
parser *influx.Parser
|
||||
acc telegraf.Accumulator
|
||||
pool *pool
|
||||
|
||||
BytesRecv selfstat.Stat
|
||||
RequestsServed selfstat.Stat
|
||||
@@ -64,6 +73,7 @@ type HTTPListener struct {
|
||||
PingsRecv selfstat.Stat
|
||||
NotFoundsServed selfstat.Stat
|
||||
BuffersCreated selfstat.Stat
|
||||
AuthFailures selfstat.Stat
|
||||
}
|
||||
|
||||
const sampleConfig = `
|
||||
@@ -90,6 +100,11 @@ const sampleConfig = `
|
||||
## Add service certificate and key
|
||||
tls_cert = "/etc/telegraf/cert.pem"
|
||||
tls_key = "/etc/telegraf/key.pem"
|
||||
|
||||
## Optional username and password to accept for HTTP basic authentication.
|
||||
## You probably want to make sure you have TLS configured above for this.
|
||||
# basic_username = "foobar"
|
||||
# basic_password = "barfoo"
|
||||
`
|
||||
|
||||
func (h *HTTPListener) SampleConfig() string {
|
||||
@@ -124,6 +139,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
|
||||
h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags)
|
||||
h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags)
|
||||
h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags)
|
||||
h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags)
|
||||
|
||||
if h.MaxBodySize == 0 {
|
||||
h.MaxBodySize = DEFAULT_MAX_BODY_SIZE
|
||||
@@ -165,6 +181,9 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
|
||||
h.listener = listener
|
||||
h.Port = listener.Addr().(*net.TCPAddr).Port
|
||||
|
||||
h.handler = influx.NewMetricHandler()
|
||||
h.parser = influx.NewParser(h.handler)
|
||||
|
||||
h.wg.Add(1)
|
||||
go func() {
|
||||
defer h.wg.Done()
|
||||
@@ -194,25 +213,29 @@ func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
|
||||
case "/write":
|
||||
h.WritesRecv.Incr(1)
|
||||
defer h.WritesServed.Incr(1)
|
||||
h.serveWrite(res, req)
|
||||
h.AuthenticateIfSet(h.serveWrite, res, req)
|
||||
case "/query":
|
||||
h.QueriesRecv.Incr(1)
|
||||
defer h.QueriesServed.Incr(1)
|
||||
// Deliver a dummy response to the query endpoint, as some InfluxDB
|
||||
// clients test endpoint availability with a query
|
||||
res.Header().Set("Content-Type", "application/json")
|
||||
res.Header().Set("X-Influxdb-Version", "1.0")
|
||||
res.WriteHeader(http.StatusOK)
|
||||
res.Write([]byte("{\"results\":[]}"))
|
||||
h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) {
|
||||
res.Header().Set("Content-Type", "application/json")
|
||||
res.Header().Set("X-Influxdb-Version", "1.0")
|
||||
res.WriteHeader(http.StatusOK)
|
||||
res.Write([]byte("{\"results\":[]}"))
|
||||
}, res, req)
|
||||
case "/ping":
|
||||
h.PingsRecv.Incr(1)
|
||||
defer h.PingsServed.Incr(1)
|
||||
// respond to ping requests
|
||||
res.WriteHeader(http.StatusNoContent)
|
||||
h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) {
|
||||
res.WriteHeader(http.StatusNoContent)
|
||||
}, res, req)
|
||||
default:
|
||||
defer h.NotFoundsServed.Incr(1)
|
||||
// Don't know how to respond to calls to other endpoints
|
||||
http.NotFound(res, req)
|
||||
h.AuthenticateIfSet(http.NotFound, res, req)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -222,7 +245,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
|
||||
tooLarge(res)
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
now := h.TimeFunc()
|
||||
|
||||
precision := req.URL.Query().Get("precision")
|
||||
|
||||
@@ -321,7 +344,12 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
|
||||
}
|
||||
|
||||
func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
|
||||
metrics, err := h.parser.ParseWithDefaultTimePrecision(b, t, precision)
|
||||
h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
|
||||
h.handler.SetTimeFunc(func() time.Time { return t })
|
||||
metrics, err := h.parser.Parse(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, m := range metrics {
|
||||
h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
|
||||
@@ -376,10 +404,45 @@ func (h *HTTPListener) getTLSConfig() *tls.Config {
|
||||
return tlsConf
|
||||
}
|
||||
|
||||
func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
|
||||
if h.BasicUsername != "" && h.BasicPassword != "" {
|
||||
reqUsername, reqPassword, ok := req.BasicAuth()
|
||||
if !ok ||
|
||||
subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 ||
|
||||
subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 {
|
||||
|
||||
h.AuthFailures.Incr(1)
|
||||
http.Error(res, "Unauthorized.", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
handler(res, req)
|
||||
} else {
|
||||
handler(res, req)
|
||||
}
|
||||
}
|
||||
|
||||
func getPrecisionMultiplier(precision string) time.Duration {
|
||||
d := time.Nanosecond
|
||||
switch precision {
|
||||
case "u":
|
||||
d = time.Microsecond
|
||||
case "ms":
|
||||
d = time.Millisecond
|
||||
case "s":
|
||||
d = time.Second
|
||||
case "m":
|
||||
d = time.Minute
|
||||
case "h":
|
||||
d = time.Hour
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("http_listener", func() telegraf.Input {
|
||||
return &HTTPListener{
|
||||
ServiceAddress: ":8186",
|
||||
TimeFunc: time.Now,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,6 +1,6 @@
|
||||
# Example Input Plugin
|
||||
# HTTP Response Input Plugin
|
||||
|
||||
This input plugin will test HTTP/HTTPS connections.
|
||||
This input plugin checks HTTP/HTTPS connections.
|
||||
|
||||
### Configuration:
|
||||
|
||||
@@ -10,6 +10,9 @@ This input plugin will test HTTP/HTTPS connections.
|
||||
## Server address (default http://localhost)
|
||||
# address = "http://localhost"
|
||||
|
||||
  ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
|
||||
# http_proxy = "http://localhost:8888"
|
||||
|
||||
## Set response_timeout (default 5 seconds)
|
||||
# response_timeout = "5s"
|
||||
|
||||
@@ -41,21 +44,38 @@ This input plugin will test HTTP/HTTPS connections.
|
||||
# Host = "github.com"
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
### Metrics:
|
||||
|
||||
- http_response
|
||||
- tags:
|
||||
- server (target URL)
|
||||
- method (request method)
|
||||
- status_code (response status code)
|
||||
- result ([see below](#result--result_code))
|
||||
- fields:
|
||||
- response_time (float, seconds)
|
||||
- http_response_code (int) #The code received
|
||||
- result_type (string) # success, timeout, response_string_mismatch, connection_failed
|
||||
- http_response_code (int, response status code)
|
||||
- result_type (string, deprecated in 1.6: use `result` tag and `result_code` field)
|
||||
- result_code (int, [see below](#result--result_code))
|
||||
|
||||
### Tags:
|
||||
#### `result` / `result_code`
|
||||
|
||||
After polling the target server, the plugin registers the result of the operation in the `result` tag, and adds a numeric field called `result_code` corresponding to that tag value.
|
||||
|
||||
This tag is used to expose network and plugin errors. Responses with HTTP error status codes are still considered a successful connection.
|
||||
|
||||
|Tag value |Corresponding field value|Description|
|
||||
|--------------------------|-------------------------|-----------|
|
||||
|success | 0 |The HTTP request completed, even if the HTTP code represents an error|
|
||||
|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex|
|
||||
|body_read_error           | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc.) will trigger this error|
|
||||
|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin|
|
||||
|timeout                   | 4 |The plugin timed out while waiting for the HTTP request to complete|
|
||||
|dns_error | 5 |There was a DNS error while attempting to connect to the host|
|
||||
|
||||
- All measurements have the following tags:
|
||||
- server
|
||||
- method
|
||||
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
http_response,method=GET,server=http://www.github.com http_response_code=200i,response_time=6.223266528 1459419354977857955
|
||||
http_response,method=GET,server=http://www.github.com,status_code=200,result=success http_response_code=200i,response_time=6.223266528,result_type="success",result_code=0i 1459419354977857955
|
||||
```
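
The `result` tag and `result_code` field shown above are set together from one lookup table. A minimal sketch of that mapping, mirroring the table in this README (the helper is simplified and omits the deprecated `result_type` field):

```go
package main

import "fmt"

// resultCodes maps the result tag value to the numeric result_code field,
// as documented in the table above.
var resultCodes = map[string]int{
	"success":                  0,
	"response_string_mismatch": 1,
	"body_read_error":          2,
	"connection_failed":        3,
	"timeout":                  4,
	"dns_error":                5,
}

// setResult records the outcome both as a tag and as a numeric field.
func setResult(result string, fields map[string]interface{}, tags map[string]string) {
	tags["result"] = result
	fields["result_code"] = resultCodes[result]
}

func main() {
	fields := map[string]interface{}{}
	tags := map[string]string{"server": "http://www.github.com", "method": "GET"}
	setResult("success", fields, tags)
	fmt.Println(tags, fields)
}
```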
|
||||
|
||||
@@ -2,6 +2,7 @@ package http_response
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -9,6 +10,7 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -20,6 +22,7 @@ import (
|
||||
// HTTPResponse struct
|
||||
type HTTPResponse struct {
|
||||
Address string
|
||||
HTTPProxy string `toml:"http_proxy"`
|
||||
Body string
|
||||
Method string
|
||||
ResponseTimeout internal.Duration
|
||||
@@ -49,6 +52,9 @@ var sampleConfig = `
|
||||
## Server address (default http://localhost)
|
||||
# address = "http://localhost"
|
||||
|
||||
  ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
|
||||
# http_proxy = "http://localhost:8888"
|
||||
|
||||
## Set response_timeout (default 5 seconds)
|
||||
# response_timeout = "5s"
|
||||
|
||||
@@ -88,6 +94,22 @@ func (h *HTTPResponse) SampleConfig() string {
|
||||
// ErrRedirectAttempted indicates that a redirect occurred
|
||||
var ErrRedirectAttempted = errors.New("redirect")
|
||||
|
||||
// Set the proxy. A configured proxy overrides the system-wide proxy.
|
||||
func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) {
|
||||
if http_proxy == "" {
|
||||
return http.ProxyFromEnvironment
|
||||
}
|
||||
proxyURL, err := url.Parse(http_proxy)
|
||||
if err != nil {
|
||||
return func(_ *http.Request) (*url.URL, error) {
|
||||
return nil, errors.New("bad proxy: " + err.Error())
|
||||
}
|
||||
}
|
||||
return func(r *http.Request) (*url.URL, error) {
|
||||
return proxyURL, nil
|
||||
}
|
||||
}
|
||||
|
||||
// CreateHttpClient creates an http client which will timeout at the specified
|
||||
// timeout period and can follow redirects if specified
|
||||
func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||
@@ -98,7 +120,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||
}
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Proxy: getProxyFunc(h.HTTPProxy),
|
||||
DisableKeepAlives: true,
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
@@ -113,10 +135,54 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func setResult(result_string string, fields map[string]interface{}, tags map[string]string) {
|
||||
result_codes := map[string]int{
|
||||
"success": 0,
|
||||
"response_string_mismatch": 1,
|
||||
"body_read_error": 2,
|
||||
"connection_failed": 3,
|
||||
"timeout": 4,
|
||||
"dns_error": 5,
|
||||
}
|
||||
|
||||
tags["result"] = result_string
|
||||
fields["result_type"] = result_string
|
||||
fields["result_code"] = result_codes[result_string]
|
||||
}
|
||||
|
||||
func setError(err error, fields map[string]interface{}, tags map[string]string) error {
|
||||
if timeoutError, ok := err.(net.Error); ok && timeoutError.Timeout() {
|
||||
setResult("timeout", fields, tags)
|
||||
return timeoutError
|
||||
}
|
||||
|
||||
urlErr, isUrlErr := err.(*url.Error)
|
||||
if !isUrlErr {
|
||||
return nil
|
||||
}
|
||||
|
||||
opErr, isNetErr := (urlErr.Err).(*net.OpError)
|
||||
if isNetErr {
|
||||
switch e := (opErr.Err).(type) {
|
||||
case (*net.DNSError):
|
||||
setResult("dns_error", fields, tags)
|
||||
return e
|
||||
case (*net.ParseError):
|
||||
// Parse error has to do with parsing of IP addresses, so we
|
||||
// group it with address errors
|
||||
setResult("address_error", fields, tags)
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HTTPGather gathers all fields and returns any errors it encounters
|
||||
func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
|
||||
// Prepare fields
|
||||
func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, error) {
|
||||
// Prepare fields and tags
|
||||
fields := make(map[string]interface{})
|
||||
tags := map[string]string{"server": h.Address, "method": h.Method}
|
||||
|
||||
var body io.Reader
|
||||
if h.Body != "" {
|
||||
@@ -124,7 +190,7 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
|
||||
}
|
||||
request, err := http.NewRequest(h.Method, h.Address, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for key, val := range h.Headers {
|
||||
@@ -137,68 +203,87 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
|
||||
// Start Timer
|
||||
start := time.Now()
|
||||
resp, err := h.client.Do(request)
|
||||
response_time := time.Since(start).Seconds()
|
||||
|
||||
	// If an error is returned, it means we are dealing with a network error, as
|
||||
// HTTP error codes do not generate errors in the net/http library
|
||||
if err != nil {
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
||||
fields["result_type"] = "timeout"
|
||||
return fields, nil
|
||||
// Log error
|
||||
log.Printf("D! Network error while polling %s: %s", h.Address, err.Error())
|
||||
|
||||
// Get error details
|
||||
netErr := setError(err, fields, tags)
|
||||
|
||||
		// If we recognize the returned error, stop here
|
||||
if netErr != nil {
|
||||
return fields, tags, nil
|
||||
}
|
||||
fields["result_type"] = "connection_failed"
|
||||
if h.FollowRedirects {
|
||||
return fields, nil
|
||||
}
|
||||
if urlError, ok := err.(*url.Error); ok &&
|
||||
urlError.Err == ErrRedirectAttempted {
|
||||
|
||||
		// Any error not recognized by setError is considered a "connection_failed"
|
||||
setResult("connection_failed", fields, tags)
|
||||
|
||||
// If the error is a redirect we continue processing and log the HTTP code
|
||||
urlError, isUrlError := err.(*url.Error)
|
||||
if !h.FollowRedirects && isUrlError && urlError.Err == ErrRedirectAttempted {
|
||||
err = nil
|
||||
} else {
|
||||
return fields, nil
|
||||
			// If the error isn't a timeout or a redirect, stop
|
||||
// processing the request
|
||||
return fields, tags, nil
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := fields["response_time"]; !ok {
|
||||
fields["response_time"] = response_time
|
||||
}
|
||||
|
||||
// This function closes the response body, as
|
||||
// required by the net/http library
|
||||
defer func() {
|
||||
io.Copy(ioutil.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
}()
|
||||
|
||||
fields["response_time"] = time.Since(start).Seconds()
|
||||
// Record the HTTP response code as both a tag and a field
|
||||
tags["status_code"] = strconv.Itoa(resp.StatusCode)
|
||||
fields["http_response_code"] = resp.StatusCode
|
||||
|
||||
// Check the response for a regex match.
|
||||
if h.ResponseStringMatch != "" {
|
||||
|
||||
// Compile once and reuse
|
||||
if h.compiledStringMatch == nil {
|
||||
h.compiledStringMatch = regexp.MustCompile(h.ResponseStringMatch)
|
||||
if err != nil {
|
||||
log.Printf("E! Failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
|
||||
fields["result_type"] = "response_string_mismatch"
|
||||
return fields, nil
|
||||
}
|
||||
}
|
||||
|
||||
bodyBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Printf("E! Failed to read body of HTTP Response : %s", err)
|
||||
fields["result_type"] = "response_string_mismatch"
|
||||
log.Printf("D! Failed to read body of HTTP Response : %s", err)
|
||||
setResult("body_read_error", fields, tags)
|
||||
fields["response_string_match"] = 0
|
||||
return fields, nil
|
||||
return fields, tags, nil
|
||||
}
|
||||
|
||||
if h.compiledStringMatch.Match(bodyBytes) {
|
||||
fields["result_type"] = "success"
|
||||
setResult("success", fields, tags)
|
||||
fields["response_string_match"] = 1
|
||||
} else {
|
||||
fields["result_type"] = "response_string_mismatch"
|
||||
setResult("response_string_mismatch", fields, tags)
|
||||
fields["response_string_match"] = 0
|
||||
}
|
||||
} else {
|
||||
fields["result_type"] = "success"
|
||||
setResult("success", fields, tags)
|
||||
}
|
||||
|
||||
return fields, nil
|
||||
return fields, tags, nil
|
||||
}
|
||||
|
||||
// Gather gets all metric fields and tags and returns any errors it encounters
|
||||
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
|
||||
// Compile the body regex if it exists
|
||||
if h.compiledStringMatch == nil {
|
||||
var err error
|
||||
h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set default values
|
||||
if h.ResponseTimeout.Duration < time.Second {
|
||||
h.ResponseTimeout.Duration = time.Second * 5
|
||||
@@ -217,9 +302,10 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
|
||||
if addr.Scheme != "http" && addr.Scheme != "https" {
|
||||
return errors.New("Only http and https are supported")
|
||||
}
|
||||
|
||||
// Prepare data
|
||||
tags := map[string]string{"server": h.Address, "method": h.Method}
|
||||
var fields map[string]interface{}
|
||||
var tags map[string]string
|
||||
|
||||
if h.client == nil {
|
||||
client, err := h.createHttpClient()
|
||||
@@ -230,10 +316,11 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
// Gather data
|
||||
fields, err = h.httpGather()
|
||||
fields, tags, err = h.httpGather()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add metrics
|
||||
acc.AddFields("http_response", fields, tags)
|
||||
return nil
|
||||
|
||||
@@ -15,6 +15,68 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Receives a list with fields that are expected to be absent
|
||||
func checkAbsentFields(t *testing.T, fields []string, acc *testutil.Accumulator) {
|
||||
for _, field := range fields {
|
||||
ok := acc.HasField("http_response", field)
|
||||
require.False(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Receives a list with tags that are expected to be absent
|
||||
func checkAbsentTags(t *testing.T, tags []string, acc *testutil.Accumulator) {
|
||||
for _, tag := range tags {
|
||||
ok := acc.HasTag("http_response", tag)
|
||||
require.False(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Receives a map of expected fields and their values. If a value is nil, it will only check
|
||||
// that the field exists, but not its contents
|
||||
func checkFields(t *testing.T, fields map[string]interface{}, acc *testutil.Accumulator) {
|
||||
for key, field := range fields {
|
||||
switch v := field.(type) {
|
||||
case int:
|
||||
value, ok := acc.IntField("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, field, value)
|
||||
case float64:
|
||||
value, ok := acc.FloatField("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, field, value)
|
||||
case string:
|
||||
value, ok := acc.StringField("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, field, value)
|
||||
case nil:
|
||||
ok := acc.HasField("http_response", key)
|
||||
require.True(t, ok)
|
||||
default:
|
||||
t.Log("Unsupported type for field: ", v)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Receives a map of expected tags and their values. If a value is nil, it will only check
|
||||
// that the tag exists, but not its contents
|
||||
func checkTags(t *testing.T, tags map[string]interface{}, acc *testutil.Accumulator) {
|
||||
for key, tag := range tags {
|
||||
switch v := tag.(type) {
|
||||
case string:
|
||||
ok := acc.HasTag("http_response", key)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, tag, acc.TagValue("http_response", key))
|
||||
case nil:
|
||||
ok := acc.HasTag("http_response", key)
|
||||
require.True(t, ok)
|
||||
default:
|
||||
t.Log("Unsupported type for tag: ", v)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setUpTestMux() http.Handler {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/redirect", func(w http.ResponseWriter, req *http.Request) {
|
||||
@@ -56,6 +118,24 @@ func setUpTestMux() http.Handler {
|
||||
return mux
|
||||
}
|
||||
|
||||
func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[string]interface{}, presentTags map[string]interface{}, absentFields []string, absentTags []string) {
|
||||
if presentFields != nil {
|
||||
checkFields(t, presentFields, acc)
|
||||
}
|
||||
|
||||
if presentTags != nil {
|
||||
checkTags(t, presentTags, acc)
|
||||
}
|
||||
|
||||
if absentFields != nil {
|
||||
checkAbsentFields(t, absentFields, acc)
|
||||
}
|
||||
|
||||
if absentTags != nil {
|
||||
checkAbsentTags(t, absentTags, acc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaders(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
cHeader := r.Header.Get("Content-Type")
|
||||
@@ -78,9 +158,20 @@ func TestHeaders(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "200",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields := []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
}
|
||||
|
||||
func TestFields(t *testing.T) {
|
||||
@@ -103,12 +194,20 @@ func TestFields(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
response_value, ok := acc.StringField("http_response", "result_type")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "success", response_value)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "200",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields := []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
}
|
||||
|
||||
func TestRedirects(t *testing.T) {
|
||||
@@ -130,9 +229,20 @@ func TestRedirects(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "200",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields := []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
|
||||
h = &HTTPResponse{
|
||||
Address: ts.URL + "/badredirect",
|
||||
@@ -148,11 +258,21 @@ func TestRedirects(t *testing.T) {
|
||||
err = h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok = acc.IntField("http_response", "http_response_code")
|
||||
require.False(t, ok)
|
||||
response_value, ok := acc.StringField("http_response", "result_type")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "connection_failed", response_value)
|
||||
expectedFields = map[string]interface{}{
|
||||
"result_type": "connection_failed",
|
||||
"result_code": 3,
|
||||
}
|
||||
expectedTags = map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"result": "connection_failed",
|
||||
}
|
||||
absentFields = []string{"http_response_code", "response_time", "response_string_match"}
|
||||
absentTags := []string{"status_code"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
|
||||
|
||||
expectedFields = map[string]interface{}{"result_type": "connection_failed"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
|
||||
}
|
||||
|
||||
func TestMethod(t *testing.T) {
|
||||
@@ -174,9 +294,20 @@ func TestMethod(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "POST",
|
||||
"status_code": "200",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields := []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
|
||||
h = &HTTPResponse{
|
||||
Address: ts.URL + "/mustbepostmethod",
|
||||
@@ -192,9 +323,20 @@ func TestMethod(t *testing.T) {
|
||||
err = h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok = acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusMethodNotAllowed, value)
|
||||
expectedFields = map[string]interface{}{
|
||||
"http_response_code": http.StatusMethodNotAllowed,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags = map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "405",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields = []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
|
||||
// Check that lowercase methods work correctly
|
||||
h = &HTTPResponse{
|
||||
@@ -211,9 +353,20 @@ func TestMethod(t *testing.T) {
|
||||
err = h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok = acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusMethodNotAllowed, value)
|
||||
expectedFields = map[string]interface{}{
|
||||
"http_response_code": http.StatusMethodNotAllowed,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags = map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "head",
|
||||
"status_code": "405",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields = []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
}
|
||||
|
||||
func TestBody(t *testing.T) {
|
||||
@@ -235,9 +388,20 @@ func TestBody(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "200",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields := []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
|
||||
h = &HTTPResponse{
|
||||
Address: ts.URL + "/musthaveabody",
|
||||
@@ -252,9 +416,19 @@ func TestBody(t *testing.T) {
|
||||
err = h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok = acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusBadRequest, value)
|
||||
expectedFields = map[string]interface{}{
|
||||
"http_response_code": http.StatusBadRequest,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
}
|
||||
expectedTags = map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "400",
|
||||
"result": "success",
|
||||
}
|
||||
absentFields = []string{"response_string_match"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil)
|
||||
}
|
||||
|
||||
func TestStringMatch(t *testing.T) {
|
||||
@@ -277,17 +451,20 @@ func TestStringMatch(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
value, ok = acc.IntField("http_response", "response_string_match")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, 1, value)
|
||||
response_value, ok := acc.StringField("http_response", "result_type")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "success", response_value)
|
||||
_, ok = acc.FloatField("http_response", "response_time")
|
||||
require.True(t, ok)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"response_string_match": 1,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "200",
|
||||
"result": "success",
|
||||
}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
|
||||
}
|
||||
|
||||
func TestStringMatchJson(t *testing.T) {
|
||||
@@ -310,17 +487,20 @@ func TestStringMatchJson(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
value, ok = acc.IntField("http_response", "response_string_match")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, 1, value)
|
||||
response_value, ok := acc.StringField("http_response", "result_type")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "success", response_value)
|
||||
_, ok = acc.FloatField("http_response", "response_time")
|
||||
require.True(t, ok)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"response_string_match": 1,
|
||||
"result_type": "success",
|
||||
"result_code": 0,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "200",
|
||||
"result": "success",
|
||||
}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
|
||||
}
|
||||
|
||||
func TestStringMatchFail(t *testing.T) {
|
||||
@@ -344,17 +524,20 @@ func TestStringMatchFail(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
value, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, http.StatusOK, value)
|
||||
value, ok = acc.IntField("http_response", "response_string_match")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, 0, value)
|
||||
response_value, ok := acc.StringField("http_response", "result_type")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "response_string_mismatch", response_value)
|
||||
_, ok = acc.FloatField("http_response", "response_time")
|
||||
require.True(t, ok)
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusOK,
|
||||
"response_string_match": 0,
|
||||
"result_type": "response_string_mismatch",
|
||||
"result_code": 1,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "200",
|
||||
"result": "response_string_mismatch",
|
||||
}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
|
||||
}
|
||||
|
||||
func TestTimeout(t *testing.T) {
|
||||
@@ -380,11 +563,126 @@ func TestTimeout(t *testing.T) {
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, ok := acc.IntField("http_response", "http_response_code")
|
||||
require.False(t, ok)
|
||||
response_value, ok := acc.StringField("http_response", "result_type")
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "timeout", response_value)
|
||||
_, ok = acc.FloatField("http_response", "response_time")
|
||||
require.False(t, ok)
|
||||
expectedFields := map[string]interface{}{
|
||||
"result_type": "timeout",
|
||||
"result_code": 4,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"result": "timeout",
|
||||
}
|
||||
absentFields := []string{"http_response_code", "response_time", "response_string_match"}
|
||||
absentTags := []string{"status_code"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
|
||||
}
|
||||
|
||||
func TestPluginErrors(t *testing.T) {
|
||||
mux := setUpTestMux()
|
||||
ts := httptest.NewServer(mux)
|
||||
defer ts.Close()
|
||||
|
||||
// Bad regex test. Should return an error and emit no metrics
|
||||
h := &HTTPResponse{
|
||||
Address: ts.URL + "/good",
|
||||
Body: "{ 'test': 'data'}",
|
||||
Method: "GET",
|
||||
ResponseStringMatch: "bad regex:[[",
|
||||
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||
Headers: map[string]string{
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
FollowRedirects: true,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := h.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
|
||||
absentFields := []string{"http_response_code", "response_time", "response_string_match", "result_type", "result_code"}
|
||||
absentTags := []string{"status_code", "result", "server", "method"}
|
||||
checkOutput(t, &acc, nil, nil, absentFields, absentTags)
|
||||
|
||||
// Attempt to read an empty body
|
||||
h = &HTTPResponse{
|
||||
Address: ts.URL + "/redirect",
|
||||
Body: "",
|
||||
Method: "GET",
|
||||
ResponseStringMatch: ".*",
|
||||
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||
FollowRedirects: false,
|
||||
}
|
||||
|
||||
acc = testutil.Accumulator{}
|
||||
err = h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"http_response_code": http.StatusMovedPermanently,
|
||||
"response_string_match": 0,
|
||||
"result_type": "body_read_error",
|
||||
"result_code": 2,
|
||||
"response_time": nil,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"status_code": "301",
|
||||
"result": "body_read_error",
|
||||
}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, nil, nil)
|
||||
}
|
||||
|
||||
func TestNetworkErrors(t *testing.T) {
|
||||
// DNS error
|
||||
h := &HTTPResponse{
|
||||
Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here
|
||||
Body: "",
|
||||
Method: "GET",
|
||||
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||
FollowRedirects: false,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"result_type": "dns_error",
|
||||
"result_code": 5,
|
||||
}
|
||||
expectedTags := map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"result": "dns_error",
|
||||
}
|
||||
absentFields := []string{"http_response_code", "response_time", "response_string_match"}
|
||||
absentTags := []string{"status_code"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
|
||||
|
||||
// Connection failed
|
||||
h = &HTTPResponse{
|
||||
Address: "https://127.127.127.127", // Any non-routable IP works here
|
||||
Body: "",
|
||||
Method: "GET",
|
||||
ResponseTimeout: internal.Duration{Duration: time.Second * 20},
|
||||
FollowRedirects: false,
|
||||
}
|
||||
|
||||
acc = testutil.Accumulator{}
|
||||
err = h.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedFields = map[string]interface{}{
|
||||
"result_type": "connection_failed",
|
||||
"result_code": 3,
|
||||
}
|
||||
expectedTags = map[string]interface{}{
|
||||
"server": nil,
|
||||
"method": "GET",
|
||||
"result": "connection_failed",
|
||||
}
|
||||
absentFields = []string{"http_response_code", "response_time", "response_string_match"}
|
||||
absentTags = []string{"status_code"}
|
||||
checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats.
|
||||
|
||||
Deprecated (1.6): use the [http](../http) input.
|
||||
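As a rough sketch of that flattening behaviour (the exact key-joining rule used by the plugin is an assumption here), nested numeric values become float fields named after their key path, while non-numeric values are ignored:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks a decoded JSON document and records every numeric value
// as a float field keyed by its joined path (separator assumed to be "_").
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	case float64: // encoding/json decodes all JSON numbers as float64
		out[prefix] = t
	}
}

func main() {
	raw := []byte(`{"service":{"requests":42,"latency_ms":3.5},"status":"ok"}`)
	var doc map[string]interface{}
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}
	fields := make(map[string]float64)
	flatten("", doc, fields)
	fmt.Println(fields) // map[service_latency_ms:3.5 service_requests:42]
}
```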
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
|
||||
@@ -33,6 +33,9 @@ InfluxDB-formatted endpoints. See below for more information.
|
||||
|
||||
### Measurements & Fields
|
||||
|
||||
**Note:** The measurements and fields are dynamically built from the InfluxDB source,
|
||||
and may vary between versions.
|
||||
|
||||
- influxdb
|
||||
- n_shards
|
||||
- influxdb_database
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).
|
||||
|
||||
## Jolokia Agent Configuration
|
||||
### Configuration:
|
||||
|
||||
#### Jolokia Agent Configuration
|
||||
|
||||
The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints.
|
||||
|
||||
@@ -32,7 +34,7 @@ Optionally, specify SSL options for communicating with agents:
|
||||
paths = ["Uptime"]
|
||||
```
|
||||
|
||||
## Jolokia Proxy Configuration
|
||||
#### Jolokia Proxy Configuration
|
||||
|
||||
The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint.
|
||||
|
||||
@@ -77,7 +79,7 @@ Optionally, specify SSL options for communicating with proxies:
|
||||
paths = ["Uptime"]
|
||||
```
|
||||
|
||||
## Jolokia Metric Configuration
|
||||
#### Jolokia Metric Configuration
|
||||
|
||||
Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean.
|
||||
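Roughly speaking, and following the Jolokia protocol linked above, a declaration like the `java_runtime`/`Uptime` sample further down turns into a JSON `read` request against the agent or proxy endpoint. The snippet below is an illustrative sketch, not the plugin's exact request builder:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// One metric declaration becomes one "read" request
	// (field names per the Jolokia JSON-over-HTTP protocol).
	body, err := json.Marshal(map[string]interface{}{
		"type":      "read",
		"mbean":     "java.lang:type=Runtime",
		"attribute": "Uptime",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // POSTed to e.g. http://localhost:8778/jolokia
}
```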
|
||||
@@ -167,3 +169,11 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration
|
||||
| `default_field_separator` | `.` | A character to use to join Mbean attributes when creating fields. |
|
||||
| `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. |
|
||||
| `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. |
|
||||
|
||||
### Example Configurations:
|
||||
|
||||
- [Java JVM](/plugins/inputs/jolokia2/examples/java.conf)
|
||||
- [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf)
|
||||
- [Cassandra](/plugins/inputs/jolokia2/examples/cassandra.conf)
|
||||
|
||||
Please help improve this list and contribute new configuration files by opening an issue or pull request.
|
||||
|
||||
95 plugins/inputs/jolokia2/examples/cassandra.conf (new file)
@@ -0,0 +1,95 @@
|
||||
[[inputs.jolokia2_agent]]
|
||||
urls = ["http://localhost:8778/jolokia"]
|
||||
name_prefix = "java_"
|
||||
|
||||
[[inputs.jolokia2_agent.metrics]]
|
||||
name = "Memory"
|
||||
mbean = "java.lang:type=Memory"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "GarbageCollector"
|
||||
mbean = "java.lang:name=*,type=GarbageCollector"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent]]
|
||||
urls = ["http://localhost:8778/jolokia"]
|
||||
name_prefix = "cassandra_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "Cache"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=Cache"
|
||||
tag_keys = ["name", "scope"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "Client"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=Client"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "ClientRequestMetrics"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=ClientRequestMetrics"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "ClientRequest"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=ClientRequest"
|
||||
tag_keys = ["name", "scope"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "ColumnFamily"
|
||||
mbean = "org.apache.cassandra.metrics:keyspace=*,name=*,scope=*,type=ColumnFamily"
|
||||
tag_keys = ["keyspace", "name", "scope"]
|
||||
field_prefix = "$2_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "CommitLog"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=CommitLog"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "Compaction"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=Compaction"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "CQL"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=CQL"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "DroppedMessage"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,scope=*,type=DroppedMessage"
|
||||
tag_keys = ["name", "scope"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "FileCache"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=FileCache"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "ReadRepair"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=ReadRepair"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "Storage"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,type=Storage"
|
||||
tag_keys = ["name"]
|
||||
field_prefix = "$1_"
|
||||
|
||||
[[inputs.jolokia2_agent.metric]]
|
||||
name = "ThreadPools"
|
||||
mbean = "org.apache.cassandra.metrics:name=*,path=*,scope=*,type=ThreadPools"
|
||||
tag_keys = ["name", "path", "scope"]
|
||||
field_prefix = "$1_"
|
||||
@@ -56,13 +56,13 @@ func (jp *JolokiaProxy) SampleConfig() string {
|
||||
## Add proxy targets to query
|
||||
# default_target_username = ""
|
||||
# default_target_password = ""
|
||||
[[inputs.jolokia_proxy.target]]
|
||||
[[inputs.jolokia2_proxy.target]]
|
||||
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
|
||||
# username = ""
|
||||
# password = ""
|
||||
# username = ""
|
||||
# password = ""
|
||||
|
||||
## Add metrics to read
|
||||
[[inputs.jolokia_proxy.metric]]
|
||||
[[inputs.jolokia2_proxy.metric]]
|
||||
name = "java_runtime"
|
||||
mbean = "java.lang:type=Runtime"
|
||||
paths = ["Uptime"]
|
||||
|
||||
@@ -14,6 +14,13 @@ The Kapacitor plugin will collect metrics from the given Kapacitor instances.
|
||||
|
||||
## Time limit for http requests
|
||||
timeout = "5s"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
### Measurements & Fields
|
||||
|
||||
@@ -21,6 +21,15 @@ type Kapacitor struct {
|
||||
|
||||
Timeout internal.Duration
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
@@ -38,12 +47,23 @@ func (*Kapacitor) SampleConfig() string {
|
||||
|
||||
## Time limit for http requests
|
||||
timeout = "5s"
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
}
|
||||
|
||||
func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
|
||||
if k.client == nil {
|
||||
k.client = &http.Client{Timeout: k.Timeout.Duration}
|
||||
client, err := k.createHttpClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
k.client = client
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
@@ -61,6 +81,23 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *Kapacitor) createHttpClient() (*http.Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
Timeout: k.Timeout.Duration,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
type object struct {
|
||||
Name string `json:"name"`
|
||||
Values map[string]interface{} `json:"values"`
|
||||
|
||||
251 plugins/inputs/leofs/README.md (new file)
@@ -0,0 +1,251 @@
|
||||
# LeoFS Input Plugin
|
||||
|
||||
The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP. See [LeoFS Documentation / System Administration / System Monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/).
|
||||
|
||||
## Configuration:
|
||||
|
||||
```toml
|
||||
# Sample Config:
|
||||
|
||||
[[inputs.leofs]]
|
||||
servers = ["127.0.0.1:4010"]
|
||||
```
|
||||
|
||||
## Measurements & Fields:
|
||||
### Statistics specific to the internals of LeoManager
|
||||
#### Erlang VM
|
||||
|
||||
- 1 min Statistics
|
||||
- num_of_processes
|
||||
- total_memory_usage
|
||||
- system_memory_usage
|
||||
- processes_memory_usage
|
||||
- ets_memory_usage
|
||||
- used_allocated_memory
|
||||
- allocated_memory
|
||||
- 5 min Statistics
|
||||
- num_of_processes_5min
|
||||
- total_memory_usage_5min
|
||||
- system_memory_usage_5min
|
||||
- processes_memory_usage_5min
|
||||
- ets_memory_usage_5min
|
||||
- used_allocated_memory_5min
|
||||
- allocated_memory_5min
|
||||
|
||||
### Statistics specific to the internals of LeoStorage
|
||||
#### Erlang VM
|
||||
|
||||
- 1 min Statistics
|
||||
- num_of_processes
|
||||
- total_memory_usage
|
||||
- system_memory_usage
|
||||
- processes_memory_usage
|
||||
- ets_memory_usage
|
||||
- used_allocated_memory
|
||||
- allocated_memory
|
||||
- 5 min Statistics
|
||||
- num_of_processes_5min
|
||||
- total_memory_usage_5min
|
||||
- system_memory_usage_5min
|
||||
- processes_memory_usage_5min
|
||||
- ets_memory_usage_5min
|
||||
- used_allocated_memory_5min
|
||||
- allocated_memory_5min
|
||||
|
||||
#### Total Number of Requests
|
||||
|
||||
- 1 min Statistics
|
||||
- num_of_writes
|
||||
- num_of_reads
|
||||
- num_of_deletes
|
||||
- 5 min Statistics
|
||||
- num_of_writes_5min
|
||||
- num_of_reads_5min
|
||||
- num_of_deletes_5min
|
||||
|
||||
#### Total Number of Objects and Total Size of Objects
|
||||
|
||||
- num_of_active_objects
|
||||
- total_objects
|
||||
- total_size_of_active_objects
|
||||
- total_size
|
||||
|
||||
#### Total Number of MQ Messages
|
||||
|
||||
- num_of_replication_messages
|
||||
- num_of_sync-vnode_messages
|
||||
- num_of_rebalance_messages
|
||||
- mq_num_of_msg_recovery_node
|
||||
- mq_num_of_msg_deletion_dir
|
||||
- mq_num_of_msg_async_deletion_dir
|
||||
- mq_num_of_msg_req_deletion_dir
|
||||
- mq_mdcr_num_of_msg_req_comp_metadata
|
||||
- mq_mdcr_num_of_msg_req_sync_obj
|
||||
|
||||
Note: The following items are available since LeoFS v1.4.0:
|
||||
|
||||
- mq_num_of_msg_recovery_node
|
||||
- mq_num_of_msg_deletion_dir
|
||||
- mq_num_of_msg_async_deletion_dir
|
||||
- mq_num_of_msg_req_deletion_dir
|
||||
- mq_mdcr_num_of_msg_req_comp_metadata
|
||||
- mq_mdcr_num_of_msg_req_sync_obj
|
||||
|
||||
#### Data Compaction
|
||||
|
||||
- comp_state
|
||||
- comp_last_start_datetime
|
||||
- comp_last_end_datetime
|
||||
- comp_num_of_pending_targets
|
||||
- comp_num_of_ongoing_targets
|
||||
- comp_num_of_out_of_targets
|
||||
|
||||
Note: All of the above items are available since LeoFS v1.4.0.
|
||||
|
||||
### Statistics specific to the internals of LeoGateway
|
||||
#### Erlang VM
|
||||
|
||||
- 1 min Statistics
|
||||
- num_of_processes
|
||||
- total_memory_usage
|
||||
- system_memory_usage
|
||||
- processes_memory_usage
|
||||
- ets_memory_usage
|
||||
- used_allocated_memory
|
||||
- allocated_memory
|
||||
- 5 min Statistics
|
||||
- num_of_processes_5min
|
||||
- total_memory_usage_5min
|
||||
- system_memory_usage_5min
|
||||
- processes_memory_usage_5min
|
||||
- ets_memory_usage_5min
|
||||
- used_allocated_memory_5min
|
||||
- allocated_memory_5min
|
||||
|
||||
#### Total Number of Requests
|
||||
|
||||
- 1 min Statistics
|
||||
- num_of_writes
|
||||
- num_of_reads
|
||||
- num_of_deletes
|
||||
- 5 min Statistics
|
||||
- num_of_writes_5min
|
||||
- num_of_reads_5min
|
||||
- num_of_deletes_5min
|
||||
|
||||
#### Object Cache
|
||||
|
||||
- count_of_cache-hit
|
||||
- count_of_cache-miss
|
||||
- total_of_files
|
||||
- total_cached_size
|
||||
|
||||
|
||||
### Tags:
|
||||
|
||||
All measurements have the following tags:
|
||||
|
||||
- node
|
||||
|
||||
|
||||
### Example output:
|
||||
|
||||
#### LeoManager
|
||||
|
||||
```bash
|
||||
$ ./telegraf --config ./plugins/inputs/leofs/leo_manager.conf --input-filter leofs --test
|
||||
> leofs, host=manager_0, node=manager_0@127.0.0.1
|
||||
allocated_memory=78255445,
|
||||
allocated_memory_5min=78159025,
|
||||
ets_memory_usage=4611900,
|
||||
ets_memory_usage_5min=4632599,
|
||||
num_of_processes=223,
|
||||
num_of_processes_5min=223,
|
||||
processes_memory_usage=20201316,
|
||||
processes_memory_usage_5min=20186559,
|
||||
system_memory_usage=37172701,
|
||||
system_memory_usage_5min=37189213,
|
||||
total_memory_usage=57373373,
|
||||
total_memory_usage_5min=57374653,
|
||||
used_allocated_memory=67,
|
||||
used_allocated_memory_5min=67
|
||||
1524105758000000000
|
||||
```
|
||||
|
||||
#### LeoStorage
|
||||
|
||||
```bash
|
||||
$ ./telegraf --config ./plugins/inputs/leofs/leo_storage.conf --input-filter leofs --test
|
||||
> leofs,host=storage_0,node=storage_0@127.0.0.1
|
||||
allocated_memory=63504384,
|
||||
allocated_memory_5min=0,
|
||||
comp_last_end_datetime=0,
|
||||
comp_last_start_datetime=0,
|
||||
comp_num_of_ongoing_targets=0,
|
||||
comp_num_of_out_of_targets=0,
|
||||
comp_num_of_pending_targets=8,
|
||||
comp_state=0,
|
||||
ets_memory_usage=3877824,
|
||||
ets_memory_usage_5min=0,
|
||||
mq_mdcr_num_of_msg_req_comp_metadata=0,
|
||||
mq_mdcr_num_of_msg_req_sync_obj=0,
|
||||
mq_num_of_msg_async_deletion_dir=0,
|
||||
mq_num_of_msg_deletion_dir=0,
|
||||
mq_num_of_msg_recovery_node=0,
|
||||
mq_num_of_msg_req_deletion_dir=0,
|
||||
num_of_active_objects=70,
|
||||
num_of_deletes=0,
|
||||
num_of_deletes_5min=0,
|
||||
num_of_processes=577,
|
||||
num_of_processes_5min=0,
|
||||
num_of_reads=1,
|
||||
num_of_reads_5min=0,
|
||||
num_of_rebalance_messages=0,
|
||||
num_of_replication_messages=0,
|
||||
num_of_sync-vnode_messages=0,
|
||||
num_of_writes=70,
|
||||
num_of_writes_5min=0,
|
||||
processes_memory_usage=20029464,
|
||||
processes_memory_usage_5min=0,
|
||||
system_memory_usage=25900472,
|
||||
system_memory_usage_5min=0,
|
||||
total_memory_usage=45920987,
|
||||
total_memory_usage_5min=0,
|
||||
total_objects=70,
|
||||
total_size=2,
|
||||
total_size_of_active_objects=2,
|
||||
used_allocated_memory=69,
|
||||
used_allocated_memory_5min=0
|
||||
1524529826000000000
|
||||
```
|
||||
|
||||
#### LeoGateway
|
||||
|
||||
```
|
||||
$ ./telegraf --config ./plugins/inputs/leofs/leo_gateway.conf --input-filter leofs --test
|
||||
> leofs, host=gateway_0, node=gateway_0@127.0.0.1
|
||||
allocated_memory=87941120,
|
||||
allocated_memory_5min=88067672,
|
||||
count_of_cache-hit=0,
|
||||
count_of_cache-miss=0,
|
||||
ets_memory_usage=4843497,
|
||||
ets_memory_usage_5min=4841574,
|
||||
num_of_deletes=0,
|
||||
num_of_deletes_5min=0,
|
||||
num_of_processes=555,
|
||||
num_of_processes_5min=555,
|
||||
num_of_reads=0,
|
||||
num_of_reads_5min=0,
|
||||
num_of_writes=0,
|
||||
num_of_writes_5min=0,
|
||||
processes_memory_usage=17388052,
|
||||
processes_memory_usage_5min=17413928,
|
||||
system_memory_usage=49531263,
|
||||
system_memory_usage_5min=49577819,
|
||||
total_cached_size=0,
|
||||
total_memory_usage=66917393,
|
||||
total_memory_usage_5min=66989469,
|
||||
total_of_files=0,
|
||||
used_allocated_memory=69,
|
||||
used_allocated_memory_5min=69 1524105894000000000
|
||||
```
|
||||
@@ -93,6 +93,19 @@ var KeyMapping = map[ServerType][]string{
|
||||
"allocated_memory",
|
||||
"used_allocated_memory_5min",
|
||||
"allocated_memory_5min",
|
||||
// following items are since LeoFS v1.4.0
|
||||
"mq_num_of_msg_recovery_node",
|
||||
"mq_num_of_msg_deletion_dir",
|
||||
"mq_num_of_msg_async_deletion_dir",
|
||||
"mq_num_of_msg_req_deletion_dir",
|
||||
"mq_mdcr_num_of_msg_req_comp_metadata",
|
||||
"mq_mdcr_num_of_msg_req_sync_obj",
|
||||
"comp_state",
|
||||
"comp_last_start_datetime",
|
||||
"comp_last_end_datetime",
|
||||
"comp_num_of_pending_targets",
|
||||
"comp_num_of_ongoing_targets",
|
||||
"comp_num_of_out_of_targets",
|
||||
},
|
||||
ServerTypeGateway: {
|
||||
"num_of_processes",
|
||||
|
||||
@@ -42,34 +42,46 @@ package main
|
||||
|
||||
import "fmt"
|
||||
|
||||
const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
|
||||
.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
|
||||
.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
|
||||
.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
|
||||
.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
|
||||
.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
|
||||
.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
|
||||
.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
|
||||
.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
|
||||
.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
|
||||
.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
|
||||
.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
|
||||
.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
|
||||
.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
|
||||
.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
|
||||
.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
|
||||
.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
|
||||
.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
|
||||
.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
|
||||
.1.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
|
||||
.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
|
||||
.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
|
||||
.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
|
||||
const output = ` + "`" + `.1.3.6.1.4.1.35450.56.1.0 = STRING: "storage_0@127.0.0.1"
|
||||
.1.3.6.1.4.1.35450.56.2.0 = Gauge32: 512
|
||||
.1.3.6.1.4.1.35450.56.3.0 = Gauge32: 38126307
|
||||
.1.3.6.1.4.1.35450.56.4.0 = Gauge32: 22308716
|
||||
.1.3.6.1.4.1.35450.56.5.0 = Gauge32: 15816448
|
||||
.1.3.6.1.4.1.35450.56.6.0 = Gauge32: 5232008
|
||||
.1.3.6.1.4.1.35450.56.7.0 = Gauge32: 512
|
||||
.1.3.6.1.4.1.35450.56.8.0 = Gauge32: 38113176
|
||||
.1.3.6.1.4.1.35450.56.9.0 = Gauge32: 22313398
|
||||
.1.3.6.1.4.1.35450.56.10.0 = Gauge32: 15798779
|
||||
.1.3.6.1.4.1.35450.56.11.0 = Gauge32: 5237315
|
||||
.1.3.6.1.4.1.35450.56.12.0 = Gauge32: 191
|
||||
.1.3.6.1.4.1.35450.56.13.0 = Gauge32: 824
|
||||
.1.3.6.1.4.1.35450.56.14.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.56.15.0 = Gauge32: 50105
|
||||
.1.3.6.1.4.1.35450.56.16.0 = Gauge32: 196654
|
||||
.1.3.6.1.4.1.35450.56.17.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.56.18.0 = Gauge32: 2052
|
||||
.1.3.6.1.4.1.35450.56.19.0 = Gauge32: 50296
|
||||
.1.3.6.1.4.1.35450.56.20.0 = Gauge32: 35
|
||||
.1.3.6.1.4.1.35450.56.21.0 = Gauge32: 898
|
||||
.1.3.6.1.4.1.35450.56.22.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.56.23.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.56.24.0 = Gauge32: 0
|
||||
.1.3.6.1.4.1.35450.56.31.0 = Gauge32: 51
|
||||
.1.3.6.1.4.1.35450.56.32.0 = Gauge32: 53219328
|
||||
.1.3.6.1.4.1.35450.56.33.0 = Gauge32: 51
|
||||
.1.3.6.1.4.1.35450.56.34.0 = Gauge32: 53351083
|
||||
.1.3.6.1.4.1.35450.56.41.0 = Gauge32: 101
|
||||
.1.3.6.1.4.1.35450.56.42.0 = Gauge32: 216
|
||||
.1.3.6.1.4.1.35450.56.43.0 = Gauge32: 313
|
||||
.1.3.6.1.4.1.35450.56.44.0 = Gauge32: 421
|
||||
.1.3.6.1.4.1.35450.56.45.0 = Gauge32: 597
|
||||
.1.3.6.1.4.1.35450.56.46.0 = Gauge32: 628
|
||||
.1.3.6.1.4.1.35450.56.51.0 = Gauge32: 1
|
||||
.1.3.6.1.4.1.35450.56.52.0 = Gauge32: 1522154118
|
||||
.1.3.6.1.4.1.35450.56.53.0 = Gauge32: 1522196496
|
||||
.1.3.6.1.4.1.35450.56.54.0 = Gauge32: 1
|
||||
.1.3.6.1.4.1.35450.56.55.0 = Gauge32: 7
|
||||
.1.3.6.1.4.1.35450.56.56.0 = Gauge32: 0` + "`" +
|
||||
`
|
||||
func main() {
|
||||
fmt.Println(output)
|
||||
|
||||
@@ -80,6 +80,8 @@ Timestamp modifiers can be used to convert captures to the timestamp of the
|
||||
parsed metric. If no timestamp is parsed, the metric will be created using the
|
||||
current time.
|
||||
|
||||
You must capture at least one field per line.
|
||||
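A sketch of what that requirement means in terms of the `Parser` used by the tests in this changeset (the pattern, input line, and the `float` modifier are illustrative):

```go
p := &Parser{
	Patterns: []string{`%{NUMBER:duration:float} %{WORD:state}`},
}
if err := p.Compile(); err != nil {
	panic(err)
}
m, err := p.ParseLine("1.25 ok") // fields: duration=1.25, state="ok"
// A pattern whose captures yield no fields (for example, one that only
// captures a timestamp) now makes ParseLine return an error instead of
// emitting an empty metric.
_, _ = m, err
```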
|
||||
- Available modifiers:
|
||||
- string (default if nothing is specified)
|
||||
- int
|
||||
@@ -108,10 +110,11 @@ CUSTOM time layouts must be within quotes and be the representation of the
|
||||
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
|
||||
See https://golang.org/pkg/time/#Parse for more details.
|
||||
|
||||
Telegraf has many of its own
|
||||
[built-in patterns](./grok/patterns/influx-patterns),
|
||||
as well as supporting
|
||||
Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns),
|
||||
as well as support for most of
|
||||
[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
|
||||
_Golang regular expressions do not support lookahead or lookbehind.
|
||||
logstash patterns that depend on these are not supported._
|
||||
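For example, compiling a pattern with a lookahead fails outright under Go's RE2-based `regexp` package:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Lookahead is valid in PCRE but rejected by RE2.
	_, err := regexp.Compile(`foo(?=bar)`)
	fmt.Println(err)
	// error parsing regexp: invalid or unsupported Perl syntax: `(?=`
}
```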
|
||||
If you need help building patterns to match your logs,
|
||||
you will find the https://grokdebug.herokuapp.com application quite useful!
|
||||
|
||||
@@ -132,6 +132,7 @@ func (p *Parser) Compile() error {
|
||||
// "custom patterns"
|
||||
p.namedPatterns = make([]string, 0, len(p.Patterns))
|
||||
for i, pattern := range p.Patterns {
|
||||
pattern = strings.TrimSpace(pattern)
|
||||
if pattern == "" {
|
||||
continue
|
||||
}
|
||||
@@ -326,6 +327,10 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("logparser_grok: must have one or more fields")
|
||||
}
|
||||
|
||||
return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
|
||||
}
|
||||
|
||||
|
||||
@@ -799,7 +799,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
@@ -812,7 +812,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465044105000000000), metricB.UnixNano())
|
||||
assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
|
||||
@@ -835,7 +835,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
@@ -848,7 +848,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465044105000000000), metricB.UnixNano())
|
||||
assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
|
||||
@@ -871,7 +871,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
@@ -884,7 +884,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465036905000000000), metricB.UnixNano())
|
||||
assert.Equal(t, int64(1465036905000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
|
||||
@@ -907,7 +907,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
@@ -920,7 +920,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, int64(1465058505000000000), metricB.UnixNano())
|
||||
assert.Equal(t, int64(1465058505000000000), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
|
||||
@@ -943,7 +943,7 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricA.Fields())
|
||||
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.UnixNano())
|
||||
assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano())
|
||||
|
||||
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
|
||||
require.NotNil(t, metricB)
|
||||
@@ -956,5 +956,17 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) {
|
||||
},
|
||||
metricB.Fields())
|
||||
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||
assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.UnixNano())
|
||||
assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.Time().UnixNano())
|
||||
}
|
||||
|
||||
func TestNewlineInPatterns(t *testing.T) {
|
||||
p := &Parser{
|
||||
Patterns: []string{`
|
||||
%{SYSLOGTIMESTAMP:timestamp}
|
||||
`},
|
||||
}
|
||||
require.NoError(t, p.Compile())
|
||||
m, err := p.ParseLine("Apr 10 05:11:57")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, m)
|
||||
}
|
||||
|
||||
103 plugins/inputs/mcrouter/README.md (new file)
@@ -0,0 +1,103 @@
|
||||
# Mcrouter Input Plugin
|
||||
|
||||
This plugin gathers statistics from a Mcrouter server.
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
# Read metrics from one or many mcrouter servers.
|
||||
[[inputs.mcrouter]]
|
||||
## An array of addresses to gather stats about. Specify an IP or hostname
|
||||
## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
|
||||
servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
|
||||
|
||||
## Timeout for metric collections from all servers. Minimum timeout is "1s".
|
||||
# timeout = "5s"
|
||||
```
|
||||
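The server entries are URLs, so the scheme selects the transport and the remainder is the dial target. Below is a minimal sketch of that split (the plugin's own parser may differ in its details):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, s := range []string{"tcp://localhost:11211", "unix:///var/run/mcrouter.sock"} {
		u, err := url.Parse(s)
		if err != nil {
			panic(err)
		}
		target := u.Host // host:port for tcp
		if u.Scheme == "unix" {
			target = u.Path // socket path for unix
		}
		fmt.Println(u.Scheme, target)
	}
}
```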
|
||||
### Measurements & Fields:
|
||||
|
||||
The fields from this plugin are gathered in the *mcrouter* measurement.
|
||||
|
||||
Description of gathered fields can be found [here](https://github.com/facebook/mcrouter/wiki/Stats-list).
|
||||
|
||||
Fields:
|
||||
|
||||
* uptime
|
||||
* num_servers
|
||||
* num_servers_new
|
||||
* num_servers_up
|
||||
* num_servers_down
|
||||
* num_servers_closed
|
||||
* num_clients
|
||||
* num_suspect_servers
|
||||
* destination_batches_sum
|
||||
* destination_requests_sum
|
||||
* outstanding_route_get_reqs_queued
|
||||
* outstanding_route_update_reqs_queued
|
||||
* outstanding_route_get_avg_queue_size
|
||||
* outstanding_route_update_avg_queue_size
|
||||
* outstanding_route_get_avg_wait_time_sec
|
||||
* outstanding_route_update_avg_wait_time_sec
|
||||
* retrans_closed_connections
|
||||
* destination_pending_reqs
|
||||
* destination_inflight_reqs
|
||||
* destination_batch_size
|
||||
* asynclog_requests
|
||||
* proxy_reqs_processing
|
||||
* proxy_reqs_waiting
|
||||
* client_queue_notify_period
|
||||
* rusage_system
|
||||
* rusage_user
|
||||
* ps_num_minor_faults
|
||||
* ps_num_major_faults
|
||||
* ps_user_time_sec
|
||||
* ps_system_time_sec
|
||||
* ps_vsize
|
||||
* ps_rss
|
||||
* fibers_allocated
|
||||
* fibers_pool_size
|
||||
* fibers_stack_high_watermark
|
||||
* successful_client_connections
|
||||
* duration_us
|
||||
* destination_max_pending_reqs
|
||||
* destination_max_inflight_reqs
|
||||
* retrans_per_kbyte_max
|
||||
* cmd_get_count
|
||||
* cmd_delete_out
|
||||
* cmd_lease_get
|
||||
* cmd_set
|
||||
* cmd_get_out_all
|
||||
* cmd_get_out
|
||||
* cmd_lease_set_count
|
||||
* cmd_other_out_all
|
||||
* cmd_lease_get_out
|
||||
* cmd_set_count
|
||||
* cmd_lease_set_out
|
||||
* cmd_delete_count
|
||||
* cmd_other
|
||||
* cmd_delete
|
||||
* cmd_get
|
||||
* cmd_lease_set
|
||||
* cmd_set_out
|
||||
* cmd_lease_get_count
|
||||
* cmd_other_out
|
||||
* cmd_lease_get_out_all
|
||||
* cmd_set_out_all
|
||||
* cmd_other_count
|
||||
* cmd_delete_out_all
|
||||
* cmd_lease_set_out_all
|
||||
|
||||
### Tags:
|
||||
|
||||
* Mcrouter measurements have the following tags:
|
||||
- server (the host name from which metrics are gathered)
|
||||
|
||||
|
||||
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
$ ./telegraf --config telegraf.conf --input-filter mcrouter --test
|
||||
mcrouter,server=localhost:11211 uptime=166,num_servers=1,num_servers_new=1,num_servers_up=0,num_servers_down=0,num_servers_closed=0,num_clients=1,num_suspect_servers=0,destination_batches_sum=0,destination_requests_sum=0,outstanding_route_get_reqs_queued=0,outstanding_route_update_reqs_queued=0,outstanding_route_get_avg_queue_size=0,outstanding_route_update_avg_queue_size=0,outstanding_route_get_avg_wait_time_sec=0,outstanding_route_update_avg_wait_time_sec=0,retrans_closed_connections=0,destination_pending_reqs=0,destination_inflight_reqs=0,destination_batch_size=0,asynclog_requests=0,proxy_reqs_processing=1,proxy_reqs_waiting=0,client_queue_notify_period=0,rusage_system=0.040966,rusage_user=0.020483,ps_num_minor_faults=2490,ps_num_major_faults=11,ps_user_time_sec=0.02,ps_system_time_sec=0.04,ps_vsize=697741312,ps_rss=10563584,fibers_allocated=0,fibers_pool_size=0,fibers_stack_high_watermark=0,successful_client_connections=18,duration_us=0,destination_max_pending_reqs=0,destination_max_inflight_reqs=0,retrans_per_kbyte_max=0,cmd_get_count=0,cmd_delete_out=0,cmd_lease_get=0,cmd_set=0,cmd_get_out_all=0,cmd_get_out=0,cmd_lease_set_count=0,cmd_other_out_all=0,cmd_lease_get_out=0,cmd_set_count=0,cmd_lease_set_out=0,cmd_delete_count=0,cmd_other=0,cmd_delete=0,cmd_get=0,cmd_lease_set=0,cmd_set_out=0,cmd_lease_get_count=0,cmd_other_out=0,cmd_lease_get_out_all=0,cmd_set_out_all=0,cmd_other_count=0,cmd_delete_out_all=0,cmd_lease_set_out_all=0 1453831884664956455
|
||||
```
|
||||
286 plugins/inputs/mcrouter/mcrouter.go (new file)
@@ -0,0 +1,286 @@
|
||||
package mcrouter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
// Mcrouter is a mcrouter plugin
|
||||
type Mcrouter struct {
|
||||
Servers []string
|
||||
Timeout internal.Duration
|
||||
}
|
||||
|
||||
// enum for statType
|
||||
type statType int
|
||||
|
||||
const (
|
||||
typeInt statType = iota
|
||||
typeFloat statType = iota
|
||||
)
|
||||
|
||||
var sampleConfig = `
|
||||
## An array of addresses to gather stats about. Specify an IP or hostname
|
||||
## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
|
||||
servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
|
||||
|
||||
## Timeout for metric collections from all servers. Minimum timeout is "1s".
|
||||
# timeout = "5s"
|
||||
`
|
||||
|
||||
var defaultTimeout = 5 * time.Second
|
||||
|
||||
var defaultServerURL = url.URL{
|
||||
Scheme: "tcp",
|
||||
Host: "localhost:11211",
|
||||
}
|
||||
|
||||
// The list of metrics that should be sent
|
||||
var sendMetrics = map[string]statType{
|
||||
"uptime": typeInt,
|
||||
"num_servers": typeInt,
|
||||
"num_servers_new": typeInt,
|
||||
"num_servers_up": typeInt,
|
||||
"num_servers_down": typeInt,
|
||||
"num_servers_closed": typeInt,
|
||||
"num_clients": typeInt,
|
||||
"num_suspect_servers": typeInt,
|
||||
"destination_batches_sum": typeInt,
|
||||
"destination_requests_sum": typeInt,
|
||||
"outstanding_route_get_reqs_queued": typeInt,
|
||||
"outstanding_route_update_reqs_queued": typeInt,
|
||||
"outstanding_route_get_avg_queue_size": typeInt,
|
||||
"outstanding_route_update_avg_queue_size": typeInt,
|
||||
"outstanding_route_get_avg_wait_time_sec": typeInt,
|
||||
"outstanding_route_update_avg_wait_time_sec": typeInt,
|
||||
"retrans_closed_connections": typeInt,
|
||||
"destination_pending_reqs": typeInt,
|
||||
"destination_inflight_reqs": typeInt,
|
||||
"destination_batch_size": typeInt,
|
||||
"asynclog_requests": typeInt,
|
||||
"proxy_reqs_processing": typeInt,
|
||||
"proxy_reqs_waiting": typeInt,
|
||||
"client_queue_notify_period": typeInt,
|
||||
"rusage_system": typeFloat,
|
||||
"rusage_user": typeFloat,
|
||||
"ps_num_minor_faults": typeInt,
|
||||
"ps_num_major_faults": typeInt,
|
||||
"ps_user_time_sec": typeFloat,
|
||||
"ps_system_time_sec": typeFloat,
|
||||
"ps_vsize": typeInt,
|
||||
"ps_rss": typeInt,
|
||||
"fibers_allocated": typeInt,
|
||||
"fibers_pool_size": typeInt,
|
||||
"fibers_stack_high_watermark": typeInt,
|
||||
"successful_client_connections": typeInt,
|
||||
"duration_us": typeInt,
|
||||
"destination_max_pending_reqs": typeInt,
|
||||
"destination_max_inflight_reqs": typeInt,
|
||||
"retrans_per_kbyte_max": typeInt,
|
||||
"cmd_get_count": typeInt,
|
||||
"cmd_delete_out": typeInt,
|
||||
"cmd_lease_get": typeInt,
|
||||
"cmd_set": typeInt,
|
||||
"cmd_get_out_all": typeInt,
|
||||
"cmd_get_out": typeInt,
|
||||
"cmd_lease_set_count": typeInt,
|
||||
"cmd_other_out_all": typeInt,
|
||||
"cmd_lease_get_out": typeInt,
|
||||
"cmd_set_count": typeInt,
|
||||
"cmd_lease_set_out": typeInt,
|
||||
"cmd_delete_count": typeInt,
|
||||
"cmd_other": typeInt,
|
||||
"cmd_delete": typeInt,
|
||||
"cmd_get": typeInt,
|
||||
"cmd_lease_set": typeInt,
|
||||
"cmd_set_out": typeInt,
|
||||
"cmd_lease_get_count": typeInt,
|
||||
"cmd_other_out": typeInt,
|
||||
"cmd_lease_get_out_all": typeInt,
|
||||
"cmd_set_out_all": typeInt,
|
||||
"cmd_other_count": typeInt,
|
||||
"cmd_delete_out_all": typeInt,
|
||||
"cmd_lease_set_out_all": typeInt,
|
||||
}
|
||||
|
||||
// SampleConfig returns sample configuration message
|
||||
func (m *Mcrouter) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
// Description returns description of Mcrouter plugin
|
||||
func (m *Mcrouter) Description() string {
|
||||
return "Read metrics from one or many mcrouter servers"
|
||||
}
|
||||
|
||||
// Gather reads stats from all configured servers and accumulates stats
|
||||
func (m *Mcrouter) Gather(acc telegraf.Accumulator) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if m.Timeout.Duration < 1*time.Second {
|
||||
m.Timeout.Duration = defaultTimeout
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, m.Timeout.Duration)
|
||||
defer cancel()
|
||||
|
||||
if len(m.Servers) == 0 {
|
||||
m.Servers = []string{defaultServerURL.String()}
|
||||
}
|
||||
|
||||
for _, serverAddress := range m.Servers {
|
||||
acc.AddError(m.gatherServer(ctx, serverAddress, acc))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseAddress parses an address string into 'host:port' and 'protocol' parts
|
||||
func (m *Mcrouter) ParseAddress(address string) (string, string, error) {
|
||||
var protocol string
|
||||
var host string
|
||||
var port string
|
||||
|
||||
u, parseError := url.Parse(address)
|
||||
|
||||
if parseError != nil {
|
||||
return "", "", fmt.Errorf("Invalid server address")
|
||||
}
|
||||
|
||||
if u.Scheme != "tcp" && u.Scheme != "unix" {
|
||||
return "", "", fmt.Errorf("Invalid server protocol")
|
||||
}
|
||||
|
||||
protocol = u.Scheme
|
||||
|
||||
if protocol == "unix" {
|
||||
if u.Path == "" {
|
||||
return "", "", fmt.Errorf("Invalid unix socket path")
|
||||
}
|
||||
|
||||
address = u.Path
|
||||
} else {
|
||||
if u.Host == "" {
|
||||
return "", "", fmt.Errorf("Invalid host")
|
||||
}
|
||||
|
||||
host = u.Hostname()
|
||||
port = u.Port()
|
||||
|
||||
if host == "" {
|
||||
host = defaultServerURL.Hostname()
|
||||
}
|
||||
|
||||
if port == "" {
|
||||
port = defaultServerURL.Port()
|
||||
}
|
||||
|
||||
address = host + ":" + port
|
||||
}
|
||||
|
||||
return address, protocol, nil
|
||||
}
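// Illustrative results of ParseAddress (assumed inputs; the defaults come
// from defaultServerURL above):
//
//	ParseAddress("tcp://10.0.0.1:11211")          // -> "10.0.0.1:11211", "tcp"
//	ParseAddress("tcp://localhost")               // -> "localhost:11211", "tcp"
//	ParseAddress("unix:///var/run/mcrouter.sock") // -> "/var/run/mcrouter.sock", "unix"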
|
||||
|
||||
func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error {
|
||||
var conn net.Conn
|
||||
var err error
|
||||
var protocol string
|
||||
var dialer net.Dialer
|
||||
|
||||
address, protocol, err = m.ParseAddress(address)
if err != nil {
return err
}
|
||||
|
||||
conn, err = dialer.DialContext(ctx, protocol, address)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer conn.Close()
|
||||
|
||||
// Extend connection
|
||||
deadline, ok := ctx.Deadline()
|
||||
|
||||
if ok {
|
||||
conn.SetDeadline(deadline)
|
||||
}
|
||||
|
||||
// Read and write buffer
|
||||
reader := bufio.NewReader(conn)
|
||||
scanner := bufio.NewScanner(reader)
|
||||
|
||||
// Send command
|
||||
if _, err := fmt.Fprint(conn, "stats\r\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
values, err := parseResponse(scanner)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add server address as a tag
|
||||
tags := map[string]string{"server": address}
|
||||
|
||||
// Process values
|
||||
fields := make(map[string]interface{})
|
||||
for key, sType := range sendMetrics {
|
||||
if value, ok := values[key]; ok {
|
||||
switch sType {
|
||||
case typeInt:
|
||||
if v, errParse := strconv.ParseInt(value, 10, 64); errParse == nil {
|
||||
fields[key] = v
|
||||
}
|
||||
case typeFloat:
|
||||
if v, errParse := strconv.ParseFloat(value, 64); errParse == nil {
|
||||
fields[key] = v
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
acc.AddFields("mcrouter", fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseResponse(r *bufio.Scanner) (map[string]string, error) {
|
||||
values := make(map[string]string)
|
||||
|
||||
for r.Scan() {
|
||||
// Read line
|
||||
line := r.Text()
|
||||
|
||||
// Done
|
||||
if line == "END" {
|
||||
break
|
||||
}
|
||||
|
||||
// Read values
|
||||
s := strings.SplitN(line, " ", 3)
|
||||
|
||||
if len(s) != 3 || s[0] != "STAT" {
|
||||
return nil, fmt.Errorf("unexpected line in stats response: %s", line)
|
||||
}
|
||||
|
||||
// Save values
|
||||
values[s[1]] = s[2]
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("mcrouter", func() telegraf.Input {
|
||||
return &Mcrouter{}
|
||||
})
|
||||
}
|
||||
250
plugins/inputs/mcrouter/mcrouter_test.go
Normal file
@@ -0,0 +1,250 @@
|
||||
package mcrouter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAddressParsing(t *testing.T) {
|
||||
m := &Mcrouter{
|
||||
Servers: []string{"tcp://" + testutil.GetLocalHost()},
|
||||
}
|
||||
|
||||
var acceptTests = [][3]string{
|
||||
{"tcp://localhost:8086", "localhost:8086", "tcp"},
|
||||
{"tcp://localhost", "localhost:" + defaultServerURL.Port(), "tcp"},
|
||||
{"tcp://localhost:", "localhost:" + defaultServerURL.Port(), "tcp"},
|
||||
{"tcp://:8086", defaultServerURL.Hostname() + ":8086", "tcp"},
|
||||
{"tcp://:", defaultServerURL.Host, "tcp"},
|
||||
}
|
||||
|
||||
var rejectTests = []string{
|
||||
"tcp://",
|
||||
}
|
||||
|
||||
for _, args := range acceptTests {
|
||||
address, protocol, err := m.ParseAddress(args[0])
|
||||
|
||||
assert.Nil(t, err, args[0])
|
||||
assert.True(t, address == args[1], args[0])
|
||||
assert.True(t, protocol == args[2], args[0])
|
||||
}
|
||||
|
||||
for _, addr := range rejectTests {
|
||||
address, protocol, err := m.ParseAddress(addr)
|
||||
|
||||
assert.NotNil(t, err, addr)
|
||||
assert.Empty(t, address, addr)
|
||||
assert.Empty(t, protocol, addr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMcrouterGeneratesMetrics(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
m := &Mcrouter{
|
||||
Servers: []string{"tcp://" + testutil.GetLocalHost()},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := acc.GatherError(m.Gather)
|
||||
require.NoError(t, err)
|
||||
|
||||
intMetrics := []string{"uptime", "num_servers", "num_servers_new", "num_servers_up",
|
||||
"num_servers_down", "num_servers_closed", "num_clients",
|
||||
"num_suspect_servers", "destination_batches_sum", "destination_requests_sum",
|
||||
"outstanding_route_get_reqs_queued", "outstanding_route_update_reqs_queued",
|
||||
"outstanding_route_get_avg_queue_size", "outstanding_route_update_avg_queue_size",
|
||||
"outstanding_route_get_avg_wait_time_sec", "outstanding_route_update_avg_wait_time_sec",
|
||||
"retrans_closed_connections", "destination_pending_reqs", "destination_inflight_reqs",
|
||||
"destination_batch_size", "asynclog_requests", "proxy_reqs_processing",
|
||||
"proxy_reqs_waiting", "client_queue_notify_period",
|
||||
"ps_num_minor_faults", "ps_num_major_faults",
|
||||
"ps_vsize", "ps_rss", "fibers_allocated", "fibers_pool_size", "fibers_stack_high_watermark",
|
||||
"successful_client_connections", "duration_us", "destination_max_pending_reqs",
|
||||
"destination_max_inflight_reqs", "retrans_per_kbyte_max", "cmd_get_count", "cmd_delete_out",
|
||||
"cmd_lease_get", "cmd_set", "cmd_get_out_all", "cmd_get_out", "cmd_lease_set_count",
|
||||
"cmd_other_out_all", "cmd_lease_get_out", "cmd_set_count", "cmd_lease_set_out",
|
||||
"cmd_delete_count", "cmd_other", "cmd_delete", "cmd_get", "cmd_lease_set", "cmd_set_out",
|
||||
"cmd_lease_get_count", "cmd_other_out", "cmd_lease_get_out_all", "cmd_set_out_all",
|
||||
"cmd_other_count", "cmd_delete_out_all", "cmd_lease_set_out_all"}
|
||||
|
||||
floatMetrics := []string{"rusage_system", "rusage_user", "ps_user_time_sec", "ps_system_time_sec"}
|
||||
|
||||
for _, metric := range intMetrics {
|
||||
assert.True(t, acc.HasInt64Field("mcrouter", metric), metric)
|
||||
}
|
||||
|
||||
for _, metric := range floatMetrics {
|
||||
assert.True(t, acc.HasFloatField("mcrouter", metric), metric)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMcrouterParseMetrics(t *testing.T) {
|
||||
r := bufio.NewReader(strings.NewReader(mcrouterStats))
|
||||
scanner := bufio.NewScanner(r)
|
||||
values, err := parseResponse(scanner)
|
||||
require.NoError(t, err, "Error parsing mcrouter response")
|
||||
|
||||
tests := []struct {
|
||||
key string
|
||||
value string
|
||||
}{
|
||||
{"uptime", "166"},
|
||||
{"num_servers", "1"},
|
||||
{"num_servers_new", "1"},
|
||||
{"num_servers_up", "0"},
|
||||
{"num_servers_down", "0"},
|
||||
{"num_servers_closed", "0"},
|
||||
{"num_clients", "1"},
|
||||
{"num_suspect_servers", "0"},
|
||||
{"destination_batches_sum", "0"},
|
||||
{"destination_requests_sum", "0"},
|
||||
{"outstanding_route_get_reqs_queued", "0"},
|
||||
{"outstanding_route_update_reqs_queued", "0"},
|
||||
{"outstanding_route_get_avg_queue_size", "0"},
|
||||
{"outstanding_route_update_avg_queue_size", "0"},
|
||||
{"outstanding_route_get_avg_wait_time_sec", "0"},
|
||||
{"outstanding_route_update_avg_wait_time_sec", "0"},
|
||||
{"retrans_closed_connections", "0"},
|
||||
{"destination_pending_reqs", "0"},
|
||||
{"destination_inflight_reqs", "0"},
|
||||
{"destination_batch_size", "0"},
|
||||
{"asynclog_requests", "0"},
|
||||
{"proxy_reqs_processing", "1"},
|
||||
{"proxy_reqs_waiting", "0"},
|
||||
{"client_queue_notify_period", "0"},
|
||||
{"rusage_system", "0.040966"},
|
||||
{"rusage_user", "0.020483"},
|
||||
{"ps_num_minor_faults", "2490"},
|
||||
{"ps_num_major_faults", "11"},
|
||||
{"ps_user_time_sec", "0.02"},
|
||||
{"ps_system_time_sec", "0.04"},
|
||||
{"ps_vsize", "697741312"},
|
||||
{"ps_rss", "10563584"},
|
||||
{"fibers_allocated", "0"},
|
||||
{"fibers_pool_size", "0"},
|
||||
{"fibers_stack_high_watermark", "0"},
|
||||
{"successful_client_connections", "18"},
|
||||
{"duration_us", "0"},
|
||||
{"destination_max_pending_reqs", "0"},
|
||||
{"destination_max_inflight_reqs", "0"},
|
||||
{"retrans_per_kbyte_max", "0"},
|
||||
{"cmd_get_count", "0"},
|
||||
{"cmd_delete_out", "0"},
|
||||
{"cmd_lease_get", "0"},
|
||||
{"cmd_set", "0"},
|
||||
{"cmd_get_out_all", "0"},
|
||||
{"cmd_get_out", "0"},
|
||||
{"cmd_lease_set_count", "0"},
|
||||
{"cmd_other_out_all", "0"},
|
||||
{"cmd_lease_get_out", "0"},
|
||||
{"cmd_set_count", "0"},
|
||||
{"cmd_lease_set_out", "0"},
|
||||
{"cmd_delete_count", "0"},
|
||||
{"cmd_other", "0"},
|
||||
{"cmd_delete", "0"},
|
||||
{"cmd_get", "0"},
|
||||
{"cmd_lease_set", "0"},
|
||||
{"cmd_set_out", "0"},
|
||||
{"cmd_lease_get_count", "0"},
|
||||
{"cmd_other_out", "0"},
|
||||
{"cmd_lease_get_out_all", "0"},
|
||||
{"cmd_set_out_all", "0"},
|
||||
{"cmd_other_count", "0"},
|
||||
{"cmd_delete_out_all", "0"},
|
||||
{"cmd_lease_set_out_all", "0"},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
value, ok := values[test.key]
|
||||
if !ok {
|
||||
t.Errorf("Did not find key for metric %s in values", test.key)
|
||||
continue
|
||||
}
|
||||
if value != test.value {
|
||||
t.Errorf("Metric: %s, Expected: %s, actual: %s",
|
||||
test.key, test.value, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var mcrouterStats = `STAT version 36.0.0 mcrouter
|
||||
STAT commandargs --port 11211 --config-file /etc/mcrouter/mcrouter.json --async-dir /var/spool/mcrouter --log-path /var/log/mcrouter/mcrouter.log --stats-root /var/mcrouter/stats --server-timeout 100 --reset-inactive-connection-interval 10000 --proxy-threads auto
|
||||
STAT pid 21357
|
||||
STAT parent_pid 1
|
||||
STAT time 1524673265
|
||||
STAT uptime 166
|
||||
STAT num_servers 1
|
||||
STAT num_servers_new 1
|
||||
STAT num_servers_up 0
|
||||
STAT num_servers_down 0
|
||||
STAT num_servers_closed 0
|
||||
STAT num_clients 1
|
||||
STAT num_suspect_servers 0
|
||||
STAT destination_batches_sum 0
|
||||
STAT destination_requests_sum 0
|
||||
STAT outstanding_route_get_reqs_queued 0
|
||||
STAT outstanding_route_update_reqs_queued 0
|
||||
STAT outstanding_route_get_avg_queue_size 0
|
||||
STAT outstanding_route_update_avg_queue_size 0
|
||||
STAT outstanding_route_get_avg_wait_time_sec 0
|
||||
STAT outstanding_route_update_avg_wait_time_sec 0
|
||||
STAT retrans_closed_connections 0
|
||||
STAT destination_pending_reqs 0
|
||||
STAT destination_inflight_reqs 0
|
||||
STAT destination_batch_size 0
|
||||
STAT asynclog_requests 0
|
||||
STAT proxy_reqs_processing 1
|
||||
STAT proxy_reqs_waiting 0
|
||||
STAT client_queue_notify_period 0
|
||||
STAT rusage_system 0.040966
|
||||
STAT rusage_user 0.020483
|
||||
STAT ps_num_minor_faults 2490
|
||||
STAT ps_num_major_faults 11
|
||||
STAT ps_user_time_sec 0.02
|
||||
STAT ps_system_time_sec 0.04
|
||||
STAT ps_vsize 697741312
|
||||
STAT ps_rss 10563584
|
||||
STAT fibers_allocated 0
|
||||
STAT fibers_pool_size 0
|
||||
STAT fibers_stack_high_watermark 0
|
||||
STAT successful_client_connections 18
|
||||
STAT duration_us 0
|
||||
STAT destination_max_pending_reqs 0
|
||||
STAT destination_max_inflight_reqs 0
|
||||
STAT retrans_per_kbyte_max 0
|
||||
STAT cmd_get_count 0
|
||||
STAT cmd_delete_out 0
|
||||
STAT cmd_lease_get 0
|
||||
STAT cmd_set 0
|
||||
STAT cmd_get_out_all 0
|
||||
STAT cmd_get_out 0
|
||||
STAT cmd_lease_set_count 0
|
||||
STAT cmd_other_out_all 0
|
||||
STAT cmd_lease_get_out 0
|
||||
STAT cmd_set_count 0
|
||||
STAT cmd_lease_set_out 0
|
||||
STAT cmd_delete_count 0
|
||||
STAT cmd_other 0
|
||||
STAT cmd_delete 0
|
||||
STAT cmd_get 0
|
||||
STAT cmd_lease_set 0
|
||||
STAT cmd_set_out 0
|
||||
STAT cmd_lease_get_count 0
|
||||
STAT cmd_other_out 0
|
||||
STAT cmd_lease_get_out_all 0
|
||||
STAT cmd_set_out_all 0
|
||||
STAT cmd_other_count 0
|
||||
STAT cmd_delete_out_all 0
|
||||
STAT cmd_lease_set_out_all 0
|
||||
END
|
||||
`
|
||||
@@ -11,7 +11,7 @@ For more information, please check the [Mesos Observability Metrics](http://meso
|
||||
## Timeout, in ms.
|
||||
timeout = 100
|
||||
## A list of Mesos masters.
|
||||
masters = ["localhost:5050"]
|
||||
masters = ["http://localhost:5050"]
|
||||
## Master metrics groups to be collected, by default, all enabled.
|
||||
master_collections = [
|
||||
"resources",
|
||||
@@ -35,6 +35,13 @@ For more information, please check the [Mesos Observability Metrics](http://meso
|
||||
# "tasks",
|
||||
# "messages",
|
||||
# ]
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default
|
||||
@@ -235,7 +242,8 @@ Mesos slave metric groups
|
||||
### Tags:
|
||||
|
||||
- All master/slave measurements have the following tags:
|
||||
- server
|
||||
- server (network location of server: `host:port`)
|
||||
- url (URL origin of server: `scheme://host:port`)
|
||||
- role (master/slave)
|
||||
|
||||
- All master measurements have the extra tags:
|
||||
|
||||
@@ -7,11 +7,14 @@ import (
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
|
||||
)
|
||||
@@ -30,6 +33,20 @@ type Mesos struct {
|
||||
Slaves []string
|
||||
SlaveCols []string `toml:"slave_collections"`
|
||||
//SlaveTasks bool
|
||||
|
||||
// Path to CA file
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
// Path to host cert file
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
// Path to cert key file
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
// Use SSL but skip chain & host verification
|
||||
InsecureSkipVerify bool
|
||||
|
||||
initialized bool
|
||||
client *http.Client
|
||||
masterURLs []*url.URL
|
||||
slaveURLs []*url.URL
|
||||
}
|
||||
|
||||
var allMetrics = map[Role][]string{
|
||||
@@ -41,7 +58,7 @@ var sampleConfig = `
|
||||
## Timeout, in ms.
|
||||
timeout = 100
|
||||
## A list of Mesos masters.
|
||||
masters = ["localhost:5050"]
|
||||
masters = ["http://localhost:5050"]
|
||||
## Master metrics groups to be collected, by default, all enabled.
|
||||
master_collections = [
|
||||
"resources",
|
||||
@@ -65,6 +82,13 @@ var sampleConfig = `
|
||||
# "tasks",
|
||||
# "messages",
|
||||
# ]
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# ssl_key = "/etc/telegraf/key.pem"
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
// SampleConfig returns a sample configuration block
|
||||
@@ -77,7 +101,28 @@ func (m *Mesos) Description() string {
|
||||
return "Telegraf plugin for gathering metrics from N Mesos masters"
|
||||
}
|
||||
|
||||
func (m *Mesos) SetDefaults() {
|
||||
func parseURL(s string, role Role) (*url.URL, error) {
|
||||
if !strings.HasPrefix(s, "http://") && !strings.HasPrefix(s, "https://") {
|
||||
host, port, err := net.SplitHostPort(s)
|
||||
// no port specified
|
||||
if err != nil {
|
||||
host = s
|
||||
switch role {
|
||||
case MASTER:
|
||||
port = "5050"
|
||||
case SLAVE:
|
||||
port = "5051"
|
||||
}
|
||||
}
|
||||
|
||||
s = "http://" + host + ":" + port
|
||||
log.Printf("W! [inputs.mesos] Using %q as connection URL; please update your configuration to use an URL", s)
|
||||
}
|
||||
|
||||
return url.Parse(s)
|
||||
}
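// Illustrative results of parseURL (assumed inputs; the default ports come
// from the MASTER/SLAVE cases above):
//
//	parseURL("localhost", MASTER)                  // -> http://localhost:5050
//	parseURL("10.0.0.1", SLAVE)                    // -> http://10.0.0.1:5051
//	parseURL("https://mesos.example:5050", MASTER) // already a URL, kept as given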
|
||||
|
||||
func (m *Mesos) initialize() error {
|
||||
if len(m.MasterCols) == 0 {
|
||||
m.MasterCols = allMetrics[MASTER]
|
||||
}
|
||||
@@ -87,33 +132,71 @@ func (m *Mesos) SetDefaults() {
|
||||
}
|
||||
|
||||
if m.Timeout == 0 {
|
||||
log.Println("I! [mesos] Missing timeout value, setting default value (100ms)")
|
||||
log.Println("I! [inputs.mesos] Missing timeout value, setting default value (100ms)")
|
||||
m.Timeout = 100
|
||||
}
|
||||
|
||||
rawQuery := "timeout=" + strconv.Itoa(m.Timeout) + "ms"
|
||||
|
||||
m.masterURLs = make([]*url.URL, 0, len(m.Masters))
|
||||
for _, master := range m.Masters {
|
||||
u, err := parseURL(master, MASTER)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
u.RawQuery = rawQuery
|
||||
m.masterURLs = append(m.masterURLs, u)
|
||||
}
|
||||
|
||||
m.slaveURLs = make([]*url.URL, 0, len(m.Slaves))
|
||||
for _, slave := range m.Slaves {
|
||||
u, err := parseURL(slave, SLAVE)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
u.RawQuery = rawQuery
|
||||
m.slaveURLs = append(m.slaveURLs, u)
|
||||
}
|
||||
|
||||
client, err := m.createHttpClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.client = client
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gather() metrics from given list of Mesos Masters
|
||||
func (m *Mesos) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
m.SetDefaults()
|
||||
|
||||
for _, v := range m.Masters {
|
||||
wg.Add(1)
|
||||
go func(c string) {
|
||||
acc.AddError(m.gatherMainMetrics(c, ":5050", MASTER, acc))
|
||||
wg.Done()
|
||||
return
|
||||
}(v)
|
||||
if !m.initialized {
|
||||
err := m.initialize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.initialized = true
|
||||
}
|
||||
|
||||
for _, v := range m.Slaves {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, master := range m.masterURLs {
|
||||
wg.Add(1)
|
||||
go func(c string) {
|
||||
acc.AddError(m.gatherMainMetrics(c, ":5051", SLAVE, acc))
|
||||
go func(master *url.URL) {
|
||||
acc.AddError(m.gatherMainMetrics(master, MASTER, acc))
|
||||
wg.Done()
|
||||
return
|
||||
}(v)
|
||||
}(master)
|
||||
}
|
||||
|
||||
for _, slave := range m.slaveURLs {
|
||||
wg.Add(1)
|
||||
go func(slave *url.URL) {
|
||||
acc.AddError(m.gatherMainMetrics(slave, SLAVE, acc))
|
||||
wg.Done()
|
||||
return
|
||||
}(slave)
|
||||
|
||||
// if !m.SlaveTasks {
|
||||
// continue
|
||||
@@ -121,7 +204,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
// wg.Add(1)
|
||||
// go func(c string) {
|
||||
// acc.AddError(m.gatherSlaveTaskMetrics(c, ":5051", acc))
|
||||
// acc.AddError(m.gatherSlaveTaskMetrics(slave, acc))
|
||||
// wg.Done()
|
||||
// return
|
||||
// }(v)
|
||||
@@ -132,6 +215,24 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Mesos) createHttpClient() (*http.Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
Timeout: 4 * time.Second,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// metricsDiff() returns set names for removal
|
||||
func metricsDiff(role Role, w []string) []string {
|
||||
b := []string{}
|
||||
@@ -393,15 +494,6 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
var tr = &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
}
|
||||
|
||||
var client = &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: time.Duration(4 * time.Second),
|
||||
}
|
||||
|
||||
// TaskStats struct for JSON API output /monitor/statistics
|
||||
type TaskStats struct {
|
||||
ExecutorID string `json:"executor_id"`
|
||||
@@ -409,22 +501,15 @@ type TaskStats struct {
|
||||
Statistics map[string]interface{} `json:"statistics"`
|
||||
}
|
||||
|
||||
func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc telegraf.Accumulator) error {
|
||||
func (m *Mesos) gatherSlaveTaskMetrics(u *url.URL, acc telegraf.Accumulator) error {
|
||||
var metrics []TaskStats
|
||||
|
||||
host, _, err := net.SplitHostPort(address)
|
||||
if err != nil {
|
||||
host = address
|
||||
address = address + defaultPort
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"server": host,
|
||||
"server": u.Hostname(),
|
||||
"url": urlTag(u),
|
||||
}
|
||||
|
||||
ts := strconv.Itoa(m.Timeout) + "ms"
|
||||
|
||||
resp, err := client.Get("http://" + address + "/monitor/statistics?timeout=" + ts)
|
||||
resp, err := m.client.Get(withPath(u, "/monitor/statistics").String())
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -459,24 +544,31 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t
|
||||
return nil
|
||||
}
|
||||
|
||||
func withPath(u *url.URL, path string) *url.URL {
|
||||
c := *u
|
||||
c.Path = path
|
||||
return &c
|
||||
}
|
||||
|
||||
func urlTag(u *url.URL) string {
|
||||
c := *u
|
||||
c.Path = ""
|
||||
c.User = nil
|
||||
c.RawQuery = ""
|
||||
return c.String()
|
||||
}
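// Illustrative use of the two helpers above (exercised by the tests added below):
//
//	u, _ := url.Parse("http://user:pass@mesos.example:5050?timeout=100ms")
//	withPath(u, "/metrics/snapshot") // -> http://user:pass@mesos.example:5050/metrics/snapshot?timeout=100ms
//	urlTag(u)                        // -> "http://mesos.example:5050"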
|
||||
|
||||
// This should not belong to the object
|
||||
func (m *Mesos) gatherMainMetrics(a string, defaultPort string, role Role, acc telegraf.Accumulator) error {
|
||||
func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulator) error {
|
||||
var jsonOut map[string]interface{}
|
||||
|
||||
host, _, err := net.SplitHostPort(a)
|
||||
if err != nil {
|
||||
host = a
|
||||
a = a + defaultPort
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"server": host,
|
||||
"server": u.Hostname(),
|
||||
"url": urlTag(u),
|
||||
"role": string(role),
|
||||
}
|
||||
|
||||
ts := strconv.Itoa(m.Timeout) + "ms"
|
||||
|
||||
resp, err := client.Get("http://" + a + "/metrics/snapshot?timeout=" + ts)
|
||||
resp, err := m.client.Get(withPath(u, "/metrics/snapshot").String())
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -6,10 +6,12 @@ import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var masterMetrics map[string]interface{}
|
||||
@@ -378,3 +380,19 @@ func TestSlaveFilter(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithPathDoesNotModify(t *testing.T) {
|
||||
u, err := url.Parse("http://localhost:5051")
|
||||
require.NoError(t, err)
|
||||
v := withPath(u, "/xyzzy")
|
||||
require.Equal(t, u.String(), "http://localhost:5051")
|
||||
require.Equal(t, v.String(), "http://localhost:5051/xyzzy")
|
||||
}
|
||||
|
||||
func TestURLTagDoesNotModify(t *testing.T) {
|
||||
u, err := url.Parse("http://a:b@localhost:5051?timeout=1ms")
|
||||
require.NoError(t, err)
|
||||
v := urlTag(u)
|
||||
require.Equal(t, u.String(), "http://a:b@localhost:5051?timeout=1ms")
|
||||
require.Equal(t, v, "http://localhost:5051")
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Telegraf plugin: MongoDB
|
||||
# MongoDB Input Plugin
|
||||
|
||||
#### Configuration
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.mongodb]]
|
||||
@@ -10,7 +10,9 @@
|
||||
## mongodb://user:auth_key@10.10.3.30:27017,
|
||||
## mongodb://10.10.3.33:18832,
|
||||
servers = ["mongodb://127.0.0.1:27017"]
|
||||
gather_perdb_stats = false
|
||||
|
||||
## When true, collect per database stats
|
||||
# gather_perdb_stats = false
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
@@ -19,49 +21,106 @@
|
||||
## Use SSL but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
This connection uri may be different based on your environment and mongodb
|
||||
setup. If the user doesn't have the required privilege to execute the serverStatus
|
||||
command, you will get this error in Telegraf.
|
||||
|
||||
#### Permissions:
|
||||
|
||||
If your MongoDB instance has access control enabled you will need to connect
|
||||
as a user with sufficient rights.
|
||||
|
||||
With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In
|
||||
version 3.2 you may also need these additional permissions:
|
||||
```
|
||||
> db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}])
|
||||
```
|
||||
|
||||
If the user is missing required privileges you may see an error in the
|
||||
Telegraf logs similar to:
|
||||
```
|
||||
Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 }
|
||||
```
|
||||
|
||||
#### Description
|
||||
### Metrics:
|
||||
|
||||
The telegraf plugin collects MongoDB stats exposed by serverStatus and a few more,
|
||||
and creates a single measurement containing values, e.g.
|
||||
* active_reads
|
||||
* active_writes
|
||||
* commands_per_sec
|
||||
* deletes_per_sec
|
||||
* flushes_per_sec
|
||||
* getmores_per_sec
|
||||
* inserts_per_sec
|
||||
* net_in_bytes
|
||||
* net_out_bytes
|
||||
* open_connections
|
||||
* percent_cache_dirty
|
||||
* percent_cache_used
|
||||
* queries_per_sec
|
||||
* queued_reads
|
||||
* queued_writes
|
||||
* resident_megabytes
|
||||
* updates_per_sec
|
||||
* vsize_megabytes
|
||||
* ttl_deletes_per_sec
|
||||
* ttl_passes_per_sec
|
||||
* repl_lag
|
||||
* jumbo_chunks (only if mongos or mongo config)
|
||||
- mongodb
|
||||
- tags:
|
||||
- hostname
|
||||
- fields:
|
||||
- active_reads (integer)
|
||||
- active_writes (integer)
|
||||
- commands_per_sec (integer)
|
||||
- deletes_per_sec (integer)
|
||||
- flushes_per_sec (integer)
|
||||
- getmores_per_sec (integer)
|
||||
- inserts_per_sec (integer)
|
||||
- jumbo_chunks (integer)
|
||||
- member_status (string)
|
||||
- net_in_bytes (integer)
|
||||
- net_out_bytes (integer)
|
||||
- open_connections (integer)
|
||||
- percent_cache_dirty (float)
|
||||
- percent_cache_used (float)
|
||||
- queries_per_sec (integer)
|
||||
- queued_reads (integer)
|
||||
- queued_writes (integer)
|
||||
- repl_commands_per_sec (integer)
|
||||
- repl_deletes_per_sec (integer)
|
||||
- repl_getmores_per_sec (integer)
|
||||
- repl_inserts_per_sec (integer)
|
||||
- repl_lag (integer)
|
||||
- repl_queries_per_sec (integer)
|
||||
- repl_updates_per_sec (integer)
|
||||
- repl_oplog_window_sec (integer)
|
||||
- resident_megabytes (integer)
|
||||
- state (string)
|
||||
- total_available (integer)
|
||||
- total_created (integer)
|
||||
- total_in_use (integer)
|
||||
- total_refreshing (integer)
|
||||
- ttl_deletes_per_sec (integer)
|
||||
- ttl_passes_per_sec (integer)
|
||||
- updates_per_sec (integer)
|
||||
- vsize_megabytes (integer)
|
||||
- wtcache_app_threads_page_read_count (integer)
|
||||
- wtcache_app_threads_page_read_time (integer)
|
||||
- wtcache_app_threads_page_write_count (integer)
|
||||
- wtcache_bytes_read_into (integer)
|
||||
- wtcache_bytes_written_from (integer)
|
||||
- wtcache_current_bytes (integer)
|
||||
- wtcache_max_bytes_configured (integer)
|
||||
- wtcache_pages_evicted_by_app_thread (integer)
|
||||
- wtcache_pages_queued_for_eviction (integer)
|
||||
- wtcache_server_evicting_pages (integer)
|
||||
- wtcache_tracked_dirty_bytes (integer)
|
||||
- wtcache_worker_thread_evictingpages (integer)
|
||||
|
||||
If `gather_perdb_stats` is set to true, it will also collect per-database stats exposed by db.stats(),
|
||||
creating another measurement called mongodb_db_stats and containing values:
|
||||
* collections
|
||||
* objects
|
||||
* avg_obj_size
|
||||
* data_size
|
||||
* storage_size
|
||||
* num_extents
|
||||
* indexes
|
||||
* index_size
|
||||
* ok
|
||||
- mongodb_db_stats
|
||||
- tags:
|
||||
- db_name
|
||||
- hostname
|
||||
- fields:
|
||||
- avg_obj_size (float)
|
||||
- collections (integer)
|
||||
- data_size (integer)
|
||||
- index_size (integer)
|
||||
- indexes (integer)
|
||||
- num_extents (integer)
|
||||
- objects (integer)
|
||||
- ok (integer)
|
||||
- storage_size (integer)
|
||||
- type (string)
|
||||
|
||||
- mongodb_shard_stats
|
||||
- tags:
|
||||
- hostname
|
||||
- fields:
|
||||
- in_use (integer)
|
||||
- available (integer)
|
||||
- created (integer)
|
||||
- refreshing (integer)
|
||||
|
||||
### Example Output:
|
||||
```
|
||||
mongodb,hostname=127.0.0.1:27017 active_reads=0i,active_writes=0i,commands_per_sec=6i,deletes_per_sec=0i,flushes_per_sec=0i,getmores_per_sec=1i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=851i,net_out_bytes=23904i,open_connections=6i,percent_cache_dirty=0,percent_cache_used=0,queries_per_sec=2i,queued_reads=0i,queued_writes=0i,repl_commands_per_sec=0i,repl_deletes_per_sec=0i,repl_getmores_per_sec=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_queries_per_sec=0i,repl_updates_per_sec=0i,resident_megabytes=67i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes_per_sec=0i,ttl_passes_per_sec=0i,updates_per_sec=0i,vsize_megabytes=729i,wtcache_app_threads_page_read_count=4i,wtcache_app_threads_page_read_time=18i,wtcache_app_threads_page_write_count=6i,wtcache_bytes_read_into=10075i,wtcache_bytes_written_from=115711i,wtcache_current_bytes=86038i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1522798796000000000
|
||||
mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=818.625,collections=5i,data_size=6549i,index_size=86016i,indexes=4i,num_extents=0i,objects=8i,ok=1i,storage_size=118784i,type="db_stat" 1522799074000000000
|
||||
mongodb_shard_stats,hostname=127.0.0.1:27017 in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000
|
||||
```
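The fields above are assembled from MongoDB's `serverStatus`, `db.stats()` and `shardConnPoolStats` commands. One quick way to confirm that the configured user is actually allowed to run `serverStatus` (the command behind the authorization error shown earlier) is a minimal sketch like the following, illustrative only, using the `mgo` driver the plugin itself uses; the connection string is an assumption:

```go
package main

import (
	"fmt"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	// Assumes a local mongod; add credentials to the URL if auth is enabled.
	session, err := mgo.Dial("mongodb://127.0.0.1:27017")
	if err != nil {
		panic(err)
	}
	defer session.Close()

	// The same admin command the plugin relies on for most of the fields above.
	var result bson.M
	if err := session.DB("admin").Run(bson.D{{Name: "serverStatus", Value: 1}}, &result); err != nil {
		panic(err) // e.g. not authorized on admin to execute command { serverStatus: 1, ... }
	}
	fmt.Println("serverStatus ok, uptime:", result["uptime"])
}
```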
|
||||
|
||||
@@ -45,7 +45,9 @@ var sampleConfig = `
|
||||
## mongodb://user:auth_key@10.10.3.30:27017,
|
||||
## mongodb://10.10.3.33:18832,
|
||||
servers = ["mongodb://127.0.0.1:27017"]
|
||||
gather_perdb_stats = false
|
||||
|
||||
## When true, collect per database stats
|
||||
# gather_perdb_stats = false
|
||||
|
||||
## Optional SSL Config
|
||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||
@@ -149,6 +151,9 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
|
||||
} else {
|
||||
tlsConfig, err = internal.GetTLSConfig(
|
||||
m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If configured to use TLS, add a dial function
|
||||
|
||||
@@ -9,10 +9,11 @@ import (
|
||||
)
|
||||
|
||||
type MongodbData struct {
|
||||
StatLine *StatLine
|
||||
Fields map[string]interface{}
|
||||
Tags map[string]string
|
||||
DbData []DbData
|
||||
StatLine *StatLine
|
||||
Fields map[string]interface{}
|
||||
Tags map[string]string
|
||||
DbData []DbData
|
||||
ShardHostData []DbData
|
||||
}
|
||||
|
||||
type DbData struct {
|
||||
@@ -60,12 +61,27 @@ var DefaultReplStats = map[string]string{
|
||||
"member_status": "NodeType",
|
||||
"state": "NodeState",
|
||||
"repl_lag": "ReplLag",
|
||||
"repl_oplog_window_sec": "OplogTimeDiff",
|
||||
}
|
||||
|
||||
var DefaultClusterStats = map[string]string{
|
||||
"jumbo_chunks": "JumboChunksCount",
|
||||
}
|
||||
|
||||
var DefaultShardStats = map[string]string{
|
||||
"total_in_use": "TotalInUse",
|
||||
"total_available": "TotalAvailable",
|
||||
"total_created": "TotalCreated",
|
||||
"total_refreshing": "TotalRefreshing",
|
||||
}
|
||||
|
||||
var ShardHostStats = map[string]string{
|
||||
"in_use": "InUse",
|
||||
"available": "Available",
|
||||
"created": "Created",
|
||||
"refreshing": "Refreshing",
|
||||
}
|
||||
|
||||
var MmapStats = map[string]string{
|
||||
"mapped_megabytes": "Mapped",
|
||||
"non-mapped_megabytes": "NonMapped",
|
||||
@@ -120,6 +136,22 @@ func (d *MongodbData) AddDbStats() {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *MongodbData) AddShardHostStats() {
|
||||
for host, hostStat := range d.StatLine.ShardHostStatsLines {
|
||||
hostStatLine := reflect.ValueOf(&hostStat).Elem()
|
||||
newDbData := &DbData{
|
||||
Name: host,
|
||||
Fields: make(map[string]interface{}),
|
||||
}
|
||||
newDbData.Fields["type"] = "shard_host_stat"
|
||||
for k, v := range ShardHostStats {
|
||||
val := hostStatLine.FieldByName(v).Interface()
|
||||
newDbData.Fields[k] = val
|
||||
}
|
||||
d.ShardHostData = append(d.ShardHostData, *newDbData)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *MongodbData) AddDefaultStats() {
|
||||
statLine := reflect.ValueOf(d.StatLine).Elem()
|
||||
d.addStat(statLine, DefaultStats)
|
||||
@@ -127,7 +159,8 @@ func (d *MongodbData) AddDefaultStats() {
|
||||
d.addStat(statLine, DefaultReplStats)
|
||||
}
|
||||
d.addStat(statLine, DefaultClusterStats)
|
||||
if d.StatLine.StorageEngine == "mmapv1" {
|
||||
d.addStat(statLine, DefaultShardStats)
|
||||
if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" {
|
||||
d.addStat(statLine, MmapStats)
|
||||
} else if d.StatLine.StorageEngine == "wiredTiger" {
|
||||
for key, value := range WiredTigerStats {
|
||||
@@ -170,4 +203,14 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) {
|
||||
)
|
||||
db.Fields = make(map[string]interface{})
|
||||
}
|
||||
for _, host := range d.ShardHostData {
|
||||
d.Tags["hostname"] = host.Name
|
||||
acc.AddFields(
|
||||
"mongodb_shard_stats",
|
||||
host.Fields,
|
||||
d.Tags,
|
||||
d.StatLine.Time,
|
||||
)
|
||||
host.Fields = make(map[string]interface{})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package mongodb
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -99,6 +100,64 @@ func TestAddWiredTigerStats(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddShardStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
TotalInUse: 0,
|
||||
TotalAvailable: 0,
|
||||
TotalCreated: 0,
|
||||
TotalRefreshing: 0,
|
||||
},
|
||||
tags,
|
||||
)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
d.AddDefaultStats()
|
||||
d.flush(&acc)
|
||||
|
||||
for key, _ := range DefaultShardStats {
|
||||
assert.True(t, acc.HasInt64Field("mongodb", key))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddShardHostStats(t *testing.T) {
|
||||
expectedHosts := []string{"hostA", "hostB"}
|
||||
hostStatLines := map[string]ShardHostStatLine{}
|
||||
for _, host := range expectedHosts {
|
||||
hostStatLines[host] = ShardHostStatLine{
|
||||
InUse: 0,
|
||||
Available: 0,
|
||||
Created: 0,
|
||||
Refreshing: 0,
|
||||
}
|
||||
}
|
||||
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
ShardHostStatsLines: hostStatLines,
|
||||
},
|
||||
map[string]string{}, // Use empty tags, so we don't break existing tests
|
||||
)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
d.AddShardHostStats()
|
||||
d.flush(&acc)
|
||||
|
||||
var hostsFound []string
|
||||
for host, _ := range hostStatLines {
|
||||
for key, _ := range ShardHostStats {
|
||||
assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key))
|
||||
}
|
||||
|
||||
assert.True(t, acc.HasTag("mongodb_shard_stats", "hostname"))
|
||||
hostsFound = append(hostsFound, host)
|
||||
}
|
||||
sort.Strings(hostsFound)
|
||||
sort.Strings(expectedHosts)
|
||||
assert.Equal(t, hostsFound, expectedHosts)
|
||||
}
|
||||
|
||||
func TestStateTag(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
@@ -141,12 +200,17 @@ func TestStateTag(t *testing.T) {
|
||||
"repl_queries_per_sec": int64(0),
|
||||
"repl_updates_per_sec": int64(0),
|
||||
"repl_lag": int64(0),
|
||||
"repl_oplog_window_sec": int64(0),
|
||||
"resident_megabytes": int64(0),
|
||||
"updates_per_sec": int64(0),
|
||||
"vsize_megabytes": int64(0),
|
||||
"ttl_deletes_per_sec": int64(0),
|
||||
"ttl_passes_per_sec": int64(0),
|
||||
"jumbo_chunks": int64(0),
|
||||
"total_in_use": int64(0),
|
||||
"total_available": int64(0),
|
||||
"total_created": int64(0),
|
||||
"total_refreshing": int64(0),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
|
||||
}
|
||||
|
||||
@@ -22,6 +22,41 @@ func (s *Server) getDefaultTags() map[string]string {
|
||||
return tags
|
||||
}
|
||||
|
||||
type oplogEntry struct {
|
||||
Timestamp bson.MongoTimestamp `bson:"ts"`
|
||||
}
|
||||
|
||||
func (s *Server) gatherOplogStats() *OplogStats {
|
||||
stats := &OplogStats{}
|
||||
localdb := s.Session.DB("local")
|
||||
|
||||
op_first := oplogEntry{}
|
||||
op_last := oplogEntry{}
|
||||
query := bson.M{"ts": bson.M{"$exists": true}}
|
||||
|
||||
for _, collection_name := range []string{"oplog.rs", "oplog.$main"} {
|
||||
if err := localdb.C(collection_name).Find(query).Sort("$natural").Limit(1).One(&op_first); err != nil {
|
||||
if err == mgo.ErrNotFound {
|
||||
continue
|
||||
}
|
||||
log.Println("E! Error getting first oplog entry (" + err.Error() + ")")
|
||||
return stats
|
||||
}
|
||||
if err := localdb.C(collection_name).Find(query).Sort("-$natural").Limit(1).One(&op_last); err != nil {
|
||||
if err == mgo.ErrNotFound {
|
||||
continue
|
||||
}
|
||||
log.Println("E! Error getting last oplog entry (" + err.Error() + ")")
|
||||
return stats
|
||||
}
|
||||
}
|
||||
|
||||
op_first_time := time.Unix(int64(op_first.Timestamp>>32), 0)
|
||||
op_last_time := time.Unix(int64(op_last.Timestamp>>32), 0)
|
||||
stats.TimeDiff = int64(op_last_time.Sub(op_first_time).Seconds())
|
||||
return stats
|
||||
}
|
||||
|
||||
func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error {
|
||||
s.Session.SetMode(mgo.Eventual, true)
|
||||
s.Session.SetSocketTimeout(0)
|
||||
@@ -55,8 +90,20 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
|
||||
JumboChunksCount: int64(jumbo_chunks),
|
||||
}
|
||||
|
||||
result_db_stats := &DbStats{}
|
||||
resultShards := &ShardStats{}
|
||||
err = s.Session.DB("admin").Run(bson.D{
|
||||
{
|
||||
Name: "shardConnPoolStats",
|
||||
Value: 1,
|
||||
},
|
||||
}, &resultShards)
|
||||
if err != nil {
|
||||
log.Println("E! Error getting database shard stats (" + err.Error() + ")")
|
||||
}
|
||||
|
||||
oplogStats := s.gatherOplogStats()
|
||||
|
||||
result_db_stats := &DbStats{}
|
||||
if gatherDbStats == true {
|
||||
names := []string{}
|
||||
names, err = s.Session.DatabaseNames()
|
||||
@@ -88,6 +135,8 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
|
||||
ReplSetStatus: result_repl,
|
||||
ClusterStatus: result_cluster,
|
||||
DbStats: result_db_stats,
|
||||
ShardStats: resultShards,
|
||||
OplogStats: oplogStats,
|
||||
}
|
||||
|
||||
defer func() {
|
||||
@@ -107,6 +156,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
|
||||
)
|
||||
data.AddDefaultStats()
|
||||
data.AddDbStats()
|
||||
data.AddShardHostStats()
|
||||
data.flush(acc)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -34,6 +34,8 @@ type MongoStatus struct {
|
||||
ReplSetStatus *ReplSetStatus
|
||||
ClusterStatus *ClusterStatus
|
||||
DbStats *DbStats
|
||||
ShardStats *ShardStats
|
||||
OplogStats *OplogStats
|
||||
}
|
||||
|
||||
type ServerStatus struct {
|
||||
@@ -101,6 +103,11 @@ type ReplSetStatus struct {
|
||||
MyState int64 `bson:"myState"`
|
||||
}
|
||||
|
||||
// OplogStats stores information from getReplicationInfo
|
||||
type OplogStats struct {
|
||||
TimeDiff int64
|
||||
}
|
||||
|
||||
// ReplSetMember stores information related to a replica set member
|
||||
type ReplSetMember struct {
|
||||
Name string `bson:"name"`
|
||||
@@ -116,6 +123,29 @@ type WiredTiger struct {
|
||||
Cache CacheStats `bson:"cache"`
|
||||
}
|
||||
|
||||
// ShardStats stores information from shardConnPoolStats.
|
||||
type ShardStats struct {
|
||||
ShardStatsData `bson:",inline"`
|
||||
Hosts map[string]ShardHostStatsData `bson:"hosts"`
|
||||
}
|
||||
|
||||
// ShardStatsData is the total Shard Stats from shardConnPoolStats database command.
|
||||
type ShardStatsData struct {
|
||||
TotalInUse int64 `bson:"totalInUse"`
|
||||
TotalAvailable int64 `bson:"totalAvailable"`
|
||||
TotalCreated int64 `bson:"totalCreated"`
|
||||
TotalRefreshing int64 `bson:"totalRefreshing"`
|
||||
}
|
||||
|
||||
// ShardHostStatsData is the host-specific stats
|
||||
// from shardConnPoolStats database command.
|
||||
type ShardHostStatsData struct {
|
||||
InUse int64 `bson:"inUse"`
|
||||
Available int64 `bson:"available"`
|
||||
Created int64 `bson:"created"`
|
||||
Refreshing int64 `bson:"refreshing"`
|
||||
}
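// Illustrative shape of the shardConnPoolStats reply that the Shard* structs
// above decode (field names come from the bson tags; the values are made up):
//
//	{
//	  "totalInUse": 3, "totalAvailable": 3, "totalCreated": 4, "totalRefreshing": 0,
//	  "hosts": {
//	    "shard01.example:27017": {"inUse": 3, "available": 3, "created": 4, "refreshing": 0}
//	  }
//	}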
|
||||
|
||||
type ConcurrentTransactions struct {
|
||||
Write ConcurrentTransStats `bson:"write"`
|
||||
Read ConcurrentTransStats `bson:"read"`
|
||||
@@ -433,6 +463,7 @@ type StatLine struct {
|
||||
// Replicated Opcounter fields
|
||||
InsertR, QueryR, UpdateR, DeleteR, GetMoreR, CommandR int64
|
||||
ReplLag int64
|
||||
OplogTimeDiff int64
|
||||
Flushes int64
|
||||
Mapped, Virtual, Resident, NonMapped int64
|
||||
Faults int64
|
||||
@@ -450,6 +481,12 @@ type StatLine struct {
|
||||
|
||||
// DB stats field
|
||||
DbStatsLines []DbStatLine
|
||||
|
||||
// Shard stats
|
||||
TotalInUse, TotalAvailable, TotalCreated, TotalRefreshing int64
|
||||
|
||||
// Shard Hosts stats field
|
||||
ShardHostStatsLines map[string]ShardHostStatLine
|
||||
}
|
||||
|
||||
type DbStatLine struct {
|
||||
@@ -465,6 +502,13 @@ type DbStatLine struct {
|
||||
Ok int64
|
||||
}
|
||||
|
||||
type ShardHostStatLine struct {
|
||||
InUse int64
|
||||
Available int64
|
||||
Created int64
|
||||
Refreshing int64
|
||||
}
|
||||
|
||||
func parseLocks(stat ServerStatus) map[string]LockUsage {
|
||||
returnVal := map[string]LockUsage{}
|
||||
for namespace, lockInfo := range stat.Locks {
|
||||
@@ -760,6 +804,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
||||
|
||||
newClusterStat := *newMongo.ClusterStatus
|
||||
returnVal.JumboChunksCount = newClusterStat.JumboChunksCount
|
||||
returnVal.OplogTimeDiff = newMongo.OplogStats.TimeDiff
|
||||
|
||||
newDbStats := *newMongo.DbStats
|
||||
for _, db := range newDbStats.Dbs {
|
||||
@@ -783,5 +828,23 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
||||
returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine)
|
||||
}
|
||||
|
||||
// Set shard stats
|
||||
newShardStats := *newMongo.ShardStats
|
||||
returnVal.TotalInUse = newShardStats.TotalInUse
|
||||
returnVal.TotalAvailable = newShardStats.TotalAvailable
|
||||
returnVal.TotalCreated = newShardStats.TotalCreated
|
||||
returnVal.TotalRefreshing = newShardStats.TotalRefreshing
|
||||
returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{}
|
||||
for host, stats := range newShardStats.Hosts {
|
||||
shardStatLine := &ShardHostStatLine{
|
||||
InUse: stats.InUse,
|
||||
Available: stats.Available,
|
||||
Created: stats.Created,
|
||||
Refreshing: stats.Refreshing,
|
||||
}
|
||||
|
||||
returnVal.ShardHostStatsLines[host] = *shardStatLine
|
||||
}
|
||||
|
||||
return returnVal
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# MySQL Input plugin
|
||||
# MySQL Input Plugin
|
||||
|
||||
This plugin gathers the statistic data from MySQL server
|
||||
|
||||
@@ -18,9 +18,9 @@ This plugin gathers the statistic data from MySQL server
|
||||
* File events statistics
|
||||
* Table schema statistics
|
||||
|
||||
## Configuration
|
||||
### Configuration
|
||||
|
||||
```
|
||||
```toml
|
||||
# Read metrics from one or many mysql servers
|
||||
[[inputs.mysql]]
|
||||
## specify servers via a url matching:
|
||||
@@ -81,14 +81,97 @@ This plugin gathers the statistic data from MySQL server
|
||||
#
|
||||
## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
|
||||
interval_slow = "30m"
|
||||
|
||||
|
||||
## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
|
||||
ssl_ca = "/etc/telegraf/ca.pem"
|
||||
ssl_cert = "/etc/telegraf/cert.pem"
|
||||
ssl_key = "/etc/telegraf/key.pem"
|
||||
```
|
||||
|
||||
## Measurements & Fields
|
||||
#### Metric Version
|
||||
|
||||
When `metric_version = 2`, a variety of field type issues are corrected as well
|
||||
as naming inconsistencies. If you have existing data on the original version,
|
||||
enabling this feature will cause a `field type error` when inserted into
|
||||
InfluxDB due to the change of types. For this reason, you should keep the
|
||||
`metric_version` unset until you are ready to migrate to the new format.
|
||||
|
||||
If preserving your old data is not required you may wish to drop conflicting
|
||||
measurements:
|
||||
```
|
||||
DROP SERIES from mysql
|
||||
DROP SERIES from mysql_variables
|
||||
DROP SERIES from mysql_innodb
|
||||
```
|
||||
|
||||
Otherwise, migration can be performed using the following steps:
|
||||
|
||||
1. Duplicate your `mysql` plugin configuration and add a `name_suffix` and
|
||||
`metric_version = 2`; this will result in collection using both the old and new
|
||||
style concurrently:
|
||||
```toml
|
||||
[[inputs.mysql]]
|
||||
servers = ["tcp(127.0.0.1:3306)/"]
|
||||
|
||||
[[inputs.mysql]]
|
||||
name_suffix = "_v2"
|
||||
metric_version = 2
|
||||
|
||||
servers = ["tcp(127.0.0.1:3306)/"]
|
||||
```
|
||||
|
||||
2. Upgrade all affected Telegraf clients to version >=1.6.
|
||||
|
||||
New measurements will be created with the `name_suffix`, for example:
|
||||
- `mysql_v2`
|
||||
- `mysql_variables_v2`
|
||||
|
||||
3. Update charts, alerts, and other supporting code to the new format.
|
||||
4. You can now remove the old `mysql` plugin configuration and remove old
|
||||
measurements.
|
||||
|
||||
If you wish to remove the `name_suffix` you may use Kapacitor to copy the
|
||||
historical data to the default name. Do this only after retiring the old
|
||||
measurement name.
|
||||
|
||||
1. Use the technique described above to write to multiple locations:
|
||||
```toml
|
||||
[[inputs.mysql]]
|
||||
servers = ["tcp(127.0.0.1:3306)/"]
|
||||
metric_version = 2
|
||||
|
||||
[[inputs.mysql]]
|
||||
name_suffix = "_v2"
|
||||
metric_version = 2
|
||||
|
||||
servers = ["tcp(127.0.0.1:3306)/"]
|
||||
```
|
||||
2. Create a TICKScript to copy the historical data:
|
||||
```
|
||||
dbrp "telegraf"."autogen"
|
||||
|
||||
batch
|
||||
|query('''
|
||||
SELECT * FROM "telegraf"."autogen"."mysql_v2"
|
||||
''')
|
||||
.period(5m)
|
||||
.every(5m)
|
||||
|influxDBOut()
|
||||
.database('telegraf')
|
||||
.retentionPolicy('autogen')
|
||||
.measurement('mysql')
|
||||
```
|
||||
3. Define a task for your script:
|
||||
```sh
|
||||
kapacitor define copy-measurement -tick copy-measurement.task
|
||||
```
|
||||
4. Run the task over the data you would like to migrate:
|
||||
```sh
|
||||
kapacitor replay-live batch -start 2018-03-30T20:00:00Z -stop 2018-04-01T12:00:00Z -rec-time -task copy-measurement
|
||||
```
|
||||
5. Verify copied data and repeat for other measurements.
|
||||
|
||||
### Metrics:
|
||||
* Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUS`
|
||||
* Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES`
|
||||
* Slave status - metrics from `SHOW SLAVE STATUS`; the metrics are gathered when
|
||||
|
||||