Compare commits: 1.0.0-beta...1.0.0-beta (65 commits)

03d02fa67a, b58cd78c79, dabb6f5466, 281a4d5500, 1c2965703d, 5dc4cce157, 8c7edeb53b, 1d9745ee98,
2d6c8767f7, b4a6d9c647, 6afe9ceef1, 704d9ad76c, 300d9adbd0, 207c5498e7, d5e7439343, 21add2c799,
4651ab88ad, 53f40063b3, 97d92bba67, bfdd665435, 821d3fafa6, 7c9b312cee, 69ab8a645c, 7b550c11cb,
bb4f18ca88, 6efe91ea9c, 5f0a63f554, d14e7536ab, c873937356, e1c3800cd9, c046232425, 2d4864e126,
048448aa93, 755b2ec953, f62c493c77, a6365a6086, f7e057ec55, 30cc00d11b, d641c42029, 9c2ca805da,
b0484d8a0c, 5ddd61d2e2, 50ea7f4a9d, b18134a4e3, 7825df4771, d6951dacdc, e603825e37, e3448153e1,
25848c545a, 3098564896, 4b6f9b93dd, 2beef21231, cb3c54a1ae, d50a1e83ac, 1f10639222, af0979cce5,
5b43901bd8, d7efb7a71d, 4d242836ee, 06cb5a041e, ea2521bf27, 4cd1f7a104, 137843b2f6, 008ed17a79,
75e6cb9064
.github/ISSUE_TEMPLATE.md (vendored): 2 changed lines

```diff
@@ -11,6 +11,8 @@ Erase the other section and everything on and above this line.
 
 ## Bug report
 
+### Relevant telegraf.conf:
+
 ### System info:
 
 [Include Telegraf version, operating system name, and other relevant details]
```
.gitignore (vendored): 1 changed line

```diff
@@ -1,3 +1,4 @@
+build
 tivan
 .vagrant
 /telegraf
```
CHANGELOG.md: 92 changed lines

````diff
@@ -1,3 +1,93 @@
+## v1.0 [unreleased]
+
+## v1.0 beta 3 [2016-07-18]
+
+### Release Notes
+
+**Breaking Change**: Aerospike main server node measurements have been renamed
+aerospike_node. Aerospike namespace measurements have been renamed to
+aerospike_namespace. They will also now be tagged with the node_name
+that they correspond to. This has been done to differentiate measurements
+that pertain to node vs. namespace statistics.
+
+**Breaking Change**: users of github_webhooks must change to the new
+`[[inputs.webhooks]]` plugin.
+
+This means that the default github_webhooks config:
+
+```
+# A Github Webhook Event collector
+[[inputs.github_webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+```
+
+should now look like:
+
+```
+# A Webhooks Event collector
+[[inputs.webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+
+  [inputs.webhooks.github]
+    path = "/"
+```
+
+### Features
+
+- [#1289](https://github.com/influxdata/telegraf/pull/1289): webhooks input plugin. Thanks @francois2metz and @cduez!
+- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar webhook plugin.
+- [#1408](https://github.com/influxdata/telegraf/pull/1408): mandrill webhook plugin.
+- [#1402](https://github.com/influxdata/telegraf/pull/1402): docker-machine/boot2docker no longer required for unit tests.
+- [#1350](https://github.com/influxdata/telegraf/pull/1350): cgroup input plugin.
+- [#1369](https://github.com/influxdata/telegraf/pull/1369): Add input plugin for consuming metrics from NSQD.
+- [#1369](https://github.com/influxdata/telegraf/pull/1480): add ability to read redis from a socket.
+- [#1387](https://github.com/influxdata/telegraf/pull/1387): **Breaking Change** - Redis `role` tag renamed to `replication_role` to avoid global_tags override
+- [#1437](https://github.com/influxdata/telegraf/pull/1437): Fetching Galera status metrics in MySQL
+- [#1500](https://github.com/influxdata/telegraf/pull/1500): Aerospike plugin refactored to use official client lib.
+- [#1434](https://github.com/influxdata/telegraf/pull/1434): Add measurement name arg to logparser plugin.
+- [#1479](https://github.com/influxdata/telegraf/pull/1479): logparser: change resp_code from a field to a tag.
+
+### Bugfixes
+
+- [#1472](https://github.com/influxdata/telegraf/pull/1472): diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality.
+- [#1426](https://github.com/influxdata/telegraf/pull/1426): nil metrics panic fix.
+- [#1384](https://github.com/influxdata/telegraf/pull/1384): Fix datarace in apache input plugin.
+- [#1399](https://github.com/influxdata/telegraf/issues/1399): Add `read_repairs` statistics to riak plugin.
+- [#1405](https://github.com/influxdata/telegraf/issues/1405): Fix memory/connection leak in prometheus input plugin.
+- [#1378](https://github.com/influxdata/telegraf/issues/1378): Trim BOM from config file for Windows support.
+- [#1339](https://github.com/influxdata/telegraf/issues/1339): Prometheus client output panic on service reload.
+- [#1461](https://github.com/influxdata/telegraf/pull/1461): Prometheus parser, protobuf format header fix.
+- [#1334](https://github.com/influxdata/telegraf/issues/1334): Prometheus output, metric refresh and caching fixes.
+- [#1432](https://github.com/influxdata/telegraf/issues/1432): Panic fix for multiple graphite outputs under very high load.
+- [#1412](https://github.com/influxdata/telegraf/pull/1412): Instrumental output has better reconnect behavior
+- [#1460](https://github.com/influxdata/telegraf/issues/1460): Remove PID from procstat plugin to fix cardinality issues.
+- [#1427](https://github.com/influxdata/telegraf/issues/1427): Cassandra input: version 2.x "column family" fix.
+- [#1463](https://github.com/influxdata/telegraf/issues/1463): Shared WaitGroup in Exec plugin
+- [#1436](https://github.com/influxdata/telegraf/issues/1436): logparser: honor modifiers in "pattern" config.
+- [#1418](https://github.com/influxdata/telegraf/issues/1418): logparser: error and exit on file permissions/missing errors.
+
+## v1.0 beta 2 [2016-06-21]
+
+### Features
+
+- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
+- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
+- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
+- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
+- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
+
+### Bugfixes
+
+- [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.
+- [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection.
+- [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string.
+- [#1335](https://github.com/influxdata/telegraf/issues/1335): Fix overall ping timeout to be calculated based on per-ping timeout.
+- [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
+- [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
+- [#1396](https://github.com/influxdata/telegraf/pull/1396): Prometheus input plugin now supports x509 certs authentication
+
 ## v1.0 beta 1 [2016-06-07]
 
 ### Release Notes
@@ -22,11 +112,11 @@ in conjunction with wildcard dimension values as it will control the amount of
 time before a new metric is included by the plugin.
 
 ### Features
 
 - [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input pluging.
 - [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
 - [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
 - [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
-- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar input plugin. Thanks @francois2metz and @cduez!
 - [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
 - [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
 - [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
````
````diff
@@ -114,7 +114,7 @@ creating the `Parser` object.
 You should also add the following to your SampleConfig() return:
 
 ```toml
   ## Data format to consume.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
@@ -244,7 +244,7 @@ instantiating and creating the `Serializer` object.
 You should also add the following to your SampleConfig() return:
 
 ```toml
   ## Data format to output.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
@@ -290,10 +290,6 @@ To execute Telegraf tests follow these simple steps:
   instructions
 - execute `make test`
 
-**OSX users**: you will need to install `boot2docker` or `docker-machine`.
-The Makefile will assume that you have a `docker-machine` box called `default` to
-get the IP address.
-
 ### Unit test troubleshooting
 
 Try cleaning up your test environment by executing `make docker-kill` and
````
Godeps: 4 changed lines

```diff
@@ -1,5 +1,6 @@
 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
+github.com/aerospike/aerospike-client-go 45863b7fd8640dc12f7fdd397104d97e1986f25a
 github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
@@ -45,10 +46,13 @@ github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
 github.com/shirou/gopsutil 586bb697f3ec9f8ec08ffefe18f521a64534037c
 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
+github.com/sparrc/aerospike-client-go d4bb42d2c2d39dae68e054116f4538af189e05d5
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
+github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
 github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
+github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
 golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
 golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
```
Makefile: 18 changed lines

```diff
@@ -1,4 +1,3 @@
-UNAME := $(shell sh -c 'uname')
 VERSION := $(shell sh -c 'git describe --always --tags')
 ifdef GOBIN
 PATH := $(GOBIN):$(PATH)
@@ -26,10 +25,6 @@ build-for-docker:
 		"-s -X main.version=$(VERSION)" \
 		./cmd/telegraf/telegraf.go
 
-# Build with race detector
-dev: prepare
-	go build -race -ldflags "-X main.version=$(VERSION)" ./...
-
 # run package script
 package:
 	./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
@@ -46,26 +41,17 @@ prepare-windows:
 
 # Run all docker containers necessary for unit tests
 docker-run:
-ifeq ($(UNAME), Darwin)
-	docker run --name kafka \
-		-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
-		-e ADVERTISED_PORT=9092 \
-		-p "2181:2181" -p "9092:9092" \
-		-d spotify/kafka
-endif
-ifeq ($(UNAME), Linux)
 	docker run --name kafka \
 		-e ADVERTISED_HOST=localhost \
 		-e ADVERTISED_PORT=9092 \
 		-p "2181:2181" -p "9092:9092" \
 		-d spotify/kafka
-endif
 	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
 	docker run --name memcached -p "11211:11211" -d memcached
 	docker run --name postgres -p "5432:5432" -d postgres
 	docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
 	docker run --name redis -p "6379:6379" -d redis
-	docker run --name aerospike -p "3000:3000" -d aerospike
+	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
 	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
 	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
 	docker run --name riemann -p "5555:5555" -d blalor/riemann
@@ -78,7 +64,7 @@ docker-run-circle:
 		-e ADVERTISED_PORT=9092 \
 		-p "2181:2181" -p "9092:9092" \
 		-d spotify/kafka
-	docker run --name aerospike -p "3000:3000" -d aerospike
+	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
 	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
 	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
 	docker run --name riemann -p "5555:5555" -d blalor/riemann
```
README.md: 25 changed lines

```diff
@@ -20,12 +20,12 @@ new plugins.
 ### Linux deb and rpm Packages:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta1_amd64.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta1.x86_64.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_amd64.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.x86_64.rpm
 
 Latest (arm):
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta1_armhf.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta1.armhf.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_armhf.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.armhf.rpm
 
 ##### Package Instructions:
 
@@ -46,14 +46,14 @@ to use this repo to install & update telegraf.
 ### Linux tarballs:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_amd64.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_i386.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_armhf.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_i386.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_armhf.tar.gz
 
 ### FreeBSD tarball:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_freebsd_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_freebsd_amd64.tar.gz
 
 ### Ansible Role:
 
@@ -69,7 +69,7 @@ brew install telegraf
 ### Windows Binaries (EXPERIMENTAL)
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_windows_amd64.zip
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_windows_amd64.zip
 
 ### From Source:
 
@@ -217,8 +217,11 @@ Telegraf can also collect metrics via the following service plugins:
 * [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
 * [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
 * [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
-* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)
-* [rollbar_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rollbar_webhooks)
+* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
+  * [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
+  * [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
+  * [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
+* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
 
 We'll be adding support for many more over the coming months. Read on if you
 want to add support for another service or third-party API.
```
```diff
@@ -18,4 +18,8 @@ type Accumulator interface {
 
 	Debug() bool
 	SetDebug(enabled bool)
+
+	SetPrecision(precision, interval time.Duration)
+
+	DisablePrecision()
 }
```
```diff
@@ -17,6 +17,7 @@ func NewAccumulator(
 	acc := accumulator{}
 	acc.metrics = metrics
 	acc.inputConfig = inputConfig
+	acc.precision = time.Nanosecond
 	return &acc
 }
 
@@ -32,6 +33,8 @@ type accumulator struct {
 	inputConfig *internal_models.InputConfig
 
 	prefix string
+
+	precision time.Duration
 }
 
 func (ac *accumulator) Add(
@@ -141,6 +144,7 @@ func (ac *accumulator) AddFields(
 	} else {
 		timestamp = time.Now()
 	}
+	timestamp = timestamp.Round(ac.precision)
 
 	if ac.prefix != "" {
 		measurement = ac.prefix + measurement
@@ -173,6 +177,31 @@ func (ac *accumulator) SetTrace(trace bool) {
 	ac.trace = trace
 }
 
+// SetPrecision takes two time.Duration objects. If the first is non-zero,
+// it sets that as the precision. Otherwise, it takes the second argument
+// as the order of time that the metrics should be rounded to, with the
+// maximum being 1s.
+func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
+	if precision > 0 {
+		ac.precision = precision
+		return
+	}
+	switch {
+	case interval >= time.Second:
+		ac.precision = time.Second
+	case interval >= time.Millisecond:
+		ac.precision = time.Millisecond
+	case interval >= time.Microsecond:
+		ac.precision = time.Microsecond
+	default:
+		ac.precision = time.Nanosecond
+	}
+}
+
+func (ac *accumulator) DisablePrecision() {
+	ac.precision = time.Nanosecond
+}
+
 func (ac *accumulator) setDefaultTags(tags map[string]string) {
 	ac.defaultTags = tags
 }
```
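The rounding behavior added above is easiest to see in isolation. Below is a minimal, self-contained sketch (not telegraf code; `precisionFor` is a hypothetical helper that mirrors the interval branch of `SetPrecision`) showing how the collection interval picks the rounding unit and how `time.Round` then snaps the metric timestamp. The timestamp is the same one the accumulator tests below use, so the printed values line up with the test expectations.

```go
package main

import (
	"fmt"
	"time"
)

// precisionFor mirrors the switch in the SetPrecision diff above: with no
// explicit precision, the collection interval picks the rounding unit,
// capped at one second.
func precisionFor(interval time.Duration) time.Duration {
	switch {
	case interval >= time.Second:
		return time.Second
	case interval >= time.Millisecond:
		return time.Millisecond
	case interval >= time.Microsecond:
		return time.Microsecond
	default:
		return time.Nanosecond
	}
}

func main() {
	// Same timestamp the accumulator tests use: 82912748ns past the second.
	ts := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	for _, interval := range []time.Duration{10 * time.Second, 250 * time.Millisecond} {
		p := precisionFor(interval)
		fmt.Printf("interval=%-6v precision=%-4v ts=%d\n",
			interval, p, ts.Round(p).UnixNano())
	}
	// interval=10s    precision=1s   ts=1139572800000000000
	// interval=250ms  precision=1ms  ts=1139572800083000000
}
```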
```diff
@@ -38,6 +38,128 @@ func TestAdd(t *testing.T) {
 		actual)
 }
 
+func TestAddNoPrecisionWithInterval(t *testing.T) {
+	a := accumulator{}
+	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
+	a.metrics = make(chan telegraf.Metric, 10)
+	defer close(a.metrics)
+	a.inputConfig = &internal_models.InputConfig{}
+
+	a.SetPrecision(0, time.Second)
+	a.Add("acctest", float64(101), map[string]string{})
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+
+	testm := <-a.metrics
+	actual := testm.String()
+	assert.Contains(t, actual, "acctest value=101")
+
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Contains(t, actual, "acctest,acc=test value=101")
+
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Equal(t,
+		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
+		actual)
+}
+
+func TestAddNoIntervalWithPrecision(t *testing.T) {
+	a := accumulator{}
+	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
+	a.metrics = make(chan telegraf.Metric, 10)
+	defer close(a.metrics)
+	a.inputConfig = &internal_models.InputConfig{}
+
+	a.SetPrecision(time.Second, time.Millisecond)
+	a.Add("acctest", float64(101), map[string]string{})
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+
+	testm := <-a.metrics
+	actual := testm.String()
+	assert.Contains(t, actual, "acctest value=101")
+
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Contains(t, actual, "acctest,acc=test value=101")
+
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Equal(t,
+		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
+		actual)
+}
+
+func TestAddDisablePrecision(t *testing.T) {
+	a := accumulator{}
+	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
+	a.metrics = make(chan telegraf.Metric, 10)
+	defer close(a.metrics)
+	a.inputConfig = &internal_models.InputConfig{}
+
+	a.SetPrecision(time.Second, time.Millisecond)
+	a.DisablePrecision()
+	a.Add("acctest", float64(101), map[string]string{})
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+
+	testm := <-a.metrics
+	actual := testm.String()
+	assert.Contains(t, actual, "acctest value=101")
+
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Contains(t, actual, "acctest,acc=test value=101")
+
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Equal(t,
+		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
+		actual)
+}
+
+func TestDifferentPrecisions(t *testing.T) {
+	a := accumulator{}
+	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
+	a.metrics = make(chan telegraf.Metric, 10)
+	defer close(a.metrics)
+	a.inputConfig = &internal_models.InputConfig{}
+
+	a.SetPrecision(0, time.Second)
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	testm := <-a.metrics
+	actual := testm.String()
+	assert.Equal(t,
+		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
+		actual)
+
+	a.SetPrecision(0, time.Millisecond)
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Equal(t,
+		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
+		actual)
+
+	a.SetPrecision(0, time.Microsecond)
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Equal(t,
+		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
+		actual)
+
+	a.SetPrecision(0, time.Nanosecond)
+	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	testm = <-a.metrics
+	actual = testm.String()
+	assert.Equal(t,
+		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
+		actual)
+}
+
 func TestAddDefaultTags(t *testing.T) {
 	a := accumulator{}
 	a.addDefaultTag("default", "tag")
```
```diff
@@ -118,6 +118,8 @@ func (a *Agent) gatherer(
 
 			acc := NewAccumulator(input.Config, metricC)
 			acc.SetDebug(a.Config.Agent.Debug)
+			acc.SetPrecision(a.Config.Agent.Precision.Duration,
+				a.Config.Agent.Interval.Duration)
 			acc.setDefaultTags(a.Config.Tags)
 
 			internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
@@ -201,6 +203,8 @@ func (a *Agent) Test() error {
 	for _, input := range a.Config.Inputs {
 		acc := NewAccumulator(input.Config, metricC)
 		acc.SetTrace(true)
+		acc.SetPrecision(a.Config.Agent.Precision.Duration,
+			a.Config.Agent.Interval.Duration)
 		acc.setDefaultTags(a.Config.Tags)
 
 		fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
@@ -264,13 +268,33 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
 			internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
 			a.flush()
 		case m := <-metricC:
-			for _, o := range a.Config.Outputs {
-				o.AddMetric(m)
+			for i, o := range a.Config.Outputs {
+				if i == len(a.Config.Outputs)-1 {
+					o.AddMetric(m)
+				} else {
+					o.AddMetric(copyMetric(m))
+				}
 			}
 		}
 	}
 }
 
+func copyMetric(m telegraf.Metric) telegraf.Metric {
+	t := time.Time(m.Time())
+
+	tags := make(map[string]string)
+	fields := make(map[string]interface{})
+	for k, v := range m.Tags() {
+		tags[k] = v
+	}
+	for k, v := range m.Fields() {
+		fields[k] = v
+	}
+
+	out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
+	return out
+}
+
 // Run runs the agent daemon, gathering every Interval
 func (a *Agent) Run(shutdown chan struct{}) error {
 	var wg sync.WaitGroup
@@ -289,6 +313,9 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 		case telegraf.ServiceInput:
 			acc := NewAccumulator(input.Config, metricC)
 			acc.SetDebug(a.Config.Agent.Debug)
+			// Service input plugins should set their own precision of their
+			// metrics.
+			acc.DisablePrecision()
 			acc.setDefaultTags(a.Config.Tags)
 			if err := p.Start(acc); err != nil {
 				log.Printf("Service for input %s failed to start, exiting\n%s\n",
```
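The copyMetric change deserves a word: Go maps are reference types, so if the flusher handed the same metric to every output, one output mutating a tag or field while serializing would corrupt what the others see. The new loop gives each output except the last its own copy (the last can safely take the original, since no one else reads it afterward). Here is a minimal standalone sketch of the aliasing hazard, with illustrative values that are not telegraf data:

```go
package main

import "fmt"

func main() {
	// One metric's tag map handed to two outputs without copying:
	tags := map[string]string{"host": "web01"}
	outA, outB := tags, tags // both names alias the same backing map

	outA["host"] = "web01_graphite" // output A rewrites a tag for its wire format
	fmt.Println(outB["host"])       // "web01_graphite": output B sees the mutation

	// The copyMetric approach: give each output its own map.
	copied := make(map[string]string, len(tags))
	for k, v := range tags {
		copied[k] = v
	}
	copied["host"] = "web01_influx"
	fmt.Println(outB["host"]) // still "web01_graphite": the copy is isolated
}
```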
```diff
@@ -52,6 +52,11 @@
   ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
   flush_jitter = "0s"
 
+  ## By default, precision will be set to the same timestamp order as the
+  ## collection interval, with the maximum being 1s.
+  ## Precision will NOT be used for service inputs, such as logparser and statsd.
+  ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
+  precision = ""
   ## Run telegraf in debug mode
   debug = false
   ## Run telegraf in quiet mode
@@ -75,12 +80,9 @@
   urls = ["http://localhost:8086"] # required
   ## The target database for metrics (telegraf will create it if not exists).
   database = "telegraf" # required
-  ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-  ## note: using "s" precision greatly improves InfluxDB compression.
-  precision = "s"
 
-  ## Retention policy to write to.
-  retention_policy = "default"
+  ## Retention policy to write to. Empty string writes to the default rp.
+  retention_policy = ""
   ## Write consistency (clusters only), can be: "any", "one", "quorom", "all"
   write_consistency = "any"
 
@@ -195,6 +197,8 @@
 # # Configuration for Graphite server to send metrics to
 # [[outputs.graphite]]
 #   ## TCP endpoint for your graphite instance.
+#   ## If multiple endpoints are configured, the output will be load balanced.
+#   ## Only one of the endpoints will be written to with each iteration.
 #   servers = ["localhost:2003"]
 #   ## Prefix metrics name
 #   prefix = ""
@@ -524,6 +528,19 @@
 #   socket_suffix = "asok"
 
 
+# # Read specific statistics per cgroup
+# [[inputs.cgroup]]
+#   ## Directories in which to look for files, globs are supported.
+#   # paths = [
+#   #   "/cgroup/memory",
+#   #   "/cgroup/memory/child1",
+#   #   "/cgroup/memory/child2/*",
+#   # ]
+#   ## cgroup stat fields, as file names, globs are supported.
+#   ## these file names are appended to each path from above.
+#   # files = ["memory.*usage*", "memory.limit_in_bytes"]
+
+
 # # Pull Metric Statistics from Amazon CloudWatch
 # [[inputs.cloudwatch]]
 #   ## Amazon Region
@@ -677,6 +694,13 @@
 #
 #   ## set cluster_health to true when you want to also obtain cluster level stats
 #   cluster_health = false
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
 
 
 # # Read metrics from one or more commands that can output to stdout
@@ -1138,7 +1162,7 @@
 #   count = 1 # required
 #   ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
 #   ping_interval = 0.0
-#   ## ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
+#   ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
 #   timeout = 1.0
 #   ## interface to send ping from (ping -I <INTERFACE>)
 #   interface = ""
@@ -1257,10 +1281,15 @@
 #   ## An array of urls to scrape metrics from.
 #   urls = ["http://localhost:9100/metrics"]
 #
-#   ## Use SSL but skip chain & host verification
-#   # insecure_skip_verify = false
 #   ## Use bearer token for authorization
 #   # bearer_token = /path/to/bearer/token
+#
+#   ## Optional SSL Config
+#   # ssl_ca = /path/to/cafile
+#   # ssl_cert = /path/to/certfile
+#   # ssl_key = /path/to/keyfile
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
 
 
 # # Reads last_run_summary.yaml file and converts to measurments
@@ -1488,12 +1517,6 @@
 # SERVICE INPUT PLUGINS #
 ###############################################################################
 
-# # A Github Webhook Event collector
-# [[inputs.github_webhooks]]
-#   ## Address and port to host Webhook listener on
-#   service_address = ":1618"
-
-
 # # Read metrics from Kafka topic(s)
 # [[inputs.kafka_consumer]]
 #   ## topic(s) to consume
@@ -1501,7 +1524,7 @@
 #   ## an array of Zookeeper connection strings
 #   zookeeper_peers = ["localhost:2181"]
 #   ## Zookeeper Chroot
-#   zookeeper_chroot = "/"
+#   zookeeper_chroot = ""
 #   ## the name of the consumer group
 #   consumer_group = "telegraf_metrics_consumers"
 #   ## Offset (must be either "oldest" or "newest")
@@ -1514,6 +1537,35 @@
 #   data_format = "influx"
 
 
+# # Stream and parse log file(s).
+# [[inputs.logparser]]
+#   ## Log files to parse.
+#   ## These accept standard unix glob matching rules, but with the addition of
+#   ## ** as a "super asterisk". ie:
+#   ##   /var/log/**.log     -> recursively find all .log files in /var/log
+#   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
+#   ##   /var/log/apache.log -> only tail the apache log file
+#   files = ["/var/log/influxdb/influxdb.log"]
+#   ## Read file from beginning.
+#   from_beginning = false
+#
+#   ## Parse logstash-style "grok" patterns:
+#   ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
+#   [inputs.logparser.grok]
+#     ## This is a list of patterns to check the given log file(s) for.
+#     ## Note that adding patterns here increases processing time. The most
+#     ## efficient configuration is to have one pattern per logparser.
+#     ## Other common built-in patterns are:
+#     ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
+#     ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+#     patterns = ["%{INFLUXDB_HTTPD_LOG}"]
+#     ## Full path(s) to custom pattern files.
+#     custom_pattern_files = []
+#     ## Custom patterns can also be defined here. Put one pattern per line.
+#     custom_patterns = '''
+#     '''
+
+
 # # Read metrics from MQTT topic(s)
 # [[inputs.mqtt_consumer]]
 #   servers = ["localhost:1883"]
@@ -1570,12 +1622,6 @@
 #   data_format = "influx"
 
 
-# # A Rollbar Webhook Event collector
-# [[inputs.rollbar_webhooks]]
-#   ## Address and port to host Webhook listener on
-#   service_address = ":1619"
-
-
 # # Statsd Server
 # [[inputs.statsd]]
 #   ## Address and port to host UDP listener on
@@ -1670,3 +1716,15 @@
 #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
 #   data_format = "influx"
 
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+#   ## Address and port to host Webhook listener on
+#   service_address = ":1619"
+#
+#   [inputs.webhooks.github]
+#     path = "/github"
+#
+#   [inputs.webhooks.rollbar]
+#     path = "/rollbar"
```
filter/filter.go: new file, 79 lines

```diff
@@ -0,0 +1,79 @@
+package filter
+
+import (
+	"strings"
+
+	"github.com/gobwas/glob"
+)
+
+type Filter interface {
+	Match(string) bool
+}
+
+// CompileFilter takes a list of string filters and returns a Filter interface
+// for matching a given string against the filter list. The filter list
+// supports glob matching too, ie:
+//
+//   f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
+//   f.Match("cpu")     // true
+//   f.Match("network") // true
+//   f.Match("memory")  // false
+//
+func CompileFilter(filters []string) (Filter, error) {
+	// return if there is nothing to compile
+	if len(filters) == 0 {
+		return nil, nil
+	}
+
+	// check if we can compile a non-glob filter
+	noGlob := true
+	for _, filter := range filters {
+		if hasMeta(filter) {
+			noGlob = false
+			break
+		}
+	}
+
+	switch {
+	case noGlob:
+		// return non-globbing filter if not needed.
+		return compileFilterNoGlob(filters), nil
+	case len(filters) == 1:
+		return glob.Compile(filters[0])
+	default:
+		return glob.Compile("{" + strings.Join(filters, ",") + "}")
+	}
+}
+
+// hasMeta reports whether path contains any magic glob characters.
+func hasMeta(s string) bool {
+	return strings.IndexAny(s, "*?[") >= 0
+}
+
+type filter struct {
+	m map[string]struct{}
+}
+
+func (f *filter) Match(s string) bool {
+	_, ok := f.m[s]
+	return ok
+}
+
+type filtersingle struct {
+	s string
+}
+
+func (f *filtersingle) Match(s string) bool {
+	return f.s == s
+}
+
+func compileFilterNoGlob(filters []string) Filter {
+	if len(filters) == 1 {
+		return &filtersingle{s: filters[0]}
+	}
+	out := filter{m: make(map[string]struct{})}
+	for _, filter := range filters {
+		out.m[filter] = struct{}{}
+	}
+	return &out
+}
```
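A quick usage sketch of the new package as this file defines it (assuming it lands at `github.com/influxdata/telegraf/filter`, as the file path suggests; the filter values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// A mixed list: plain names alone would compile to the fast
	// equality/map filter, but "net*" contains a glob metacharacter,
	// so the whole list takes the glob path.
	f, err := filter.CompileFilter([]string{"cpu", "mem", "net*"})
	if err != nil {
		log.Fatal(err)
	}
	if f == nil {
		return // an empty filter list compiles to nil, nil
	}
	for _, name := range []string{"cpu", "network", "memory"} {
		fmt.Printf("%-8s -> %v\n", name, f.Match(name))
	}
	// cpu      -> true
	// network  -> true
	// memory   -> false
}
```

The `noGlob` fast path is the point of the package: a map lookup or string compare is roughly constant time, while glob matching walks the pattern, and the benchmarks in the test file below measure exactly that split.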
filter/filter_test.go: new file, 96 lines

```diff
@@ -0,0 +1,96 @@
+package filter
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCompileFilter(t *testing.T) {
+	f, err := CompileFilter([]string{})
+	assert.NoError(t, err)
+	assert.Nil(t, f)
+
+	f, err = CompileFilter([]string{"cpu"})
+	assert.NoError(t, err)
+	assert.True(t, f.Match("cpu"))
+	assert.False(t, f.Match("cpu0"))
+	assert.False(t, f.Match("mem"))
+
+	f, err = CompileFilter([]string{"cpu*"})
+	assert.NoError(t, err)
+	assert.True(t, f.Match("cpu"))
+	assert.True(t, f.Match("cpu0"))
+	assert.False(t, f.Match("mem"))
+
+	f, err = CompileFilter([]string{"cpu", "mem"})
+	assert.NoError(t, err)
+	assert.True(t, f.Match("cpu"))
+	assert.False(t, f.Match("cpu0"))
+	assert.True(t, f.Match("mem"))
+
+	f, err = CompileFilter([]string{"cpu", "mem", "net*"})
+	assert.NoError(t, err)
+	assert.True(t, f.Match("cpu"))
+	assert.False(t, f.Match("cpu0"))
+	assert.True(t, f.Match("mem"))
+	assert.True(t, f.Match("network"))
+}
+
+var benchbool bool
+
+func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
+	f, _ := CompileFilter([]string{"cpu"})
+	var tmp bool
+	for n := 0; n < b.N; n++ {
+		tmp = f.Match("network")
+	}
+	benchbool = tmp
+}
+
+func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
+	f, _ := CompileFilter([]string{"cpu"})
+	var tmp bool
+	for n := 0; n < b.N; n++ {
+		tmp = f.Match("cpu")
+	}
+	benchbool = tmp
+}
+
+func BenchmarkFilter(b *testing.B) {
+	f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
+	var tmp bool
+	for n := 0; n < b.N; n++ {
+		tmp = f.Match("network")
+	}
+	benchbool = tmp
+}
+
+func BenchmarkFilterNoGlob(b *testing.B) {
+	f, _ := CompileFilter([]string{"cpu", "mem", "net"})
+	var tmp bool
+	for n := 0; n < b.N; n++ {
+		tmp = f.Match("net")
+	}
+	benchbool = tmp
+}
+
+func BenchmarkFilter2(b *testing.B) {
+	f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
+		"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
+	var tmp bool
+	for n := 0; n < b.N; n++ {
+		tmp = f.Match("network")
+	}
+	benchbool = tmp
+}
+
+func BenchmarkFilter2NoGlob(b *testing.B) {
+	f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
+		"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
+	var tmp bool
+	for n := 0; n < b.N; n++ {
+		tmp = f.Match("net")
+	}
+	benchbool = tmp
+}
```
```diff
@@ -77,6 +77,14 @@ type AgentConfig struct {
 	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
 	RoundInterval bool
 
+	// By default, precision will be set to the same timestamp order as the
+	// collection interval, with the maximum being 1s.
+	//   ie, when interval = "10s", precision will be "1s"
+	//       when interval = "250ms", precision will be "1ms"
+	// Precision will NOT be used for service inputs. It is up to each individual
+	// service input to set the timestamp at the appropriate precision.
+	Precision internal.Duration
+
 	// CollectionJitter is used to jitter the collection by a random amount.
 	// Each plugin will sleep for a random time within jitter before collecting.
 	// This can be used to avoid many plugins querying things like sysfs at the
@@ -108,11 +116,10 @@ type AgentConfig struct {
 	// does _not_ deactivate FlushInterval.
 	FlushBufferWhenFull bool
 
-	// TODO(cam): Remove UTC and Precision parameters, they are no longer
+	// TODO(cam): Remove UTC and parameter, they are no longer
 	// valid for the agent config. Leaving them here for now for backwards-
 	// compatability
 	UTC bool `toml:"utc"`
-	Precision string
 
 	// Debug is the option for running in debug mode
 	Debug bool
@@ -209,6 +216,11 @@ var header = `# Telegraf Configuration
   ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
   flush_jitter = "0s"
 
+  ## By default, precision will be set to the same timestamp order as the
+  ## collection interval, with the maximum being 1s.
+  ## Precision will NOT be used for service inputs, such as logparser and statsd.
+  ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
+  precision = ""
   ## Run telegraf in debug mode
   debug = false
   ## Run telegraf in quiet mode
@@ -527,6 +539,13 @@ func (c *Config) LoadConfig(path string) error {
 	return nil
 }
 
+// trimBOM trims the Byte-Order-Marks from the beginning of the file.
+// this is for Windows compatability only.
+// see https://github.com/influxdata/telegraf/issues/1378
+func trimBOM(f []byte) []byte {
+	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
+}
+
 // parseFile loads a TOML configuration from a provided path and
 // returns the AST produced from the TOML parser. When loading the file, it
 // will find environment variables and replace them.
@@ -535,6 +554,8 @@ func parseFile(fpath string) (*ast.Table, error) {
 	if err != nil {
 		return nil, err
 	}
+	// ugh windows why
+	contents = trimBOM(contents)
 
 	env_vars := envVarRe.FindAll(contents, -1)
 	for _, env_var := range env_vars {
```
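The BOM fix above is small enough to verify standalone. A minimal sketch (not telegraf code; the config snippet is illustrative) of how a UTF-8 byte-order mark written by a Windows editor breaks a naive prefix check, and how the same `bytes.TrimPrefix` call repairs it:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Config file saved by a Windows editor: a UTF-8 BOM precedes the text.
	raw := []byte("\xef\xbb\xbf[agent]\n  interval = \"10s\"\n")

	// A TOML parser sees the BOM as garbage before the first table header.
	fmt.Println(bytes.HasPrefix(raw, []byte("[agent]"))) // false

	// The trimBOM approach from the diff: strip the three BOM bytes if present.
	clean := bytes.TrimPrefix(raw, []byte("\xef\xbb\xbf"))
	fmt.Println(bytes.HasPrefix(clean, []byte("[agent]"))) // true
}
```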
@@ -17,8 +17,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||||
@@ -135,8 +133,8 @@ func GetTLSConfig(
|
|||||||
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
|
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New(fmt.Sprintf(
|
return nil, errors.New(fmt.Sprintf(
|
||||||
"Could not load TLS client key/certificate: %s",
|
"Could not load TLS client key/certificate from %s:%s: %s",
|
||||||
err))
|
SSLKey, SSLCert, err))
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Certificates = []tls.Certificate{cert}
|
t.Certificates = []tls.Certificate{cert}
|
||||||
@@ -209,27 +207,6 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
     }
 }
 
-// CompileFilter takes a list of glob "filters", ie:
-//   ["MAIN.*", "CPU.*", "NET"]
-// and compiles them into a glob object. This glob object can
-// then be used to match keys to the filter.
-func CompileFilter(filters []string) (glob.Glob, error) {
-    var out glob.Glob
-
-    // return if there is nothing to compile
-    if len(filters) == 0 {
-        return out, nil
-    }
-
-    var err error
-    if len(filters) == 1 {
-        out, err = glob.Compile(filters[0])
-    } else {
-        out, err = glob.Compile("{" + strings.Join(filters, ",") + "}")
-    }
-    return out, err
-}
-
 // RandomSleep will sleep for a random amount of time up to max.
 // If the shutdown channel is closed, it will return before it has finished
 // sleeping.
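The removed helper now lives in a dedicated filter package (see the internal_models changes further down, which switch to `filter.CompileFilter`). As a hedged usage sketch of the contract it provides — assuming the relocated package keeps the glob semantics shown above, where a list compiles to one alternation glob `{MAIN.*,CPU.*,NET}` that matches if any pattern does:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter" // new home of CompileFilter, per the diff below
)

func main() {
	f, err := filter.CompileFilter([]string{"MAIN.*", "CPU.*", "NET"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("CPU.user")) // true: "CPU.*" matches
	fmt.Println(f.Match("MEM.used")) // false: no pattern matches
}
```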
@@ -107,37 +107,6 @@ func TestRunError(t *testing.T) {
     assert.Error(t, err)
 }
 
-func TestCompileFilter(t *testing.T) {
-    f, err := CompileFilter([]string{})
-    assert.NoError(t, err)
-    assert.Nil(t, f)
-
-    f, err = CompileFilter([]string{"cpu"})
-    assert.NoError(t, err)
-    assert.True(t, f.Match("cpu"))
-    assert.False(t, f.Match("cpu0"))
-    assert.False(t, f.Match("mem"))
-
-    f, err = CompileFilter([]string{"cpu*"})
-    assert.NoError(t, err)
-    assert.True(t, f.Match("cpu"))
-    assert.True(t, f.Match("cpu0"))
-    assert.False(t, f.Match("mem"))
-
-    f, err = CompileFilter([]string{"cpu", "mem"})
-    assert.NoError(t, err)
-    assert.True(t, f.Match("cpu"))
-    assert.False(t, f.Match("cpu0"))
-    assert.True(t, f.Match("mem"))
-
-    f, err = CompileFilter([]string{"cpu", "mem", "net*"})
-    assert.NoError(t, err)
-    assert.True(t, f.Match("cpu"))
-    assert.False(t, f.Match("cpu0"))
-    assert.True(t, f.Match("mem"))
-    assert.True(t, f.Match("network"))
-}
-
 func TestRandomSleep(t *testing.T) {
     // test that zero max returns immediately
     s := time.Now()
@@ -3,80 +3,78 @@ package internal_models
 import (
     "fmt"
 
-    "github.com/gobwas/glob"
-
     "github.com/influxdata/telegraf"
-    "github.com/influxdata/telegraf/internal"
+    "github.com/influxdata/telegraf/filter"
 )
 
 // TagFilter is the name of a tag, and the values on which to filter
 type TagFilter struct {
     Name   string
     Filter []string
-    filter glob.Glob
+    filter filter.Filter
 }
 
 // Filter containing drop/pass and tagdrop/tagpass rules
 type Filter struct {
     NameDrop []string
-    nameDrop glob.Glob
+    nameDrop filter.Filter
     NamePass []string
-    namePass glob.Glob
+    namePass filter.Filter
 
     FieldDrop []string
-    fieldDrop glob.Glob
+    fieldDrop filter.Filter
     FieldPass []string
-    fieldPass glob.Glob
+    fieldPass filter.Filter
 
     TagDrop []TagFilter
     TagPass []TagFilter
 
     TagExclude []string
-    tagExclude glob.Glob
+    tagExclude filter.Filter
     TagInclude []string
-    tagInclude glob.Glob
+    tagInclude filter.Filter
 
     IsActive bool
 }
 
-// Compile all Filter lists into glob.Glob objects.
+// Compile all Filter lists into filter.Filter objects.
 func (f *Filter) CompileFilter() error {
     var err error
-    f.nameDrop, err = internal.CompileFilter(f.NameDrop)
+    f.nameDrop, err = filter.CompileFilter(f.NameDrop)
     if err != nil {
         return fmt.Errorf("Error compiling 'namedrop', %s", err)
     }
-    f.namePass, err = internal.CompileFilter(f.NamePass)
+    f.namePass, err = filter.CompileFilter(f.NamePass)
     if err != nil {
         return fmt.Errorf("Error compiling 'namepass', %s", err)
     }
 
-    f.fieldDrop, err = internal.CompileFilter(f.FieldDrop)
+    f.fieldDrop, err = filter.CompileFilter(f.FieldDrop)
     if err != nil {
         return fmt.Errorf("Error compiling 'fielddrop', %s", err)
     }
-    f.fieldPass, err = internal.CompileFilter(f.FieldPass)
+    f.fieldPass, err = filter.CompileFilter(f.FieldPass)
     if err != nil {
         return fmt.Errorf("Error compiling 'fieldpass', %s", err)
     }
 
-    f.tagExclude, err = internal.CompileFilter(f.TagExclude)
+    f.tagExclude, err = filter.CompileFilter(f.TagExclude)
     if err != nil {
         return fmt.Errorf("Error compiling 'tagexclude', %s", err)
     }
-    f.tagInclude, err = internal.CompileFilter(f.TagInclude)
+    f.tagInclude, err = filter.CompileFilter(f.TagInclude)
     if err != nil {
         return fmt.Errorf("Error compiling 'taginclude', %s", err)
     }
 
     for i, _ := range f.TagDrop {
-        f.TagDrop[i].filter, err = internal.CompileFilter(f.TagDrop[i].Filter)
+        f.TagDrop[i].filter, err = filter.CompileFilter(f.TagDrop[i].Filter)
         if err != nil {
             return fmt.Errorf("Error compiling 'tagdrop', %s", err)
         }
     }
     for i, _ := range f.TagPass {
-        f.TagPass[i].filter, err = internal.CompileFilter(f.TagPass[i].Filter)
+        f.TagPass[i].filter, err = filter.CompileFilter(f.TagPass[i].Filter)
         if err != nil {
             return fmt.Errorf("Error compiling 'tagpass', %s", err)
         }
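A hedged sketch of how these compiled pass/drop filters are typically consulted when deciding whether a measurement survives; the function name and exact precedence below are illustrative, not quoted from the repo:

```go
// shouldNamePass is a sketch: an explicit pass list wins, otherwise a
// drop list removes matching names, otherwise everything passes.
func shouldNamePass(f *Filter, name string) bool {
	if f.namePass != nil {
		return f.namePass.Match(name) // only explicitly passed names survive
	}
	if f.nameDrop != nil {
		return !f.nameDrop.Match(name) // dropped names are filtered out
	}
	return true
}
```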
@@ -253,51 +253,6 @@ func TestFilter_TagDrop(t *testing.T) {
     }
 }
 
-func TestFilter_CompileFilterError(t *testing.T) {
-    f := Filter{
-        NameDrop: []string{"", ""},
-    }
-    assert.Error(t, f.CompileFilter())
-    f = Filter{
-        NamePass: []string{"", ""},
-    }
-    assert.Error(t, f.CompileFilter())
-    f = Filter{
-        FieldDrop: []string{"", ""},
-    }
-    assert.Error(t, f.CompileFilter())
-    f = Filter{
-        FieldPass: []string{"", ""},
-    }
-    assert.Error(t, f.CompileFilter())
-    f = Filter{
-        TagExclude: []string{"", ""},
-    }
-    assert.Error(t, f.CompileFilter())
-    f = Filter{
-        TagInclude: []string{"", ""},
-    }
-    assert.Error(t, f.CompileFilter())
-    filters := []TagFilter{
-        TagFilter{
-            Name:   "cpu",
-            Filter: []string{"{foobar}"},
-        }}
-    f = Filter{
-        TagDrop: filters,
-    }
-    require.Error(t, f.CompileFilter())
-    filters = []TagFilter{
-        TagFilter{
-            Name:   "cpu",
-            Filter: []string{"{foobar}"},
-        }}
-    f = Filter{
-        TagPass: filters,
-    }
-    require.Error(t, f.CompileFilter())
-}
-
 func TestFilter_ShouldMetricsPass(t *testing.T) {
     m := testutil.TestMetric(1, "testmetric")
     f := Filter{
@@ -138,7 +138,7 @@ func (ro *RunningOutput) Write() error {
 }
 
 func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
-    if len(metrics) == 0 {
+    if metrics == nil || len(metrics) == 0 {
         return nil
     }
     start := time.Now()
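For readers unfamiliar with Go slice semantics: `len` of a nil slice is already defined as 0, so the added nil guard is defensive rather than load-bearing — the early return fires in both cases either way. A two-line demonstration:

```go
var metrics []telegraf.Metric // a nil slice
fmt.Println(len(metrics))     // 0: the nil test is redundant but harmless
```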
@@ -45,14 +45,9 @@ func NewMetric(
     name string,
     tags map[string]string,
     fields map[string]interface{},
-    t ...time.Time,
+    t time.Time,
 ) (Metric, error) {
-    var T time.Time
-    if len(t) > 0 {
-        T = t[0]
-    }
-
-    pt, err := client.NewPoint(name, tags, fields, T)
+    pt, err := client.NewPoint(name, tags, fields, t)
     if err != nil {
         return nil, err
     }
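With the variadic timestamp gone, every NewMetric caller must now pass an explicit time — the TestNewMetricStringNoTime test removed just below exercised the old zero-argument form. A minimal before/after sketch (fragment, names as in the tests):

```go
// before (relied on the implicit zero time; no longer compiles):
//   m, err := telegraf.NewMetric("cpu", tags, fields)
// after (explicit timestamp required):
m, err := telegraf.NewMetric("cpu", tags, fields, time.Now())
if err != nil {
	return err
}
_ = m
```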
@@ -51,23 +51,6 @@ func TestNewMetricString(t *testing.T) {
     assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
 }
 
-func TestNewMetricStringNoTime(t *testing.T) {
-    tags := map[string]string{
-        "host": "localhost",
-    }
-    fields := map[string]interface{}{
-        "usage_idle": float64(99),
-    }
-    m, err := NewMetric("cpu", tags, fields)
-    assert.NoError(t, err)
-
-    lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
-    assert.Equal(t, lineProto, m.String())
-
-    lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
-    assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
-}
-
 func TestNewMetricFailNaN(t *testing.T) {
     now := time.Now()
 
@@ -1,104 +1,19 @@
 package aerospike
 
 import (
-    "bytes"
-    "encoding/binary"
-    "fmt"
-    "github.com/influxdata/telegraf"
-    "github.com/influxdata/telegraf/plugins/inputs"
     "net"
     "strconv"
     "strings"
     "sync"
+    "time"
 
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/internal/errchan"
+    "github.com/influxdata/telegraf/plugins/inputs"
+
+    as "github.com/sparrc/aerospike-client-go"
 )
 
-const (
-    MSG_HEADER_SIZE = 8
-    MSG_TYPE        = 1 // Info is 1
-    MSG_VERSION     = 2
-)
-
-var (
-    STATISTICS_COMMAND = []byte("statistics\n")
-    NAMESPACES_COMMAND = []byte("namespaces\n")
-)
-
-type aerospikeMessageHeader struct {
-    Version uint8
-    Type    uint8
-    DataLen [6]byte
-}
-
-type aerospikeMessage struct {
-    aerospikeMessageHeader
-    Data []byte
-}
-
-// Taken from aerospike-client-go/types/message.go
-func (msg *aerospikeMessage) Serialize() []byte {
-    msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
-    buf := bytes.NewBuffer([]byte{})
-    binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
-    binary.Write(buf, binary.BigEndian, msg.Data[:])
-    return buf.Bytes()
-}
-
-type aerospikeInfoCommand struct {
-    msg *aerospikeMessage
-}
-
-// Taken from aerospike-client-go/info.go
-func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
-    responses := make(map[string]string)
-    offset := int64(0)
-    begin := int64(0)
-
-    dataLen := int64(len(nfo.msg.Data))
-
-    // Create reusable StringBuilder for performance.
-    for offset < dataLen {
-        b := nfo.msg.Data[offset]
-
-        if b == '\t' {
-            name := nfo.msg.Data[begin:offset]
-            offset++
-            begin = offset
-
-            // Parse field value.
-            for offset < dataLen {
-                if nfo.msg.Data[offset] == '\n' {
-                    break
-                }
-                offset++
-            }
-
-            if offset > begin {
-                value := nfo.msg.Data[begin:offset]
-                responses[string(name)] = string(value)
-            } else {
-                responses[string(name)] = ""
-            }
-            offset++
-            begin = offset
-        } else if b == '\n' {
-            if offset > begin {
-                name := nfo.msg.Data[begin:offset]
-                responses[string(name)] = ""
-            }
-            offset++
-            begin = offset
-        } else {
-            offset++
-        }
-    }
-
-    if offset > begin {
-        name := nfo.msg.Data[begin:offset]
-        responses[string(name)] = ""
-    }
-    return responses, nil
-}
-
 type Aerospike struct {
     Servers []string
 }
@@ -115,7 +30,7 @@ func (a *Aerospike) SampleConfig() string {
 }
 
 func (a *Aerospike) Description() string {
-    return "Read stats from an aerospike server"
+    return "Read stats from aerospike server(s)"
 }
 
 func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
@@ -124,214 +39,90 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
     }
 
     var wg sync.WaitGroup
-    var outerr error
+    errChan := errchan.New(len(a.Servers))
+    wg.Add(len(a.Servers))
 
     for _, server := range a.Servers {
-        wg.Add(1)
-        go func(server string) {
+        go func(serv string) {
             defer wg.Done()
-            outerr = a.gatherServer(server, acc)
+            errChan.C <- a.gatherServer(serv, acc)
         }(server)
     }
 
     wg.Wait()
-    return outerr
+    return errChan.Error()
 }
 
-func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
-    aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
+func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
+    host, port, err := net.SplitHostPort(hostport)
     if err != nil {
-        return fmt.Errorf("Aerospike info failed: %s", err)
+        return err
     }
-    readAerospikeStats(aerospikeInfo, acc, host, "")
-    namespaces, err := getList(NAMESPACES_COMMAND, host)
+    iport, err := strconv.Atoi(port)
     if err != nil {
-        return fmt.Errorf("Aerospike namespace list failed: %s", err)
+        iport = 3000
     }
-    for ix := range namespaces {
-        nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
-        if err != nil {
-            return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
-        }
-        readAerospikeStats(nsInfo, acc, host, namespaces[ix])
-    }
+    c, err := as.NewClient(host, iport)
+    if err != nil {
+        return err
+    }
+    defer c.Close()
+
+    nodes := c.GetNodes()
+    for _, n := range nodes {
+        tags := map[string]string{
+            "node_name":      n.GetName(),
+            "aerospike_host": hostport,
+        }
+        fields := make(map[string]interface{})
+        stats, err := as.RequestNodeStats(n)
+        if err != nil {
+            return err
+        }
+        for k, v := range stats {
+            if iv, err := strconv.ParseInt(v, 10, 64); err == nil {
+                fields[strings.Replace(k, "-", "_", -1)] = iv
+            }
+        }
+        acc.AddFields("aerospike_node", fields, tags, time.Now())
+
+        info, err := as.RequestNodeInfo(n, "namespaces")
+        if err != nil {
+            return err
+        }
+        namespaces := strings.Split(info["namespaces"], ";")
+
+        for _, namespace := range namespaces {
+            nTags := copyTags(tags)
+            nTags["namespace"] = namespace
+            nFields := make(map[string]interface{})
+            info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
+            if err != nil {
+                continue
+            }
+            stats := strings.Split(info["namespace/"+namespace], ";")
+            for _, stat := range stats {
+                parts := strings.Split(stat, "=")
+                if len(parts) < 2 {
+                    continue
+                }
+                if iv, err := strconv.ParseInt(parts[1], 10, 64); err == nil {
+                    nFields[strings.Replace(parts[0], "-", "_", -1)] = iv
+                }
+            }
+            acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
+        }
+    }
     return nil
 }
 
-func getMap(key []byte, host string) (map[string]string, error) {
-    data, err := get(key, host)
-    if err != nil {
-        return nil, fmt.Errorf("Failed to get data: %s", err)
-    }
-    parsed, err := unmarshalMapInfo(data, string(key))
-    if err != nil {
-        return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
-    }
-
-    return parsed, nil
-}
-
-func getList(key []byte, host string) ([]string, error) {
-    data, err := get(key, host)
-    if err != nil {
-        return nil, fmt.Errorf("Failed to get data: %s", err)
-    }
-    parsed, err := unmarshalListInfo(data, string(key))
-    if err != nil {
-        return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
-    }
-
-    return parsed, nil
-}
-
-func get(key []byte, host string) (map[string]string, error) {
-    var err error
-    var data map[string]string
-
-    asInfo := &aerospikeInfoCommand{
-        msg: &aerospikeMessage{
-            aerospikeMessageHeader: aerospikeMessageHeader{
-                Version: uint8(MSG_VERSION),
-                Type:    uint8(MSG_TYPE),
-                DataLen: msgLenToBytes(int64(len(key))),
-            },
-            Data: key,
-        },
-    }
-
-    cmd := asInfo.msg.Serialize()
-    addr, err := net.ResolveTCPAddr("tcp", host)
-    if err != nil {
-        return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
-    }
-
-    conn, err := net.DialTCP("tcp", nil, addr)
-    if err != nil {
-        return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
-    }
-    defer conn.Close()
-
-    _, err = conn.Write(cmd)
-    if err != nil {
-        return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
-    }
-
-    msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
-    _, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
-    if err != nil {
-        return data, fmt.Errorf("Failed to read header: %s", err)
-    }
-    err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
-    if err != nil {
-        return data, fmt.Errorf("Failed to unmarshal header: %s", err)
-    }
-
-    msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)
-
-    if int64(len(asInfo.msg.Data)) != msgLen {
-        asInfo.msg.Data = make([]byte, msgLen)
-    }
-
-    _, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
-    if err != nil {
-        return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
-    }
-
-    data, err = asInfo.parseMultiResponse()
-    if err != nil {
-        return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
-    }
-
-    return data, err
-}
-
-func readAerospikeStats(
-    stats map[string]string,
-    acc telegraf.Accumulator,
-    host string,
-    namespace string,
-) {
-    fields := make(map[string]interface{})
-    tags := map[string]string{
-        "aerospike_host": host,
-        "namespace":      "_service",
-    }
-
-    if namespace != "" {
-        tags["namespace"] = namespace
-    }
-    for key, value := range stats {
-        // We are going to ignore all string based keys
-        val, err := strconv.ParseInt(value, 10, 64)
-        if err == nil {
-            if strings.Contains(key, "-") {
-                key = strings.Replace(key, "-", "_", -1)
-            }
-            fields[key] = val
-        }
-    }
-    acc.AddFields("aerospike", fields, tags)
-}
-
-func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
-    key = strings.TrimSuffix(key, "\n")
-    res := map[string]string{}
-
-    v, exists := infoMap[key]
-    if !exists {
-        return res, fmt.Errorf("Key '%s' missing from info", key)
-    }
-
-    values := strings.Split(v, ";")
-    for i := range values {
-        kv := strings.Split(values[i], "=")
-        if len(kv) > 1 {
-            res[kv[0]] = kv[1]
-        }
-    }
-
-    return res, nil
-}
-
-func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
-    key = strings.TrimSuffix(key, "\n")
-
-    v, exists := infoMap[key]
-    if !exists {
-        return []string{}, fmt.Errorf("Key '%s' missing from info", key)
-    }
-
-    values := strings.Split(v, ";")
-    return values, nil
-}
-
-func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
-    var r int
-    for total < length {
-        r, err = c.Read(buffer[total:length])
-        total += r
-        if err != nil {
-            break
-        }
-    }
-    return
-}
-
-// Taken from aerospike-client-go/types/message.go
-func msgLenToBytes(DataLen int64) [6]byte {
-    b := make([]byte, 8)
-    binary.BigEndian.PutUint64(b, uint64(DataLen))
-    res := [6]byte{}
-    copy(res[:], b[2:])
-    return res
-}
-
-// Taken from aerospike-client-go/types/message.go
-func msgLenFromBytes(buf [6]byte) int64 {
-    nbytes := append([]byte{0, 0}, buf[:]...)
-    DataLen := binary.BigEndian.Uint64(nbytes)
-    return int64(DataLen)
-}
+func copyTags(m map[string]string) map[string]string {
+    out := make(map[string]string)
+    for k, v := range m {
+        out[k] = v
+    }
+    return out
+}
 
 func init() {
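The errchan helper adopted above gives each goroutine one buffered error slot and folds the results into a single error after wg.Wait(). A minimal self-contained sketch of that pattern — not necessarily the helper's exact implementation in the repo:

```go
package errchan // a sketch of the aggregation pattern, not the exact telegraf helper

import (
	"errors"
	"strings"
)

// ErrChan buffers one error slot per goroutine so senders never block.
type ErrChan struct{ C chan error }

func New(n int) *ErrChan { return &ErrChan{C: make(chan error, n)} }

// Error drains whatever has been sent (call it after the goroutines are
// done, e.g. after wg.Wait()) and folds the non-nil errors into one.
func (e *ErrChan) Error() error {
	msgs := []string{}
	for {
		select {
		case err := <-e.C:
			if err != nil {
				msgs = append(msgs, err.Error())
			}
		default:
			if len(msgs) == 0 {
				return nil
			}
			return errors.New(strings.Join(msgs, ", "))
		}
	}
}
```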
@@ -1,7 +1,6 @@
 package aerospike
 
 import (
-    "reflect"
     "testing"
 
     "github.com/influxdata/telegraf/testutil"
@@ -23,96 +22,29 @@ func TestAerospikeStatistics(t *testing.T) {
     err := a.Gather(&acc)
     require.NoError(t, err)
 
-    // Only use a few of the metrics
-    asMetrics := []string{
-        "transactions",
-        "stat_write_errs",
-        "stat_read_reqs",
-        "stat_write_reqs",
-    }
-
-    for _, metric := range asMetrics {
-        assert.True(t, acc.HasIntField("aerospike", metric), metric)
-    }
-
+    assert.True(t, acc.HasMeasurement("aerospike_node"))
+    assert.True(t, acc.HasMeasurement("aerospike_namespace"))
+    assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
 }
 
-func TestAerospikeMsgLenFromToBytes(t *testing.T) {
-    var i int64 = 8
-    assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
+func TestAerospikeStatisticsPartialErr(t *testing.T) {
+    if testing.Short() {
+        t.Skip("Skipping integration test in short mode")
     }
 
-func TestReadAerospikeStatsNoNamespace(t *testing.T) {
-    // Also test for re-writing
+    a := &Aerospike{
+        Servers: []string{
+            testutil.GetLocalHost() + ":3000",
+            testutil.GetLocalHost() + ":9999",
+        },
+    }
+
     var acc testutil.Accumulator
-    stats := map[string]string{
-        "stat-write-errs": "12345",
-        "stat_read_reqs":  "12345",
-    }
-    readAerospikeStats(stats, &acc, "host1", "")
-
-    fields := map[string]interface{}{
-        "stat_write_errs": int64(12345),
-        "stat_read_reqs":  int64(12345),
-    }
-    tags := map[string]string{
-        "aerospike_host": "host1",
-        "namespace":      "_service",
-    }
-    acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
-}
+    err := a.Gather(&acc)
+    require.Error(t, err)
 
-func TestReadAerospikeStatsNamespace(t *testing.T) {
-    var acc testutil.Accumulator
-    stats := map[string]string{
-        "stat_write_errs": "12345",
-        "stat_read_reqs":  "12345",
-    }
-    readAerospikeStats(stats, &acc, "host1", "test")
-
-    fields := map[string]interface{}{
-        "stat_write_errs": int64(12345),
-        "stat_read_reqs":  int64(12345),
-    }
-    tags := map[string]string{
-        "aerospike_host": "host1",
-        "namespace":      "test",
-    }
-    acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
-}
+    assert.True(t, acc.HasMeasurement("aerospike_node"))
+    assert.True(t, acc.HasMeasurement("aerospike_namespace"))
+    assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
 
-func TestAerospikeUnmarshalList(t *testing.T) {
-    i := map[string]string{
-        "test": "one;two;three",
-    }
-
-    expected := []string{"one", "two", "three"}
-
-    list, err := unmarshalListInfo(i, "test2")
-    assert.True(t, err != nil)
-
-    list, err = unmarshalListInfo(i, "test")
-    assert.True(t, err == nil)
-    equal := true
-    for ix := range expected {
-        if list[ix] != expected[ix] {
-            equal = false
-            break
-        }
-    }
-    assert.True(t, equal)
-}
-
-func TestAerospikeUnmarshalMap(t *testing.T) {
-    i := map[string]string{
-        "test": "key1=value1;key2=value2",
-    }
-
-    expected := map[string]string{
-        "key1": "value1",
-        "key2": "value2",
-    }
-    m, err := unmarshalMapInfo(i, "test")
-    assert.True(t, err == nil)
-    assert.True(t, reflect.DeepEqual(m, expected))
 }
@@ -6,6 +6,7 @@ import (
     _ "github.com/influxdata/telegraf/plugins/inputs/bcache"
     _ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
     _ "github.com/influxdata/telegraf/plugins/inputs/ceph"
+    _ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
     _ "github.com/influxdata/telegraf/plugins/inputs/chrony"
     _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
    _ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
@@ -19,7 +20,6 @@ import (
     _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
     _ "github.com/influxdata/telegraf/plugins/inputs/exec"
     _ "github.com/influxdata/telegraf/plugins/inputs/filestat"
-    _ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
     _ "github.com/influxdata/telegraf/plugins/inputs/graylog"
     _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
     _ "github.com/influxdata/telegraf/plugins/inputs/http_response"
@@ -29,6 +29,7 @@ import (
     _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
     _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
     _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
+    _ "github.com/influxdata/telegraf/plugins/inputs/logparser"
     _ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
     _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
     _ "github.com/influxdata/telegraf/plugins/inputs/memcached"
@@ -40,6 +41,7 @@ import (
     _ "github.com/influxdata/telegraf/plugins/inputs/net_response"
     _ "github.com/influxdata/telegraf/plugins/inputs/nginx"
     _ "github.com/influxdata/telegraf/plugins/inputs/nsq"
+    _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
     _ "github.com/influxdata/telegraf/plugins/inputs/nstat"
     _ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
     _ "github.com/influxdata/telegraf/plugins/inputs/passenger"
@@ -56,7 +58,6 @@ import (
     _ "github.com/influxdata/telegraf/plugins/inputs/redis"
     _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
     _ "github.com/influxdata/telegraf/plugins/inputs/riak"
-    _ "github.com/influxdata/telegraf/plugins/inputs/rollbar_webhooks"
     _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
     _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
     _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
@@ -69,6 +70,7 @@ import (
     _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
     _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
     _ "github.com/influxdata/telegraf/plugins/inputs/varnish"
+    _ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
     _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
     _ "github.com/influxdata/telegraf/plugins/inputs/zfs"
     _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
@@ -8,7 +8,6 @@ import (
     "net/url"
     "strconv"
     "strings"
-    "sync"
     "time"
 
     "github.com/influxdata/telegraf"
@@ -38,8 +37,8 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
         n.Urls = []string{"http://localhost/server-status?auto"}
     }
 
-    var wg sync.WaitGroup
     var outerr error
+    var errch = make(chan error)
 
     for _, u := range n.Urls {
         addr, err := url.Parse(u)
@@ -47,14 +46,17 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
             return fmt.Errorf("Unable to parse address '%s': %s", u, err)
         }
 
-        wg.Add(1)
         go func(addr *url.URL) {
-            defer wg.Done()
-            outerr = n.gatherUrl(addr, acc)
+            errch <- n.gatherUrl(addr, acc)
         }(addr)
     }
 
-    wg.Wait()
+    // Drain channel, waiting for all requests to finish and save last error.
+    for range n.Urls {
+        if err := <-errch; err != nil {
+            outerr = err
+        }
+    }
 
     return outerr
 }
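The shape of that change in isolation: each goroutine performs exactly one send on an unbuffered channel, and the collector performs exactly as many receives, so every goroutine is accounted for and the last error wins. A generic self-contained sketch (the fetch parameter is hypothetical, not apache-specific):

```go
// gatherAll fans a hypothetical fetch out over urls and keeps the last error.
func gatherAll(urls []string, fetch func(string) error) error {
	errch := make(chan error)
	for _, u := range urls {
		go func(u string) { errch <- fetch(u) }(u) // exactly one send each
	}
	var last error
	for range urls { // exactly one receive per send, so nothing leaks
		if err := <-errch; err != nil {
			last = err
		}
	}
	return last
}
```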
@@ -36,7 +36,8 @@ func TestHTTPApache(t *testing.T) {
     defer ts.Close()
 
     a := Apache{
-        Urls: []string{ts.URL},
+        // Fetch it 2 times to catch possible data races.
+        Urls: []string{ts.URL, ts.URL},
     }
 
     var acc testutil.Accumulator
@@ -148,7 +148,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
     tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))
     // Requests with wildcards for keyspace or table names will return nested
     // maps in the json response
-    if tokens["type"] == "Table" && (tokens["keyspace"] == "*" ||
+    if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" ||
         tokens["scope"] == "*") {
         if valuesMap, ok := out["value"]; ok {
             for k, v := range valuesMap.(map[string]interface{}) {
plugins/inputs/cgroup/README.md (new file, 59 lines)
@@ -0,0 +1,59 @@
+# CGroup Input Plugin For Telegraf Agent
+
+This input plugin will capture specific statistics per cgroup.
+
+Following file formats are supported:
+
+* Single value
+
+```
+VAL\n
+```
+
+* New line separated values
+
+```
+VAL0\n
+VAL1\n
+```
+
+* Space separated values
+
+```
+VAL0 VAL1 ...\n
+```
+
+* New line separated key-space-value's
+
+```
+KEY0 VAL0\n
+KEY1 VAL1\n
+```
+
+
+### Tags:
+
+Measurements don't have any specific tags unless you define them at the telegraf level (defaults). We
+used to have the path listed as a tag, but to keep cardinality in check it's easier to move this
+value to a field. Thanks @sebito91!
+
+
+### Configuration:
+
+```
+# [[inputs.cgroup]]
+  # paths = [
+  #   "/cgroup/memory",          # root cgroup
+  #   "/cgroup/memory/child1",   # container cgroup
+  #   "/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
+  # ]
+  # files = ["memory.*usage*", "memory.limit_in_bytes"]
+
+# [[inputs.cgroup]]
+  # paths = [
+  #   "/cgroup/cpu",             # root cgroup
+  #   "/cgroup/cpu/*",           # all container cgroups
+  #   "/cgroup/cpu/*/*",         # all children cgroups under each container cgroup
+  # ]
+  # files = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
+```
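Given the documented formats and the plugin's field naming (file name, plus `.N` or `.KEY` suffixes for multi-value files, with `path` stored as a field per the note above), a gathered point might look roughly like this in line protocol; the values are illustrative only:

```
cgroup memory.limit_in_bytes=223372036854771712i,memory.stat.cache=1739362304i,path="/cgroup/memory/child1" 1468946500000000000
```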
plugins/inputs/cgroup/cgroup.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package cgroup
+
+import (
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type CGroup struct {
+    Paths []string `toml:"paths"`
+    Files []string `toml:"files"`
+}
+
+var sampleConfig = `
+  ## Directories in which to look for files, globs are supported.
+  # paths = [
+  #   "/cgroup/memory",
+  #   "/cgroup/memory/child1",
+  #   "/cgroup/memory/child2/*",
+  # ]
+  ## cgroup stat fields, as file names, globs are supported.
+  ## these file names are appended to each path from above.
+  # files = ["memory.*usage*", "memory.limit_in_bytes"]
+`
+
+func (g *CGroup) SampleConfig() string {
+    return sampleConfig
+}
+
+func (g *CGroup) Description() string {
+    return "Read specific statistics per cgroup"
+}
+
+func init() {
+    inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
+}
plugins/inputs/cgroup/cgroup_linux.go (new file, 243 lines)
@@ -0,0 +1,243 @@
+// +build linux
+
+package cgroup
+
+import (
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path"
+    "path/filepath"
+    "regexp"
+    "strconv"
+
+    "github.com/influxdata/telegraf"
+)
+
+const metricName = "cgroup"
+
+func (g *CGroup) Gather(acc telegraf.Accumulator) error {
+    list := make(chan pathInfo)
+    go g.generateDirs(list)
+
+    for dir := range list {
+        if dir.err != nil {
+            return dir.err
+        }
+        if err := g.gatherDir(dir.path, acc); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
+    fields := make(map[string]interface{})
+
+    list := make(chan pathInfo)
+    go g.generateFiles(dir, list)
+
+    for file := range list {
+        if file.err != nil {
+            return file.err
+        }
+
+        raw, err := ioutil.ReadFile(file.path)
+        if err != nil {
+            return err
+        }
+        if len(raw) == 0 {
+            continue
+        }
+
+        fd := fileData{data: raw, path: file.path}
+        if err := fd.parse(fields); err != nil {
+            return err
+        }
+    }
+    fields["path"] = dir
+
+    acc.AddFields(metricName, fields, nil)
+
+    return nil
+}
+
+// ======================================================================
+
+type pathInfo struct {
+    path string
+    err  error
+}
+
+func isDir(path string) (bool, error) {
+    result, err := os.Stat(path)
+    if err != nil {
+        return false, err
+    }
+    return result.IsDir(), nil
+}
+
+func (g *CGroup) generateDirs(list chan<- pathInfo) {
+    for _, dir := range g.Paths {
+        // getting all dirs that match the pattern 'dir'
+        items, err := filepath.Glob(dir)
+        if err != nil {
+            list <- pathInfo{err: err}
+            return
+        }
+
+        for _, item := range items {
+            ok, err := isDir(item)
+            if err != nil {
+                list <- pathInfo{err: err}
+                return
+            }
+            // supply only dirs
+            if ok {
+                list <- pathInfo{path: item}
+            }
+        }
+    }
+    close(list)
+}
+
+func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) {
+    for _, file := range g.Files {
+        // getting all file paths that match the pattern 'dir + file'
+        // path.Base make sure that file variable does not contains part of path
+        items, err := filepath.Glob(path.Join(dir, path.Base(file)))
+        if err != nil {
+            list <- pathInfo{err: err}
+            return
+        }
+
+        for _, item := range items {
+            ok, err := isDir(item)
+            if err != nil {
+                list <- pathInfo{err: err}
+                return
+            }
+            // supply only files not dirs
+            if !ok {
+                list <- pathInfo{path: item}
+            }
+        }
+    }
+    close(list)
+}
+
+// ======================================================================
+
+type fileData struct {
+    data []byte
+    path string
+}
+
+func (fd *fileData) format() (*fileFormat, error) {
+    for _, ff := range fileFormats {
+        ok, err := ff.match(fd.data)
+        if err != nil {
+            return nil, err
+        }
+        if ok {
+            return &ff, nil
+        }
+    }
+
+    return nil, fmt.Errorf("%v: unknown file format", fd.path)
+}
+
+func (fd *fileData) parse(fields map[string]interface{}) error {
+    format, err := fd.format()
+    if err != nil {
+        return err
+    }
+
+    format.parser(filepath.Base(fd.path), fields, fd.data)
+    return nil
+}
+
+// ======================================================================
+
+type fileFormat struct {
+    name    string
+    pattern string
+    parser  func(measurement string, fields map[string]interface{}, b []byte)
+}
+
+const keyPattern = "[[:alpha:]_]+"
+const valuePattern = "[\\d-]+"
+
+var fileFormats = [...]fileFormat{
+    // VAL\n
+    fileFormat{
+        name:    "Single value",
+        pattern: "^" + valuePattern + "\n$",
+        parser: func(measurement string, fields map[string]interface{}, b []byte) {
+            re := regexp.MustCompile("^(" + valuePattern + ")\n$")
+            matches := re.FindAllStringSubmatch(string(b), -1)
+            fields[measurement] = numberOrString(matches[0][1])
+        },
+    },
+    // VAL0\n
+    // VAL1\n
+    // ...
+    fileFormat{
+        name:    "New line separated values",
+        pattern: "^(" + valuePattern + "\n){2,}$",
+        parser: func(measurement string, fields map[string]interface{}, b []byte) {
+            re := regexp.MustCompile("(" + valuePattern + ")\n")
+            matches := re.FindAllStringSubmatch(string(b), -1)
+            for i, v := range matches {
+                fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
+            }
+        },
+    },
+    // VAL0 VAL1 ...\n
+    fileFormat{
+        name:    "Space separated values",
+        pattern: "^(" + valuePattern + " )+\n$",
+        parser: func(measurement string, fields map[string]interface{}, b []byte) {
+            re := regexp.MustCompile("(" + valuePattern + ") ")
+            matches := re.FindAllStringSubmatch(string(b), -1)
+            for i, v := range matches {
+                fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
+            }
+        },
+    },
+    // KEY0 VAL0\n
+    // KEY1 VAL1\n
+    // ...
+    fileFormat{
+        name:    "New line separated key-space-value's",
+        pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$",
+        parser: func(measurement string, fields map[string]interface{}, b []byte) {
+            re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n")
+            matches := re.FindAllStringSubmatch(string(b), -1)
+            for _, v := range matches {
+                fields[measurement+"."+v[1]] = numberOrString(v[2])
+            }
+        },
+    },
+}
+
+func numberOrString(s string) interface{} {
+    i, err := strconv.Atoi(s)
+    if err == nil {
+        return i
+    }
+
+    return s
+}
+
+func (f fileFormat) match(b []byte) (bool, error) {
+    ok, err := regexp.Match(f.pattern, b)
+    if err != nil {
+        return false, err
+    }
+    if ok {
+        return true, nil
+    }
+    return false, nil
+}
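To make the format dispatch above concrete, here is a hedged standalone sketch of the key-space-value path on a `memory.stat`-style payload, using the same regex patterns as the plugin code (with `numberOrString` reduced to a comment):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A memory.stat-style payload: "KEY VAL\n" pairs.
	b := []byte("cache 1739362304\nrss 1775325184\n")
	fields := map[string]interface{}{}
	re := regexp.MustCompile("([[:alpha:]_]+) ([\\d-]+)\n")
	for _, m := range re.FindAllStringSubmatch(string(b), -1) {
		// the real parser runs numberOrString here to convert to int
		fields["memory.stat."+m[1]] = m[2]
	}
	fmt.Println(len(fields)) // 2: memory.stat.cache and memory.stat.rss
}
```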
plugins/inputs/cgroup/cgroup_notlinux.go (new file, 11 lines)
@@ -0,0 +1,11 @@
+// +build !linux
+
+package cgroup
+
+import (
+    "github.com/influxdata/telegraf"
+)
+
+func (g *CGroup) Gather(acc telegraf.Accumulator) error {
+    return nil
+}
plugins/inputs/cgroup/cgroup_test.go (new file, 194 lines)
@@ -0,0 +1,194 @@
+// +build linux
+
+package cgroup
+
+import (
+    "fmt"
+    "testing"
+
+    "github.com/influxdata/telegraf/testutil"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "reflect"
+)
+
+var cg1 = &CGroup{
+    Paths: []string{"testdata/memory"},
+    Files: []string{
+        "memory.empty",
+        "memory.max_usage_in_bytes",
+        "memory.limit_in_bytes",
+        "memory.stat",
+        "memory.use_hierarchy",
+        "notify_on_release",
+    },
+}
+
+func assertContainsFields(a *testutil.Accumulator, t *testing.T, measurement string, fieldSet []map[string]interface{}) {
+    a.Lock()
+    defer a.Unlock()
+
+    numEquals := 0
+    for _, p := range a.Metrics {
+        if p.Measurement == measurement {
+            for _, fields := range fieldSet {
+                if reflect.DeepEqual(fields, p.Fields) {
+                    numEquals++
+                }
+            }
+        }
+    }
+
+    if numEquals != len(fieldSet) {
+        assert.Fail(t, fmt.Sprintf("only %d of %d are equal", numEquals, len(fieldSet)))
+    }
+}
+
+func TestCgroupStatistics_1(t *testing.T) {
+    var acc testutil.Accumulator
+
+    err := cg1.Gather(&acc)
+    require.NoError(t, err)
+
+    fields := map[string]interface{}{
+        "memory.stat.cache":           1739362304123123123,
+        "memory.stat.rss":             1775325184,
+        "memory.stat.rss_huge":        778043392,
+        "memory.stat.mapped_file":     421036032,
+        "memory.stat.dirty":           -307200,
+        "memory.max_usage_in_bytes.0": 0,
+        "memory.max_usage_in_bytes.1": -1,
+        "memory.max_usage_in_bytes.2": 2,
+        "memory.limit_in_bytes":       223372036854771712,
+        "memory.use_hierarchy":        "12-781",
+        "notify_on_release":           0,
+        "path":                        "testdata/memory",
+    }
+    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
+}
+
+// ======================================================================
+
+var cg2 = &CGroup{
+    Paths: []string{"testdata/cpu"},
+    Files: []string{"cpuacct.usage_percpu"},
+}
+
+func TestCgroupStatistics_2(t *testing.T) {
+    var acc testutil.Accumulator
+
+    err := cg2.Gather(&acc)
+    require.NoError(t, err)
+
+    fields := map[string]interface{}{
+        "cpuacct.usage_percpu.0": -1452543795404,
+        "cpuacct.usage_percpu.1": 1376681271659,
+        "cpuacct.usage_percpu.2": 1450950799997,
+        "cpuacct.usage_percpu.3": -1473113374257,
+        "path":                   "testdata/cpu",
+    }
+    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
+}
+
+// ======================================================================
+
+var cg3 = &CGroup{
+    Paths: []string{"testdata/memory/*"},
+    Files: []string{"memory.limit_in_bytes"},
+}
+
+func TestCgroupStatistics_3(t *testing.T) {
+    var acc testutil.Accumulator
+
+    err := cg3.Gather(&acc)
+    require.NoError(t, err)
+
+    fields := map[string]interface{}{
+        "memory.limit_in_bytes": 223372036854771712,
+        "path":                  "testdata/memory/group_1",
+    }
+
+    fieldsTwo := map[string]interface{}{
+        "memory.limit_in_bytes": 223372036854771712,
+        "path":                  "testdata/memory/group_2",
+    }
+    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
+}
+
+// ======================================================================
+
+var cg4 = &CGroup{
+    Paths: []string{"testdata/memory/*/*", "testdata/memory/group_2"},
+    Files: []string{"memory.limit_in_bytes"},
+}
+
+func TestCgroupStatistics_4(t *testing.T) {
+    var acc testutil.Accumulator
+
+    err := cg4.Gather(&acc)
+    require.NoError(t, err)
+
+    fields := map[string]interface{}{
+        "memory.limit_in_bytes": 223372036854771712,
+        "path":                  "testdata/memory/group_1/group_1_1",
+    }
+
+    fieldsTwo := map[string]interface{}{
+        "memory.limit_in_bytes": 223372036854771712,
+        "path":                  "testdata/memory/group_1/group_1_2",
+    }
+
+    fieldsThree := map[string]interface{}{
+        "memory.limit_in_bytes": 223372036854771712,
+        "path":                  "testdata/memory/group_2",
+    }
+
+    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo, fieldsThree})
+}
+
+// ======================================================================
+
+var cg5 = &CGroup{
+    Paths: []string{"testdata/memory/*/group_1_1"},
+    Files: []string{"memory.limit_in_bytes"},
+}
+
+func TestCgroupStatistics_5(t *testing.T) {
+    var acc testutil.Accumulator
+
+    err := cg5.Gather(&acc)
+    require.NoError(t, err)
+
+    fields := map[string]interface{}{
+        "memory.limit_in_bytes": 223372036854771712,
+        "path":                  "testdata/memory/group_1/group_1_1",
+    }
+
+    fieldsTwo := map[string]interface{}{
+        "memory.limit_in_bytes": 223372036854771712,
+        "path":                  "testdata/memory/group_2/group_1_1",
+    }
+    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
+}
+
+// ======================================================================
+
+var cg6 = &CGroup{
+    Paths: []string{"testdata/memory"},
+    Files: []string{"memory.us*", "*/memory.kmem.*"},
+}
+
+func TestCgroupStatistics_6(t *testing.T) {
+    var acc testutil.Accumulator
+
+    err := cg6.Gather(&acc)
+    require.NoError(t, err)
+
+    fields := map[string]interface{}{
+        "memory.usage_in_bytes":      3513667584,
+        "memory.use_hierarchy":       "12-781",
+        "memory.kmem.limit_in_bytes": 9223372036854771712,
+        "path":                       "testdata/memory",
+    }
+    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
+}
plugins/inputs/cgroup/testdata/blkio/blkio.io_serviced (new file, vendored, 1 line)
@@ -0,0 +1 @@
+Total 0

plugins/inputs/cgroup/testdata/blkio/blkio.throttle.io_serviced (new file, vendored, 131 lines)
@@ -0,0 +1,131 @@
+11:0 Read 0
+11:0 Write 0
+11:0 Sync 0
+11:0 Async 0
+11:0 Total 0
+8:0 Read 49134
+8:0 Write 216703
+8:0 Sync 177906
+8:0 Async 87931
+8:0 Total 265837
+7:7 Read 0
+7:7 Write 0
+7:7 Sync 0
+7:7 Async 0
+7:7 Total 0
+7:6 Read 0
+7:6 Write 0
+7:6 Sync 0
+7:6 Async 0
+7:6 Total 0
+7:5 Read 0
+7:5 Write 0
+7:5 Sync 0
+7:5 Async 0
+7:5 Total 0
+7:4 Read 0
+7:4 Write 0
+7:4 Sync 0
+7:4 Async 0
+7:4 Total 0
+7:3 Read 0
+7:3 Write 0
+7:3 Sync 0
+7:3 Async 0
+7:3 Total 0
+7:2 Read 0
+7:2 Write 0
+7:2 Sync 0
+7:2 Async 0
+7:2 Total 0
+7:1 Read 0
+7:1 Write 0
+7:1 Sync 0
+7:1 Async 0
+7:1 Total 0
+7:0 Read 0
+7:0 Write 0
+7:0 Sync 0
+7:0 Async 0
+7:0 Total 0
+1:15 Read 3
+1:15 Write 0
+1:15 Sync 0
+1:15 Async 3
+1:15 Total 3
+1:14 Read 3
+1:14 Write 0
+1:14 Sync 0
+1:14 Async 3
+1:14 Total 3
+1:13 Read 3
+1:13 Write 0
+1:13 Sync 0
+1:13 Async 3
+1:13 Total 3
+1:12 Read 3
+1:12 Write 0
+1:12 Sync 0
+1:12 Async 3
+1:12 Total 3
+1:11 Read 3
+1:11 Write 0
+1:11 Sync 0
+1:11 Async 3
+1:11 Total 3
+1:10 Read 3
+1:10 Write 0
+1:10 Sync 0
+1:10 Async 3
+1:10 Total 3
+1:9 Read 3
+1:9 Write 0
+1:9 Sync 0
+1:9 Async 3
+1:9 Total 3
+1:8 Read 3
+1:8 Write 0
+1:8 Sync 0
+1:8 Async 3
+1:8 Total 3
+1:7 Read 3
+1:7 Write 0
+1:7 Sync 0
+1:7 Async 3
+1:7 Total 3
+1:6 Read 3
+1:6 Write 0
+1:6 Sync 0
+1:6 Async 3
+1:6 Total 3
+1:5 Read 3
+1:5 Write 0
+1:5 Sync 0
+1:5 Async 3
+1:5 Total 3
+1:4 Read 3
+1:4 Write 0
+1:4 Sync 0
+1:4 Async 3
+1:4 Total 3
+1:3 Read 3
+1:3 Write 0
+1:3 Sync 0
+1:3 Async 3
+1:3 Total 3
+1:2 Read 3
+1:2 Write 0
+1:2 Sync 0
+1:2 Async 3
+1:2 Total 3
+1:1 Read 3
+1:1 Write 0
+1:1 Sync 0
+1:1 Async 3
+1:1 Total 3
+1:0 Read 3
+1:0 Write 0
+1:0 Sync 0
+1:0 Async 3
+1:0 Total 3
+Total 265885
1
plugins/inputs/cgroup/testdata/cpu/cpu.cfs_quota_us
vendored
Normal file
1
plugins/inputs/cgroup/testdata/cpu/cpu.cfs_quota_us
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
-1
|
||||||
1
plugins/inputs/cgroup/testdata/cpu/cpuacct.usage_percpu
vendored
Normal file
1
plugins/inputs/cgroup/testdata/cpu/cpuacct.usage_percpu
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
-1452543795404 1376681271659 1450950799997 -1473113374257
|
||||||
1
plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
223372036854771712
|
||||||
5
plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.stat
vendored
Normal file
5
plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.stat
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
cache 1739362304123123123
|
||||||
|
rss 1775325184
|
||||||
|
rss_huge 778043392
|
||||||
|
mapped_file 421036032
|
||||||
|
dirty -307200
|
||||||
1
plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
223372036854771712
|
||||||
5
plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.stat
vendored
Normal file
5
plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.stat
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
cache 1739362304123123123
|
||||||
|
rss 1775325184
|
||||||
|
rss_huge 778043392
|
||||||
|
mapped_file 421036032
|
||||||
|
dirty -307200
|
||||||
1
plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
9223372036854771712
|
||||||
1
plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.max_usage_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.max_usage_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
0
|
||||||
1
plugins/inputs/cgroup/testdata/memory/group_1/memory.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/group_1/memory.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
223372036854771712
|
||||||
5
plugins/inputs/cgroup/testdata/memory/group_1/memory.stat
vendored
Normal file
5
plugins/inputs/cgroup/testdata/memory/group_1/memory.stat
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
cache 1739362304123123123
|
||||||
|
rss 1775325184
|
||||||
|
rss_huge 778043392
|
||||||
|
mapped_file 421036032
|
||||||
|
dirty -307200
|
||||||
1
plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
223372036854771712
|
||||||
5
plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.stat
vendored
Normal file
5
plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.stat
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
cache 1739362304123123123
|
||||||
|
rss 1775325184
|
||||||
|
rss_huge 778043392
|
||||||
|
mapped_file 421036032
|
||||||
|
dirty -307200
|
||||||
1
plugins/inputs/cgroup/testdata/memory/group_2/memory.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/group_2/memory.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
223372036854771712
|
||||||
5
plugins/inputs/cgroup/testdata/memory/group_2/memory.stat
vendored
Normal file
5
plugins/inputs/cgroup/testdata/memory/group_2/memory.stat
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
cache 1739362304123123123
|
||||||
|
rss 1775325184
|
||||||
|
rss_huge 778043392
|
||||||
|
mapped_file 421036032
|
||||||
|
dirty -307200
|
||||||
0
plugins/inputs/cgroup/testdata/memory/memory.empty
vendored
Normal file
0
plugins/inputs/cgroup/testdata/memory/memory.empty
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/memory.kmem.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/memory.kmem.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
9223372036854771712
|
||||||
1
plugins/inputs/cgroup/testdata/memory/memory.limit_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/memory.limit_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
223372036854771712
|
||||||
3
plugins/inputs/cgroup/testdata/memory/memory.max_usage_in_bytes
vendored
Normal file
3
plugins/inputs/cgroup/testdata/memory/memory.max_usage_in_bytes
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
0
|
||||||
|
-1
|
||||||
|
2
|
||||||
8
plugins/inputs/cgroup/testdata/memory/memory.numa_stat
vendored
Normal file
8
plugins/inputs/cgroup/testdata/memory/memory.numa_stat
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
total=858067 N0=858067
|
||||||
|
file=406254 N0=406254
|
||||||
|
anon=451792 N0=451792
|
||||||
|
unevictable=21 N0=21
|
||||||
|
hierarchical_total=858067 N0=858067
|
||||||
|
hierarchical_file=406254 N0=406254
|
||||||
|
hierarchical_anon=451792 N0=451792
|
||||||
|
hierarchical_unevictable=21 N0=21
|
||||||
5
plugins/inputs/cgroup/testdata/memory/memory.stat
vendored
Normal file
5
plugins/inputs/cgroup/testdata/memory/memory.stat
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
cache 1739362304123123123
|
||||||
|
rss 1775325184
|
||||||
|
rss_huge 778043392
|
||||||
|
mapped_file 421036032
|
||||||
|
dirty -307200
|
||||||
1
plugins/inputs/cgroup/testdata/memory/memory.usage_in_bytes
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/memory.usage_in_bytes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
3513667584
|
||||||
1
plugins/inputs/cgroup/testdata/memory/memory.use_hierarchy
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/memory.use_hierarchy
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
12-781
|
||||||
1
plugins/inputs/cgroup/testdata/memory/notify_on_release
vendored
Normal file
1
plugins/inputs/cgroup/testdata/memory/notify_on_release
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
0
|
||||||
@@ -11,6 +11,13 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference
   servers = ["http://localhost:9200"]
   local = true
   cluster_health = true
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 ```

 ### Measurements & Fields:
@@ -8,6 +8,7 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"

@@ -67,25 +68,31 @@ const sampleConfig = `
   ## set cluster_health to true when you want to also obtain cluster level stats
   cluster_health = false
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 `

 // Elasticsearch is a plugin to read stats from one or many Elasticsearch
 // servers.
 type Elasticsearch struct {
 	Local         bool
 	Servers       []string
 	ClusterHealth bool
-	client        *http.Client
+	SSLCA              string `toml:"ssl_ca"`   // Path to CA file
+	SSLCert            string `toml:"ssl_cert"` // Path to host cert file
+	SSLKey             string `toml:"ssl_key"`  // Path to cert key file
+	InsecureSkipVerify bool   // Use SSL but skip chain & host verification
+	client             *http.Client
 }

 // NewElasticsearch return a new instance of Elasticsearch
 func NewElasticsearch() *Elasticsearch {
-	tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
-	client := &http.Client{
-		Transport: tr,
-		Timeout:   time.Duration(4 * time.Second),
-	}
-	return &Elasticsearch{client: client}
+	return &Elasticsearch{}
 }

 // SampleConfig returns sample configuration for this plugin.

@@ -101,6 +108,15 @@ func (e *Elasticsearch) Description() string {
 // Gather reads the stats from Elasticsearch and writes it to the
 // Accumulator.
 func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
+	if e.client == nil {
+		client, err := e.createHttpClient()
+
+		if err != nil {
+			return err
+		}
+		e.client = client
+	}
+
 	errChan := errchan.New(len(e.Servers))
 	var wg sync.WaitGroup
 	wg.Add(len(e.Servers))

@@ -128,6 +144,23 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 	return errChan.Error()
 }

+func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
+	tlsCfg, err := internal.GetTLSConfig(e.SSLCert, e.SSLKey, e.SSLCA, e.InsecureSkipVerify)
+	if err != nil {
+		return nil, err
+	}
+	tr := &http.Transport{
+		ResponseHeaderTimeout: time.Duration(3 * time.Second),
+		TLSClientConfig:       tlsCfg,
+	}
+	client := &http.Client{
+		Transport: tr,
+		Timeout:   time.Duration(4 * time.Second),
+	}
+
+	return client, nil
+}
+
 func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
 	nodeStats := &struct {
 		ClusterName string `json:"cluster_name"`
@@ -38,7 +38,7 @@ func (t *transportMock) CancelRequest(_ *http.Request) {
 }

 func TestElasticsearch(t *testing.T) {
-	es := NewElasticsearch()
+	es := newElasticsearchWithClient()
 	es.Servers = []string{"http://example.com:9200"}
 	es.client.Transport = newTransportMock(http.StatusOK, statsResponse)

@@ -67,7 +67,7 @@ func TestElasticsearch(t *testing.T) {
 }

 func TestGatherClusterStats(t *testing.T) {
-	es := NewElasticsearch()
+	es := newElasticsearchWithClient()
 	es.Servers = []string{"http://example.com:9200"}
 	es.ClusterHealth = true
 	es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)

@@ -87,3 +87,9 @@ func TestGatherClusterStats(t *testing.T) {
 		v2IndexExpected,
 		map[string]string{"index": "v2"})
 }
+
+func newElasticsearchWithClient() *Elasticsearch {
+	es := NewElasticsearch()
+	es.client = &http.Client{}
+	return es
+}
@@ -48,8 +48,6 @@ type Exec struct {

 	parser parsers.Parser

-	wg sync.WaitGroup
-
 	runner  Runner
 	errChan chan error
 }

@@ -119,8 +117,8 @@ func (c CommandRunner) Run(
 	return out.Bytes(), nil
 }

-func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator) {
-	defer e.wg.Done()
+func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
+	defer wg.Done()

 	out, err := e.runner.Run(e, command, acc)
 	if err != nil {

@@ -151,6 +149,7 @@ func (e *Exec) SetParser(parser parsers.Parser) {
 }

 func (e *Exec) Gather(acc telegraf.Accumulator) error {
+	var wg sync.WaitGroup
 	// Legacy single command support
 	if e.Command != "" {
 		e.Commands = append(e.Commands, e.Command)

@@ -177,8 +176,12 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
 			// There were matches, so we'll append each match together with
 			// the arguments to the commands slice
 			for _, match := range matches {
-				commands = append(
-					commands, strings.Join([]string{match, cmdAndArgs[1]}, " "))
+				if len(cmdAndArgs) == 1 {
+					commands = append(commands, match)
+				} else {
+					commands = append(commands,
+						strings.Join([]string{match, cmdAndArgs[1]}, " "))
+				}
 			}
 		}
 	}

@@ -186,11 +189,11 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
 	errChan := errchan.New(len(commands))
 	e.errChan = errChan.C

-	e.wg.Add(len(commands))
+	wg.Add(len(commands))
 	for _, command := range commands {
-		go e.ProcessCommand(command, acc)
+		go e.ProcessCommand(command, acc, &wg)
 	}
-	e.wg.Wait()
+	wg.Wait()
 	return errChan.Error()
 }
@@ -22,7 +22,7 @@ from the same topic in parallel.
   ## Offset (must be either "oldest" or "newest")
   offset = "oldest"

   ## Data format to consume.

   ## Each data format has its own unique set of configuration options, read
   ## more about them here:

@@ -32,11 +32,5 @@ from the same topic in parallel.

 ## Testing

-Running integration tests requires running Zookeeper & Kafka. The following
-commands assume you're on OS X & using [boot2docker](http://boot2docker.io/) or docker-machine through [Docker Toolbox](https://www.docker.com/docker-toolbox).
-
-To start Kafka & Zookeeper:
-
-```
-docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip || docker-machine ip <your_machine_name>` --env ADVERTISED_PORT=9092 spotify/kafka
-```
+Running integration tests requires running Zookeeper & Kafka. See Makefile
+for kafka container command.

@@ -50,7 +50,7 @@ var sampleConfig = `
   ## an array of Zookeeper connection strings
   zookeeper_peers = ["localhost:2181"]
   ## Zookeeper Chroot
-  zookeeper_chroot = "/"
+  zookeeper_chroot = ""
   ## the name of the consumer group
   consumer_group = "telegraf_metrics_consumers"
   ## Offset (must be either "oldest" or "newest")
91 plugins/inputs/logparser/README.md Normal file
@@ -0,0 +1,91 @@
# logparser Input Plugin

The logparser plugin streams and parses the given logfiles. Currently it can
only parse "grok" patterns, which also support regex patterns.

### Configuration:

```toml
[[inputs.logparser]]
  ## Log files to parse.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ##   /var/log/**.log     -> recursively find all .log files in /var/log
  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
  ##   /var/log/apache.log -> only tail the apache log file
  files = ["/var/log/influxdb/influxdb.log"]
  ## Read file from beginning.
  from_beginning = false

  ## Parse logstash-style "grok" patterns:
  ##   Telegraf builtin parsing patterns: https://goo.gl/dkay10
  [inputs.logparser.grok]
    ## This is a list of patterns to check the given log file(s) for.
    ## Note that adding patterns here increases processing time. The most
    ## efficient configuration is to have one file & pattern per logparser.
    patterns = ["%{INFLUXDB_HTTPD_LOG}"]
    ## Full path(s) to custom pattern files.
    custom_pattern_files = []
    ## Custom patterns can also be defined here. Put one pattern per line.
    custom_patterns = '''
    '''
```

> **Note:** The InfluxDB log pattern in the default configuration only works for Influx versions 1.0.0-beta1 or higher.

## Grok Parser

The grok parser uses a slightly modified version of logstash "grok" patterns,
with the format `%{<capture_syntax>[:<semantic_name>][:<modifier>]}`.

Telegraf has many of its own
[built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/grok/patterns/influx-patterns),
as well as supporting
[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).

The best way to get acquainted with grok patterns is to read the logstash docs,
which are available here:
https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html

If you need help building patterns to match your logs,
you will find the http://grokdebug.herokuapp.com application quite useful!

By default all named captures are converted into string fields.
Modifiers can be used to convert captures to other types or tags.
Timestamp modifiers can be used to convert captures to the timestamp of the
parsed metric. A runnable sketch using several of these modifiers follows
this README.

- Available modifiers:
  - string (default if nothing is specified)
  - int
  - float
  - duration (ie, 5.23ms gets converted to int nanoseconds)
  - tag (converts the field into a tag)
  - drop (drops the field completely)
- Timestamp modifiers:
  - ts-ansic ("Mon Jan _2 15:04:05 2006")
  - ts-unix ("Mon Jan _2 15:04:05 MST 2006")
  - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
  - ts-rfc822 ("02 Jan 06 15:04 MST")
  - ts-rfc822z ("02 Jan 06 15:04 -0700")
  - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
  - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
  - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
  - ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
  - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
  - ts-httpd ("02/Jan/2006:15:04:05 -0700")
  - ts-epoch (seconds since unix epoch)
  - ts-epochnano (nanoseconds since unix epoch)
  - ts-"CUSTOM"

CUSTOM time layouts must be within quotes and be the representation of the
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
See https://golang.org/pkg/time/#Parse for more details.
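As a quick illustration of the modifier syntax described in the README above, here is a minimal Go sketch that drives the grok parser directly. The pattern and log line are adapted from the parser's own tests in this changeset; the import path assumes the file layout added here.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/plugins/inputs/logparser/grok"
)

func main() {
	// TEST_LOG_A combines three modifiers: myfloat becomes a float field,
	// response_code becomes a tag, and response_time is parsed as a
	// duration and stored as integer nanoseconds.
	p := &grok.Parser{
		Patterns: []string{"%{TEST_LOG_A}"},
		CustomPatterns: `
			DURATION %{NUMBER}[nuµm]?s
			RESPONSE_CODE %{NUMBER:response_code:tag}
			RESPONSE_TIME %{DURATION:response_time:duration}
			TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
		`,
	}
	if err := p.Compile(); err != nil {
		log.Fatal(err)
	}

	// ParseLine returns a nil metric when no pattern matches the line.
	m, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
	if err != nil || m == nil {
		log.Fatal("line did not match")
	}
	// fields: myfloat=1.25, clientip="192.168.1.1", response_time=5432
	// tags:   response_code="200"
	fmt.Println(m.Fields(), m.Tags())
}
```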
394 plugins/inputs/logparser/grok/grok.go Normal file
@@ -0,0 +1,394 @@
package grok

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/vjeantet/grok"

	"github.com/influxdata/telegraf"
)

var timeFormats = map[string]string{
	"ts-ansic":       "Mon Jan _2 15:04:05 2006",
	"ts-unix":        "Mon Jan _2 15:04:05 MST 2006",
	"ts-ruby":        "Mon Jan 02 15:04:05 -0700 2006",
	"ts-rfc822":      "02 Jan 06 15:04 MST",
	"ts-rfc822z":     "02 Jan 06 15:04 -0700", // RFC822 with numeric zone
	"ts-rfc850":      "Monday, 02-Jan-06 15:04:05 MST",
	"ts-rfc1123":     "Mon, 02 Jan 2006 15:04:05 MST",
	"ts-rfc1123z":    "Mon, 02 Jan 2006 15:04:05 -0700", // RFC1123 with numeric zone
	"ts-rfc3339":     "2006-01-02T15:04:05Z07:00",
	"ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00",
	"ts-httpd":       "02/Jan/2006:15:04:05 -0700",
	"ts-epoch":       "EPOCH",
	"ts-epochnano":   "EPOCH_NANO",
}

const (
	INT      = "int"
	TAG      = "tag"
	FLOAT    = "float"
	STRING   = "string"
	DURATION = "duration"
	DROP     = "drop"
)

var (
	// matches named captures that contain a type.
	//   ie,
	//     %{NUMBER:bytes:int}
	//     %{IPORHOST:clientip:tag}
	//     %{HTTPDATE:ts1:ts-http}
	//     %{HTTPDATE:ts2:ts-"02 Jan 06 15:04"}
	typedRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`)
	// matches a plain pattern name. ie, %{NUMBER}
	patternOnlyRe = regexp.MustCompile(`%{(\w+)}`)
)

type Parser struct {
	Patterns []string
	// namedPatterns is a list of internally-assigned names to the patterns
	// specified by the user in Patterns.
	// They will look like:
	//   GROK_INTERNAL_PATTERN_0, GROK_INTERNAL_PATTERN_1, etc.
	namedPatterns      []string
	CustomPatterns     string
	CustomPatternFiles []string
	Measurement        string

	// typeMap is a map of patterns -> capture name -> modifier,
	//   ie, {
	//     "%{TESTLOG}":
	//       {
	//         "bytes": "int",
	//         "clientip": "tag"
	//       }
	//   }
	typeMap map[string]map[string]string
	// tsMap is a map of patterns -> capture name -> timestamp layout.
	//   ie, {
	//     "%{TESTLOG}":
	//       {
	//         "httptime": "02/Jan/2006:15:04:05 -0700"
	//       }
	//   }
	tsMap map[string]map[string]string
	// patterns is a map of all of the parsed patterns from CustomPatterns
	// and CustomPatternFiles.
	//   ie, {
	//     "DURATION":      "%{NUMBER}[nuµm]?s"
	//     "RESPONSE_CODE": "%{NUMBER:rc:tag}"
	//   }
	patterns map[string]string

	g        *grok.Grok
	tsModder *tsModder
}

func (p *Parser) Compile() error {
	p.typeMap = make(map[string]map[string]string)
	p.tsMap = make(map[string]map[string]string)
	p.patterns = make(map[string]string)
	p.tsModder = &tsModder{}
	var err error
	p.g, err = grok.NewWithConfig(&grok.Config{NamedCapturesOnly: true})
	if err != nil {
		return err
	}

	// Give Patterns fake names so that they can be treated as named
	// "custom patterns"
	p.namedPatterns = make([]string, len(p.Patterns))
	for i, pattern := range p.Patterns {
		name := fmt.Sprintf("GROK_INTERNAL_PATTERN_%d", i)
		p.CustomPatterns += "\n" + name + " " + pattern + "\n"
		p.namedPatterns[i] = "%{" + name + "}"
	}

	// Combine user-supplied CustomPatterns with DEFAULT_PATTERNS and parse
	// them together as the same type of pattern.
	p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns
	if len(p.CustomPatterns) != 0 {
		scanner := bufio.NewScanner(strings.NewReader(p.CustomPatterns))
		p.addCustomPatterns(scanner)
	}

	// Parse any custom pattern files supplied.
	for _, filename := range p.CustomPatternFiles {
		file, err := os.Open(filename)
		if err != nil {
			return err
		}

		scanner := bufio.NewScanner(bufio.NewReader(file))
		p.addCustomPatterns(scanner)
	}

	if p.Measurement == "" {
		p.Measurement = "logparser_grok"
	}

	return p.compileCustomPatterns()
}

func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
	var err error
	var values map[string]string
	// the matching pattern string
	var patternName string
	for _, pattern := range p.namedPatterns {
		if values, err = p.g.Parse(pattern, line); err != nil {
			return nil, err
		}
		if len(values) != 0 {
			patternName = pattern
			break
		}
	}

	if len(values) == 0 {
		return nil, nil
	}

	fields := make(map[string]interface{})
	tags := make(map[string]string)
	timestamp := time.Now()
	for k, v := range values {
		if k == "" || v == "" {
			continue
		}

		var t string
		// check if pattern has some modifiers
		if types, ok := p.typeMap[patternName]; ok {
			t = types[k]
		}
		// if we didn't find a modifier, check if we have a timestamp layout
		if t == "" {
			if ts, ok := p.tsMap[patternName]; ok {
				// check if the modifier is a timestamp layout
				if layout, ok := ts[k]; ok {
					t = layout
				}
			}
		}
		// if we didn't find a type OR timestamp modifier, assume string
		if t == "" {
			t = STRING
		}

		switch t {
		case INT:
			iv, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				log.Printf("ERROR parsing %s to int: %s", v, err)
			} else {
				fields[k] = iv
			}
		case FLOAT:
			fv, err := strconv.ParseFloat(v, 64)
			if err != nil {
				log.Printf("ERROR parsing %s to float: %s", v, err)
			} else {
				fields[k] = fv
			}
		case DURATION:
			d, err := time.ParseDuration(v)
			if err != nil {
				log.Printf("ERROR parsing %s to duration: %s", v, err)
			} else {
				fields[k] = int64(d)
			}
		case TAG:
			tags[k] = v
		case STRING:
			fields[k] = strings.Trim(v, `"`)
		case "EPOCH":
			iv, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				log.Printf("ERROR parsing %s to int: %s", v, err)
			} else {
				timestamp = time.Unix(iv, 0)
			}
		case "EPOCH_NANO":
			iv, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				log.Printf("ERROR parsing %s to int: %s", v, err)
			} else {
				timestamp = time.Unix(0, iv)
			}
		case DROP:
			// goodbye!
		default:
			ts, err := time.Parse(t, v)
			if err == nil {
				timestamp = ts
			} else {
				log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err)
			}
		}
	}

	return telegraf.NewMetric(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
}

func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) {
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if len(line) > 0 && line[0] != '#' {
			names := strings.SplitN(line, " ", 2)
			p.patterns[names[0]] = names[1]
		}
	}
}

func (p *Parser) compileCustomPatterns() error {
	var err error
	// check if the pattern contains a subpattern that is already defined
	// replace it with the subpattern for modifier inheritance.
	for i := 0; i < 2; i++ {
		for name, pattern := range p.patterns {
			subNames := patternOnlyRe.FindAllStringSubmatch(pattern, -1)
			for _, subName := range subNames {
				if subPattern, ok := p.patterns[subName[1]]; ok {
					pattern = strings.Replace(pattern, subName[0], subPattern, 1)
				}
			}
			p.patterns[name] = pattern
		}
	}

	// check if pattern contains modifiers. Parse them out if it does.
	for name, pattern := range p.patterns {
		if typedRe.MatchString(pattern) {
			// this pattern has modifiers, so parse out the modifiers
			pattern, err = p.parseTypedCaptures(name, pattern)
			if err != nil {
				return err
			}
			p.patterns[name] = pattern
		}
	}

	return p.g.AddPatternsFromMap(p.patterns)
}

// parseTypedCaptures parses the capture types, and then deletes the type from
// the line so that it is a valid "grok" pattern again.
//   ie,
//     %{NUMBER:bytes:int}      => %{NUMBER:bytes}      (stores %{NUMBER}->bytes->int)
//     %{IPORHOST:clientip:tag} => %{IPORHOST:clientip} (stores %{IPORHOST}->clientip->tag)
func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) {
	matches := typedRe.FindAllStringSubmatch(pattern, -1)

	// grab the name of the capture pattern
	patternName := "%{" + name + "}"
	// create type map for this pattern
	p.typeMap[patternName] = make(map[string]string)
	p.tsMap[patternName] = make(map[string]string)

	// boolean to verify that each pattern only has a single ts- data type.
	hasTimestamp := false
	for _, match := range matches {
		// regex capture 1 is the name of the capture
		// regex capture 2 is the type of the capture
		if strings.HasPrefix(match[2], "ts-") {
			if hasTimestamp {
				return pattern, fmt.Errorf("logparser pattern compile error: "+
					"Each pattern is allowed only one named "+
					"timestamp data type. pattern: %s", pattern)
			}
			if f, ok := timeFormats[match[2]]; ok {
				p.tsMap[patternName][match[1]] = f
			} else {
				p.tsMap[patternName][match[1]] = strings.TrimSuffix(strings.TrimPrefix(match[2], `ts-"`), `"`)
			}
			hasTimestamp = true
		} else {
			p.typeMap[patternName][match[1]] = match[2]
		}

		// the modifier is not a valid part of a "grok" pattern, so remove it
		// from the pattern.
		pattern = strings.Replace(pattern, ":"+match[2]+"}", "}", 1)
	}

	return pattern, nil
}

// tsModder is a struct for incrementing identical timestamps of log lines
// so that we don't push identical metrics that will get overwritten.
type tsModder struct {
	dupe     time.Time
	last     time.Time
	incr     time.Duration
	incrn    time.Duration
	rollover time.Duration
}

// tsMod increments the given timestamp one unit more from the previous
// duplicate timestamp.
// the increment unit is determined as the next smallest time unit below the
// most significant time unit of ts.
//   ie, if the input is at ms precision, it will increment it 1µs.
func (t *tsModder) tsMod(ts time.Time) time.Time {
	defer func() { t.last = ts }()
	// don't mod the time if we don't need to
	if t.last.IsZero() || ts.IsZero() {
		t.incrn = 0
		t.rollover = 0
		return ts
	}
	if !ts.Equal(t.last) && !ts.Equal(t.dupe) {
		t.incr = 0
		t.incrn = 0
		t.rollover = 0
		return ts
	}

	if ts.Equal(t.last) {
		t.dupe = ts
	}

	if ts.Equal(t.dupe) && t.incr == time.Duration(0) {
		tsNano := ts.UnixNano()

		d := int64(10)
		counter := 1
		for {
			a := tsNano % d
			if a > 0 {
				break
			}
			d = d * 10
			counter++
		}

		switch {
		case counter <= 6:
			t.incr = time.Nanosecond
		case counter <= 9:
			t.incr = time.Microsecond
		case counter > 9:
			t.incr = time.Millisecond
		}
	}

	t.incrn++
	if t.incrn == 999 && t.incr > time.Nanosecond {
		t.rollover = t.incr * t.incrn
		t.incrn = 1
		t.incr = t.incr / 1000
		if t.incr < time.Nanosecond {
			t.incr = time.Nanosecond
		}
	}
	return ts.Add(t.incr*t.incrn + t.rollover)
}
564 plugins/inputs/logparser/grok/grok_test.go Normal file
@@ -0,0 +1,564 @@
package grok

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var benchM telegraf.Metric

func Benchmark_ParseLine_CommonLogFormat(b *testing.B) {
	p := &Parser{
		Patterns: []string{"%{COMMON_LOG_FORMAT}"},
	}
	p.Compile()

	var m telegraf.Metric
	for n := 0; n < b.N; n++ {
		m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
	}
	benchM = m
}

func Benchmark_ParseLine_CombinedLogFormat(b *testing.B) {
	p := &Parser{
		Patterns: []string{"%{COMBINED_LOG_FORMAT}"},
	}
	p.Compile()

	var m telegraf.Metric
	for n := 0; n < b.N; n++ {
		m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`)
	}
	benchM = m
}

func Benchmark_ParseLine_InfluxLog(b *testing.B) {
	p := &Parser{
		Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
	}
	p.Compile()

	var m telegraf.Metric
	for n := 0; n < b.N; n++ {
		m, _ = p.ParseLine(`[httpd] 192.168.1.1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`)
	}
	benchM = m
}

func Benchmark_ParseLine_InfluxLog_NoMatch(b *testing.B) {
	p := &Parser{
		Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
	}
	p.Compile()

	var m telegraf.Metric
	for n := 0; n < b.N; n++ {
		m, _ = p.ParseLine(`[retention] 2016/06/14 14:38:24 retention policy shard deletion check commencing`)
	}
	benchM = m
}

func Benchmark_ParseLine_CustomPattern(b *testing.B) {
	p := &Parser{
		Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatterns: `
			DURATION %{NUMBER}[nuµm]?s
			RESPONSE_CODE %{NUMBER:response_code:tag}
			RESPONSE_TIME %{DURATION:response_time:duration}
			TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
		`,
	}
	p.Compile()

	var m telegraf.Metric
	for n := 0; n < b.N; n++ {
		m, _ = p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
	}
	benchM = m
}

func TestMeasurementName(t *testing.T) {
	p := &Parser{
		Measurement: "my_web_log",
		Patterns:    []string{"%{COMMON_LOG_FORMAT}"},
	}
	assert.NoError(t, p.Compile())

	// Parse a common log format line
	m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
	require.NotNil(t, m)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"resp_bytes":   int64(2326),
			"auth":         "frank",
			"client_ip":    "127.0.0.1",
			"http_version": float64(1.0),
			"ident":        "user-identifier",
			"request":      "/apache_pb.gif",
		},
		m.Fields())
	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
	assert.Equal(t, "my_web_log", m.Name())
}

func TestBuiltinInfluxdbHttpd(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
	}
	assert.NoError(t, p.Compile())

	// Parse an influxdb POST request
	m, err := p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`)
	require.NotNil(t, m)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"resp_bytes":       int64(0),
			"auth":             "-",
			"client_ip":        "::1",
			"http_version":     float64(1.1),
			"ident":            "-",
			"referrer":         "-",
			"request":          "/write?consistency=any&db=telegraf&precision=ns&rp=",
			"response_time_us": int64(2513),
			"agent":            "InfluxDBClient",
		},
		m.Fields())
	assert.Equal(t, map[string]string{"verb": "POST", "resp_code": "204"}, m.Tags())

	// Parse an influxdb GET request
	m, err = p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:12:10:02 +0100] "GET /query?db=telegraf&q=SELECT+bytes%2Cresponse_time_us+FROM+logparser_grok+WHERE+http_method+%3D+%27GET%27+AND+response_time_us+%3E+0+AND+time+%3E+now%28%29+-+1h HTTP/1.1" 200 578 "http://localhost:8083/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36" 8a3806f1-3220-11e6-8006-000000000000 988`)
	require.NotNil(t, m)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"resp_bytes":       int64(578),
			"auth":             "-",
			"client_ip":        "::1",
			"http_version":     float64(1.1),
			"ident":            "-",
			"referrer":         "http://localhost:8083/",
			"request":          "/query?db=telegraf&q=SELECT+bytes%2Cresponse_time_us+FROM+logparser_grok+WHERE+http_method+%3D+%27GET%27+AND+response_time_us+%3E+0+AND+time+%3E+now%28%29+-+1h",
			"response_time_us": int64(988),
			"agent":            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
		},
		m.Fields())
	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

// common log format
// 127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
func TestBuiltinCommonLogFormat(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{COMMON_LOG_FORMAT}"},
	}
	assert.NoError(t, p.Compile())

	// Parse a common log format line
	m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
	require.NotNil(t, m)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"resp_bytes":   int64(2326),
			"auth":         "frank",
			"client_ip":    "127.0.0.1",
			"http_version": float64(1.0),
			"ident":        "user-identifier",
			"request":      "/apache_pb.gif",
		},
		m.Fields())
	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

// combined log format
// 127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"
func TestBuiltinCombinedLogFormat(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{COMBINED_LOG_FORMAT}"},
	}
	assert.NoError(t, p.Compile())

	// Parse a combined log format line
	m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`)
	require.NotNil(t, m)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"resp_bytes":   int64(2326),
			"auth":         "frank",
			"client_ip":    "127.0.0.1",
			"http_version": float64(1.0),
			"ident":        "user-identifier",
			"request":      "/apache_pb.gif",
			"referrer":     "-",
			"agent":        "Mozilla",
		},
		m.Fields())
	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

func TestCompileStringAndParse(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{TEST_LOG_A}"},
		CustomPatterns: `
			DURATION %{NUMBER}[nuµm]?s
			RESPONSE_CODE %{NUMBER:response_code:tag}
			RESPONSE_TIME %{DURATION:response_time:duration}
			TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
		`,
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
	require.NotNil(t, metricA)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"clientip":      "192.168.1.1",
			"myfloat":       float64(1.25),
			"response_time": int64(5432),
		},
		metricA.Fields())
	assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
}

func TestCompileErrorsOnInvalidPattern(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatterns: `
			DURATION %{NUMBER}[nuµm]?s
			RESPONSE_CODE %{NUMBER:response_code:tag}
			RESPONSE_TIME %{DURATION:response_time:duration}
			TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
		`,
	}
	assert.Error(t, p.Compile())

	metricA, _ := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
	require.Nil(t, metricA)
}

func TestParsePatternsWithoutCustom(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}"},
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`)
	require.NotNil(t, metricA)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"response_time": int64(20821),
			"metric":        float64(10890.645),
		},
		metricA.Fields())
	assert.Equal(t, map[string]string{}, metricA.Tags())
	assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
}

func TestParseEpochNano(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{MYAPP}"},
		CustomPatterns: `
			MYAPP %{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
		`,
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`)
	require.NotNil(t, metricA)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"response_time": int64(20821),
			"metric":        float64(10890.645),
		},
		metricA.Fields())
	assert.Equal(t, map[string]string{}, metricA.Tags())
	assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
}

func TestParseEpoch(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{MYAPP}"},
		CustomPatterns: `
			MYAPP %{POSINT:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
		`,
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`1466004605 response_time=20821 mymetric=10890.645`)
	require.NotNil(t, metricA)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"response_time": int64(20821),
			"metric":        float64(10890.645),
		},
		metricA.Fields())
	assert.Equal(t, map[string]string{}, metricA.Tags())
	assert.Equal(t, time.Unix(1466004605, 0), metricA.Time())
}

func TestParseEpochErrors(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{MYAPP}"},
		CustomPatterns: `
			MYAPP %{WORD:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
		`,
	}
	assert.NoError(t, p.Compile())

	_, err := p.ParseLine(`foobar response_time=20821 mymetric=10890.645`)
	assert.NoError(t, err)

	p = &Parser{
		Patterns: []string{"%{MYAPP}"},
		CustomPatterns: `
			MYAPP %{WORD:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
		`,
	}
	assert.NoError(t, p.Compile())

	_, err = p.ParseLine(`foobar response_time=20821 mymetric=10890.645`)
	assert.NoError(t, err)
}

func TestCompileFileAndParse(t *testing.T) {
	p := &Parser{
		Patterns:           []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatternFiles: []string{"./testdata/test-patterns"},
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
	require.NotNil(t, metricA)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"clientip":      "192.168.1.1",
			"myfloat":       float64(1.25),
			"response_time": int64(5432),
			"myint":         int64(101),
		},
		metricA.Fields())
	assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
	assert.Equal(t,
		time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(),
		metricA.Time().Nanosecond())

	metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
	require.NotNil(t, metricB)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"myfloat":    1.25,
			"mystring":   "mystring",
			"nomodifier": "nomodifier",
		},
		metricB.Fields())
	assert.Equal(t, map[string]string{}, metricB.Tags())
	assert.Equal(t,
		time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(),
		metricB.Time().Nanosecond())
}

func TestCompileNoModifiersAndParse(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{TEST_LOG_C}"},
		CustomPatterns: `
			DURATION %{NUMBER}[nuµm]?s
			TEST_LOG_C %{NUMBER:myfloat} %{NUMBER} %{IPORHOST:clientip} %{DURATION:rt}
		`,
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
	require.NotNil(t, metricA)
	assert.NoError(t, err)
	assert.Equal(t,
		map[string]interface{}{
			"clientip": "192.168.1.1",
			"myfloat":  "1.25",
			"rt":       "5.432µs",
		},
		metricA.Fields())
	assert.Equal(t, map[string]string{}, metricA.Tags())
}

func TestCompileNoNamesAndParse(t *testing.T) {
	p := &Parser{
		Patterns: []string{"%{TEST_LOG_C}"},
		CustomPatterns: `
			DURATION %{NUMBER}[nuµm]?s
			TEST_LOG_C %{NUMBER} %{NUMBER} %{IPORHOST} %{DURATION}
		`,
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
	require.Nil(t, metricA)
	assert.NoError(t, err)
}

func TestParseNoMatch(t *testing.T) {
	p := &Parser{
		Patterns:           []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatternFiles: []string{"./testdata/test-patterns"},
	}
	assert.NoError(t, p.Compile())

	metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`)
	assert.NoError(t, err)
	assert.Nil(t, metricA)
}

func TestCompileErrors(t *testing.T) {
	// Compile fails because there are multiple timestamps:
	p := &Parser{
		Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatterns: `
			TEST_LOG_A %{HTTPDATE:ts1:ts-httpd} %{HTTPDATE:ts2:ts-httpd} %{NUMBER:mynum:int}
		`,
	}
	assert.Error(t, p.Compile())

	// Compile fails because file doesn't exist:
	p = &Parser{
		Patterns:           []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatternFiles: []string{"/tmp/foo/bar/baz"},
	}
	assert.Error(t, p.Compile())
}

func TestParseErrors(t *testing.T) {
	// Parse fails because the pattern doesn't exist
	p := &Parser{
		Patterns: []string{"%{TEST_LOG_B}"},
		CustomPatterns: `
			TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int} %{}
		`,
	}
	assert.Error(t, p.Compile())
	_, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`)
	assert.Error(t, err)

	// Parse fails because myword is not an int
	p = &Parser{
		Patterns: []string{"%{TEST_LOG_A}"},
		CustomPatterns: `
			TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int}
		`,
	}
	assert.NoError(t, p.Compile())
	_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
	assert.Error(t, err)

	// Parse fails because myword is not a float
	p = &Parser{
		Patterns: []string{"%{TEST_LOG_A}"},
		CustomPatterns: `
			TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:float}
		`,
	}
	assert.NoError(t, p.Compile())
	_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
	assert.Error(t, err)

	// Parse fails because myword is not a duration
	p = &Parser{
		Patterns: []string{"%{TEST_LOG_A}"},
		CustomPatterns: `
			TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:duration}
		`,
	}
	assert.NoError(t, p.Compile())
	_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
	assert.Error(t, err)

	// Parse fails because the time layout is wrong.
	p = &Parser{
		Patterns: []string{"%{TEST_LOG_A}"},
		CustomPatterns: `
			TEST_LOG_A %{HTTPDATE:ts:ts-unix} %{WORD:myword:duration}
		`,
	}
	assert.NoError(t, p.Compile())
	_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
	assert.Error(t, err)
}

func TestTsModder(t *testing.T) {
	tsm := &tsModder{}

	reftime := time.Date(2006, time.December, 1, 1, 1, 1, int(time.Millisecond), time.UTC)
	modt := tsm.tsMod(reftime)
	assert.Equal(t, reftime, modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Microsecond*1), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Microsecond*2), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Microsecond*3), modt)

	reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime, modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Nanosecond*1), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Nanosecond*2), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Nanosecond*3), modt)

	reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond)*999, time.UTC)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime, modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Nanosecond*1), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Nanosecond*2), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Nanosecond*3), modt)

	reftime = time.Date(2006, time.December, 1, 1, 1, 1, 0, time.UTC)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime, modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Millisecond*1), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Millisecond*2), modt)
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime.Add(time.Millisecond*3), modt)

	reftime = time.Time{}
	modt = tsm.tsMod(reftime)
	assert.Equal(t, reftime, modt)
}

func TestTsModder_Rollover(t *testing.T) {
	tsm := &tsModder{}

	reftime := time.Date(2006, time.December, 1, 1, 1, 1, int(time.Millisecond), time.UTC)
	modt := tsm.tsMod(reftime)
	for i := 1; i < 1000; i++ {
		modt = tsm.tsMod(reftime)
	}
	assert.Equal(t, reftime.Add(time.Microsecond*999+time.Nanosecond), modt)

	reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC)
	modt = tsm.tsMod(reftime)
	for i := 1; i < 1001; i++ {
		modt = tsm.tsMod(reftime)
	}
	assert.Equal(t, reftime.Add(time.Nanosecond*1000), modt)
}
80 plugins/inputs/logparser/grok/influx_patterns.go Normal file
@@ -0,0 +1,80 @@
package grok

// THIS SHOULD BE KEPT IN-SYNC WITH patterns/influx-patterns
const DEFAULT_PATTERNS = `
# Captures are a slightly modified version of logstash "grok" patterns, with
# the format %{<capture syntax>[:<semantic name>][:<modifier>]}
# By default all named captures are converted into string fields.
# Modifiers can be used to convert captures to other types or tags.
# Timestamp modifiers can be used to convert captures to the timestamp of the
# parsed metric.

# View logstash grok pattern docs here:
#   https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
# All default logstash patterns are supported, these can be viewed here:
#   https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns

# Available modifiers:
#   string   (default if nothing is specified)
#   int
#   float
#   duration (ie, 5.23ms gets converted to int nanoseconds)
#   tag      (converts the field into a tag)
#   drop     (drops the field completely)
# Timestamp modifiers:
#   ts-ansic       ("Mon Jan _2 15:04:05 2006")
#   ts-unix        ("Mon Jan _2 15:04:05 MST 2006")
#   ts-ruby        ("Mon Jan 02 15:04:05 -0700 2006")
#   ts-rfc822      ("02 Jan 06 15:04 MST")
#   ts-rfc822z     ("02 Jan 06 15:04 -0700")
#   ts-rfc850      ("Monday, 02-Jan-06 15:04:05 MST")
#   ts-rfc1123     ("Mon, 02 Jan 2006 15:04:05 MST")
#   ts-rfc1123z    ("Mon, 02 Jan 2006 15:04:05 -0700")
#   ts-rfc3339     ("2006-01-02T15:04:05Z07:00")
#   ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
#   ts-httpd       ("02/Jan/2006:15:04:05 -0700")
#   ts-epoch       (seconds since unix epoch)
#   ts-epochnano   (nanoseconds since unix epoch)
#   ts-"CUSTOM"
# CUSTOM time layouts must be within quotes and be the representation of the
# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006
# See https://golang.org/pkg/time/#Parse for more details.

# Example log file pattern, example log looks like this:
#   [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs
# Breakdown of the DURATION pattern below:
#   NUMBER is a builtin logstash grok pattern matching float & int numbers.
#   [nuµm]? is a regex specifying 0 or 1 of the characters within brackets.
#   s is also regex, this pattern must end in "s".
# so DURATION will match something like '5.324ms' or '6.1µs' or '10s'
DURATION %{NUMBER}[nuµm]?s
RESPONSE_CODE %{NUMBER:response_code:tag}
RESPONSE_TIME %{DURATION:response_time_ns:duration}
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}

# Wider-ranging username matching vs. logstash built-in %{USER}
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME}

##
## COMMON LOG PATTERNS
##

# InfluxDB log patterns
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}

# apache & nginx logs, this is also known as the "common log format"
#   see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)

# Combined log format is the same as the common log format but with the addition
# of two quoted strings at the end for "referrer" and "agent"
#   See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html
COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent}

# HTTPD log formats
HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg}
HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message}
HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}
`
75 plugins/inputs/logparser/grok/patterns/influx-patterns Normal file
@@ -0,0 +1,75 @@
# Captures are a slightly modified version of logstash "grok" patterns, with
# the format %{<capture syntax>[:<semantic name>][:<modifier>]}
# By default all named captures are converted into string fields.
# Modifiers can be used to convert captures to other types or tags.
# Timestamp modifiers can be used to convert captures to the timestamp of the
# parsed metric.

# View logstash grok pattern docs here:
#   https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
# All default logstash patterns are supported, these can be viewed here:
#   https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns

# Available modifiers:
#   string   (default if nothing is specified)
#   int
#   float
#   duration (ie, 5.23ms gets converted to int nanoseconds)
#   tag      (converts the field into a tag)
#   drop     (drops the field completely)
# Timestamp modifiers:
#   ts-ansic       ("Mon Jan _2 15:04:05 2006")
#   ts-unix        ("Mon Jan _2 15:04:05 MST 2006")
#   ts-ruby        ("Mon Jan 02 15:04:05 -0700 2006")
#   ts-rfc822      ("02 Jan 06 15:04 MST")
#   ts-rfc822z     ("02 Jan 06 15:04 -0700")
#   ts-rfc850      ("Monday, 02-Jan-06 15:04:05 MST")
#   ts-rfc1123     ("Mon, 02 Jan 2006 15:04:05 MST")
#   ts-rfc1123z    ("Mon, 02 Jan 2006 15:04:05 -0700")
#   ts-rfc3339     ("2006-01-02T15:04:05Z07:00")
#   ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
#   ts-httpd       ("02/Jan/2006:15:04:05 -0700")
#   ts-epoch       (seconds since unix epoch)
#   ts-epochnano   (nanoseconds since unix epoch)
#   ts-"CUSTOM"
# CUSTOM time layouts must be within quotes and be the representation of the
# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006
# See https://golang.org/pkg/time/#Parse for more details.

# Example log file pattern, example log looks like this:
#   [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs
# Breakdown of the DURATION pattern below:
#   NUMBER is a builtin logstash grok pattern matching float & int numbers.
#   [nuµm]? is a regex specifying 0 or 1 of the characters within brackets.
#   s is also regex, this pattern must end in "s".
# so DURATION will match something like '5.324ms' or '6.1µs' or '10s'
DURATION %{NUMBER}[nuµm]?s
RESPONSE_CODE %{NUMBER:response_code:tag}
RESPONSE_TIME %{DURATION:response_time_ns:duration}
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}

# Wider-ranging username matching vs. logstash built-in %{USER}
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME}

##
## COMMON LOG PATTERNS
##

# InfluxDB log patterns
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}

# apache & nginx logs, this is also known as the "common log format"
#   see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)

# Combined log format is the same as the common log format but with the addition
# of two quoted strings at the end for "referrer" and "agent"
#   See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html
COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent}

# HTTPD log formats
HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg}
HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message}
HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}
14 plugins/inputs/logparser/grok/testdata/test-patterns vendored Normal file
@@ -0,0 +1,14 @@
# Test A log line:
#   [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101
DURATION %{NUMBER}[nuµm]?s
RESPONSE_CODE %{NUMBER:response_code:tag}
RESPONSE_TIME %{DURATION:response_time:duration}
TEST_LOG_A \[%{HTTPDATE:timestamp:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} %{NUMBER:myint:int}

# Test B log line:
#   [04/06/2016--12:41:45] 1.25 mystring dropme nomodifier
TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME}
TEST_LOG_B \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:string} %{WORD:dropme:drop} %{WORD:nomodifier}

TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME}
TEST_LOG_BAD \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:int} %{WORD:dropme:drop} %{WORD:nomodifier}
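The ts-"CUSTOM" modifier used by TEST_LOG_B above boils down to a Go time.Parse call: the quoted layout is a representation of the Go reference time. A minimal sketch of the same conversion (the standalone program is illustrative, not part of the patch; the input value is the timestamp from testdata/test_b.log):

```go
package main

import (
	"fmt"
	"log"
	"time"
)

func main() {
	// Layout from TEST_LOG_B's ts-"02/01/2006--15:04:05" modifier; the
	// input value comes from testdata/test_b.log.
	t, err := time.Parse("02/01/2006--15:04:05", "04/06/2016--12:41:45")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t) // 2016-06-04 12:41:45 +0000 UTC
}
```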
1 plugins/inputs/logparser/grok/testdata/test_a.log vendored Normal file
@@ -0,0 +1 @@
[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101
1 plugins/inputs/logparser/grok/testdata/test_b.log vendored Normal file
@@ -0,0 +1 @@
[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier
231 plugins/inputs/logparser/logparser.go Normal file
@@ -0,0 +1,231 @@
package logparser

import (
	"fmt"
	"log"
	"reflect"
	"sync"

	"github.com/hpcloud/tail"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/errchan"
	"github.com/influxdata/telegraf/internal/globpath"
	"github.com/influxdata/telegraf/plugins/inputs"

	// Parsers
	"github.com/influxdata/telegraf/plugins/inputs/logparser/grok"
)

type LogParser interface {
	ParseLine(line string) (telegraf.Metric, error)
	Compile() error
}

type LogParserPlugin struct {
	Files         []string
	FromBeginning bool

	tailers []*tail.Tail
	lines   chan string
	done    chan struct{}
	wg      sync.WaitGroup
	acc     telegraf.Accumulator
	parsers []LogParser

	sync.Mutex

	GrokParser *grok.Parser `toml:"grok"`
}

const sampleConfig = `
  ## Log files to parse.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ##   /var/log/**.log     -> recursively find all .log files in /var/log
  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
  ##   /var/log/apache.log -> only tail the apache log file
  files = ["/var/log/influxdb/influxdb.log"]
  ## Read file from beginning.
  from_beginning = false

  ## Parse logstash-style "grok" patterns:
  ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
  [inputs.logparser.grok]
    ## This is a list of patterns to check the given log file(s) for.
    ## Note that adding patterns here increases processing time. The most
    ## efficient configuration is to have one pattern per logparser.
    ## Other common built-in patterns are:
    ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
    ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
    patterns = ["%{INFLUXDB_HTTPD_LOG}"]
    ## Name of the output measurement.
    measurement = "influxdb_log"
    ## Full path(s) to custom pattern files.
    custom_pattern_files = []
    ## Custom patterns can also be defined here. Put one pattern per line.
    custom_patterns = '''
    '''
`

func (l *LogParserPlugin) SampleConfig() string {
	return sampleConfig
}

func (l *LogParserPlugin) Description() string {
	return "Stream and parse log file(s)."
}

func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error {
	return nil
}

func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
	l.Lock()
	defer l.Unlock()

	l.acc = acc
	l.lines = make(chan string, 1000)
	l.done = make(chan struct{})

	// Looks for fields which implement LogParser interface
	l.parsers = []LogParser{}
	s := reflect.ValueOf(l).Elem()
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)

		if !f.CanInterface() {
			continue
		}

		if lpPlugin, ok := f.Interface().(LogParser); ok {
			if reflect.ValueOf(lpPlugin).IsNil() {
				continue
			}
			l.parsers = append(l.parsers, lpPlugin)
		}
	}

	if len(l.parsers) == 0 {
		return fmt.Errorf("ERROR: logparser input plugin: no parser defined.")
	}

	// compile log parser patterns:
	errChan := errchan.New(len(l.parsers))
	for _, parser := range l.parsers {
		if err := parser.Compile(); err != nil {
			errChan.C <- err
		}
	}
	if err := errChan.Error(); err != nil {
		return err
	}

	var seek tail.SeekInfo
	if !l.FromBeginning {
		seek.Whence = 2
		seek.Offset = 0
	}

	l.wg.Add(1)
	go l.parser()

	// Create a "tailer" for each file
	for _, filepath := range l.Files {
		g, err := globpath.Compile(filepath)
		if err != nil {
			log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
			continue
		}
		files := g.Match()
		errChan = errchan.New(len(files))
		for file := range files {
			tailer, err := tail.TailFile(file,
				tail.Config{
					ReOpen:    true,
					Follow:    true,
					Location:  &seek,
					MustExist: true,
				})
			errChan.C <- err

			// create a goroutine for each "tailer"
			l.wg.Add(1)
			go l.receiver(tailer)
			l.tailers = append(l.tailers, tailer)
		}
	}

	return errChan.Error()
}

// receiver is launched as a goroutine to continuously watch a tailed logfile
// for changes and send any log lines down the l.lines channel.
func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
	defer l.wg.Done()

	var line *tail.Line
	for line = range tailer.Lines {
		if line.Err != nil {
			log.Printf("ERROR tailing file %s, Error: %s\n",
				tailer.Filename, line.Err)
			continue
		}

		select {
		case <-l.done:
		case l.lines <- line.Text:
		}
	}
}

// parser is launched as a goroutine to watch the l.lines channel.
// when a line is available, parser parses it and adds the metric(s) to the
// accumulator.
func (l *LogParserPlugin) parser() {
	defer l.wg.Done()

	var m telegraf.Metric
	var err error
	var line string
	for {
		select {
		case <-l.done:
			return
		case line = <-l.lines:
			if line == "" || line == "\n" {
				continue
			}
		}

		for _, parser := range l.parsers {
			m, err = parser.ParseLine(line)
			if err == nil {
				if m != nil {
					l.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
				}
			}
		}
	}
}

func (l *LogParserPlugin) Stop() {
	l.Lock()
	defer l.Unlock()

	for _, t := range l.tailers {
		err := t.Stop()
		if err != nil {
			log.Printf("ERROR stopping tail on file %s\n", t.Filename)
		}
		t.Cleanup()
	}
	close(l.done)
	l.wg.Wait()
}

func init() {
	inputs.Add("logparser", func() telegraf.Input {
		return &LogParserPlugin{}
	})
}
119 plugins/inputs/logparser/logparser_test.go Normal file
@@ -0,0 +1,119 @@
package logparser

import (
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"

	"github.com/influxdata/telegraf/plugins/inputs/logparser/grok"

	"github.com/stretchr/testify/assert"
)

func TestStartNoParsers(t *testing.T) {
	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{"grok/testdata/*.log"},
	}

	acc := testutil.Accumulator{}
	assert.Error(t, logparser.Start(&acc))
}

func TestGrokParseLogFilesNonExistPattern(t *testing.T) {
	thisdir := getCurrentDir()
	p := &grok.Parser{
		Patterns:           []string{"%{FOOBAR}"},
		CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"},
	}

	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{thisdir + "grok/testdata/*.log"},
		GrokParser:    p,
	}

	acc := testutil.Accumulator{}
	assert.Error(t, logparser.Start(&acc))

	time.Sleep(time.Millisecond * 500)
	logparser.Stop()
}

func TestGrokParseLogFiles(t *testing.T) {
	thisdir := getCurrentDir()
	p := &grok.Parser{
		Patterns:           []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"},
	}

	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{thisdir + "grok/testdata/*.log"},
		GrokParser:    p,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, logparser.Start(&acc))

	time.Sleep(time.Millisecond * 500)
	logparser.Stop()

	acc.AssertContainsTaggedFields(t, "logparser_grok",
		map[string]interface{}{
			"clientip":      "192.168.1.1",
			"myfloat":       float64(1.25),
			"response_time": int64(5432),
			"myint":         int64(101),
		},
		map[string]string{"response_code": "200"})

	acc.AssertContainsTaggedFields(t, "logparser_grok",
		map[string]interface{}{
			"myfloat":    1.25,
			"mystring":   "mystring",
			"nomodifier": "nomodifier",
		},
		map[string]string{})
}

// Test that test_a.log line gets parsed even though we don't have the correct
// pattern available for test_b.log
func TestGrokParseLogFilesOneBad(t *testing.T) {
	thisdir := getCurrentDir()
	p := &grok.Parser{
		Patterns:           []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"},
		CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"},
	}
	assert.NoError(t, p.Compile())

	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{thisdir + "grok/testdata/test_a.log"},
		GrokParser:    p,
	}

	acc := testutil.Accumulator{}
	acc.SetDebug(true)
	assert.NoError(t, logparser.Start(&acc))

	time.Sleep(time.Millisecond * 500)
	logparser.Stop()

	acc.AssertContainsTaggedFields(t, "logparser_grok",
		map[string]interface{}{
			"clientip":      "192.168.1.1",
			"myfloat":       float64(1.25),
			"response_time": int64(5432),
			"myint":         int64(101),
		},
		map[string]string{"response_code": "200"})
}

func getCurrentDir() string {
	_, filename, _, _ := runtime.Caller(1)
	return strings.Replace(filename, "logparser_test.go", "", 1)
}
@@ -53,13 +53,13 @@ This plugin gathers the statistic data from MySQL server
   ## gather metrics from SHOW BINARY LOGS command output
   gather_binary_logs = false
   #
-  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE
+  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
   gather_table_io_waits = false
   #
   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
   gather_table_lock_waits = false
   #
-  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE
+  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
   gather_index_io_waits = false
   #
   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
@@ -306,6 +306,10 @@ var mappings = []*mapping{
 		onServer: "Threadpool_",
 		inExport: "threadpool_",
 	},
+	{
+		onServer: "wsrep_",
+		inExport: "wsrep_",
+	},
 }

 var (
@@ -97,11 +97,12 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
 	if err != nil {
 		return err
 	}
-	data := strings.SplitN(strings.TrimSpace(line), " ", 3)
+	data := strings.Fields(line)
 	accepts, err := strconv.ParseUint(data[0], 10, 64)
 	if err != nil {
 		return err
 	}
+
 	handled, err := strconv.ParseUint(data[1], 10, 64)
 	if err != nil {
 		return err
@@ -116,7 +117,7 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
 	if err != nil {
 		return err
 	}
-	data = strings.SplitN(strings.TrimSpace(line), " ", 6)
+	data = strings.Fields(line)
 	reading, err := strconv.ParseUint(data[1], 10, 64)
 	if err != nil {
 		return err
@@ -13,12 +13,18 @@ import (
 	"github.com/stretchr/testify/require"
 )

-const sampleResponse = `
+const nginxSampleResponse = `
 Active connections: 585
 server accepts handled requests
 85340 85340 35085
 Reading: 4 Writing: 135 Waiting: 446
 `
+const tengineSampleResponse = `
+Active connections: 403
+server accepts handled requests request_time
+853 8533 3502 1546565864
+Reading: 8 Writing: 125 Waiting: 946
+`

 // Verify that nginx tags are properly parsed based on the server
 func TestNginxTags(t *testing.T) {
@@ -36,7 +42,9 @@ func TestNginxGeneratesMetrics(t *testing.T) {
 		var rsp string

 		if r.URL.Path == "/stub_status" {
-			rsp = sampleResponse
+			rsp = nginxSampleResponse
+		} else if r.URL.Path == "/tengine_status" {
+			rsp = tengineSampleResponse
 		} else {
 			panic("Cannot handle request")
 		}
@@ -49,12 +57,20 @@ func TestNginxGeneratesMetrics(t *testing.T) {
 		Urls: []string{fmt.Sprintf("%s/stub_status", ts.URL)},
 	}

-	var acc testutil.Accumulator
-	err := n.Gather(&acc)
-	require.NoError(t, err)
-	fields := map[string]interface{}{
+	nt := &Nginx{
+		Urls: []string{fmt.Sprintf("%s/tengine_status", ts.URL)},
+	}
+
+	var acc_nginx testutil.Accumulator
+	var acc_tengine testutil.Accumulator
+
+	err_nginx := n.Gather(&acc_nginx)
+	err_tengine := nt.Gather(&acc_tengine)
+
+	require.NoError(t, err_nginx)
+	require.NoError(t, err_tengine)
+
+	fields_nginx := map[string]interface{}{
 		"active":  uint64(585),
 		"accepts": uint64(85340),
 		"handled": uint64(85340),
@@ -63,6 +79,17 @@ func TestNginxGeneratesMetrics(t *testing.T) {
 		"writing": uint64(135),
 		"waiting": uint64(446),
 	}
+
+	fields_tengine := map[string]interface{}{
+		"active":   uint64(403),
+		"accepts":  uint64(853),
+		"handled":  uint64(8533),
+		"requests": uint64(3502),
+		"reading":  uint64(8),
+		"writing":  uint64(125),
+		"waiting":  uint64(946),
+	}

 	addr, err := url.Parse(ts.URL)
 	if err != nil {
 		panic(err)
@@ -81,5 +108,6 @@ func TestNginxGeneratesMetrics(t *testing.T) {
 	}

 	tags := map[string]string{"server": host, "port": port}
-	acc.AssertContainsTaggedFields(t, "nginx", fields, tags)
+	acc_nginx.AssertContainsTaggedFields(t, "nginx", fields_nginx, tags)
+	acc_tengine.AssertContainsTaggedFields(t, "nginx", fields_tengine, tags)
 }
25 plugins/inputs/nsq_consumer/README.md Normal file
@@ -0,0 +1,25 @@
|
# NSQ Consumer Input Plugin
|
||||||
|
|
||||||
|
The [NSQ](http://nsq.io/) consumer plugin polls a specified NSQD
|
||||||
|
topic and adds messages to InfluxDB. This plugin allows a message to be in any of the supported `data_format` types.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Read metrics from NSQD topic(s)
|
||||||
|
[[inputs.nsq_consumer]]
|
||||||
|
## An array of NSQD HTTP API endpoints
|
||||||
|
server = "localhost:4150"
|
||||||
|
topic = "telegraf"
|
||||||
|
channel = "consumer"
|
||||||
|
max_in_flight = 100
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has it's own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "influx"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
The `nsq_consumer_test` mocks out the interaction with `NSQD`. It requires no outside dependencies.
|
||||||
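For a quick end-to-end check against a real nsqd, a sketch like the following can publish one influx-format message to the topic the plugin consumes (the endpoint and topic are assumptions taken from the sample config above; the program itself is not part of the patch):

```go
package main

import (
	"log"

	"github.com/nsqio/go-nsq"
)

func main() {
	// Connect to a local nsqd (same endpoint as `server` in the config).
	producer, err := nsq.NewProducer("localhost:4150", nsq.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Stop()

	// One metric in influx line protocol, matching data_format = "influx".
	line := []byte("cpu_load_short,host=server01 value=0.64")
	if err := producer.Publish("telegraf", line); err != nil {
		log.Fatal(err)
	}
}
```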
99 plugins/inputs/nsq_consumer/nsq_consumer.go Normal file
@@ -0,0 +1,99 @@
package nsq_consumer

import (
	"log"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/nsqio/go-nsq"
)

// NSQConsumer represents the configuration of the plugin
type NSQConsumer struct {
	Server      string
	Topic       string
	Channel     string
	MaxInFlight int
	parser      parsers.Parser
	consumer    *nsq.Consumer
	acc         telegraf.Accumulator
}

var sampleConfig = `
  ## A string representing the NSQD TCP endpoint
  server = "localhost:4150"
  topic = "telegraf"
  channel = "consumer"
  max_in_flight = 100

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`

func init() {
	inputs.Add("nsq_consumer", func() telegraf.Input {
		return &NSQConsumer{}
	})
}

// SetParser takes the data_format from the config and finds the right parser for that format
func (n *NSQConsumer) SetParser(parser parsers.Parser) {
	n.parser = parser
}

// SampleConfig returns config values for generating a sample configuration file
func (n *NSQConsumer) SampleConfig() string {
	return sampleConfig
}

// Description prints description string
func (n *NSQConsumer) Description() string {
	return "Read NSQ topic for metrics."
}

// Start pulls data from nsq
func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
	n.acc = acc
	if err := n.connect(); err != nil {
		return err
	}
	n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error {
		metrics, err := n.parser.Parse(message.Body)
		if err != nil {
			log.Printf("NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())
			return nil
		}
		for _, metric := range metrics {
			n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
		}
		message.Finish()
		return nil
	}), n.MaxInFlight)
	return n.consumer.ConnectToNSQD(n.Server)
}

// Stop processing messages
func (n *NSQConsumer) Stop() {
	n.consumer.Stop()
}

// Gather is a noop
func (n *NSQConsumer) Gather(acc telegraf.Accumulator) error {
	return nil
}

func (n *NSQConsumer) connect() error {
	if n.consumer == nil {
		config := nsq.NewConfig()
		config.MaxInFlight = n.MaxInFlight
		consumer, err := nsq.NewConsumer(n.Topic, n.Channel, config)
		if err != nil {
			return err
		}
		n.consumer = consumer
	}
	return nil
}
245 plugins/inputs/nsq_consumer/nsq_consumer_test.go Normal file
@@ -0,0 +1,245 @@
package nsq_consumer

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"io"
	"log"
	"net"
	"strconv"
	"testing"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/testutil"
	"github.com/nsqio/go-nsq"
	"github.com/stretchr/testify/assert"
)

// This test is modeled after the kafka consumer integration test
func TestReadsMetricsFromNSQ(t *testing.T) {
	msgID := nsq.MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}
	msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"))

	script := []instruction{
		// SUB
		instruction{0, nsq.FrameTypeResponse, []byte("OK")},
		// IDENTIFY
		instruction{0, nsq.FrameTypeResponse, []byte("OK")},
		instruction{20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)},
		// needed to exit test
		instruction{100 * time.Millisecond, -1, []byte("exit")},
	}

	addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155")
	newMockNSQD(script, addr.String())

	consumer := &NSQConsumer{
		Server:      "127.0.0.1:4155",
		Topic:       "telegraf",
		Channel:     "consume",
		MaxInFlight: 1,
	}

	p, _ := parsers.NewInfluxParser()
	consumer.SetParser(p)
	var acc testutil.Accumulator
	assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
	if err := consumer.Start(&acc); err != nil {
		t.Fatal(err.Error())
	} else {
		defer consumer.Stop()
	}

	waitForPoint(&acc, t)

	if len(acc.Metrics) == 1 {
		point := acc.Metrics[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("No points found in accumulator, expected 1")
	}
}

// Waits for the metric that was sent to the mock NSQD to arrive at the
// consumer
func waitForPoint(acc *testutil.Accumulator, t *testing.T) {
	// Give the consumer up to 5 seconds to receive the point
	ticker := time.NewTicker(5 * time.Millisecond)
	defer ticker.Stop()
	counter := 0
	for {
		select {
		case <-ticker.C:
			counter++
			if counter > 1000 {
				t.Fatal("Waited for 5s, point never arrived to consumer")
			} else if acc.NFields() == 1 {
				return
			}
		}
	}
}

func newMockNSQD(script []instruction, addr string) *mockNSQD {
	n := &mockNSQD{
		script:   script,
		exitChan: make(chan int),
	}

	tcpListener, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("FATAL: listen (%s) failed - %s", addr, err)
	}
	n.tcpListener = tcpListener
	n.tcpAddr = tcpListener.Addr().(*net.TCPAddr)

	go n.listen()

	return n
}

// The code below allows us to mock the interactions with nsqd. This is taken from:
// https://github.com/nsqio/go-nsq/blob/master/mock_test.go
type instruction struct {
	delay     time.Duration
	frameType int32
	body      []byte
}

type mockNSQD struct {
	script      []instruction
	got         [][]byte
	tcpAddr     *net.TCPAddr
	tcpListener net.Listener
	exitChan    chan int
}

func (n *mockNSQD) listen() {
	for {
		conn, err := n.tcpListener.Accept()
		if err != nil {
			break
		}
		go n.handle(conn)
	}
	close(n.exitChan)
}

func (n *mockNSQD) handle(conn net.Conn) {
	var idx int
	buf := make([]byte, 4)
	_, err := io.ReadFull(conn, buf)
	if err != nil {
		log.Fatalf("ERROR: failed to read protocol version - %s", err)
	}

	readChan := make(chan []byte)
	readDoneChan := make(chan int)
	scriptTime := time.After(n.script[0].delay)
	rdr := bufio.NewReader(conn)

	go func() {
		for {
			line, err := rdr.ReadBytes('\n')
			if err != nil {
				return
			}
			// trim the '\n'
			line = line[:len(line)-1]
			readChan <- line
			<-readDoneChan
		}
	}()

	var rdyCount int
	for idx < len(n.script) {
		select {
		case line := <-readChan:
			n.got = append(n.got, line)
			params := bytes.Split(line, []byte(" "))
			switch {
			case bytes.Equal(params[0], []byte("IDENTIFY")):
				l := make([]byte, 4)
				_, err := io.ReadFull(rdr, l)
				if err != nil {
					log.Printf(err.Error())
					goto exit
				}
				size := int32(binary.BigEndian.Uint32(l))
				b := make([]byte, size)
				_, err = io.ReadFull(rdr, b)
				if err != nil {
					log.Printf(err.Error())
					goto exit
				}
			case bytes.Equal(params[0], []byte("RDY")):
				rdy, _ := strconv.Atoi(string(params[1]))
				rdyCount = rdy
			case bytes.Equal(params[0], []byte("FIN")):
			case bytes.Equal(params[0], []byte("REQ")):
			}
			readDoneChan <- 1
		case <-scriptTime:
			inst := n.script[idx]
			if bytes.Equal(inst.body, []byte("exit")) {
				goto exit
			}
			if inst.frameType == nsq.FrameTypeMessage {
				if rdyCount == 0 {
					scriptTime = time.After(n.script[idx+1].delay)
					continue
				}
				rdyCount--
			}
			_, err := conn.Write(framedResponse(inst.frameType, inst.body))
			if err != nil {
				log.Printf(err.Error())
				goto exit
			}
			scriptTime = time.After(n.script[idx+1].delay)
			idx++
		}
	}

exit:
	n.tcpListener.Close()
	conn.Close()
}

func framedResponse(frameType int32, data []byte) []byte {
	var w bytes.Buffer

	beBuf := make([]byte, 4)
	size := uint32(len(data)) + 4

	binary.BigEndian.PutUint32(beBuf, size)
	_, err := w.Write(beBuf)
	if err != nil {
		return nil
	}

	binary.BigEndian.PutUint32(beBuf, uint32(frameType))
	_, err = w.Write(beBuf)
	if err != nil {
		return nil
	}

	w.Write(data)
	return w.Bytes()
}

func frameMessage(m *nsq.Message) []byte {
	var b bytes.Buffer
	m.WriteTo(&b)
	return b.Bytes()
}
@@ -28,7 +28,7 @@ type Ping struct {
 	// Number of pings to send (ping -c <COUNT>)
 	Count int

-	// Ping timeout, in seconds. 0 means no timeout (ping -t <TIMEOUT>)
+	// Ping timeout, in seconds. 0 means no timeout (ping -W <TIMEOUT>)
 	Timeout float64

 	// Interface to send ping from (ping -I <INTERFACE>)
@@ -55,7 +55,7 @@ const sampleConfig = `
 	count = 1 # required
 	## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
 	ping_interval = 0.0
-	## ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
+	## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
 	timeout = 1.0
 	## interface to send ping from (ping -I <INTERFACE>)
 	interface = ""
@@ -76,7 +76,8 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 		go func(u string) {
 			defer wg.Done()
 			args := p.args(u)
-			out, err := p.pingHost(p.Timeout, args...)
+			totalTimeout := float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval
+			out, err := p.pingHost(totalTimeout, args...)
 			if err != nil {
 				// Combine go err + stderr output
 				errorChannel <- errors.New(
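To make the new bound concrete: with illustrative values (not from the patch) of count = 2, timeout = 1.0s and ping_interval = 0.5s, the worst case is both pings waiting out their full timeout plus the single gap between them, so the process gets 2*1.0 + 1*0.5 = 2.5 seconds before being killed. A small sketch of the same arithmetic:

```go
package main

import "fmt"

func main() {
	// Hypothetical config values: ping -c 2, per-ping timeout 1.0s (-W),
	// 0.5s between pings (-i).
	count, timeout, pingInterval := 2, 1.0, 0.5

	// Worst case: every ping waits out its full timeout, plus the
	// count-1 gaps between consecutive pings.
	totalTimeout := float64(count)*timeout + float64(count-1)*pingInterval
	fmt.Println(totalTimeout) // 2.5
}
```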
@@ -138,8 +139,8 @@ func (p *Ping) args(url string) []string {
|
|||||||
}
|
}
|
||||||
if p.Timeout > 0 {
|
if p.Timeout > 0 {
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "darwin", "freebsd":
|
case "darwin":
|
||||||
args = append(args, "-t", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
|
args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', 1, 64))
|
||||||
case "linux":
|
case "linux":
|
||||||
args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
|
args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
|
||||||
default:
|
default:
|
||||||
|
|||||||
@@ -95,7 +95,10 @@ func TestArgs(t *testing.T) {
 	p.Timeout = 12.0
 	actual = p.args("www.google.com")
 	switch runtime.GOOS {
-	case "darwin", "freebsd":
+	case "darwin":
+		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
+			"12000.0", "www.google.com"}
+	case "freebsd":
 		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-t",
 			"12.0", "www.google.com"}
 	default:
@@ -111,7 +114,10 @@ func TestArgs(t *testing.T) {
 	p.PingInterval = 1.2
 	actual = p.args("www.google.com")
 	switch runtime.GOOS {
-	case "darwin", "freebsd":
+	case "darwin":
+		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
+			"12000.0", "-i", "1.2", "www.google.com"}
+	case "freebsd":
 		expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-t",
 			"12.0", "-i", "1.2", "www.google.com"}
 	default:
@@ -70,7 +70,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
 			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
 	} else {
 		for pid, proc := range p.pidmap {
-			p := NewSpecProcessor(p.ProcessName, p.Prefix, acc, proc, p.tagmap[pid])
+			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
 			p.pushMetrics()
 		}
 	}
@@ -140,7 +140,6 @@ func (p *Procstat) pidsFromFile() ([]int32, error) {
 			out = append(out, int32(pid))
 			p.tagmap[int32(pid)] = map[string]string{
 				"pidfile": p.PidFile,
-				"pid":     strings.TrimSpace(string(pidString)),
 			}
 		}
 	}
@@ -165,7 +164,6 @@ func (p *Procstat) pidsFromExe() ([]int32, error) {
 			out = append(out, int32(ipid))
 			p.tagmap[int32(ipid)] = map[string]string{
 				"exe": p.Exe,
-				"pid": pid,
 			}
 		} else {
 			outerr = err
@@ -193,7 +191,6 @@ func (p *Procstat) pidsFromPattern() ([]int32, error) {
 			out = append(out, int32(ipid))
 			p.tagmap[int32(ipid)] = map[string]string{
 				"pattern": p.Pattern,
-				"pid":     pid,
 			}
 		} else {
 			outerr = err
@@ -221,7 +218,6 @@ func (p *Procstat) pidsFromUser() ([]int32, error) {
 			out = append(out, int32(ipid))
 			p.tagmap[int32(ipid)] = map[string]string{
 				"user": p.User,
-				"pid":  pid,
 			}
 		} else {
 			outerr = err
@@ -10,6 +10,7 @@ import (

 type SpecProcessor struct {
 	Prefix string
+	pid    int32
 	tags   map[string]string
 	fields map[string]interface{}
 	acc    telegraf.Accumulator
@@ -19,6 +20,7 @@ type SpecProcessor struct {
 func NewSpecProcessor(
 	processName string,
 	prefix string,
+	pid int32,
 	acc telegraf.Accumulator,
 	p *process.Process,
 	tags map[string]string,
@@ -33,6 +35,7 @@ func NewSpecProcessor(
 	}
 	return &SpecProcessor{
 		Prefix: prefix,
+		pid:    pid,
 		tags:   tags,
 		fields: make(map[string]interface{}),
 		acc:    acc,
@@ -45,7 +48,7 @@ func (p *SpecProcessor) pushMetrics() {
 	if p.Prefix != "" {
 		prefix = p.Prefix + "_"
 	}
-	fields := map[string]interface{}{}
+	fields := map[string]interface{}{"pid": p.pid}

 	numThreads, err := p.proc.NumThreads()
 	if err == nil {
@@ -30,6 +30,26 @@ to filter and some tags
   kubeservice = "kube-apiserver"
 ```

+```toml
+# Authorize with a bearer token skipping cert verification
+[[inputs.prometheus]]
+  # An array of urls to scrape metrics from.
+  urls = ["http://my-kube-apiserver:8080/metrics"]
+  bearer_token = '/path/to/bearer/token'
+  insecure_skip_verify = true
+```
+
+```toml
+# Authorize using x509 certs
+[[inputs.prometheus]]
+  # An array of urls to scrape metrics from.
+  urls = ["https://my-kube-apiserver:8080/metrics"]
+
+  ssl_ca = '/path/to/cafile'
+  ssl_cert = '/path/to/certfile'
+  ssl_key = '/path/to/keyfile'
+```
+
 ### Measurements & Fields & Tags:

 Measurements and fields can be anything.
@@ -10,6 +10,8 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"mime"
|
"mime"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
|
||||||
@@ -18,17 +20,9 @@ import (
|
|||||||
"github.com/prometheus/common/expfmt"
|
"github.com/prometheus/common/expfmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PrometheusParser is an object for Parsing incoming metrics.
|
|
||||||
type PrometheusParser struct {
|
|
||||||
// PromFormat
|
|
||||||
PromFormat map[string]string
|
|
||||||
// DefaultTags will be added to every parsed metric
|
|
||||||
// DefaultTags map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse returns a slice of Metrics from a text representation of a
|
// Parse returns a slice of Metrics from a text representation of a
|
||||||
// metrics
|
// metrics
|
||||||
func (p *PrometheusParser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
|
||||||
var metrics []telegraf.Metric
|
var metrics []telegraf.Metric
|
||||||
var parser expfmt.TextParser
|
var parser expfmt.TextParser
|
||||||
// parse even if the buffer begins with a newline
|
// parse even if the buffer begins with a newline
|
@@ -37,91 +31,71 @@ func (p *PrometheusParser) Parse(buf []byte) ([]telegraf.Metric, error) {
 	buffer := bytes.NewBuffer(buf)
 	reader := bufio.NewReader(buffer)

-	// Get format
-	mediatype, params, err := mime.ParseMediaType(p.PromFormat["Content-Type"])
+	mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
 	// Prepare output
 	metricFamilies := make(map[string]*dto.MetricFamily)

 	if err == nil && mediatype == "application/vnd.google.protobuf" &&
 		params["encoding"] == "delimited" &&
 		params["proto"] == "io.prometheus.client.MetricFamily" {
 		for {
-			metricFamily := &dto.MetricFamily{}
-			if _, err = pbutil.ReadDelimited(reader, metricFamily); err != nil {
-				if err == io.EOF {
+			mf := &dto.MetricFamily{}
+			if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
+				if ierr == io.EOF {
 					break
 				}
-				return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", err)
+				return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
 			}
-			metricFamilies[metricFamily.GetName()] = metricFamily
+			metricFamilies[mf.GetName()] = mf
 		}
 	} else {
 		metricFamilies, err = parser.TextToMetricFamilies(reader)
 		if err != nil {
 			return nil, fmt.Errorf("reading text format failed: %s", err)
 		}
-		// read metrics
-		for metricName, mf := range metricFamilies {
-			for _, m := range mf.Metric {
-				// reading tags
-				tags := makeLabels(m)
-				/*
-				for key, value := range p.DefaultTags {
-					tags[key] = value
-				}
-				*/
-				// reading fields
-				fields := make(map[string]interface{})
-				if mf.GetType() == dto.MetricType_SUMMARY {
-					// summary metric
-					fields = makeQuantiles(m)
-					fields["count"] = float64(m.GetHistogram().GetSampleCount())
-					fields["sum"] = float64(m.GetSummary().GetSampleSum())
-				} else if mf.GetType() == dto.MetricType_HISTOGRAM {
-					// histogram metric
-					fields = makeBuckets(m)
-					fields["count"] = float64(m.GetHistogram().GetSampleCount())
-					fields["sum"] = float64(m.GetSummary().GetSampleSum())
-				} else {
-					// standard metric
-					fields = getNameAndValue(m)
-				}
-				// converting to telegraf metric
-				if len(fields) > 0 {
-					metric, err := telegraf.NewMetric(metricName, tags, fields)
-					if err == nil {
-						metrics = append(metrics, metric)
-					}
-				}
-			}
-		}
 	}

+	// read metrics
+	for metricName, mf := range metricFamilies {
+		for _, m := range mf.Metric {
+			// reading tags
+			tags := makeLabels(m)
+			// reading fields
+			fields := make(map[string]interface{})
+			if mf.GetType() == dto.MetricType_SUMMARY {
+				// summary metric
+				fields = makeQuantiles(m)
+				fields["count"] = float64(m.GetSummary().GetSampleCount())
+				fields["sum"] = float64(m.GetSummary().GetSampleSum())
+			} else if mf.GetType() == dto.MetricType_HISTOGRAM {
+				// histogram metric
+				fields = makeBuckets(m)
+				fields["count"] = float64(m.GetHistogram().GetSampleCount())
+				fields["sum"] = float64(m.GetHistogram().GetSampleSum())
+			} else {
+				// standard metric
+				fields = getNameAndValue(m)
+			}
+			// converting to telegraf metric
+			if len(fields) > 0 {
+				var t time.Time
+				if m.TimestampMs != nil && *m.TimestampMs > 0 {
+					t = time.Unix(0, *m.TimestampMs*1000000)
+				} else {
+					t = time.Now()
+				}
+				metric, err := telegraf.NewMetric(metricName, tags, fields, t)
+				if err == nil {
+					metrics = append(metrics, metric)
+				}
+			}
+		}
+	}

 	return metrics, err
 }
-
-// Parse one line
-func (p *PrometheusParser) ParseLine(line string) (telegraf.Metric, error) {
-	metrics, err := p.Parse([]byte(line + "\n"))
-
-	if err != nil {
-		return nil, err
-	}
-
-	if len(metrics) < 1 {
-		return nil, fmt.Errorf(
-			"Can not parse the line: %s, for data format: prometheus", line)
-	}
-
-	return metrics[0], nil
-}
-
-/*
-// Set default tags
-func (p *PrometheusParser) SetDefaultTags(tags map[string]string) {
-	p.DefaultTags = tags
-}
-*/

 // Get Quantiles from summary metric
 func makeQuantiles(m *dto.Metric) map[string]interface{} {
 	fields := make(map[string]interface{})
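The upshot of this rewrite: the parser is now a plain function that takes the scrape's HTTP headers, chooses between the protobuf and text exposition formats on its own, and honors per-sample timestamps. A minimal sketch of a call site, under the assumption that `Parse` is exported from the prometheus input package these hunks modify (the fetch-and-print scaffolding is illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"

	// Assumed import path: the package the hunks above modify.
	"github.com/influxdata/telegraf/plugins/inputs/prometheus"
)

func main() {
	resp, err := http.Get("http://localhost:9100/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	// Passing the response headers lets the parser sniff Content-Type
	// for protobuf vs. text format instead of taking a PromFormat map.
	metrics, err := prometheus.Parse(body, resp.Header)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Fields(), m.Time())
	}
}
```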
@@ -1,6 +1,7 @@
 package prometheus

 import (
+	"net/http"
 	"testing"
 	"time"

@@ -101,10 +102,8 @@ cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
 `

 func TestParseValidPrometheus(t *testing.T) {
-	parser := PrometheusParser{}
-
 	// Gauge value
-	metrics, err := parser.Parse([]byte(validUniqueGauge))
+	metrics, err := Parse([]byte(validUniqueGauge), http.Header{})
 	assert.NoError(t, err)
 	assert.Len(t, metrics, 1)
 	assert.Equal(t, "cadvisor_version_info", metrics[0].Name())
@@ -118,8 +117,7 @@ func TestParseValidPrometheus(t *testing.T) {
 	}, metrics[0].Tags())

 	// Counter value
-	//parser.SetDefaultTags(map[string]string{"mytag": "mytagvalue"})
-	metrics, err = parser.Parse([]byte(validUniqueCounter))
+	metrics, err = Parse([]byte(validUniqueCounter), http.Header{})
 	assert.NoError(t, err)
 	assert.Len(t, metrics, 1)
 	assert.Equal(t, "get_token_fail_count", metrics[0].Name())
@@ -129,8 +127,8 @@ func TestParseValidPrometheus(t *testing.T) {
 	assert.Equal(t, map[string]string{}, metrics[0].Tags())

 	// Summary data
-	//parser.SetDefaultTags(map[string]string{})
-	metrics, err = parser.Parse([]byte(validUniqueSummary))
+	//SetDefaultTags(map[string]string{})
+	metrics, err = Parse([]byte(validUniqueSummary), http.Header{})
 	assert.NoError(t, err)
 	assert.Len(t, metrics, 1)
 	assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name())
@@ -138,20 +136,20 @@ func TestParseValidPrometheus(t *testing.T) {
 		"0.5":   552048.506,
 		"0.9":   5.876804288e+06,
 		"0.99":  5.876804288e+06,
-		"count": 0.0,
+		"count": 9.0,
 		"sum":   1.8909097205e+07,
 	}, metrics[0].Fields())
 	assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags())

 	// histogram data
-	metrics, err = parser.Parse([]byte(validUniqueHistogram))
+	metrics, err = Parse([]byte(validUniqueHistogram), http.Header{})
 	assert.NoError(t, err)
 	assert.Len(t, metrics, 1)
 	assert.Equal(t, "apiserver_request_latencies", metrics[0].Name())
 	assert.Equal(t, map[string]interface{}{
 		"500000": 2000.0,
 		"count":  2025.0,
-		"sum":    0.0,
+		"sum":    1.02726334e+08,
 		"250000": 1997.0,
 		"2e+06":  2012.0,
 		"4e+06":  2017.0,
@@ -165,11 +163,3 @@ func TestParseValidPrometheus(t *testing.T) {
 		metrics[0].Tags())

 }
-
-func TestParseLineInvalidPrometheus(t *testing.T) {
-	parser := PrometheusParser{}
-	metric, err := parser.ParseLine(validUniqueLine)
-	assert.NotNil(t, err)
-	assert.Nil(t, metric)
-
-}
@@ -1,10 +1,10 @@
 package prometheus

 import (
-	"crypto/tls"
 	"errors"
 	"fmt"
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"io/ioutil"
 	"net"
@@ -13,23 +13,37 @@ import (
 	"time"
 )

+const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`
+
 type Prometheus struct {
 	Urls []string

-	// Use SSL but skip chain & host verification
-	InsecureSkipVerify bool
 	// Bearer Token authorization file path
 	BearerToken string `toml:"bearer_token"`
+
+	// Path to CA file
+	SSLCA string `toml:"ssl_ca"`
+	// Path to host cert file
+	SSLCert string `toml:"ssl_cert"`
+	// Path to cert key file
+	SSLKey string `toml:"ssl_key"`
+	// Use SSL but skip chain & host verification
+	InsecureSkipVerify bool
 }

 var sampleConfig = `
   ## An array of urls to scrape metrics from.
   urls = ["http://localhost:9100/metrics"]

-  ## Use SSL but skip chain & host verification
-  # insecure_skip_verify = false
   ## Use bearer token for authorization
   # bearer_token = /path/to/bearer/token
+
+  ## Optional SSL Config
+  # ssl_ca = /path/to/cafile
+  # ssl_cert = /path/to/certfile
+  # ssl_key = /path/to/keyfile
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 `

 func (p *Prometheus) SampleConfig() string {
@@ -74,20 +88,25 @@ var client = &http.Client{
 func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
 	collectDate := time.Now()
 	var req, err = http.NewRequest("GET", url, nil)
-	req.Header = make(http.Header)
+	req.Header.Add("Accept", acceptHeader)
 	var token []byte
 	var resp *http.Response

+	tlsCfg, err := internal.GetTLSConfig(
+		p.SSLCert, p.SSLKey, p.SSLCA, p.InsecureSkipVerify)
+	if err != nil {
+		return err
+	}
+
 	var rt http.RoundTripper = &http.Transport{
 		Dial: (&net.Dialer{
 			Timeout:   5 * time.Second,
 			KeepAlive: 30 * time.Second,
 		}).Dial,
 		TLSHandshakeTimeout: 5 * time.Second,
-		TLSClientConfig: &tls.Config{
-			InsecureSkipVerify: p.InsecureSkipVerify,
-		},
+		TLSClientConfig:       tlsCfg,
 		ResponseHeaderTimeout: time.Duration(3 * time.Second),
+		DisableKeepAlives:     true,
 	}

 	if p.BearerToken != "" {
@@ -112,20 +131,9 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
 		return fmt.Errorf("error reading body: %s", err)
 	}

-	// Headers
-	headers := make(map[string]string)
-	for key, value := range headers {
-		headers[key] = value
-	}
-
-	// Prepare Prometheus parser config
-	promparser := PrometheusParser{
-		PromFormat: headers,
-	}
-
-	metrics, err := promparser.Parse(body)
+	metrics, err := Parse(body, resp.Header)
 	if err != nil {
-		return fmt.Errorf("error getting processing samples for %s: %s",
+		return fmt.Errorf("error reading metrics for %s: %s",
 			url, err)
 	}
 	// Add (or not) collected metrics
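Net effect of the prometheus input hunks: the scraper now advertises protobuf support through an Accept header, builds its TLS settings from the new ssl_ca/ssl_cert/ssl_key options via the internal.GetTLSConfig helper the diff introduces, and hands the response headers straight to the parser. A standalone sketch of that flow; the URL and certificate paths are illustrative:

```go
package main

import (
	"io/ioutil"
	"log"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`

func main() {
	// Same argument order the plugin uses: cert, key, CA, insecure_skip_verify.
	tlsCfg, err := internal.GetTLSConfig(
		"/etc/telegraf/ssl/cert.pem",
		"/etc/telegraf/ssl/key.pem",
		"/etc/telegraf/ssl/ca.pem",
		false,
	)
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:   tlsCfg,
			DisableKeepAlives: true,
		},
	}

	req, err := http.NewRequest("GET", "https://localhost:9100/metrics", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Prefer the protobuf exposition format, fall back to text format 0.0.4.
	req.Header.Add("Accept", acceptHeader)

	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	log.Printf("scraped %d bytes (Content-Type: %s)", len(body), resp.Header.Get("Content-Type"))
}
```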
@@ -43,6 +43,7 @@
 - latest_fork_usec
 - connected_slaves
 - master_repl_offset
+- master_last_io_seconds_ago
 - repl_backlog_active
 - repl_backlog_size
 - repl_backlog_histlen
@@ -57,6 +58,7 @@
 - All measurements have the following tags:
     - port
     - server
+    - replication_role

 ### Example Output:
@@ -12,6 +12,7 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
@@ -25,6 +26,7 @@ var sampleConfig = `
   ## e.g.
   ##  tcp://localhost:6379
   ##  tcp://:password@192.168.99.100
+  ##  unix:///var/run/redis.sock
   ##
   ## If no servers are specified, then localhost is used as the host.
   ## If no port is specified, 6379 is used
@@ -66,6 +68,7 @@ var Tracking = map[string]string{
 	"latest_fork_usec":   "latest_fork_usec",
 	"connected_slaves":   "connected_slaves",
 	"master_repl_offset": "master_repl_offset",
+	"master_last_io_seconds_ago": "master_last_io_seconds_ago",
 	"repl_backlog_active":  "repl_backlog_active",
 	"repl_backlog_size":    "repl_backlog_size",
 	"repl_backlog_histlen": "repl_backlog_histlen",
@@ -74,27 +77,32 @@ var Tracking = map[string]string{
 	"used_cpu_user":          "used_cpu_user",
 	"used_cpu_sys_children":  "used_cpu_sys_children",
 	"used_cpu_user_children": "used_cpu_user_children",
-	"role": "role",
+	"role": "replication_role",
 }

 var ErrProtocolError = errors.New("redis protocol error")

+const defaultPort = "6379"
+
 // Reads stats from all configured servers accumulates stats.
 // Returns one of the errors encountered while gather stats (if any).
 func (r *Redis) Gather(acc telegraf.Accumulator) error {
 	if len(r.Servers) == 0 {
 		url := &url.URL{
-			Host: ":6379",
+			Scheme: "tcp",
+			Host:   ":6379",
 		}
 		r.gatherServer(url, acc)
 		return nil
 	}

 	var wg sync.WaitGroup
-	var outerr error
-
+	errChan := errchan.New(len(r.Servers))
 	for _, serv := range r.Servers {
+		if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") {
+			serv = "tcp://" + serv
+		}
+
 		u, err := url.Parse(serv)
 		if err != nil {
 			return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
@@ -104,29 +112,35 @@ func (r *Redis) Gather(acc telegraf.Accumulator) error {
 			u.Host = serv
 			u.Path = ""
 		}
+		if u.Scheme == "tcp" {
+			_, _, err := net.SplitHostPort(u.Host)
+			if err != nil {
+				u.Host = u.Host + ":" + defaultPort
+			}
+		}
+
 		wg.Add(1)
 		go func(serv string) {
 			defer wg.Done()
-			outerr = r.gatherServer(u, acc)
+			errChan.C <- r.gatherServer(u, acc)
 		}(serv)
 	}

 	wg.Wait()
-
-	return outerr
+	return errChan.Error()
 }

-const defaultPort = "6379"
-
 func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
-	_, _, err := net.SplitHostPort(addr.Host)
-	if err != nil {
-		addr.Host = addr.Host + ":" + defaultPort
-	}
-
-	c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
+	var address string
+	if addr.Scheme == "unix" {
+		address = addr.Path
+	} else {
+		address = addr.Host
+	}
+	c, err := net.DialTimeout(addr.Scheme, address, defaultTimeout)
 	if err != nil {
-		return fmt.Errorf("Unable to connect to redis server '%s': %s", addr.Host, err)
+		return fmt.Errorf("Unable to connect to redis server '%s': %s", address, err)
 	}
 	defer c.Close()

@@ -154,12 +168,17 @@ func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
 	c.Write([]byte("EOF\r\n"))
 	rdr := bufio.NewReader(c)

-	// Setup tags for all redis metrics
-	host, port := "unknown", "unknown"
-	// If there's an error, ignore and use 'unknown' tags
-	host, port, _ = net.SplitHostPort(addr.Host)
-	tags := map[string]string{"server": host, "port": port}
+	var tags map[string]string
+
+	if addr.Scheme == "unix" {
+		tags = map[string]string{"socket": addr.Path}
+	} else {
+		// Setup tags for all redis metrics
+		host, port := "unknown", "unknown"
+		// If there's an error, ignore and use 'unknown' tags
+		host, port, _ = net.SplitHostPort(addr.Host)
+		tags = map[string]string{"server": host, "port": port}
+	}
 	return gatherInfoOutput(rdr, acc, tags)
 }
@@ -208,7 +227,7 @@ func gatherInfoOutput(
 	}

 	if name == "role" {
-		tags["role"] = val
+		tags["replication_role"] = val
 		continue
 	}
@@ -35,7 +35,7 @@ func TestRedis_ParseMetrics(t *testing.T) {
 	err := gatherInfoOutput(rdr, &acc, tags)
 	require.NoError(t, err)

-	tags = map[string]string{"host": "redis.net", "role": "master"}
+	tags = map[string]string{"host": "redis.net", "replication_role": "master"}
 	fields := map[string]interface{}{
 		"uptime":  uint64(238),
 		"clients": uint64(1),
@@ -71,7 +71,7 @@ func TestRedis_ParseMetrics(t *testing.T) {
 		"used_cpu_user_children": float64(0.00),
 		"keyspace_hitrate":       float64(0.50),
 	}
-	keyspaceTags := map[string]string{"host": "redis.net", "role": "master", "database": "db0"}
+	keyspaceTags := map[string]string{"host": "redis.net", "replication_role": "master", "database": "db0"}
 	keyspaceFields := map[string]interface{}{
 		"avg_ttl": uint64(0),
 		"expires": uint64(0),
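Two ideas from the redis changes are worth isolating: fan-out error collection through the new errchan helper (one buffered slot per goroutine, aggregated by Error()), and scheme-aware dialing so unix:// sockets work alongside tcp://. A compact sketch of both patterns; the server list and the gather body are illustrative stand-ins for r.gatherServer:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
	"sync"
	"time"

	"github.com/influxdata/telegraf/internal/errchan"
)

func gather(u *url.URL) error {
	// Dial a unix socket by path, anything else by host:port.
	address := u.Host
	if u.Scheme == "unix" {
		address = u.Path
	}
	c, err := net.DialTimeout(u.Scheme, address, 5*time.Second)
	if err != nil {
		return fmt.Errorf("Unable to connect to redis server '%s': %s", address, err)
	}
	defer c.Close()
	return nil
}

func main() {
	servers := []string{"tcp://localhost:6379", "unix:///var/run/redis.sock"}

	var wg sync.WaitGroup
	ec := errchan.New(len(servers)) // capacity == number of sends, so no goroutine blocks
	for _, s := range servers {
		u, err := url.Parse(s)
		if err != nil {
			panic(err)
		}
		wg.Add(1)
		go func(u *url.URL) {
			defer wg.Done()
			ec.C <- gather(u) // nil results are fine; Error() reports only real errors
		}(u)
	}
	wg.Wait()
	if err := ec.Error(); err != nil {
		fmt.Println(err)
	}
}
```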
@@ -58,6 +58,8 @@ Riak provides one measurement named "riak", with the following fields:
 - vnode_index_writes_total
 - vnode_puts
 - vnode_puts_total
+- read_repairs
+- read_repairs_total

 Measurements of time (such as node_get_fsm_time_mean) are measured in nanoseconds.
@@ -72,5 +74,5 @@ All measurements have the following tags:

 ```
 $ ./telegraf -config telegraf.conf -input-filter riak -test
-> riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i 1455913392622482332
+> riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i,read_repair=0i,read_repairs_total=0i 1455913392622482332
 ```
@@ -75,6 +75,8 @@ type riakStats struct {
 	VnodeIndexWritesTotal int64 `json:"vnode_index_writes_total"`
 	VnodePuts int64 `json:"vnode_puts"`
 	VnodePutsTotal int64 `json:"vnode_puts_total"`
+	ReadRepairs int64 `json:"read_repairs"`
+	ReadRepairsTotal int64 `json:"read_repairs_total"`
 }

 // A sample configuration to only gather stats from localhost, default port.
@@ -187,6 +189,8 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
 		"vnode_index_writes_total": stats.VnodeIndexWritesTotal,
 		"vnode_puts":               stats.VnodePuts,
 		"vnode_puts_total":         stats.VnodePutsTotal,
+		"read_repairs":             stats.ReadRepairs,
+		"read_repairs_total":       stats.ReadRepairsTotal,
 	}

 	// Accumulate the tags and values
@@ -66,6 +66,8 @@ func TestRiak(t *testing.T) {
 		"node_put_fsm_time_99":     int64(84422),
 		"node_put_fsm_time_mean":   int64(10832),
 		"node_put_fsm_time_median": int64(4085),
+		"read_repairs":             int64(2),
+		"read_repairs_total":       int64(7918375),
 		"node_puts":                int64(1155),
 		"node_puts_total":          int64(444895769),
 		"pbc_active":               int64(360),
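The riak input simply decodes the node's /stats JSON into riakStats and copies struct fields into the telegraf field map, so supporting a new statistic is one struct tag plus one map entry. A minimal sketch of the decode step for the two new fields (the payload is trimmed; real /stats responses carry many more keys):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Subset of riakStats with just the fields this change adds.
type riakStats struct {
	ReadRepairs      int64 `json:"read_repairs"`
	ReadRepairsTotal int64 `json:"read_repairs_total"`
}

func main() {
	payload := []byte(`{"read_repairs": 2, "read_repairs_total": 7918375}`)

	var stats riakStats
	if err := json.Unmarshal(payload, &stats); err != nil {
		panic(err)
	}

	// Same shape the plugin hands to acc.AddFields.
	fields := map[string]interface{}{
		"read_repairs":       stats.ReadRepairs,
		"read_repairs_total": stats.ReadRepairsTotal,
	}
	fmt.Println(fields)
}
```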
@@ -1,119 +0,0 @@
-package rollbar_webhooks
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net/http"
-	"sync"
-	"time"
-
-	"github.com/gorilla/mux"
-	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/plugins/inputs"
-)
-
-func init() {
-	inputs.Add("rollbar_webhooks", func() telegraf.Input { return NewRollbarWebhooks() })
-}
-
-type RollbarWebhooks struct {
-	ServiceAddress string
-	// Lock for the struct
-	sync.Mutex
-	// Events buffer to store events between Gather calls
-	events []Event
-}
-
-func NewRollbarWebhooks() *RollbarWebhooks {
-	return &RollbarWebhooks{}
-}
-
-func (rb *RollbarWebhooks) SampleConfig() string {
-	return `
-  ## Address and port to host Webhook listener on
-  service_address = ":1619"
-`
-}
-
-func (rb *RollbarWebhooks) Description() string {
-	return "A Rollbar Webhook Event collector"
-}
-
-func (rb *RollbarWebhooks) Gather(acc telegraf.Accumulator) error {
-	rb.Lock()
-	defer rb.Unlock()
-	for _, event := range rb.events {
-		acc.AddFields("rollbar_webhooks", event.Fields(), event.Tags(), time.Now())
-	}
-	rb.events = make([]Event, 0)
-	return nil
-}
-
-func (rb *RollbarWebhooks) Listen() {
-	r := mux.NewRouter()
-	r.HandleFunc("/", rb.eventHandler).Methods("POST")
-	err := http.ListenAndServe(fmt.Sprintf("%s", rb.ServiceAddress), r)
-	if err != nil {
-		log.Printf("Error starting server: %v", err)
-	}
-}
-
-func (rb *RollbarWebhooks) Start(_ telegraf.Accumulator) error {
-	go rb.Listen()
-	log.Printf("Started the rollbar_webhooks service on %s\n", rb.ServiceAddress)
-	return nil
-}
-
-func (rb *RollbarWebhooks) Stop() {
-	log.Println("Stopping the rbWebhooks service")
-}
-
-func (rb *RollbarWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
-	defer r.Body.Close()
-	data, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	dummyEvent := &DummyEvent{}
-	err = json.Unmarshal(data, dummyEvent)
-	if err != nil {
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	event, err := NewEvent(dummyEvent, data)
-	if err != nil {
-		w.WriteHeader(http.StatusOK)
-		return
-	}
-
-	rb.Lock()
-	rb.events = append(rb.events, event)
-	rb.Unlock()
-
-	w.WriteHeader(http.StatusOK)
-}
-
-func generateEvent(event Event, data []byte) (Event, error) {
-	err := json.Unmarshal(data, event)
-	if err != nil {
-		return nil, err
-	}
-	return event, nil
-}
-
-func NewEvent(dummyEvent *DummyEvent, data []byte) (Event, error) {
-	switch dummyEvent.EventName {
-	case "new_item":
-		return generateEvent(&NewItem{}, data)
-	case "deploy":
-		return generateEvent(&Deploy{}, data)
-	default:
-		return nil, errors.New("Not implemented type: " + dummyEvent.EventName)
-	}
-}
@@ -400,6 +400,8 @@ IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
 DROP TABLE #baseline;
 SELECT
 DB_NAME(mf.database_id) AS database_name ,
+mf.size as database_size_8k_pages,
+mf.max_size as database_max_size_8k_pages,
 size_on_disk_bytes ,
 type_desc as datafile_type,
 GETDATE() AS baselineDate
@@ -435,6 +437,50 @@ FROM #baseline
 WHERE datafile_type = ''ROWS''
 ) as V
 PIVOT(SUM(size_on_disk_bytes) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Log size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Rows max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_max_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''ROWS''
+) as V
+PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
+
+UNION ALL
+
+SELECT measurement = ''Logs max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
+, ' + @ColumnName + ' FROM
+(
+SELECT database_name, database_max_size_8k_pages
+FROM #baseline
+WHERE datafile_type = ''LOG''
+) as V
+PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
 '
 --PRINT @DynamicPivotQuery
 EXEC sp_executesql @DynamicPivotQuery;
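The new measurements report database and log sizes in 8 KB pages, the unit SQL Server's sys.master_files uses for size and max_size. Converting to bytes or megabytes downstream is a single multiplication; a trivial sketch with an illustrative value:

```go
package main

import "fmt"

func main() {
	// sys.master_files reports size in 8 KB pages; convert to MB for dashboards.
	const pageBytes = 8 * 1024
	databaseSize8kPages := int64(131072) // illustrative value from the new measurement

	sizeMB := float64(databaseSize8kPages*pageBytes) / (1024 * 1024)
	fmt.Printf("%d pages = %.0f MB\n", databaseSize8kPages, sizeMB) // 131072 pages = 1024 MB
}
```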
@@ -27,7 +27,8 @@ const (
 	defaultSeparator = "_"
 )

-var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
+var dropwarn = "ERROR: statsd message queue full. " +
+	"We have dropped %d messages so far. " +
 	"You may want to increase allowed_pending_messages in the config\n"

 var prevInstance *Statsd
@@ -65,6 +66,8 @@ type Statsd struct {

 	sync.Mutex
 	wg sync.WaitGroup
+	// drops tracks the number of dropped metrics.
+	drops int

 	// Channel for all incoming statsd packets
 	in chan []byte
@@ -291,7 +294,10 @@ func (s *Statsd) udpListen() error {
 		select {
 		case s.in <- bufCopy:
 		default:
-			log.Printf(dropwarn, string(buf[:n]))
+			s.drops++
+			if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
+				log.Printf(dropwarn, s.drops)
+			}
 		}
 	}
 }
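The logging change swaps a per-packet warning (which could itself flood the log under load) for a counter that reports on the first drop and then once every AllowedPendingMessages drops. A self-contained sketch of the same pattern; the tiny channel and packet loop are illustrative:

```go
package main

import "log"

var dropwarn = "ERROR: statsd message queue full. " +
	"We have dropped %d messages so far. " +
	"You may want to increase allowed_pending_messages in the config\n"

type server struct {
	in                     chan []byte
	drops                  int
	AllowedPendingMessages int
}

// accept mirrors the non-blocking enqueue in udpListen: a full channel
// increments the drop counter instead of blocking the read loop.
func (s *server) accept(packet []byte) {
	select {
	case s.in <- packet:
	default:
		s.drops++
		if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
			log.Printf(dropwarn, s.drops)
		}
	}
}

func main() {
	s := &server{in: make(chan []byte, 2), AllowedPendingMessages: 2}
	for i := 0; i < 6; i++ {
		s.accept([]byte("cpu:1|c"))
	}
	// Drops happen on sends 3-6 (counter 1,2,3,4); warnings fire at 1, 2 and 4.
}
```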
@@ -92,8 +92,8 @@ var diskIoSampleConfig = `
   ## disk partitions.
   ## Setting devices will restrict the stats to the specified devices.
   # devices = ["sda", "sdb"]
-  ## Uncomment the following line if you do not need disk serial numbers.
-  # skip_serial_number = true
+  ## Uncomment the following line if you need disk serial numbers.
+  # skip_serial_number = false
 `

 func (_ *DiskIOStats) SampleConfig() string {
@@ -151,6 +151,6 @@ func init() {
 	})

 	inputs.Add("diskio", func() telegraf.Input {
-		return &DiskIOStats{ps: &systemPS{}}
+		return &DiskIOStats{ps: &systemPS{}, SkipSerialNumber: true}
 	})
 }
@@ -86,9 +86,10 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
 	for file, _ := range g.Match() {
 		tailer, err := tail.TailFile(file,
 			tail.Config{
 				ReOpen:    true,
 				Follow:    true,
 				Location:  &seek,
+				MustExist: true,
 			})
 		if err != nil {
 			errS += err.Error() + " "
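With MustExist set, TailFile returns an error for files that are missing at startup instead of silently waiting for them to appear, which is what lets the surrounding loop collect the message into errS. A standalone sketch using the hpcloud/tail library this plugin wraps (the path is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hpcloud/tail"
)

func main() {
	// MustExist makes a bad path fail fast rather than block forever.
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		ReOpen:    true,
		Follow:    true,
		MustExist: true,
	})
	if err != nil {
		log.Fatal(err) // e.g. the file does not exist yet
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}
```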
Some files were not shown because too many files have changed in this diff.