Compare commits

51 Commits: 1.0.0-beta ... 1.0.0-beta

| SHA1 |
|---|
| 03d02fa67a |
| b58cd78c79 |
| dabb6f5466 |
| 281a4d5500 |
| 1c2965703d |
| 5dc4cce157 |
| 8c7edeb53b |
| 1d9745ee98 |
| 2d6c8767f7 |
| b4a6d9c647 |
| 6afe9ceef1 |
| 704d9ad76c |
| 300d9adbd0 |
| 207c5498e7 |
| d5e7439343 |
| 21add2c799 |
| 4651ab88ad |
| 53f40063b3 |
| 97d92bba67 |
| bfdd665435 |
| 821d3fafa6 |
| 7c9b312cee |
| 69ab8a645c |
| 7b550c11cb |
| bb4f18ca88 |
| 6efe91ea9c |
| 5f0a63f554 |
| d14e7536ab |
| c873937356 |
| e1c3800cd9 |
| c046232425 |
| 2d4864e126 |
| 048448aa93 |
| 755b2ec953 |
| f62c493c77 |
| a6365a6086 |
| f7e057ec55 |
| 30cc00d11b |
| d641c42029 |
| 9c2ca805da |
| b0484d8a0c |
| 5ddd61d2e2 |
| 50ea7f4a9d |
| b18134a4e3 |
| 7825df4771 |
| d6951dacdc |
| e603825e37 |
| e3448153e1 |
| 25848c545a |
| 3098564896 |
| 4b6f9b93dd |
.gitignore (vendored, 1 change)

@@ -1,3 +1,4 @@
 build
 tivan
 .vagrant
+/telegraf
CHANGELOG.md (70 changes)

@@ -1,9 +1,73 @@
-## v1.0
+## v1.0 [unreleased]
+
+## v1.0 beta 3 [2016-07-18]
+
+### Release Notes
+
+**Breaking Change**: Aerospike main server node measurements have been renamed
+aerospike_node. Aerospike namespace measurements have been renamed to
+aerospike_namespace. They will also now be tagged with the node_name
+that they correspond to. This has been done to differentiate measurements
+that pertain to node vs. namespace statistics.
+
+**Breaking Change**: users of github_webhooks must change to the new
+`[[inputs.webhooks]]` plugin.
+
+This means that the default github_webhooks config:
+
+```
+# A Github Webhook Event collector
+[[inputs.github_webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+```
+
+should now look like:
+
+```
+# A Webhooks Event collector
+[[inputs.webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+
+  [inputs.webhooks.github]
+    path = "/"
+```
+
+### Features
+
+- [#1289](https://github.com/influxdata/telegraf/pull/1289): webhooks input plugin. Thanks @francois2metz and @cduez!
+- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar webhook plugin.
+- [#1408](https://github.com/influxdata/telegraf/pull/1408): mandrill webhook plugin.
+- [#1402](https://github.com/influxdata/telegraf/pull/1402): docker-machine/boot2docker no longer required for unit tests.
+- [#1350](https://github.com/influxdata/telegraf/pull/1350): cgroup input plugin.
+- [#1369](https://github.com/influxdata/telegraf/pull/1369): Add input plugin for consuming metrics from NSQD.
+- [#1369](https://github.com/influxdata/telegraf/pull/1480): add ability to read redis from a socket.
+- [#1387](https://github.com/influxdata/telegraf/pull/1387): **Breaking Change** - Redis `role` tag renamed to `replication_role` to avoid global_tags override
+- [#1437](https://github.com/influxdata/telegraf/pull/1437): Fetching Galera status metrics in MySQL
+- [#1500](https://github.com/influxdata/telegraf/pull/1500): Aerospike plugin refactored to use official client lib.
+- [#1434](https://github.com/influxdata/telegraf/pull/1434): Add measurement name arg to logparser plugin.
+- [#1479](https://github.com/influxdata/telegraf/pull/1479): logparser: change resp_code from a field to a tag.
+
+### Bugfixes
+
+- [#1472](https://github.com/influxdata/telegraf/pull/1472): diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality.
+- [#1426](https://github.com/influxdata/telegraf/pull/1426): nil metrics panic fix.
+- [#1384](https://github.com/influxdata/telegraf/pull/1384): Fix datarace in apache input plugin.
+- [#1399](https://github.com/influxdata/telegraf/issues/1399): Add `read_repairs` statistics to riak plugin.
+- [#1405](https://github.com/influxdata/telegraf/issues/1405): Fix memory/connection leak in prometheus input plugin.
+- [#1378](https://github.com/influxdata/telegraf/issues/1378): Trim BOM from config file for Windows support.
+- [#1339](https://github.com/influxdata/telegraf/issues/1339): Prometheus client output panic on service reload.
+- [#1461](https://github.com/influxdata/telegraf/pull/1461): Prometheus parser, protobuf format header fix.
+- [#1334](https://github.com/influxdata/telegraf/issues/1334): Prometheus output, metric refresh and caching fixes.
+- [#1432](https://github.com/influxdata/telegraf/issues/1432): Panic fix for multiple graphite outputs under very high load.
+- [#1412](https://github.com/influxdata/telegraf/pull/1412): Instrumental output has better reconnect behavior
+- [#1460](https://github.com/influxdata/telegraf/issues/1460): Remove PID from procstat plugin to fix cardinality issues.
+- [#1427](https://github.com/influxdata/telegraf/issues/1427): Cassandra input: version 2.x "column family" fix.
+- [#1463](https://github.com/influxdata/telegraf/issues/1463): Shared WaitGroup in Exec plugin
+- [#1436](https://github.com/influxdata/telegraf/issues/1436): logparser: honor modifiers in "pattern" config.
+- [#1418](https://github.com/influxdata/telegraf/issues/1418): logparser: error and exit on file permissions/missing errors.
+
 ## v1.0 beta 2 [2016-06-21]

 ### Features

@@ -12,6 +76,7 @@
 - [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
+- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
 - [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
 - [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL

 ### Bugfixes

@@ -21,6 +86,7 @@
 - [#1335](https://github.com/influxdata/telegraf/issues/1335): Fix overall ping timeout to be calculated based on per-ping timeout.
 - [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
 - [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
+- [#1396](https://github.com/influxdata/telegraf/pull/1396): Prometheus input plugin now supports x509 certs authentication

 ## v1.0 beta 1 [2016-06-07]

@@ -46,11 +112,11 @@ in conjunction with wildcard dimension values as it will control the amount of
 time before a new metric is included by the plugin.

 ### Features

 - [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
 - [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
 - [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
 - [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
 - [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar input plugin. Thanks @francois2metz and @cduez!
 - [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
 - [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
 - [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
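To sanity-check the github_webhooks to webhooks migration described above, a hypothetical smoke test in Go (the payload and port are illustrative; the path comes from the migrated `[inputs.webhooks.github]` config, and real GitHub deliveries also carry event headers that this sketch omits):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Assumes telegraf is running with service_address = ":1618" and
	// [inputs.webhooks.github] path = "/" as in the migrated config above.
	payload := bytes.NewBufferString(`{"zen": "illustrative event"}`)
	resp, err := http.Post("http://localhost:1618/", "application/json", payload)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("webhook listener responded:", resp.Status)
}
```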
@@ -114,7 +114,7 @@ creating the `Parser` object.
 You should also add the following to your SampleConfig() return:

 ```toml
   ## Data format to consume.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

@@ -244,7 +244,7 @@ instantiating and creating the `Serializer` object.
 You should also add the following to your SampleConfig() return:

 ```toml
   ## Data format to output.
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md

@@ -290,10 +290,6 @@ To execute Telegraf tests follow these simple steps:
   instructions
 - execute `make test`

-**OSX users**: you will need to install `boot2docker` or `docker-machine`.
-The Makefile will assume that you have a `docker-machine` box called `default` to
-get the IP address.
-
 ### Unit test troubleshooting

 Try cleaning up your test environment by executing `make docker-kill` and
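For context, a sketch of what embedding that data-format block in a plugin's SampleConfig() might look like (the surrounding plugin type and its options are invented purely for illustration):

```go
// MyInput is a hypothetical input plugin, used only to show where the
// data-format block from the docs above would be embedded.
type MyInput struct {
	Commands   []string
	DataFormat string
}

func (s *MyInput) SampleConfig() string {
	return `
  ## Commands array
  commands = ["/usr/bin/mycollector --foo=bar"]

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`
}
```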
Godeps (3 changes)

@@ -1,5 +1,6 @@
 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
+github.com/aerospike/aerospike-client-go 45863b7fd8640dc12f7fdd397104d97e1986f25a
 github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4

@@ -45,11 +46,13 @@ github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
 github.com/shirou/gopsutil 586bb697f3ec9f8ec08ffefe18f521a64534037c
 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
+github.com/sparrc/aerospike-client-go d4bb42d2c2d39dae68e054116f4538af189e05d5
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
 github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
 github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
+github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
 golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
 golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
Makefile (18 changes)

@@ -1,4 +1,3 @@
-UNAME := $(shell sh -c 'uname')
 VERSION := $(shell sh -c 'git describe --always --tags')
 ifdef GOBIN
     PATH := $(GOBIN):$(PATH)

@@ -26,10 +25,6 @@ build-for-docker:
 		"-s -X main.version=$(VERSION)" \
 		./cmd/telegraf/telegraf.go

-# Build with race detector
-dev: prepare
-	go build -race -ldflags "-X main.version=$(VERSION)" ./...
-
 # run package script
 package:
 	./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload

@@ -46,26 +41,17 @@ prepare-windows:

 # Run all docker containers necessary for unit tests
 docker-run:
-ifeq ($(UNAME), Darwin)
-	docker run --name kafka \
-		-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
-		-e ADVERTISED_PORT=9092 \
-		-p "2181:2181" -p "9092:9092" \
-		-d spotify/kafka
-endif
-ifeq ($(UNAME), Linux)
 	docker run --name kafka \
 		-e ADVERTISED_HOST=localhost \
 		-e ADVERTISED_PORT=9092 \
 		-p "2181:2181" -p "9092:9092" \
 		-d spotify/kafka
-endif
 	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
 	docker run --name memcached -p "11211:11211" -d memcached
 	docker run --name postgres -p "5432:5432" -d postgres
 	docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
 	docker run --name redis -p "6379:6379" -d redis
-	docker run --name aerospike -p "3000:3000" -d aerospike
+	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
 	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
 	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
 	docker run --name riemann -p "5555:5555" -d blalor/riemann

@@ -78,7 +64,7 @@ docker-run-circle:
 		-e ADVERTISED_PORT=9092 \
 		-p "2181:2181" -p "9092:9092" \
 		-d spotify/kafka
-	docker run --name aerospike -p "3000:3000" -d aerospike
+	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
 	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
 	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
 	docker run --name riemann -p "5555:5555" -d blalor/riemann
README.md (25 changes)

@@ -20,12 +20,12 @@ new plugins.
 ### Linux deb and rpm Packages:

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta2_amd64.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta2.x86_64.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_amd64.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.x86_64.rpm

 Latest (arm):
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta2_armhf.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta2.armhf.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_armhf.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.armhf.rpm

 ##### Package Instructions:

@@ -46,14 +46,14 @@ to use this repo to install & update telegraf.
 ### Linux tarballs:

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_amd64.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_i386.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_armhf.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_i386.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_armhf.tar.gz

 ### FreeBSD tarball:

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_freebsd_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_freebsd_amd64.tar.gz

 ### Ansible Role:

@@ -69,7 +69,7 @@ brew install telegraf
 ### Windows Binaries (EXPERIMENTAL)

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_windows_amd64.zip
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_windows_amd64.zip

 ### From Source:

@@ -217,8 +217,11 @@ Telegraf can also collect metrics via the following service plugins:
 * [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
 * [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
 * [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
-* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)
-* [rollbar_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rollbar_webhooks)
+* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
+  * [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
+  * [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
+  * [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
 * [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)

 We'll be adding support for many more over the coming months. Read on if you
 want to add support for another service or third-party API.
@@ -268,13 +268,33 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
 			internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
 			a.flush()
 		case m := <-metricC:
-			for _, o := range a.Config.Outputs {
-				o.AddMetric(m)
+			for i, o := range a.Config.Outputs {
+				if i == len(a.Config.Outputs)-1 {
+					o.AddMetric(m)
+				} else {
+					o.AddMetric(copyMetric(m))
+				}
 			}
 		}
 	}
 }

+func copyMetric(m telegraf.Metric) telegraf.Metric {
+	t := time.Time(m.Time())
+
+	tags := make(map[string]string)
+	fields := make(map[string]interface{})
+	for k, v := range m.Tags() {
+		tags[k] = v
+	}
+	for k, v := range m.Fields() {
+		fields[k] = v
+	}
+
+	out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
+	return out
+}
+
 // Run runs the agent daemon, gathering every Interval
 func (a *Agent) Run(shutdown chan struct{}) error {
 	var wg sync.WaitGroup
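The reason for the copy, shown in isolation (a toy sketch, not Telegraf code): outputs holding the same metric would share mutable tag and field maps, so every output except the last receives an independent copy, and the last one can safely take ownership of the original:

```go
package main

import "fmt"

// copyTags returns an independent copy of a tag map, mirroring what
// copyMetric above does for a metric's tags and fields.
func copyTags(m map[string]string) map[string]string {
	out := make(map[string]string, len(m))
	for k, v := range m {
		out[k] = v
	}
	return out
}

func main() {
	outputs := []string{"influxdb", "graphite", "kafka"}
	tags := map[string]string{"host": "server01"}

	for i, o := range outputs {
		t := tags
		// All but the last output get their own copy; handing the
		// original to the last one saves one allocation per metric.
		if i != len(outputs)-1 {
			t = copyTags(tags)
		}
		fmt.Println(o, t)
	}
}
```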
@@ -197,6 +197,8 @@
 # # Configuration for Graphite server to send metrics to
 # [[outputs.graphite]]
 #   ## TCP endpoint for your graphite instance.
+#   ## If multiple endpoints are configured, the output will be load balanced.
+#   ## Only one of the endpoints will be written to with each iteration.
 #   servers = ["localhost:2003"]
 #   ## Prefix metrics name
 #   prefix = ""

@@ -526,6 +528,19 @@
 #   socket_suffix = "asok"


+# # Read specific statistics per cgroup
+# [[inputs.cgroup]]
+#   ## Directories in which to look for files, globs are supported.
+#   # paths = [
+#   #   "/cgroup/memory",
+#   #   "/cgroup/memory/child1",
+#   #   "/cgroup/memory/child2/*",
+#   # ]
+#   ## cgroup stat fields, as file names, globs are supported.
+#   ## these file names are appended to each path from above.
+#   # files = ["memory.*usage*", "memory.limit_in_bytes"]
+
+
 # # Pull Metric Statistics from Amazon CloudWatch
 # [[inputs.cloudwatch]]
 #   ## Amazon Region

@@ -679,6 +694,13 @@
 #
 #   ## set cluster_health to true when you want to also obtain cluster level stats
 #   cluster_health = false
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false


 # # Read metrics from one or more commands that can output to stdout

@@ -1259,10 +1281,15 @@
 #   ## An array of urls to scrape metrics from.
 #   urls = ["http://localhost:9100/metrics"]
 #
-#   ## Use SSL but skip chain & host verification
-#   # insecure_skip_verify = false
+#   ## Use bearer token for authorization
+#   # bearer_token = /path/to/bearer/token
+#
+#   ## Optional SSL Config
+#   # ssl_ca = /path/to/cafile
+#   # ssl_cert = /path/to/certfile
+#   # ssl_key = /path/to/keyfile
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false


 # # Reads last_run_summary.yaml file and converts to measurements

@@ -1490,12 +1517,6 @@
 #                            SERVICE INPUT PLUGINS                            #
 ###############################################################################

-# # A Github Webhook Event collector
-# [[inputs.github_webhooks]]
-#   ## Address and port to host Webhook listener on
-#   service_address = ":1618"
-
-
 # # Read metrics from Kafka topic(s)
 # [[inputs.kafka_consumer]]
 #   ## topic(s) to consume

@@ -1601,12 +1622,6 @@
 #   data_format = "influx"


-# # A Rollbar Webhook Event collector
-# [[inputs.rollbar_webhooks]]
-#   ## Address and port to host Webhook listener on
-#   service_address = ":1619"
-
-
 # # Statsd Server
 # [[inputs.statsd]]
 #   ## Address and port to host UDP listener on

@@ -1701,3 +1716,15 @@
 #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
 #   data_format = "influx"


+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+#   ## Address and port to host Webhook listener on
+#   service_address = ":1619"
+#
+#   [inputs.webhooks.github]
+#     path = "/github"
+#
+#   [inputs.webhooks.rollbar]
+#     path = "/rollbar"
@@ -539,6 +539,13 @@ func (c *Config) LoadConfig(path string) error {
 	return nil
 }

+// trimBOM trims the Byte-Order-Marks from the beginning of the file.
+// this is for Windows compatibility only.
+// see https://github.com/influxdata/telegraf/issues/1378
+func trimBOM(f []byte) []byte {
+	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
+}
+
 // parseFile loads a TOML configuration from a provided path and
 // returns the AST produced from the TOML parser. When loading the file, it
 // will find environment variables and replace them.

@@ -547,6 +554,8 @@ func parseFile(fpath string) (*ast.Table, error) {
 	if err != nil {
 		return nil, err
 	}
+	// ugh windows why
+	contents = trimBOM(contents)

 	env_vars := envVarRe.FindAll(contents, -1)
 	for _, env_var := range env_vars {
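A minimal standalone illustration of what trimBOM handles (not Telegraf code): Windows editors commonly prepend the UTF-8 byte-order mark EF BB BF, which the TOML parser would otherwise reject:

```go
package main

import (
	"bytes"
	"fmt"
)

// trimBOM strips a leading UTF-8 byte-order mark, if present.
func trimBOM(f []byte) []byte {
	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

func main() {
	raw := []byte("\xef\xbb\xbf[agent]\n  interval = \"10s\"\n")
	fmt.Printf("%q\n", trimBOM(raw)) // "[agent]\n  interval = \"10s\"\n"
}
```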
@@ -133,8 +133,8 @@ func GetTLSConfig(
 	cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
 	if err != nil {
 		return nil, errors.New(fmt.Sprintf(
-			"Could not load TLS client key/certificate: %s",
-			err))
+			"Could not load TLS client key/certificate from %s:%s: %s",
+			SSLKey, SSLCert, err))
 	}

 	t.Certificates = []tls.Certificate{cert}
@@ -138,7 +138,7 @@ func (ro *RunningOutput) Write() error {
 }

 func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
-	if len(metrics) == 0 {
+	if metrics == nil || len(metrics) == 0 {
 		return nil
 	}
 	start := time.Now()
@@ -1,104 +1,19 @@
 package aerospike

 import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/plugins/inputs"
 	"net"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal/errchan"
+	"github.com/influxdata/telegraf/plugins/inputs"
+
+	as "github.com/sparrc/aerospike-client-go"
 )

-const (
-	MSG_HEADER_SIZE = 8
-	MSG_TYPE        = 1 // Info is 1
-	MSG_VERSION     = 2
-)
-
-var (
-	STATISTICS_COMMAND = []byte("statistics\n")
-	NAMESPACES_COMMAND = []byte("namespaces\n")
-)
-
-type aerospikeMessageHeader struct {
-	Version uint8
-	Type    uint8
-	DataLen [6]byte
-}
-
-type aerospikeMessage struct {
-	aerospikeMessageHeader
-	Data []byte
-}
-
-// Taken from aerospike-client-go/types/message.go
-func (msg *aerospikeMessage) Serialize() []byte {
-	msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
-	buf := bytes.NewBuffer([]byte{})
-	binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
-	binary.Write(buf, binary.BigEndian, msg.Data[:])
-	return buf.Bytes()
-}
-
-type aerospikeInfoCommand struct {
-	msg *aerospikeMessage
-}
-
-// Taken from aerospike-client-go/info.go
-func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
-	responses := make(map[string]string)
-	offset := int64(0)
-	begin := int64(0)
-
-	dataLen := int64(len(nfo.msg.Data))
-
-	// Create reusable StringBuilder for performance.
-	for offset < dataLen {
-		b := nfo.msg.Data[offset]
-
-		if b == '\t' {
-			name := nfo.msg.Data[begin:offset]
-			offset++
-			begin = offset
-
-			// Parse field value.
-			for offset < dataLen {
-				if nfo.msg.Data[offset] == '\n' {
-					break
-				}
-				offset++
-			}
-
-			if offset > begin {
-				value := nfo.msg.Data[begin:offset]
-				responses[string(name)] = string(value)
-			} else {
-				responses[string(name)] = ""
-			}
-			offset++
-			begin = offset
-		} else if b == '\n' {
-			if offset > begin {
-				name := nfo.msg.Data[begin:offset]
-				responses[string(name)] = ""
-			}
-			offset++
-			begin = offset
-		} else {
-			offset++
-		}
-	}
-
-	if offset > begin {
-		name := nfo.msg.Data[begin:offset]
-		responses[string(name)] = ""
-	}
-	return responses, nil
-}
-
 type Aerospike struct {
 	Servers []string
 }

@@ -115,7 +30,7 @@ func (a *Aerospike) SampleConfig() string {
 }

 func (a *Aerospike) Description() string {
-	return "Read stats from an aerospike server"
+	return "Read stats from aerospike server(s)"
 }

 func (a *Aerospike) Gather(acc telegraf.Accumulator) error {

@@ -124,214 +39,90 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
 	}

 	var wg sync.WaitGroup
-
-	var outerr error
-
+	errChan := errchan.New(len(a.Servers))
+	wg.Add(len(a.Servers))
 	for _, server := range a.Servers {
-		wg.Add(1)
-		go func(server string) {
+		go func(serv string) {
 			defer wg.Done()
-			outerr = a.gatherServer(server, acc)
+			errChan.C <- a.gatherServer(serv, acc)
 		}(server)
 	}

 	wg.Wait()
-	return outerr
+	return errChan.Error()
 }

-func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
-	aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
+func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
+	host, port, err := net.SplitHostPort(hostport)
 	if err != nil {
-		return fmt.Errorf("Aerospike info failed: %s", err)
+		return err
 	}
-	readAerospikeStats(aerospikeInfo, acc, host, "")
-	namespaces, err := getList(NAMESPACES_COMMAND, host)
+
+	iport, err := strconv.Atoi(port)
 	if err != nil {
-		return fmt.Errorf("Aerospike namespace list failed: %s", err)
+		iport = 3000
 	}
-	for ix := range namespaces {
-		nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
-		if err != nil {
-			return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
-		}
-		readAerospikeStats(nsInfo, acc, host, namespaces[ix])
+
+	c, err := as.NewClient(host, iport)
+	if err != nil {
+		return err
+	}
+	defer c.Close()
+
+	nodes := c.GetNodes()
+	for _, n := range nodes {
+		tags := map[string]string{
+			"node_name":      n.GetName(),
+			"aerospike_host": hostport,
+		}
+		fields := make(map[string]interface{})
+		stats, err := as.RequestNodeStats(n)
+		if err != nil {
+			return err
+		}
+		for k, v := range stats {
+			if iv, err := strconv.ParseInt(v, 10, 64); err == nil {
+				fields[strings.Replace(k, "-", "_", -1)] = iv
+			}
+		}
+		acc.AddFields("aerospike_node", fields, tags, time.Now())
+
+		info, err := as.RequestNodeInfo(n, "namespaces")
+		if err != nil {
+			return err
+		}
+		namespaces := strings.Split(info["namespaces"], ";")
+
+		for _, namespace := range namespaces {
+			nTags := copyTags(tags)
+			nTags["namespace"] = namespace
+			nFields := make(map[string]interface{})
+			info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
+			if err != nil {
+				continue
+			}
+			stats := strings.Split(info["namespace/"+namespace], ";")
+			for _, stat := range stats {
+				parts := strings.Split(stat, "=")
+				if len(parts) < 2 {
+					continue
+				}
+				if iv, err := strconv.ParseInt(parts[1], 10, 64); err == nil {
+					nFields[strings.Replace(parts[0], "-", "_", -1)] = iv
+				}
+			}
+			acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
+		}
 	}
 	return nil
 }

-func getMap(key []byte, host string) (map[string]string, error) {
-	data, err := get(key, host)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to get data: %s", err)
-	}
-	parsed, err := unmarshalMapInfo(data, string(key))
-	if err != nil {
-		return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
-	}
-
-	return parsed, nil
-}
-
-func getList(key []byte, host string) ([]string, error) {
-	data, err := get(key, host)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to get data: %s", err)
-	}
-	parsed, err := unmarshalListInfo(data, string(key))
-	if err != nil {
-		return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
-	}
-
-	return parsed, nil
-}
-
-func get(key []byte, host string) (map[string]string, error) {
-	var err error
-	var data map[string]string
-
-	asInfo := &aerospikeInfoCommand{
-		msg: &aerospikeMessage{
-			aerospikeMessageHeader: aerospikeMessageHeader{
-				Version: uint8(MSG_VERSION),
-				Type:    uint8(MSG_TYPE),
-				DataLen: msgLenToBytes(int64(len(key))),
-			},
-			Data: key,
-		},
-	}
-
-	cmd := asInfo.msg.Serialize()
-	addr, err := net.ResolveTCPAddr("tcp", host)
-	if err != nil {
-		return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
-	}
-
-	conn, err := net.DialTCP("tcp", nil, addr)
-	if err != nil {
-		return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
-	}
-	defer conn.Close()
-
-	_, err = conn.Write(cmd)
-	if err != nil {
-		return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
-	}
-
-	msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
-	_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
-	if err != nil {
-		return data, fmt.Errorf("Failed to read header: %s", err)
-	}
-	err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
-	if err != nil {
-		return data, fmt.Errorf("Failed to unmarshal header: %s", err)
-	}
-
-	msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)
-
-	if int64(len(asInfo.msg.Data)) != msgLen {
-		asInfo.msg.Data = make([]byte, msgLen)
-	}
-
-	_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
-	if err != nil {
-		return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
-	}
-
-	data, err = asInfo.parseMultiResponse()
-	if err != nil {
-		return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
-	}
-
-	return data, err
-}
-
-func readAerospikeStats(
-	stats map[string]string,
-	acc telegraf.Accumulator,
-	host string,
-	namespace string,
-) {
-	fields := make(map[string]interface{})
-	tags := map[string]string{
-		"aerospike_host": host,
-		"namespace":      "_service",
-	}
-
-	if namespace != "" {
-		tags["namespace"] = namespace
-	}
-	for key, value := range stats {
-		// We are going to ignore all string based keys
-		val, err := strconv.ParseInt(value, 10, 64)
-		if err == nil {
-			if strings.Contains(key, "-") {
-				key = strings.Replace(key, "-", "_", -1)
-			}
-			fields[key] = val
-		}
-	}
-	acc.AddFields("aerospike", fields, tags)
-}
-
-func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
-	key = strings.TrimSuffix(key, "\n")
-	res := map[string]string{}
-
-	v, exists := infoMap[key]
-	if !exists {
-		return res, fmt.Errorf("Key '%s' missing from info", key)
-	}
-
-	values := strings.Split(v, ";")
-	for i := range values {
-		kv := strings.Split(values[i], "=")
-		if len(kv) > 1 {
-			res[kv[0]] = kv[1]
-		}
-	}
-
-	return res, nil
-}
-
-func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
-	key = strings.TrimSuffix(key, "\n")
-
-	v, exists := infoMap[key]
-	if !exists {
-		return []string{}, fmt.Errorf("Key '%s' missing from info", key)
-	}
-
-	values := strings.Split(v, ";")
-	return values, nil
-}
-
-func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
-	var r int
-	for total < length {
-		r, err = c.Read(buffer[total:length])
-		total += r
-		if err != nil {
-			break
-		}
-	}
-	return
-}
-
-// Taken from aerospike-client-go/types/message.go
-func msgLenToBytes(DataLen int64) [6]byte {
-	b := make([]byte, 8)
-	binary.BigEndian.PutUint64(b, uint64(DataLen))
-	res := [6]byte{}
-	copy(res[:], b[2:])
-	return res
-}
-
-// Taken from aerospike-client-go/types/message.go
-func msgLenFromBytes(buf [6]byte) int64 {
-	nbytes := append([]byte{0, 0}, buf[:]...)
-	DataLen := binary.BigEndian.Uint64(nbytes)
-	return int64(DataLen)
+func copyTags(m map[string]string) map[string]string {
+	out := make(map[string]string)
+	for k, v := range m {
+		out[k] = v
+	}
+	return out
 }

 func init() {
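The internal/errchan helper itself is not part of this diff; below is a sketch of what an errchan-style helper could look like, inferred only from the `errChan.C` and `errChan.Error()` usage in the Gather rewrite above (the names and behavior here are assumptions, not the actual Telegraf implementation):

```go
package errchan

import (
	"errors"
	"strings"
)

// ErrChan collects one error result per goroutine.
type ErrChan struct {
	C chan error
}

// New returns an ErrChan buffered for n results, so senders never block.
func New(n int) *ErrChan {
	return &ErrChan{C: make(chan error, n)}
}

// Error drains the channel and folds any non-nil errors into one error.
// It assumes all senders have finished (e.g. after a WaitGroup Wait).
func (e *ErrChan) Error() error {
	close(e.C)
	var msgs []string
	for err := range e.C {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, ", "))
}
```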
@@ -1,7 +1,6 @@
 package aerospike

 import (
-	"reflect"
 	"testing"

 	"github.com/influxdata/telegraf/testutil"

@@ -23,96 +22,29 @@ func TestAerospikeStatistics(t *testing.T) {
 	err := a.Gather(&acc)
 	require.NoError(t, err)

-	// Only use a few of the metrics
-	asMetrics := []string{
-		"transactions",
-		"stat_write_errs",
-		"stat_read_reqs",
-		"stat_write_reqs",
-	}
-
-	for _, metric := range asMetrics {
-		assert.True(t, acc.HasIntField("aerospike", metric), metric)
-	}
-
+	assert.True(t, acc.HasMeasurement("aerospike_node"))
+	assert.True(t, acc.HasMeasurement("aerospike_namespace"))
+	assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
 }

-func TestAerospikeMsgLenFromToBytes(t *testing.T) {
-	var i int64 = 8
-	assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
-}
-
-func TestReadAerospikeStatsNoNamespace(t *testing.T) {
-	// Also test for re-writing
-	var acc testutil.Accumulator
-	stats := map[string]string{
-		"stat-write-errs": "12345",
-		"stat_read_reqs":  "12345",
-	}
-	readAerospikeStats(stats, &acc, "host1", "")
-
-	fields := map[string]interface{}{
-		"stat_write_errs": int64(12345),
-		"stat_read_reqs":  int64(12345),
-	}
-	tags := map[string]string{
-		"aerospike_host": "host1",
-		"namespace":      "_service",
-	}
-	acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
-}
-
-func TestReadAerospikeStatsNamespace(t *testing.T) {
-	var acc testutil.Accumulator
-	stats := map[string]string{
-		"stat_write_errs": "12345",
-		"stat_read_reqs":  "12345",
-	}
-	readAerospikeStats(stats, &acc, "host1", "test")
-
-	fields := map[string]interface{}{
-		"stat_write_errs": int64(12345),
-		"stat_read_reqs":  int64(12345),
-	}
-	tags := map[string]string{
-		"aerospike_host": "host1",
-		"namespace":      "test",
-	}
-	acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
-}
-
-func TestAerospikeUnmarshalList(t *testing.T) {
-	i := map[string]string{
-		"test": "one;two;three",
-	}
-
-	expected := []string{"one", "two", "three"}
-
-	list, err := unmarshalListInfo(i, "test2")
-	assert.True(t, err != nil)
-
-	list, err = unmarshalListInfo(i, "test")
-	assert.True(t, err == nil)
-	equal := true
-	for ix := range expected {
-		if list[ix] != expected[ix] {
-			equal = false
-			break
-		}
-	}
-	assert.True(t, equal)
-}
-
-func TestAerospikeUnmarshalMap(t *testing.T) {
-	i := map[string]string{
-		"test": "key1=value1;key2=value2",
-	}
-
-	expected := map[string]string{
-		"key1": "value1",
-		"key2": "value2",
-	}
-	m, err := unmarshalMapInfo(i, "test")
-	assert.True(t, err == nil)
-	assert.True(t, reflect.DeepEqual(m, expected))
+func TestAerospikeStatisticsPartialErr(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	a := &Aerospike{
+		Servers: []string{
+			testutil.GetLocalHost() + ":3000",
+			testutil.GetLocalHost() + ":9999",
+		},
+	}
+
+	var acc testutil.Accumulator
+	err := a.Gather(&acc)
+	require.Error(t, err)
+
+	assert.True(t, acc.HasMeasurement("aerospike_node"))
+	assert.True(t, acc.HasMeasurement("aerospike_namespace"))
+	assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
 }
@@ -6,6 +6,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
+	_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
 	_ "github.com/influxdata/telegraf/plugins/inputs/chrony"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
 	_ "github.com/influxdata/telegraf/plugins/inputs/conntrack"

@@ -19,7 +20,6 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
 	_ "github.com/influxdata/telegraf/plugins/inputs/exec"
 	_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
-	_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
 	_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
 	_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
 	_ "github.com/influxdata/telegraf/plugins/inputs/http_response"

@@ -41,6 +41,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
+	_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
 	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"

@@ -57,7 +58,6 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/redis"
 	_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/riak"
-	_ "github.com/influxdata/telegraf/plugins/inputs/rollbar_webhooks"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
 	_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"

@@ -70,6 +70,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
 	_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
+	_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
 	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
@@ -8,7 +8,6 @@ import (
 	"net/url"
 	"strconv"
 	"strings"
-	"sync"
 	"time"

 	"github.com/influxdata/telegraf"

@@ -38,8 +37,8 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
 		n.Urls = []string{"http://localhost/server-status?auto"}
 	}

-	var wg sync.WaitGroup
 	var outerr error
+	var errch = make(chan error)

 	for _, u := range n.Urls {
 		addr, err := url.Parse(u)

@@ -47,14 +46,17 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
 			return fmt.Errorf("Unable to parse address '%s': %s", u, err)
 		}

-		wg.Add(1)
 		go func(addr *url.URL) {
-			defer wg.Done()
-			outerr = n.gatherUrl(addr, acc)
+			errch <- n.gatherUrl(addr, acc)
 		}(addr)
 	}

-	wg.Wait()
+	// Drain channel, waiting for all requests to finish and save last error.
+	for range n.Urls {
+		if err := <-errch; err != nil {
+			outerr = err
+		}
+	}
+
 	return outerr
 }
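The same wait-by-draining pattern in isolation (a sketch, not Telegraf code): receiving exactly one result per URL both waits for every goroutine and records the last non-nil error, so no shared variable is written from multiple goroutines:

```go
package main

import (
	"errors"
	"fmt"
)

func fetch(u string) error {
	if u == "bad" {
		return errors.New("fetch failed: " + u)
	}
	return nil
}

func main() {
	urls := []string{"a", "bad", "c"}
	errch := make(chan error)

	for _, u := range urls {
		go func(u string) { errch <- fetch(u) }(u)
	}

	// One receive per goroutine: this loop is both the wait and the
	// error collection, replacing the WaitGroup.
	var outerr error
	for range urls {
		if err := <-errch; err != nil {
			outerr = err
		}
	}
	fmt.Println("result:", outerr)
}
```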
@@ -36,7 +36,8 @@ func TestHTTPApache(t *testing.T) {
 	defer ts.Close()

 	a := Apache{
-		Urls: []string{ts.URL},
+		// Fetch it 2 times to catch possible data races.
+		Urls: []string{ts.URL, ts.URL},
 	}

 	var acc testutil.Accumulator
@@ -148,7 +148,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
 	tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))
 	// Requests with wildcards for keyspace or table names will return nested
 	// maps in the json response
-	if tokens["type"] == "Table" && (tokens["keyspace"] == "*" ||
+	if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" ||
 		tokens["scope"] == "*") {
 		if valuesMap, ok := out["value"]; ok {
 			for k, v := range valuesMap.(map[string]interface{}) {
plugins/inputs/cgroup/README.md (new file, 59 lines)

@@ -0,0 +1,59 @@
# CGroup Input Plugin For Telegraf Agent

This input plugin will capture specific statistics per cgroup.

The following file formats are supported (a field-naming sketch follows this list):

* Single value

```
VAL\n
```

* New line separated values

```
VAL0\n
VAL1\n
```

* Space separated values

```
VAL0 VAL1 ...\n
```

* New line separated key-space-value's

```
KEY0 VAL0\n
KEY1 VAL1\n
```
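For example (a sketch; the field-naming convention is taken from the plugin's tests further below, while the file contents here are illustrative), a key-space-value file yields one field per key, prefixed with the file name, plus the `path` field described in the Tags section:

```
/cgroup/memory/memory.stat:
cache 1739362304
rss 1775325184

-> measurement "cgroup", fields:
   memory.stat.cache = 1739362304
   memory.stat.rss   = 1775325184
   path              = "/cgroup/memory"
```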

### Tags:

Measurements don't have any specific tags unless you define them at the telegraf level (defaults). We
used to have the path listed as a tag, but to keep cardinality in check it's easier to move this
value to a field. Thanks @sebito91!


### Configuration:

```
# [[inputs.cgroup]]
#   paths = [
#     "/cgroup/memory",          # root cgroup
#     "/cgroup/memory/child1",   # container cgroup
#     "/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself
#   ]
#   files = ["memory.*usage*", "memory.limit_in_bytes"]

# [[inputs.cgroup]]
#   paths = [
#     "/cgroup/cpu",             # root cgroup
#     "/cgroup/cpu/*",           # all container cgroups
#     "/cgroup/cpu/*/*",         # all children cgroups under each container cgroup
#   ]
#   files = ["cpuacct.usage", "cpu.cfs_period_us", "cpu.cfs_quota_us"]
```
plugins/inputs/cgroup/cgroup.go (new file, 35 lines)

@@ -0,0 +1,35 @@
package cgroup

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type CGroup struct {
	Paths []string `toml:"paths"`
	Files []string `toml:"files"`
}

var sampleConfig = `
  ## Directories in which to look for files, globs are supported.
  # paths = [
  #   "/cgroup/memory",
  #   "/cgroup/memory/child1",
  #   "/cgroup/memory/child2/*",
  # ]
  ## cgroup stat fields, as file names, globs are supported.
  ## these file names are appended to each path from above.
  # files = ["memory.*usage*", "memory.limit_in_bytes"]
`

func (g *CGroup) SampleConfig() string {
	return sampleConfig
}

func (g *CGroup) Description() string {
	return "Read specific statistics per cgroup"
}

func init() {
	inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
}
plugins/inputs/cgroup/cgroup_linux.go (new file, 243 lines)

@@ -0,0 +1,243 @@
// +build linux

package cgroup

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"strconv"

	"github.com/influxdata/telegraf"
)

const metricName = "cgroup"

func (g *CGroup) Gather(acc telegraf.Accumulator) error {
	list := make(chan pathInfo)
	go g.generateDirs(list)

	for dir := range list {
		if dir.err != nil {
			return dir.err
		}
		if err := g.gatherDir(dir.path, acc); err != nil {
			return err
		}
	}

	return nil
}

func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
	fields := make(map[string]interface{})

	list := make(chan pathInfo)
	go g.generateFiles(dir, list)

	for file := range list {
		if file.err != nil {
			return file.err
		}

		raw, err := ioutil.ReadFile(file.path)
		if err != nil {
			return err
		}
		if len(raw) == 0 {
			continue
		}

		fd := fileData{data: raw, path: file.path}
		if err := fd.parse(fields); err != nil {
			return err
		}
	}
	fields["path"] = dir

	acc.AddFields(metricName, fields, nil)

	return nil
}

// ======================================================================

type pathInfo struct {
	path string
	err  error
}

func isDir(path string) (bool, error) {
	result, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return result.IsDir(), nil
}

func (g *CGroup) generateDirs(list chan<- pathInfo) {
	for _, dir := range g.Paths {
		// getting all dirs that match the pattern 'dir'
		items, err := filepath.Glob(dir)
		if err != nil {
			list <- pathInfo{err: err}
			return
		}

		for _, item := range items {
			ok, err := isDir(item)
			if err != nil {
				list <- pathInfo{err: err}
				return
			}
			// supply only dirs
			if ok {
				list <- pathInfo{path: item}
			}
		}
	}
	close(list)
}

func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) {
	for _, file := range g.Files {
		// getting all file paths that match the pattern 'dir + file'
		// path.Base make sure that file variable does not contains part of path
		items, err := filepath.Glob(path.Join(dir, path.Base(file)))
		if err != nil {
			list <- pathInfo{err: err}
			return
		}

		for _, item := range items {
			ok, err := isDir(item)
			if err != nil {
				list <- pathInfo{err: err}
				return
			}
			// supply only files not dirs
			if !ok {
				list <- pathInfo{path: item}
			}
		}
	}
	close(list)
}

// ======================================================================

type fileData struct {
	data []byte
	path string
}

func (fd *fileData) format() (*fileFormat, error) {
	for _, ff := range fileFormats {
		ok, err := ff.match(fd.data)
		if err != nil {
			return nil, err
		}
		if ok {
			return &ff, nil
		}
	}

	return nil, fmt.Errorf("%v: unknown file format", fd.path)
}

func (fd *fileData) parse(fields map[string]interface{}) error {
	format, err := fd.format()
	if err != nil {
		return err
	}

	format.parser(filepath.Base(fd.path), fields, fd.data)
	return nil
}

// ======================================================================

type fileFormat struct {
	name    string
	pattern string
	parser  func(measurement string, fields map[string]interface{}, b []byte)
}

const keyPattern = "[[:alpha:]_]+"
const valuePattern = "[\\d-]+"

var fileFormats = [...]fileFormat{
	// VAL\n
	fileFormat{
		name:    "Single value",
		pattern: "^" + valuePattern + "\n$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("^(" + valuePattern + ")\n$")
			matches := re.FindAllStringSubmatch(string(b), -1)
			fields[measurement] = numberOrString(matches[0][1])
		},
	},
	// VAL0\n
	// VAL1\n
	// ...
	fileFormat{
		name:    "New line separated values",
		pattern: "^(" + valuePattern + "\n){2,}$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("(" + valuePattern + ")\n")
			matches := re.FindAllStringSubmatch(string(b), -1)
			for i, v := range matches {
				fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
			}
		},
	},
	// VAL0 VAL1 ...\n
	fileFormat{
		name:    "Space separated values",
		pattern: "^(" + valuePattern + " )+\n$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("(" + valuePattern + ") ")
			matches := re.FindAllStringSubmatch(string(b), -1)
			for i, v := range matches {
				fields[measurement+"."+strconv.Itoa(i)] = numberOrString(v[1])
			}
		},
	},
	// KEY0 VAL0\n
	// KEY1 VAL1\n
	// ...
	fileFormat{
		name:    "New line separated key-space-value's",
		pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$",
		parser: func(measurement string, fields map[string]interface{}, b []byte) {
			re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n")
			matches := re.FindAllStringSubmatch(string(b), -1)
			for _, v := range matches {
				fields[measurement+"."+v[1]] = numberOrString(v[2])
			}
		},
	},
}

func numberOrString(s string) interface{} {
	i, err := strconv.Atoi(s)
	if err == nil {
		return i
	}

	return s
}

func (f fileFormat) match(b []byte) (bool, error) {
	ok, err := regexp.Match(f.pattern, b)
	if err != nil {
		return false, err
	}
	if ok {
		return true, nil
	}
	return false, nil
}
plugins/inputs/cgroup/cgroup_notlinux.go (new file, 11 lines)

@@ -0,0 +1,11 @@
// +build !linux

package cgroup

import (
	"github.com/influxdata/telegraf"
)

func (g *CGroup) Gather(acc telegraf.Accumulator) error {
	return nil
}
194
plugins/inputs/cgroup/cgroup_test.go
Normal file
194
plugins/inputs/cgroup/cgroup_test.go
Normal file
@@ -0,0 +1,194 @@
|
||||
// +build linux
|
||||
|
||||
package cgroup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var cg1 = &CGroup{
|
||||
Paths: []string{"testdata/memory"},
|
||||
Files: []string{
|
||||
"memory.empty",
|
||||
"memory.max_usage_in_bytes",
|
||||
"memory.limit_in_bytes",
|
||||
"memory.stat",
|
||||
"memory.use_hierarchy",
|
||||
"notify_on_release",
|
||||
},
|
||||
}
|
||||
|
||||
func assertContainsFields(a *testutil.Accumulator, t *testing.T, measurement string, fieldSet []map[string]interface{}) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
|
||||
numEquals := 0
|
||||
for _, p := range a.Metrics {
|
||||
if p.Measurement == measurement {
|
||||
for _, fields := range fieldSet {
|
||||
if reflect.DeepEqual(fields, p.Fields) {
|
||||
numEquals++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if numEquals != len(fieldSet) {
|
||||
assert.Fail(t, fmt.Sprintf("only %d of %d are equal", numEquals, len(fieldSet)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCgroupStatistics_1(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := cg1.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"memory.stat.cache": 1739362304123123123,
|
||||
"memory.stat.rss": 1775325184,
|
||||
"memory.stat.rss_huge": 778043392,
|
||||
"memory.stat.mapped_file": 421036032,
|
||||
"memory.stat.dirty": -307200,
|
||||
"memory.max_usage_in_bytes.0": 0,
|
||||
"memory.max_usage_in_bytes.1": -1,
|
||||
"memory.max_usage_in_bytes.2": 2,
|
||||
"memory.limit_in_bytes": 223372036854771712,
|
||||
"memory.use_hierarchy": "12-781",
|
||||
"notify_on_release": 0,
|
||||
"path": "testdata/memory",
|
||||
}
|
||||
assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
|
||||
}
// ======================================================================

var cg2 = &CGroup{
    Paths: []string{"testdata/cpu"},
    Files: []string{"cpuacct.usage_percpu"},
}

func TestCgroupStatistics_2(t *testing.T) {
    var acc testutil.Accumulator

    err := cg2.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "cpuacct.usage_percpu.0": -1452543795404,
        "cpuacct.usage_percpu.1": 1376681271659,
        "cpuacct.usage_percpu.2": 1450950799997,
        "cpuacct.usage_percpu.3": -1473113374257,
        "path":                   "testdata/cpu",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
}

// ======================================================================

var cg3 = &CGroup{
    Paths: []string{"testdata/memory/*"},
    Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_3(t *testing.T) {
    var acc testutil.Accumulator

    err := cg3.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1",
    }

    fieldsTwo := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_2",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
}

// ======================================================================

var cg4 = &CGroup{
    Paths: []string{"testdata/memory/*/*", "testdata/memory/group_2"},
    Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_4(t *testing.T) {
    var acc testutil.Accumulator

    err := cg4.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1/group_1_1",
    }

    fieldsTwo := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1/group_1_2",
    }

    fieldsThree := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_2",
    }

    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo, fieldsThree})
}

// ======================================================================

var cg5 = &CGroup{
    Paths: []string{"testdata/memory/*/group_1_1"},
    Files: []string{"memory.limit_in_bytes"},
}

func TestCgroupStatistics_5(t *testing.T) {
    var acc testutil.Accumulator

    err := cg5.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1/group_1_1",
    }

    fieldsTwo := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_2/group_1_1",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
}

// ======================================================================

var cg6 = &CGroup{
    Paths: []string{"testdata/memory"},
    Files: []string{"memory.us*", "*/memory.kmem.*"},
}

func TestCgroupStatistics_6(t *testing.T) {
    var acc testutil.Accumulator

    err := cg6.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "memory.usage_in_bytes":      3513667584,
        "memory.use_hierarchy":       "12-781",
        "memory.kmem.limit_in_bytes": 9223372036854771712,
        "path":                       "testdata/memory",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
}
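Tests cg3 through cg5 exercise wildcard expansion of the configured paths: each pattern may match several cgroup directories, and every match is sampled and tagged with its own `path`. A minimal sketch of that expansion using the standard library, not the plugin's actual implementation:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// expandPaths resolves each configured path pattern to the set of
// directories it matches, the behaviour the cg3-cg5 tests assert.
func expandPaths(patterns []string) ([]string, error) {
	var dirs []string
	for _, p := range patterns {
		matches, err := filepath.Glob(p)
		if err != nil {
			return nil, err
		}
		dirs = append(dirs, matches...)
	}
	return dirs, nil
}

func main() {
	dirs, err := expandPaths([]string{"testdata/memory/*/*", "testdata/memory/group_2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(dirs) // e.g. [testdata/memory/group_1/group_1_1 ... testdata/memory/group_2]
}
```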
1 plugins/inputs/cgroup/testdata/blkio/blkio.io_serviced vendored Normal file
@@ -0,0 +1 @@
Total 0

131 plugins/inputs/cgroup/testdata/blkio/blkio.throttle.io_serviced vendored Normal file
@@ -0,0 +1,131 @@
11:0 Read 0
11:0 Write 0
11:0 Sync 0
11:0 Async 0
11:0 Total 0
8:0 Read 49134
8:0 Write 216703
8:0 Sync 177906
8:0 Async 87931
8:0 Total 265837
7:7 Read 0
7:7 Write 0
7:7 Sync 0
7:7 Async 0
7:7 Total 0
7:6 Read 0
7:6 Write 0
7:6 Sync 0
7:6 Async 0
7:6 Total 0
7:5 Read 0
7:5 Write 0
7:5 Sync 0
7:5 Async 0
7:5 Total 0
7:4 Read 0
7:4 Write 0
7:4 Sync 0
7:4 Async 0
7:4 Total 0
7:3 Read 0
7:3 Write 0
7:3 Sync 0
7:3 Async 0
7:3 Total 0
7:2 Read 0
7:2 Write 0
7:2 Sync 0
7:2 Async 0
7:2 Total 0
7:1 Read 0
7:1 Write 0
7:1 Sync 0
7:1 Async 0
7:1 Total 0
7:0 Read 0
7:0 Write 0
7:0 Sync 0
7:0 Async 0
7:0 Total 0
1:15 Read 3
1:15 Write 0
1:15 Sync 0
1:15 Async 3
1:15 Total 3
1:14 Read 3
1:14 Write 0
1:14 Sync 0
1:14 Async 3
1:14 Total 3
1:13 Read 3
1:13 Write 0
1:13 Sync 0
1:13 Async 3
1:13 Total 3
1:12 Read 3
1:12 Write 0
1:12 Sync 0
1:12 Async 3
1:12 Total 3
1:11 Read 3
1:11 Write 0
1:11 Sync 0
1:11 Async 3
1:11 Total 3
1:10 Read 3
1:10 Write 0
1:10 Sync 0
1:10 Async 3
1:10 Total 3
1:9 Read 3
1:9 Write 0
1:9 Sync 0
1:9 Async 3
1:9 Total 3
1:8 Read 3
1:8 Write 0
1:8 Sync 0
1:8 Async 3
1:8 Total 3
1:7 Read 3
1:7 Write 0
1:7 Sync 0
1:7 Async 3
1:7 Total 3
1:6 Read 3
1:6 Write 0
1:6 Sync 0
1:6 Async 3
1:6 Total 3
1:5 Read 3
1:5 Write 0
1:5 Sync 0
1:5 Async 3
1:5 Total 3
1:4 Read 3
1:4 Write 0
1:4 Sync 0
1:4 Async 3
1:4 Total 3
1:3 Read 3
1:3 Write 0
1:3 Sync 0
1:3 Async 3
1:3 Total 3
1:2 Read 3
1:2 Write 0
1:2 Sync 0
1:2 Async 3
1:2 Total 3
1:1 Read 3
1:1 Write 0
1:1 Sync 0
1:1 Async 3
1:1 Total 3
1:0 Read 3
1:0 Write 0
1:0 Sync 0
1:0 Async 3
1:0 Total 3
Total 265885

1 plugins/inputs/cgroup/testdata/cpu/cpu.cfs_quota_us vendored Normal file
@@ -0,0 +1 @@
-1

1 plugins/inputs/cgroup/testdata/cpu/cpuacct.usage_percpu vendored Normal file
@@ -0,0 +1 @@
-1452543795404 1376681271659 1450950799997 -1473113374257

1 plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712

5 plugins/inputs/cgroup/testdata/memory/group_1/group_1_1/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

1 plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712

5 plugins/inputs/cgroup/testdata/memory/group_1/group_1_2/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

1 plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
9223372036854771712

1 plugins/inputs/cgroup/testdata/memory/group_1/memory.kmem.max_usage_in_bytes vendored Normal file
@@ -0,0 +1 @@
0

1 plugins/inputs/cgroup/testdata/memory/group_1/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712

5 plugins/inputs/cgroup/testdata/memory/group_1/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

1 plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712

5 plugins/inputs/cgroup/testdata/memory/group_2/group_1_1/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

1 plugins/inputs/cgroup/testdata/memory/group_2/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712

5 plugins/inputs/cgroup/testdata/memory/group_2/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

0 plugins/inputs/cgroup/testdata/memory/memory.empty vendored Normal file

1 plugins/inputs/cgroup/testdata/memory/memory.kmem.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
9223372036854771712

1 plugins/inputs/cgroup/testdata/memory/memory.limit_in_bytes vendored Normal file
@@ -0,0 +1 @@
223372036854771712

3 plugins/inputs/cgroup/testdata/memory/memory.max_usage_in_bytes vendored Normal file
@@ -0,0 +1,3 @@
0
-1
2

8 plugins/inputs/cgroup/testdata/memory/memory.numa_stat vendored Normal file
@@ -0,0 +1,8 @@
total=858067 N0=858067
file=406254 N0=406254
anon=451792 N0=451792
unevictable=21 N0=21
hierarchical_total=858067 N0=858067
hierarchical_file=406254 N0=406254
hierarchical_anon=451792 N0=451792
hierarchical_unevictable=21 N0=21

5 plugins/inputs/cgroup/testdata/memory/memory.stat vendored Normal file
@@ -0,0 +1,5 @@
cache 1739362304123123123
rss 1775325184
rss_huge 778043392
mapped_file 421036032
dirty -307200

1 plugins/inputs/cgroup/testdata/memory/memory.usage_in_bytes vendored Normal file
@@ -0,0 +1 @@
3513667584

1 plugins/inputs/cgroup/testdata/memory/memory.use_hierarchy vendored Normal file
@@ -0,0 +1 @@
12-781

1 plugins/inputs/cgroup/testdata/memory/notify_on_release vendored Normal file
@@ -0,0 +1 @@
0
@@ -11,6 +11,13 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference
  servers = ["http://localhost:9200"]
  local = true
  cluster_health = true

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```

### Measurements & Fields:
@@ -8,6 +8,7 @@ import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/errchan"
    "github.com/influxdata/telegraf/plugins/inputs"
    jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
@@ -67,25 +68,31 @@ const sampleConfig = `

  ## set cluster_health to true when you want to also obtain cluster level stats
  cluster_health = false

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
    Local         bool
    Servers       []string
    ClusterHealth bool
    client        *http.Client
    Local              bool
    Servers            []string
    ClusterHealth      bool
    SSLCA              string `toml:"ssl_ca"`   // Path to CA file
    SSLCert            string `toml:"ssl_cert"` // Path to host cert file
    SSLKey             string `toml:"ssl_key"`  // Path to cert key file
    InsecureSkipVerify bool                     // Use SSL but skip chain & host verification
    client             *http.Client
}

// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
    tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
    client := &http.Client{
        Transport: tr,
        Timeout:   time.Duration(4 * time.Second),
    }
    return &Elasticsearch{client: client}
    return &Elasticsearch{}
}

// SampleConfig returns sample configuration for this plugin.
@@ -101,6 +108,15 @@ func (e *Elasticsearch) Description() string {
// Gather reads the stats from Elasticsearch and writes it to the
// Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
    if e.client == nil {
        client, err := e.createHttpClient()

        if err != nil {
            return err
        }
        e.client = client
    }

    errChan := errchan.New(len(e.Servers))
    var wg sync.WaitGroup
    wg.Add(len(e.Servers))
@@ -128,6 +144,23 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
    return errChan.Error()
}

func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
    tlsCfg, err := internal.GetTLSConfig(e.SSLCert, e.SSLKey, e.SSLCA, e.InsecureSkipVerify)
    if err != nil {
        return nil, err
    }
    tr := &http.Transport{
        ResponseHeaderTimeout: time.Duration(3 * time.Second),
        TLSClientConfig:       tlsCfg,
    }
    client := &http.Client{
        Transport: tr,
        Timeout:   time.Duration(4 * time.Second),
    }

    return client, nil
}

func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
    nodeStats := &struct {
        ClusterName string `json:"cluster_name"`

@@ -38,7 +38,7 @@ func (t *transportMock) CancelRequest(_ *http.Request) {
}

func TestElasticsearch(t *testing.T) {
    es := NewElasticsearch()
    es := newElasticsearchWithClient()
    es.Servers = []string{"http://example.com:9200"}
    es.client.Transport = newTransportMock(http.StatusOK, statsResponse)

@@ -67,7 +67,7 @@ func TestElasticsearch(t *testing.T) {
}

func TestGatherClusterStats(t *testing.T) {
    es := NewElasticsearch()
    es := newElasticsearchWithClient()
    es.Servers = []string{"http://example.com:9200"}
    es.ClusterHealth = true
    es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)
@@ -87,3 +87,9 @@ func TestGatherClusterStats(t *testing.T) {
        v2IndexExpected,
        map[string]string{"index": "v2"})
}

func newElasticsearchWithClient() *Elasticsearch {
    es := NewElasticsearch()
    es.client = &http.Client{}
    return es
}
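The pattern this change adopts, building the TLS-aware client lazily on first Gather while letting tests inject a plain client up front, is worth calling out. A reduced sketch with hypothetical type names, standard library only:

```go
package main

import (
	"fmt"
	"net/http"
)

type collector struct {
	client *http.Client
}

// gather builds the real (possibly TLS-configured) client on first use;
// if a test pre-set the field, that construction is skipped entirely.
func (c *collector) gather() error {
	if c.client == nil {
		c.client = &http.Client{}
	}
	return nil
}

func main() {
	// a test injects its own client, like newElasticsearchWithClient above
	c := &collector{client: &http.Client{Transport: http.DefaultTransport}}
	fmt.Println(c.gather(), c.client != nil) // <nil> true
}
```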
@@ -48,8 +48,6 @@ type Exec struct {

    parser parsers.Parser

    wg sync.WaitGroup

    runner  Runner
    errChan chan error
}
@@ -119,8 +117,8 @@ func (c CommandRunner) Run(
    return out.Bytes(), nil
}

func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator) {
    defer e.wg.Done()
func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
    defer wg.Done()

    out, err := e.runner.Run(e, command, acc)
    if err != nil {
@@ -151,6 +149,7 @@ func (e *Exec) SetParser(parser parsers.Parser) {
}

func (e *Exec) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup
    // Legacy single command support
    if e.Command != "" {
        e.Commands = append(e.Commands, e.Command)
@@ -190,11 +189,11 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
    errChan := errchan.New(len(commands))
    e.errChan = errChan.C

    e.wg.Add(len(commands))
    wg.Add(len(commands))
    for _, command := range commands {
        go e.ProcessCommand(command, acc)
        go e.ProcessCommand(command, acc, &wg)
    }
    e.wg.Wait()
    wg.Wait()
    return errChan.Error()
}
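The exec change above moves the WaitGroup from a struct field to a variable local to each Gather call, passed explicitly to the worker goroutines, so overlapping or repeated gathers never share counter state. A minimal standalone sketch of that pattern, assuming nothing beyond the standard library:

```go
package main

import (
	"fmt"
	"sync"
)

// process stands in for ProcessCommand: it signals completion on the
// WaitGroup it was handed, not on shared struct state.
func process(cmd string, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Println("ran", cmd)
}

func gather(commands []string) {
	var wg sync.WaitGroup // scoped to this call: safe across concurrent gathers
	wg.Add(len(commands))
	for _, c := range commands {
		go process(c, &wg)
	}
	wg.Wait()
}

func main() {
	gather([]string{"echo a", "echo b"})
}
```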
@@ -22,7 +22,7 @@ from the same topic in parallel.
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"

  ## Data format to consume.
  ## Data format to consume.

  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
@@ -32,11 +32,5 @@ from the same topic in parallel.

## Testing

Running integration tests requires running Zookeeper & Kafka. The following
commands assume you're on OS X & using [boot2docker](http://boot2docker.io/) or docker-machine through [Docker Toolbox](https://www.docker.com/docker-toolbox).

To start Kafka & Zookeeper:

```
docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip || docker-machine ip <your_machine_name>` --env ADVERTISED_PORT=9092 spotify/kafka
```
Running integration tests requires running Zookeeper & Kafka. See the Makefile
for the kafka container command.
@@ -32,6 +32,8 @@ regex patterns.
  '''
```

> **Note:** The InfluxDB log pattern in the default configuration only works for Influx versions 1.0.0-beta1 or higher.

## Grok Parser

The grok parser uses a slightly modified version of logstash "grok" patterns,
@@ -53,9 +53,15 @@ var (
)

type Parser struct {
    Patterns []string
    Patterns []string
    // namedPatterns is a list of internally-assigned names to the patterns
    // specified by the user in Patterns.
    // They will look like:
    // GROK_INTERNAL_PATTERN_0, GROK_INTERNAL_PATTERN_1, etc.
    namedPatterns      []string
    CustomPatterns     string
    CustomPatternFiles []string
    Measurement        string

    // typeMap is a map of patterns -> capture name -> modifier,
    // ie, {
@@ -97,13 +103,24 @@ func (p *Parser) Compile() error {
        return err
    }

    p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns
    // Give Patterns fake names so that they can be treated as named
    // "custom patterns"
    p.namedPatterns = make([]string, len(p.Patterns))
    for i, pattern := range p.Patterns {
        name := fmt.Sprintf("GROK_INTERNAL_PATTERN_%d", i)
        p.CustomPatterns += "\n" + name + " " + pattern + "\n"
        p.namedPatterns[i] = "%{" + name + "}"
    }

    // Combine user-supplied CustomPatterns with DEFAULT_PATTERNS and parse
    // them together as the same type of pattern.
    p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns
    if len(p.CustomPatterns) != 0 {
        scanner := bufio.NewScanner(strings.NewReader(p.CustomPatterns))
        p.addCustomPatterns(scanner)
    }

    // Parse any custom pattern files supplied.
    for _, filename := range p.CustomPatternFiles {
        file, err := os.Open(filename)
        if err != nil {
@@ -114,6 +131,10 @@ func (p *Parser) Compile() error {
        p.addCustomPatterns(scanner)
    }

    if p.Measurement == "" {
        p.Measurement = "logparser_grok"
    }

    return p.compileCustomPatterns()
}
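To make the internal-naming step above concrete, here is a toy illustration of what Compile now generates: each user pattern receives a synthetic name and is appended to the custom-pattern text, so it can be compiled and matched like any named pattern. A sketch of the transformation only, not the parser itself:

```go
package main

import "fmt"

func main() {
	patterns := []string{`%{NUMBER:value:float} %{WORD:unit}`}
	custom := ""
	named := make([]string, len(patterns))
	for i, p := range patterns {
		name := fmt.Sprintf("GROK_INTERNAL_PATTERN_%d", i)
		custom += "\n" + name + " " + p + "\n"
		named[i] = "%{" + name + "}"
	}
	// GROK_INTERNAL_PATTERN_0 %{NUMBER:value:float} %{WORD:unit}
	fmt.Print(custom)
	// ParseLine later iterates these instead of the raw patterns:
	fmt.Println(named) // [%{GROK_INTERNAL_PATTERN_0}]
}
```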
@@ -122,7 +143,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
    var values map[string]string
    // the matching pattern string
    var patternName string
    for _, pattern := range p.Patterns {
    for _, pattern := range p.namedPatterns {
        if values, err = p.g.Parse(pattern, line); err != nil {
            return nil, err
        }
@@ -215,7 +236,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
        }
    }

    return telegraf.NewMetric("logparser_grok", tags, fields, p.tsModder.tsMod(timestamp))
    return telegraf.NewMetric(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
}

func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) {

@@ -83,6 +83,31 @@ func Benchmark_ParseLine_CustomPattern(b *testing.B) {
    benchM = m
}

func TestMeasurementName(t *testing.T) {
    p := &Parser{
        Measurement: "my_web_log",
        Patterns:    []string{"%{COMMON_LOG_FORMAT}"},
    }
    assert.NoError(t, p.Compile())

    // Parse an influxdb POST request
    m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
    require.NotNil(t, m)
    assert.NoError(t, err)
    assert.Equal(t,
        map[string]interface{}{
            "resp_bytes":   int64(2326),
            "auth":         "frank",
            "client_ip":    "127.0.0.1",
            "http_version": float64(1.0),
            "ident":        "user-identifier",
            "request":      "/apache_pb.gif",
        },
        m.Fields())
    assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
    assert.Equal(t, "my_web_log", m.Name())
}

func TestBuiltinInfluxdbHttpd(t *testing.T) {
    p := &Parser{
        Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
@@ -98,7 +123,6 @@ func TestBuiltinInfluxdbHttpd(t *testing.T) {
            "resp_bytes":   int64(0),
            "auth":         "-",
            "client_ip":    "::1",
            "resp_code":    int64(204),
            "http_version": float64(1.1),
            "ident":        "-",
            "referrer":     "-",
@@ -107,7 +131,7 @@ func TestBuiltinInfluxdbHttpd(t *testing.T) {
            "agent":        "InfluxDBClient",
        },
        m.Fields())
    assert.Equal(t, map[string]string{"verb": "POST"}, m.Tags())
    assert.Equal(t, map[string]string{"verb": "POST", "resp_code": "204"}, m.Tags())

    // Parse an influxdb GET request
    m, err = p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:12:10:02 +0100] "GET /query?db=telegraf&q=SELECT+bytes%2Cresponse_time_us+FROM+logparser_grok+WHERE+http_method+%3D+%27GET%27+AND+response_time_us+%3E+0+AND+time+%3E+now%28%29+-+1h HTTP/1.1" 200 578 "http://localhost:8083/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36" 8a3806f1-3220-11e6-8006-000000000000 988`)
@@ -118,7 +142,6 @@ func TestBuiltinInfluxdbHttpd(t *testing.T) {
            "resp_bytes":   int64(578),
            "auth":         "-",
            "client_ip":    "::1",
            "resp_code":    int64(200),
            "http_version": float64(1.1),
            "ident":        "-",
            "referrer":     "http://localhost:8083/",
@@ -127,7 +150,7 @@ func TestBuiltinInfluxdbHttpd(t *testing.T) {
            "agent":        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
        },
        m.Fields())
    assert.Equal(t, map[string]string{"verb": "GET"}, m.Tags())
    assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

// common log format
@@ -147,13 +170,12 @@ func TestBuiltinCommonLogFormat(t *testing.T) {
            "resp_bytes":   int64(2326),
            "auth":         "frank",
            "client_ip":    "127.0.0.1",
            "resp_code":    int64(200),
            "http_version": float64(1.0),
            "ident":        "user-identifier",
            "request":      "/apache_pb.gif",
        },
        m.Fields())
    assert.Equal(t, map[string]string{"verb": "GET"}, m.Tags())
    assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

// combined log format
@@ -173,7 +195,6 @@ func TestBuiltinCombinedLogFormat(t *testing.T) {
            "resp_bytes":   int64(2326),
            "auth":         "frank",
            "client_ip":    "127.0.0.1",
            "resp_code":    int64(200),
            "http_version": float64(1.0),
            "ident":        "user-identifier",
            "request":      "/apache_pb.gif",
@@ -181,12 +202,12 @@ func TestBuiltinCombinedLogFormat(t *testing.T) {
            "agent":        "Mozilla",
        },
        m.Fields())
    assert.Equal(t, map[string]string{"verb": "GET"}, m.Tags())
    assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}

func TestCompileStringAndParse(t *testing.T) {
    p := &Parser{
        Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
        Patterns: []string{"%{TEST_LOG_A}"},
        CustomPatterns: `
            DURATION %{NUMBER}[nuµm]?s
            RESPONSE_CODE %{NUMBER:response_code:tag}
@@ -209,6 +230,41 @@ func TestCompileStringAndParse(t *testing.T) {
    assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
}

func TestCompileErrorsOnInvalidPattern(t *testing.T) {
    p := &Parser{
        Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
        CustomPatterns: `
            DURATION %{NUMBER}[nuµm]?s
            RESPONSE_CODE %{NUMBER:response_code:tag}
            RESPONSE_TIME %{DURATION:response_time:duration}
            TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
        `,
    }
    assert.Error(t, p.Compile())

    metricA, _ := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
    require.Nil(t, metricA)
}

func TestParsePatternsWithoutCustom(t *testing.T) {
    p := &Parser{
        Patterns: []string{"%{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}"},
    }
    assert.NoError(t, p.Compile())

    metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`)
    require.NotNil(t, metricA)
    assert.NoError(t, err)
    assert.Equal(t,
        map[string]interface{}{
            "response_time": int64(20821),
            "metric":        float64(10890.645),
        },
        metricA.Fields())
    assert.Equal(t, map[string]string{}, metricA.Tags())
    assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
}

func TestParseEpochNano(t *testing.T) {
    p := &Parser{
        Patterns: []string{"%{MYAPP}"},
@@ -392,7 +448,7 @@ func TestParseErrors(t *testing.T) {
        TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int} %{}
        `,
    }
    assert.NoError(t, p.Compile())
    assert.Error(t, p.Compile())
    _, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`)
    assert.Error(t, err)
@@ -66,7 +66,7 @@ INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:r

# apache & nginx logs, this is also known as the "common log format"
# see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:int} (?:%{NUMBER:resp_bytes:int}|-)
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)

# Combined log format is the same as the common log format but with the addition
# of two quoted strings at the end for "referrer" and "agent"

@@ -62,7 +62,7 @@ INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:r

# apache & nginx logs, this is also known as the "common log format"
# see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:int} (?:%{NUMBER:resp_bytes:int}|-)
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)

# Combined log format is the same as the common log format but with the addition
# of two quoted strings at the end for "referrer" and "agent"
@@ -9,6 +9,7 @@ import (
    "github.com/hpcloud/tail"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/errchan"
    "github.com/influxdata/telegraf/internal/globpath"
    "github.com/influxdata/telegraf/plugins/inputs"

@@ -58,6 +59,8 @@ const sampleConfig = `
  ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
  ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
  patterns = ["%{INFLUXDB_HTTPD_LOG}"]
  ## Name of the measurement to output.
  measurement = "influxdb_log"
  ## Full path(s) to custom pattern files.
  custom_pattern_files = []
  ## Custom patterns can also be defined here. Put one pattern per line.
@@ -108,11 +111,15 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
    }

    // compile log parser patterns:
    errChan := errchan.New(len(l.parsers))
    for _, parser := range l.parsers {
        if err := parser.Compile(); err != nil {
            return err
            errChan.C <- err
        }
    }
    if err := errChan.Error(); err != nil {
        return err
    }

    var seek tail.SeekInfo
    if !l.FromBeginning {
@@ -123,24 +130,25 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
    l.wg.Add(1)
    go l.parser()

    var errS string
    // Create a "tailer" for each file
    for _, filepath := range l.Files {
        g, err := globpath.Compile(filepath)
        if err != nil {
            log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
            continue
        }
        for file, _ := range g.Match() {
        files := g.Match()
        errChan = errchan.New(len(files))
        for file, _ := range files {
            tailer, err := tail.TailFile(file,
                tail.Config{
                    ReOpen:   true,
                    Follow:   true,
                    Location: &seek,
                    ReOpen:    true,
                    Follow:    true,
                    Location:  &seek,
                    MustExist: true,
                })
            if err != nil {
                errS += err.Error() + " "
                continue
            }
            errChan.C <- err

            // create a goroutine for each "tailer"
            l.wg.Add(1)
            go l.receiver(tailer)
@@ -148,10 +156,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
        }
    }

    if errS != "" {
        return fmt.Errorf(errS)
    }
    return nil
    return errChan.Error()
}
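Start now collects failures through the errchan helper instead of concatenating error strings. A minimal sketch of that idea with a hypothetical helper, not telegraf's internal/errchan package: push up to N errors onto a buffered channel, then join whatever arrived into a single error at the end.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type errChan struct{ C chan error }

func newErrChan(n int) *errChan { return &errChan{C: make(chan error, n)} }

// Error drains the channel and joins all non-nil errors; nil entries
// (successful operations) are simply skipped.
func (e *errChan) Error() error {
	close(e.C)
	var msgs []string
	for err := range e.C {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "; "))
}

func main() {
	ec := newErrChan(2)
	ec.C <- nil // one tailer started fine
	ec.C <- fmt.Errorf("tail failed: no such file")
	fmt.Println(ec.Error())
}
```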
// receiver is launched as a goroutine to continuously watch a tailed logfile
@@ -199,8 +204,6 @@ func (l *LogParserPlugin) parser() {
            if m != nil {
                l.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
            }
        } else {
            log.Printf("Malformed log line in [%s], Error: %s\n", line, err)
        }
    }
}

@@ -37,7 +37,7 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) {
    }

    acc := testutil.Accumulator{}
    assert.NoError(t, logparser.Start(&acc))
    assert.Error(t, logparser.Start(&acc))

    time.Sleep(time.Millisecond * 500)
    logparser.Stop()
@@ -80,6 +80,8 @@ func TestGrokParseLogFiles(t *testing.T) {
        map[string]string{})
}

// Test that test_a.log line gets parsed even though we don't have the correct
// pattern available for test_b.log
func TestGrokParseLogFilesOneBad(t *testing.T) {
    thisdir := getCurrentDir()
    p := &grok.Parser{
@@ -90,11 +92,12 @@ func TestGrokParseLogFilesOneBad(t *testing.T) {

    logparser := &LogParserPlugin{
        FromBeginning: true,
        Files:         []string{thisdir + "grok/testdata/*.log"},
        Files:         []string{thisdir + "grok/testdata/test_a.log"},
        GrokParser:    p,
    }

    acc := testutil.Accumulator{}
    acc.SetDebug(true)
    assert.NoError(t, logparser.Start(&acc))

    time.Sleep(time.Millisecond * 500)
@@ -306,6 +306,10 @@ var mappings = []*mapping{
        onServer: "Threadpool_",
        inExport: "threadpool_",
    },
    {
        onServer: "wsrep_",
        inExport: "wsrep_",
    },
}

var (
25 plugins/inputs/nsq_consumer/README.md Normal file
@@ -0,0 +1,25 @@
# NSQ Consumer Input Plugin

The [NSQ](http://nsq.io/) consumer plugin polls a specified NSQD
topic and adds messages to InfluxDB. This plugin allows a message to be in any of the supported `data_format` types.

## Configuration

```toml
# Read metrics from NSQD topic(s)
[[inputs.nsq_consumer]]
  ## The NSQD TCP endpoint to consume from
  server = "localhost:4150"
  topic = "telegraf"
  channel = "consumer"
  max_in_flight = 100

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

## Testing
The `nsq_consumer_test` mocks out the interaction with `NSQD`. It requires no outside dependencies.
99 plugins/inputs/nsq_consumer/nsq_consumer.go Normal file
@@ -0,0 +1,99 @@
package nsq_consumer

import (
    "log"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/nsqio/go-nsq"
)

// NSQConsumer represents the configuration of the plugin
type NSQConsumer struct {
    Server      string
    Topic       string
    Channel     string
    MaxInFlight int
    parser      parsers.Parser
    consumer    *nsq.Consumer
    acc         telegraf.Accumulator
}

var sampleConfig = `
  ## A string representing the NSQD TCP endpoint
  server = "localhost:4150"
  topic = "telegraf"
  channel = "consumer"
  max_in_flight = 100

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`

func init() {
    inputs.Add("nsq_consumer", func() telegraf.Input {
        return &NSQConsumer{}
    })
}

// SetParser takes the data_format from the config and finds the right parser for that format
func (n *NSQConsumer) SetParser(parser parsers.Parser) {
    n.parser = parser
}

// SampleConfig returns config values for generating a sample configuration file
func (n *NSQConsumer) SampleConfig() string {
    return sampleConfig
}

// Description prints description string
func (n *NSQConsumer) Description() string {
    return "Read NSQ topic for metrics."
}

// Start pulls data from nsq
func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
    n.acc = acc
    n.connect()
    n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error {
        metrics, err := n.parser.Parse(message.Body)
        if err != nil {
            log.Printf("NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())
            return nil
        }
        for _, metric := range metrics {
            n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
        }
        message.Finish()
        return nil
    }), n.MaxInFlight)
    n.consumer.ConnectToNSQD(n.Server)
    return nil
}

// Stop processing messages
func (n *NSQConsumer) Stop() {
    n.consumer.Stop()
}

// Gather is a noop
func (n *NSQConsumer) Gather(acc telegraf.Accumulator) error {
    return nil
}

func (n *NSQConsumer) connect() error {
    if n.consumer == nil {
        config := nsq.NewConfig()
        config.MaxInFlight = n.MaxInFlight
        consumer, err := nsq.NewConsumer(n.Topic, n.Channel, config)
        if err != nil {
            return err
        }
        n.consumer = consumer
    }
    return nil
}
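For orientation, here is a standalone sketch of the consume loop Start wires up, using the same go-nsq calls as the plugin above (handler registration, connect, explicit Finish); the parse-and-accumulate step is reduced to a print:

```go
package main

import (
	"fmt"
	"log"

	nsq "github.com/nsqio/go-nsq"
)

func main() {
	cfg := nsq.NewConfig()
	cfg.MaxInFlight = 100
	consumer, err := nsq.NewConsumer("telegraf", "consumer", cfg)
	if err != nil {
		log.Fatal(err)
	}
	consumer.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {
		// in the plugin, the body is parsed and each metric accumulated
		fmt.Printf("got %d bytes\n", len(m.Body))
		m.Finish()
		return nil
	}))
	if err := consumer.ConnectToNSQD("localhost:4150"); err != nil {
		log.Fatal(err)
	}
	<-consumer.StopChan // block until Stop() is called elsewhere
}
```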
245 plugins/inputs/nsq_consumer/nsq_consumer_test.go Normal file
@@ -0,0 +1,245 @@
package nsq_consumer

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "io"
    "log"
    "net"
    "strconv"
    "testing"
    "time"

    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/influxdata/telegraf/testutil"
    "github.com/nsqio/go-nsq"
    "github.com/stretchr/testify/assert"
)

// This test is modeled after the kafka consumer integration test
func TestReadsMetricsFromNSQ(t *testing.T) {
    msgID := nsq.MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}
    msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"))

    script := []instruction{
        // SUB
        instruction{0, nsq.FrameTypeResponse, []byte("OK")},
        // IDENTIFY
        instruction{0, nsq.FrameTypeResponse, []byte("OK")},
        instruction{20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)},
        // needed to exit test
        instruction{100 * time.Millisecond, -1, []byte("exit")},
    }

    addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155")
    newMockNSQD(script, addr.String())

    consumer := &NSQConsumer{
        Server:      "127.0.0.1:4155",
        Topic:       "telegraf",
        Channel:     "consume",
        MaxInFlight: 1,
    }

    p, _ := parsers.NewInfluxParser()
    consumer.SetParser(p)
    var acc testutil.Accumulator
    assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
    if err := consumer.Start(&acc); err != nil {
        t.Fatal(err.Error())
    } else {
        defer consumer.Stop()
    }

    waitForPoint(&acc, t)

    if len(acc.Metrics) == 1 {
        point := acc.Metrics[0]
        assert.Equal(t, "cpu_load_short", point.Measurement)
        assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
        assert.Equal(t, map[string]string{
            "host":      "server01",
            "direction": "in",
            "region":    "us-west",
        }, point.Tags)
        assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
    } else {
        t.Errorf("No points found in accumulator, expected 1")
    }

}

// Waits for the metric that was sent to the mock NSQD to arrive at the
// consumer
func waitForPoint(acc *testutil.Accumulator, t *testing.T) {
    // Give the mock NSQD up to 5 seconds to get the point to the consumer
    ticker := time.NewTicker(5 * time.Millisecond)
    defer ticker.Stop()
    counter := 0
    for {
        select {
        case <-ticker.C:
            counter++
            if counter > 1000 {
                t.Fatal("Waited for 5s, point never arrived to consumer")
            } else if acc.NFields() == 1 {
                return
            }
        }
    }
}

func newMockNSQD(script []instruction, addr string) *mockNSQD {
    n := &mockNSQD{
        script:   script,
        exitChan: make(chan int),
    }

    tcpListener, err := net.Listen("tcp", addr)
    if err != nil {
        log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err)
    }
    n.tcpListener = tcpListener
    n.tcpAddr = tcpListener.Addr().(*net.TCPAddr)

    go n.listen()

    return n
}

// The code below allows us to mock the interactions with nsqd. This is taken from:
// https://github.com/nsqio/go-nsq/blob/master/mock_test.go
type instruction struct {
    delay     time.Duration
    frameType int32
    body      []byte
}

type mockNSQD struct {
    script      []instruction
    got         [][]byte
    tcpAddr     *net.TCPAddr
    tcpListener net.Listener
    exitChan    chan int
}

func (n *mockNSQD) listen() {
    for {
        conn, err := n.tcpListener.Accept()
        if err != nil {
            break
        }
        go n.handle(conn)
    }
    close(n.exitChan)
}

func (n *mockNSQD) handle(conn net.Conn) {
    var idx int
    buf := make([]byte, 4)
    _, err := io.ReadFull(conn, buf)
    if err != nil {
        log.Fatalf("ERROR: failed to read protocol version - %s", err)
    }

    readChan := make(chan []byte)
    readDoneChan := make(chan int)
    scriptTime := time.After(n.script[0].delay)
    rdr := bufio.NewReader(conn)

    go func() {
        for {
            line, err := rdr.ReadBytes('\n')
            if err != nil {
                return
            }
            // trim the '\n'
            line = line[:len(line)-1]
            readChan <- line
            <-readDoneChan
        }
    }()

    var rdyCount int
    for idx < len(n.script) {
        select {
        case line := <-readChan:
            n.got = append(n.got, line)
            params := bytes.Split(line, []byte(" "))
            switch {
            case bytes.Equal(params[0], []byte("IDENTIFY")):
                l := make([]byte, 4)
                _, err := io.ReadFull(rdr, l)
                if err != nil {
                    log.Printf(err.Error())
                    goto exit
                }
                size := int32(binary.BigEndian.Uint32(l))
                b := make([]byte, size)
                _, err = io.ReadFull(rdr, b)
                if err != nil {
                    log.Printf(err.Error())
                    goto exit
                }
            case bytes.Equal(params[0], []byte("RDY")):
                rdy, _ := strconv.Atoi(string(params[1]))
                rdyCount = rdy
            case bytes.Equal(params[0], []byte("FIN")):
            case bytes.Equal(params[0], []byte("REQ")):
            }
            readDoneChan <- 1
        case <-scriptTime:
            inst := n.script[idx]
            if bytes.Equal(inst.body, []byte("exit")) {
                goto exit
            }
            if inst.frameType == nsq.FrameTypeMessage {
                if rdyCount == 0 {
                    scriptTime = time.After(n.script[idx+1].delay)
                    continue
                }
                rdyCount--
            }
            _, err := conn.Write(framedResponse(inst.frameType, inst.body))
            if err != nil {
                log.Printf(err.Error())
                goto exit
            }
            scriptTime = time.After(n.script[idx+1].delay)
            idx++
        }
    }

exit:
    n.tcpListener.Close()
    conn.Close()
}

func framedResponse(frameType int32, data []byte) []byte {
    var w bytes.Buffer

    beBuf := make([]byte, 4)
    size := uint32(len(data)) + 4

    binary.BigEndian.PutUint32(beBuf, size)
    _, err := w.Write(beBuf)
    if err != nil {
        return nil
    }

    binary.BigEndian.PutUint32(beBuf, uint32(frameType))
    _, err = w.Write(beBuf)
    if err != nil {
        return nil
    }

    w.Write(data)
    return w.Bytes()
}

func frameMessage(m *nsq.Message) []byte {
    var b bytes.Buffer
    m.WriteTo(&b)
    return b.Bytes()
}
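As a complement to framedResponse above: an NSQ frame is a 4-byte big-endian size (counting the type field plus the body), a 4-byte big-endian frame type, then the body. A hedged decoding sketch that assumes only the layout the mock produces:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeFrame reverses framedResponse: it validates the size field and
// splits the frame into its type and body.
func decodeFrame(frame []byte) (frameType int32, body []byte, err error) {
	if len(frame) < 8 {
		return 0, nil, fmt.Errorf("frame too short: %d bytes", len(frame))
	}
	size := binary.BigEndian.Uint32(frame[0:4])
	frameType = int32(binary.BigEndian.Uint32(frame[4:8]))
	if int(size) != len(frame)-4 {
		return 0, nil, fmt.Errorf("size field %d does not match payload %d", size, len(frame)-4)
	}
	return frameType, frame[8:], nil
}

func main() {
	// the "OK" response frame the mock scripts: size=6, type=0 (response)
	frame := []byte{0, 0, 0, 6, 0, 0, 0, 0, 'O', 'K'}
	ft, body, _ := decodeFrame(frame)
	fmt.Println(ft, string(body)) // 0 OK
}
```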
@@ -140,7 +140,7 @@ func (p *Ping) args(url string) []string {
    if p.Timeout > 0 {
        switch runtime.GOOS {
        case "darwin":
            args = append(args, "-W", strconv.FormatFloat(p.Timeout/1000, 'f', 1, 64))
            args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', 1, 64))
        case "linux":
            args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
        default:

@@ -95,7 +95,10 @@ func TestArgs(t *testing.T) {
    p.Timeout = 12.0
    actual = p.args("www.google.com")
    switch runtime.GOOS {
    case "darwin", "freebsd":
    case "darwin":
        expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
            "12000.0", "www.google.com"}
    case "freebsd":
        expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-t",
            "12.0", "www.google.com"}
    default:
@@ -111,7 +114,10 @@ func TestArgs(t *testing.T) {
    p.PingInterval = 1.2
    actual = p.args("www.google.com")
    switch runtime.GOOS {
    case "darwin", "freebsd":
    case "darwin":
        expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W",
            "12000.0", "-i", "1.2", "www.google.com"}
    case "freebsd":
        expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-t",
            "12.0", "-i", "1.2", "www.google.com"}
    default:
@@ -70,7 +70,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
            p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
    } else {
        for pid, proc := range p.pidmap {
            p := NewSpecProcessor(p.ProcessName, p.Prefix, acc, proc, p.tagmap[pid])
            p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
            p.pushMetrics()
        }
    }
@@ -140,7 +140,6 @@ func (p *Procstat) pidsFromFile() ([]int32, error) {
                out = append(out, int32(pid))
                p.tagmap[int32(pid)] = map[string]string{
                    "pidfile": p.PidFile,
                    "pid":     strings.TrimSpace(string(pidString)),
                }
            }
        }
@@ -165,7 +164,6 @@ func (p *Procstat) pidsFromExe() ([]int32, error) {
            out = append(out, int32(ipid))
            p.tagmap[int32(ipid)] = map[string]string{
                "exe": p.Exe,
                "pid": pid,
            }
        } else {
            outerr = err
@@ -193,7 +191,6 @@ func (p *Procstat) pidsFromPattern() ([]int32, error) {
            out = append(out, int32(ipid))
            p.tagmap[int32(ipid)] = map[string]string{
                "pattern": p.Pattern,
                "pid":     pid,
            }
        } else {
            outerr = err
@@ -221,7 +218,6 @@ func (p *Procstat) pidsFromUser() ([]int32, error) {
            out = append(out, int32(ipid))
            p.tagmap[int32(ipid)] = map[string]string{
                "user": p.User,
                "pid":  pid,
            }
        } else {
            outerr = err
@@ -10,6 +10,7 @@ import (

type SpecProcessor struct {
    Prefix string
    pid    int32
    tags   map[string]string
    fields map[string]interface{}
    acc    telegraf.Accumulator
@@ -19,6 +20,7 @@ type SpecProcessor struct {
func NewSpecProcessor(
    processName string,
    prefix string,
    pid int32,
    acc telegraf.Accumulator,
    p *process.Process,
    tags map[string]string,
@@ -33,6 +35,7 @@ func NewSpecProcessor(
    }
    return &SpecProcessor{
        Prefix: prefix,
        pid:    pid,
        tags:   tags,
        fields: make(map[string]interface{}),
        acc:    acc,
@@ -45,7 +48,7 @@ func (p *SpecProcessor) pushMetrics() {
    if p.Prefix != "" {
        prefix = p.Prefix + "_"
    }
    fields := map[string]interface{}{}
    fields := map[string]interface{}{"pid": p.pid}

    numThreads, err := p.proc.NumThreads()
    if err == nil {
@@ -30,6 +30,26 @@ to filter and some tags
  kubeservice = "kube-apiserver"
```

```toml
# Authorize with a bearer token skipping cert verification
[[inputs.prometheus]]
  # An array of urls to scrape metrics from.
  urls = ["http://my-kube-apiserver:8080/metrics"]
  bearer_token = '/path/to/bearer/token'
  insecure_skip_verify = true
```

```toml
# Authorize using x509 certs
[[inputs.prometheus]]
  # An array of urls to scrape metrics from.
  urls = ["https://my-kube-apiserver:8080/metrics"]

  ssl_ca = '/path/to/cafile'
  ssl_cert = '/path/to/certfile'
  ssl_key = '/path/to/keyfile'
```
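In practice the `bearer_token` option amounts to reading the token file and setting an `Authorization` header on the scrape request, which is what the gatherURL change further below does. A hedged sketch of that step; the file path and helper name are illustrative, not the plugin's API:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
)

// authorizedRequest builds a GET request carrying "Bearer <token>", with the
// token read from a file as the bearer_token config option implies.
func authorizedRequest(url, tokenPath string) (*http.Request, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	token, err := ioutil.ReadFile(tokenPath)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token)))
	return req, nil
}

func main() {
	tmp, err := ioutil.TempFile("", "token")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	tmp.WriteString("s3cr3t\n")
	tmp.Close()

	req, err := authorizedRequest("http://my-kube-apiserver:8080/metrics", tmp.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization")) // Bearer s3cr3t
}
```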

### Measurements & Fields & Tags:

Measurements and fields can be anything.
@@ -10,6 +10,7 @@ import (
    "io"
    "math"
    "mime"
    "net/http"
    "time"

    "github.com/influxdata/telegraf"
@@ -19,17 +20,9 @@ import (
    "github.com/prometheus/common/expfmt"
)

// PrometheusParser is an object for Parsing incoming metrics.
type PrometheusParser struct {
    // PromFormat
    PromFormat map[string]string
    // DefaultTags will be added to every parsed metric
    // DefaultTags map[string]string
}

// Parse returns a slice of Metrics from a text representation of
// metrics
func (p *PrometheusParser) Parse(buf []byte) ([]telegraf.Metric, error) {
func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
    var metrics []telegraf.Metric
    var parser expfmt.TextParser
    // parse even if the buffer begins with a newline
@@ -38,97 +31,71 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
    buffer := bytes.NewBuffer(buf)
    reader := bufio.NewReader(buffer)

    // Get format
    mediatype, params, err := mime.ParseMediaType(p.PromFormat["Content-Type"])
    mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
    // Prepare output
    metricFamilies := make(map[string]*dto.MetricFamily)

    if err == nil && mediatype == "application/vnd.google.protobuf" &&
        params["encoding"] == "delimited" &&
        params["proto"] == "io.prometheus.client.MetricFamily" {
        for {
            metricFamily := &dto.MetricFamily{}
            if _, err = pbutil.ReadDelimited(reader, metricFamily); err != nil {
                if err == io.EOF {
            mf := &dto.MetricFamily{}
            if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
                if ierr == io.EOF {
                    break
                }
                return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", err)
                return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
            }
            metricFamilies[metricFamily.GetName()] = metricFamily
            metricFamilies[mf.GetName()] = mf
        }
    } else {
        metricFamilies, err = parser.TextToMetricFamilies(reader)
        if err != nil {
            return nil, fmt.Errorf("reading text format failed: %s", err)
        }
        // read metrics
        for metricName, mf := range metricFamilies {
            for _, m := range mf.Metric {
                // reading tags
                tags := makeLabels(m)
                /*
                    for key, value := range p.DefaultTags {
                        tags[key] = value
                    }
                */
                // reading fields
                fields := make(map[string]interface{})
                if mf.GetType() == dto.MetricType_SUMMARY {
                    // summary metric
                    fields = makeQuantiles(m)
                    fields["count"] = float64(m.GetSummary().GetSampleCount())
                    fields["sum"] = float64(m.GetSummary().GetSampleSum())
                } else if mf.GetType() == dto.MetricType_HISTOGRAM {
                    // histogram metric
                    fields = makeBuckets(m)
                    fields["count"] = float64(m.GetHistogram().GetSampleCount())
                    fields["sum"] = float64(m.GetHistogram().GetSampleSum())
                }

    // read metrics
    for metricName, mf := range metricFamilies {
        for _, m := range mf.Metric {
            // reading tags
            tags := makeLabels(m)
            // reading fields
            fields := make(map[string]interface{})
            if mf.GetType() == dto.MetricType_SUMMARY {
                // summary metric
                fields = makeQuantiles(m)
                fields["count"] = float64(m.GetSummary().GetSampleCount())
                fields["sum"] = float64(m.GetSummary().GetSampleSum())
            } else if mf.GetType() == dto.MetricType_HISTOGRAM {
                // histogram metric
                fields = makeBuckets(m)
                fields["count"] = float64(m.GetHistogram().GetSampleCount())
                fields["sum"] = float64(m.GetHistogram().GetSampleSum())

            } else {
                // standard metric
                fields = getNameAndValue(m)
            }
            // converting to telegraf metric
            if len(fields) > 0 {
                var t time.Time
                if m.TimestampMs != nil && *m.TimestampMs > 0 {
                    t = time.Unix(0, *m.TimestampMs*1000000)
                } else {
                    // standard metric
                    fields = getNameAndValue(m)
                    t = time.Now()
                }
                // converting to telegraf metric
                if len(fields) > 0 {
                    var t time.Time
                    if m.TimestampMs != nil && *m.TimestampMs > 0 {
                        t = time.Unix(0, *m.TimestampMs*1000000)
                    } else {
                        t = time.Now()
                    }
                    metric, err := telegraf.NewMetric(metricName, tags, fields, t)
                    if err == nil {
                        metrics = append(metrics, metric)
                    }
                metric, err := telegraf.NewMetric(metricName, tags, fields, t)
                if err == nil {
                    metrics = append(metrics, metric)
                }
            }
        }
    }

    return metrics, err
}
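The content negotiation Parse now performs is easy to isolate: inspect the response's Content-Type and choose the protobuf-delimited reader only for the exact media type Prometheus advertises, falling back to the text parser otherwise. A small self-contained sketch of that check:

```go
package main

import (
	"fmt"
	"mime"
	"net/http"
)

// wantsProtobuf reports whether the scraped endpoint returned the delimited
// protobuf exposition format rather than the plain-text one.
func wantsProtobuf(h http.Header) bool {
	mediatype, params, err := mime.ParseMediaType(h.Get("Content-Type"))
	return err == nil &&
		mediatype == "application/vnd.google.protobuf" &&
		params["encoding"] == "delimited" &&
		params["proto"] == "io.prometheus.client.MetricFamily"
}

func main() {
	h := http.Header{}
	h.Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
	fmt.Println(wantsProtobuf(h)) // true
	h.Set("Content-Type", "text/plain; version=0.0.4")
	fmt.Println(wantsProtobuf(h)) // false
}
```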

// Parse one line
func (p *PrometheusParser) ParseLine(line string) (telegraf.Metric, error) {
    metrics, err := p.Parse([]byte(line + "\n"))

    if err != nil {
        return nil, err
    }

    if len(metrics) < 1 {
        return nil, fmt.Errorf(
            "Can not parse the line: %s, for data format: prometheus", line)
    }

    return metrics[0], nil
}

/*
// Set default tags
func (p *PrometheusParser) SetDefaultTags(tags map[string]string) {
    p.DefaultTags = tags
}
*/

// Get Quantiles from summary metric
func makeQuantiles(m *dto.Metric) map[string]interface{} {
    fields := make(map[string]interface{})
@@ -1,6 +1,7 @@
package prometheus

import (
    "net/http"
    "testing"
    "time"

@@ -101,10 +102,8 @@ cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParseValidPrometheus(t *testing.T) {
    parser := PrometheusParser{}

    // Gauge value
    metrics, err := parser.Parse([]byte(validUniqueGauge))
    metrics, err := Parse([]byte(validUniqueGauge), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "cadvisor_version_info", metrics[0].Name())
@@ -118,8 +117,7 @@ func TestParseValidPrometheus(t *testing.T) {
    }, metrics[0].Tags())

    // Counter value
    //parser.SetDefaultTags(map[string]string{"mytag": "mytagvalue"})
    metrics, err = parser.Parse([]byte(validUniqueCounter))
    metrics, err = Parse([]byte(validUniqueCounter), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "get_token_fail_count", metrics[0].Name())
@@ -129,8 +127,8 @@ func TestParseValidPrometheus(t *testing.T) {
    assert.Equal(t, map[string]string{}, metrics[0].Tags())

    // Summary data
    //parser.SetDefaultTags(map[string]string{})
    metrics, err = parser.Parse([]byte(validUniqueSummary))
    //SetDefaultTags(map[string]string{})
    metrics, err = Parse([]byte(validUniqueSummary), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name())
@@ -144,7 +142,7 @@ func TestParseValidPrometheus(t *testing.T) {
    assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags())

    // histogram data
    metrics, err = parser.Parse([]byte(validUniqueHistogram))
    metrics, err = Parse([]byte(validUniqueHistogram), http.Header{})
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    assert.Equal(t, "apiserver_request_latencies", metrics[0].Name())
@@ -165,11 +163,3 @@ func TestParseValidPrometheus(t *testing.T) {
        metrics[0].Tags())

}

func TestParseLineInvalidPrometheus(t *testing.T) {
    parser := PrometheusParser{}
    metric, err := parser.ParseLine(validUniqueLine)
    assert.NotNil(t, err)
    assert.Nil(t, metric)

}
@@ -1,10 +1,10 @@
package prometheus

import (
"crypto/tls"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"net"
@@ -13,23 +13,37 @@ import (
"time"
)

const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`

type Prometheus struct {
Urls []string

// Use SSL but skip chain & host verification
InsecureSkipVerify bool
// Bearer Token authorization file path
BearerToken string `toml:"bearer_token"`

// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
}

var sampleConfig = `
## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]

## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Use bearer token for authorization
# bearer_token = /path/to/bearer/token

## Optional SSL Config
# ssl_ca = /path/to/cafile
# ssl_cert = /path/to/certfile
# ssl_key = /path/to/keyfile
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
`

func (p *Prometheus) SampleConfig() string {
@@ -74,20 +88,25 @@ var client = &http.Client{
func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
collectDate := time.Now()
var req, err = http.NewRequest("GET", url, nil)
req.Header = make(http.Header)
req.Header.Add("Accept", acceptHeader)
var token []byte
var resp *http.Response

tlsCfg, err := internal.GetTLSConfig(
p.SSLCert, p.SSLKey, p.SSLCA, p.InsecureSkipVerify)
if err != nil {
return err
}

var rt http.RoundTripper = &http.Transport{
Dial: (&net.Dialer{
Timeout: 5 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: p.InsecureSkipVerify,
},
TLSHandshakeTimeout: 5 * time.Second,
TLSClientConfig: tlsCfg,
ResponseHeaderTimeout: time.Duration(3 * time.Second),
DisableKeepAlives: true,
}

if p.BearerToken != "" {
@@ -112,20 +131,9 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
return fmt.Errorf("error reading body: %s", err)
}

// Headers
headers := make(map[string]string)
for key, value := range headers {
headers[key] = value
}

// Prepare Prometheus parser config
promparser := PrometheusParser{
PromFormat: headers,
}

metrics, err := promparser.Parse(body)
metrics, err := Parse(body, resp.Header)
if err != nil {
return fmt.Errorf("error getting processing samples for %s: %s",
return fmt.Errorf("error reading metrics for %s: %s",
url, err)
}
// Add (or not) collected metrics
@@ -43,6 +43,7 @@
- latest_fork_usec
- connected_slaves
- master_repl_offset
- master_last_io_seconds_ago
- repl_backlog_active
- repl_backlog_size
- repl_backlog_histlen
@@ -57,6 +58,7 @@
- All measurements have the following tags:
- port
- server
- replication_role

### Example Output:

@@ -12,6 +12,7 @@ import (
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -25,6 +26,7 @@ var sampleConfig = `
## e.g.
## tcp://localhost:6379
## tcp://:password@192.168.99.100
## unix:///var/run/redis.sock
##
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 6379 is used
@@ -66,6 +68,7 @@ var Tracking = map[string]string{
"latest_fork_usec": "latest_fork_usec",
"connected_slaves": "connected_slaves",
"master_repl_offset": "master_repl_offset",
"master_last_io_seconds_ago": "master_last_io_seconds_ago",
"repl_backlog_active": "repl_backlog_active",
"repl_backlog_size": "repl_backlog_size",
"repl_backlog_histlen": "repl_backlog_histlen",
@@ -74,27 +77,32 @@ var Tracking = map[string]string{
"used_cpu_user": "used_cpu_user",
"used_cpu_sys_children": "used_cpu_sys_children",
"used_cpu_user_children": "used_cpu_user_children",
"role": "role",
"role": "replication_role",
}

var ErrProtocolError = errors.New("redis protocol error")

const defaultPort = "6379"

// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (r *Redis) Gather(acc telegraf.Accumulator) error {
if len(r.Servers) == 0 {
url := &url.URL{
Host: ":6379",
Scheme: "tcp",
Host: ":6379",
}
r.gatherServer(url, acc)
return nil
}

var wg sync.WaitGroup

var outerr error

errChan := errchan.New(len(r.Servers))
for _, serv := range r.Servers {
if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") {
serv = "tcp://" + serv
}

u, err := url.Parse(serv)
if err != nil {
return fmt.Errorf("Unable to parse address '%s': %s", serv, err)
@@ -104,29 +112,35 @@ func (r *Redis) Gather(acc telegraf.Accumulator) error {
u.Host = serv
u.Path = ""
}
if u.Scheme == "tcp" {
_, _, err := net.SplitHostPort(u.Host)
if err != nil {
u.Host = u.Host + ":" + defaultPort
}
}

wg.Add(1)
go func(serv string) {
defer wg.Done()
outerr = r.gatherServer(u, acc)
errChan.C <- r.gatherServer(u, acc)
}(serv)
}

wg.Wait()

return outerr
return errChan.Error()
}

const defaultPort = "6379"

func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
_, _, err := net.SplitHostPort(addr.Host)
if err != nil {
addr.Host = addr.Host + ":" + defaultPort
}
var address string

c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
if addr.Scheme == "unix" {
address = addr.Path
} else {
address = addr.Host
}
c, err := net.DialTimeout(addr.Scheme, address, defaultTimeout)
if err != nil {
return fmt.Errorf("Unable to connect to redis server '%s': %s", addr.Host, err)
return fmt.Errorf("Unable to connect to redis server '%s': %s", address, err)
}
defer c.Close()

@@ -154,12 +168,17 @@ func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
c.Write([]byte("EOF\r\n"))
rdr := bufio.NewReader(c)

// Setup tags for all redis metrics
host, port := "unknown", "unknown"
// If there's an error, ignore and use 'unknown' tags
host, port, _ = net.SplitHostPort(addr.Host)
tags := map[string]string{"server": host, "port": port}
var tags map[string]string

if addr.Scheme == "unix" {
tags = map[string]string{"socket": addr.Path}
} else {
// Setup tags for all redis metrics
host, port := "unknown", "unknown"
// If there's an error, ignore and use 'unknown' tags
host, port, _ = net.SplitHostPort(addr.Host)
tags = map[string]string{"server": host, "port": port}
}
return gatherInfoOutput(rdr, acc, tags)
}

@@ -208,7 +227,7 @@ func gatherInfoOutput(
}

if name == "role" {
tags["role"] = val
tags["replication_role"] = val
continue
}

@@ -35,7 +35,7 @@ func TestRedis_ParseMetrics(t *testing.T) {
err := gatherInfoOutput(rdr, &acc, tags)
require.NoError(t, err)

tags = map[string]string{"host": "redis.net", "role": "master"}
tags = map[string]string{"host": "redis.net", "replication_role": "master"}
fields := map[string]interface{}{
"uptime": uint64(238),
"clients": uint64(1),
@@ -71,7 +71,7 @@ func TestRedis_ParseMetrics(t *testing.T) {
"used_cpu_user_children": float64(0.00),
"keyspace_hitrate": float64(0.50),
}
keyspaceTags := map[string]string{"host": "redis.net", "role": "master", "database": "db0"}
keyspaceTags := map[string]string{"host": "redis.net", "replication_role": "master", "database": "db0"}
keyspaceFields := map[string]interface{}{
"avg_ttl": uint64(0),
"expires": uint64(0),
@@ -58,6 +58,8 @@ Riak provides one measurement named "riak", with the following fields:
- vnode_index_writes_total
- vnode_puts
- vnode_puts_total
- read_repairs
- read_repairs_total

Measurements of time (such as node_get_fsm_time_mean) are measured in nanoseconds.

@@ -72,5 +74,5 @@ All measurements have the following tags:

```
$ ./telegraf -config telegraf.conf -input-filter riak -test
> riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i 1455913392622482332
> riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i,read_repairs=0i,read_repairs_total=0i 1455913392622482332
```

@@ -75,6 +75,8 @@ type riakStats struct {
VnodeIndexWritesTotal int64 `json:"vnode_index_writes_total"`
VnodePuts int64 `json:"vnode_puts"`
VnodePutsTotal int64 `json:"vnode_puts_total"`
ReadRepairs int64 `json:"read_repairs"`
ReadRepairsTotal int64 `json:"read_repairs_total"`
}

// A sample configuration to only gather stats from localhost, default port.
@@ -187,6 +189,8 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
"vnode_index_writes_total": stats.VnodeIndexWritesTotal,
"vnode_puts": stats.VnodePuts,
"vnode_puts_total": stats.VnodePutsTotal,
"read_repairs": stats.ReadRepairs,
"read_repairs_total": stats.ReadRepairsTotal,
}

// Accumulate the tags and values

@@ -66,6 +66,8 @@ func TestRiak(t *testing.T) {
"node_put_fsm_time_99": int64(84422),
"node_put_fsm_time_mean": int64(10832),
"node_put_fsm_time_median": int64(4085),
"read_repairs": int64(2),
"read_repairs_total": int64(7918375),
"node_puts": int64(1155),
"node_puts_total": int64(444895769),
"pbc_active": int64(360),
@@ -1,119 +0,0 @@
package rollbar_webhooks

import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"sync"
"time"

"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)

func init() {
inputs.Add("rollbar_webhooks", func() telegraf.Input { return NewRollbarWebhooks() })
}

type RollbarWebhooks struct {
ServiceAddress string
// Lock for the struct
sync.Mutex
// Events buffer to store events between Gather calls
events []Event
}

func NewRollbarWebhooks() *RollbarWebhooks {
return &RollbarWebhooks{}
}

func (rb *RollbarWebhooks) SampleConfig() string {
return `
## Address and port to host Webhook listener on
service_address = ":1619"
`
}

func (rb *RollbarWebhooks) Description() string {
return "A Rollbar Webhook Event collector"
}

func (rb *RollbarWebhooks) Gather(acc telegraf.Accumulator) error {
rb.Lock()
defer rb.Unlock()
for _, event := range rb.events {
acc.AddFields("rollbar_webhooks", event.Fields(), event.Tags(), time.Now())
}
rb.events = make([]Event, 0)
return nil
}

func (rb *RollbarWebhooks) Listen() {
r := mux.NewRouter()
r.HandleFunc("/", rb.eventHandler).Methods("POST")
err := http.ListenAndServe(fmt.Sprintf("%s", rb.ServiceAddress), r)
if err != nil {
log.Printf("Error starting server: %v", err)
}
}

func (rb *RollbarWebhooks) Start(_ telegraf.Accumulator) error {
go rb.Listen()
log.Printf("Started the rollbar_webhooks service on %s\n", rb.ServiceAddress)
return nil
}

func (rb *RollbarWebhooks) Stop() {
log.Println("Stopping the rbWebhooks service")
}

func (rb *RollbarWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}

dummyEvent := &DummyEvent{}
err = json.Unmarshal(data, dummyEvent)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}

event, err := NewEvent(dummyEvent, data)
if err != nil {
w.WriteHeader(http.StatusOK)
return
}

rb.Lock()
rb.events = append(rb.events, event)
rb.Unlock()

w.WriteHeader(http.StatusOK)
}

func generateEvent(event Event, data []byte) (Event, error) {
err := json.Unmarshal(data, event)
if err != nil {
return nil, err
}
return event, nil
}

func NewEvent(dummyEvent *DummyEvent, data []byte) (Event, error) {
switch dummyEvent.EventName {
case "new_item":
return generateEvent(&NewItem{}, data)
case "deploy":
return generateEvent(&Deploy{}, data)
default:
return nil, errors.New("Not implemented type: " + dummyEvent.EventName)
}
}
@@ -400,6 +400,8 @@ IF OBJECT_ID('tempdb..#baseline') IS NOT NULL
DROP TABLE #baseline;
SELECT
DB_NAME(mf.database_id) AS database_name ,
mf.size as database_size_8k_pages,
mf.max_size as database_max_size_8k_pages,
size_on_disk_bytes ,
type_desc as datafile_type,
GETDATE() AS baselineDate
@@ -435,6 +437,50 @@ FROM #baseline
WHERE datafile_type = ''ROWS''
) as V
PIVOT(SUM(size_on_disk_bytes) FOR database_name IN (' + @ColumnName + ')) AS PVTTable

UNION ALL

SELECT measurement = ''Rows size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
, ' + @ColumnName + ' FROM
(
SELECT database_name, database_size_8k_pages
FROM #baseline
WHERE datafile_type = ''ROWS''
) as V
PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable

UNION ALL

SELECT measurement = ''Log size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
, ' + @ColumnName + ' FROM
(
SELECT database_name, database_size_8k_pages
FROM #baseline
WHERE datafile_type = ''LOG''
) as V
PIVOT(SUM(database_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable

UNION ALL

SELECT measurement = ''Rows max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
, ' + @ColumnName + ' FROM
(
SELECT database_name, database_max_size_8k_pages
FROM #baseline
WHERE datafile_type = ''ROWS''
) as V
PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable

UNION ALL

SELECT measurement = ''Logs max size (8KB pages)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database size''
, ' + @ColumnName + ' FROM
(
SELECT database_name, database_max_size_8k_pages
FROM #baseline
WHERE datafile_type = ''LOG''
) as V
PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ')) AS PVTTable
'
--PRINT @DynamicPivotQuery
EXEC sp_executesql @DynamicPivotQuery;
@@ -92,8 +92,8 @@ var diskIoSampleConfig = `
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
## Uncomment the following line if you do not need disk serial numbers.
# skip_serial_number = true
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
`

func (_ *DiskIOStats) SampleConfig() string {
@@ -151,6 +151,6 @@ func init() {
})

inputs.Add("diskio", func() telegraf.Input {
return &DiskIOStats{ps: &systemPS{}}
return &DiskIOStats{ps: &systemPS{}, SkipSerialNumber: true}
})
}
@@ -86,9 +86,10 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
for file, _ := range g.Match() {
tailer, err := tail.TailFile(file,
tail.Config{
ReOpen: true,
Follow: true,
Location: &seek,
ReOpen: true,
Follow: true,
Location: &seek,
MustExist: true,
})
if err != nil {
errS += err.Error() + " "
@@ -31,6 +31,8 @@ type TcpListener struct {
accept chan bool
// drops tracks the number of dropped metrics.
drops int
// malformed tracks the number of malformed packets
malformed int

// track the listener here so we can close it in Stop()
listener *net.TCPListener
@@ -45,6 +47,9 @@ var dropwarn = "ERROR: tcp_listener message queue full. " +
"We have dropped %d messages so far. " +
"You may want to increase allowed_pending_messages in the config\n"

var malformedwarn = "WARNING: tcp_listener has received %d malformed packets" +
" thus far."

const sampleConfig = `
## Address and port to host TCP listener on
service_address = ":8094"
@@ -243,8 +248,10 @@ func (t *TcpListener) tcpParser() error {
if err == nil {
t.storeMetrics(metrics)
} else {
log.Printf("Malformed packet: [%s], Error: %s\n",
string(packet), err)
t.malformed++
if t.malformed == 1 || t.malformed%1000 == 0 {
log.Printf(malformedwarn, t.malformed)
}
}
}
}
@@ -27,6 +27,8 @@ type UdpListener struct {
done chan struct{}
// drops tracks the number of dropped metrics.
drops int
// malformed tracks the number of malformed packets
malformed int

parser parsers.Parser

@@ -44,6 +46,9 @@ var dropwarn = "ERROR: udp_listener message queue full. " +
"We have dropped %d messages so far. " +
"You may want to increase allowed_pending_messages in the config\n"

var malformedwarn = "WARNING: udp_listener has received %d malformed packets" +
" thus far."

const sampleConfig = `
## Address and port to host UDP listener on
service_address = ":8092"
@@ -152,7 +157,10 @@ func (u *UdpListener) udpParser() error {
if err == nil {
u.storeMetrics(metrics)
} else {
log.Printf("Malformed packet: [%s], Error: %s\n", packet, err)
u.malformed++
if u.malformed == 1 || u.malformed%1000 == 0 {
log.Printf(malformedwarn, u.malformed)
}
}
}
}
plugins/inputs/webhooks/README.md (new file, 28 lines)
@@ -0,0 +1,28 @@
# Webhooks

This is a Telegraf service plugin that starts an HTTP server and registers multiple webhook listeners.

```sh
$ telegraf -sample-config -input-filter webhooks -output-filter influxdb > config.conf.new
```

Change the config file to point to the InfluxDB server you are using and adjust the settings to match your environment. Once that is complete:

```sh
$ cp config.conf.new /etc/telegraf/telegraf.conf
$ sudo service telegraf start
```

## Available webhooks

- [Github](github/)
- [Mandrill](mandrill/)
- [Rollbar](rollbar/)

## Adding a new webhook plugin

1. Add your webhook plugin inside the `webhooks` folder
1. Your plugin must implement the `Webhook` interface
1. Import your plugin in the `webhooks.go` file and add it to the `Webhooks` struct

Both [Github](github/) and [Rollbar](rollbar/) are good examples to follow; a minimal sketch of a new plugin is shown below.
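To make those three steps concrete, here is a minimal sketch of such a plugin. Everything named `example`/`ExampleWebhook` is hypothetical and not part of this changeset; only the `Register(router *mux.Router, acc telegraf.Accumulator)` signature is taken from the `Webhook` interface defined in `webhooks.go` later in this diff.

```go
package example

import (
	"io/ioutil"
	"log"
	"net/http"
	"time"

	"github.com/gorilla/mux"
	"github.com/influxdata/telegraf"
)

// ExampleWebhook is a hypothetical webhook plugin; a real one would live
// under plugins/inputs/webhooks/example and be added as a field of the
// Webhooks struct in webhooks.go.
type ExampleWebhook struct {
	Path string
	acc  telegraf.Accumulator
}

// Register satisfies the Webhook interface: it attaches the handler to the
// shared router and keeps the accumulator for writing events as they arrive.
func (e *ExampleWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
	router.HandleFunc(e.Path, e.eventHandler).Methods("POST")
	log.Printf("Started the webhooks_example on %s\n", e.Path)
	e.acc = acc
}

func (e *ExampleWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// A real plugin would decode the payload into meaningful fields and
	// tags; this sketch only records the payload size.
	e.acc.AddFields("example_webhooks",
		map[string]interface{}{"size": len(body)},
		map[string]string{},
		time.Now())
	w.WriteHeader(http.StatusOK)
}
```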
@@ -1,15 +1,6 @@
# github_webhooks
# github webhooks

This is a Telegraf service plugin that listens for events kicked off by Github's Webhooks service and persists data from them into configured outputs. To set up the listener first generate the proper configuration:
```sh
$ telegraf -sample-config -input-filter github_webhooks -output-filter influxdb > config.conf.new
```
Change the config file to point to the InfluxDB server you are using and adjust the settings to match your environment. Once that is complete:
```sh
$ cp config.conf.new /etc/telegraf/telegraf.conf
$ sudo service telegraf start
```
Once the server is running you should configure your Organization's Webhooks to point at the `github_webhooks` service. To do this go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to `http://<my_ip>:1618`, `Content type` to `application/json` and under the section `Which events would you like to trigger this webhook?` select 'Send me <b>everything</b>'. By default all of the events will write to the `github_webhooks` measurement; this is configurable by setting the `measurement_name` in the config file.
You should configure your Organization's Webhooks to point at the `webhooks` service. To do this go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to `http://<my_ip>:1619/github`, `Content type` to `application/json` and under the section `Which events would you like to trigger this webhook?` select 'Send me <b>everything</b>'. By default all of the events will write to the `github_webhooks` measurement; this is configurable by setting the `measurement_name` in the config file.

## Events

@@ -1,78 +1,27 @@
package github_webhooks
package github

import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"sync"

"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)

func init() {
inputs.Add("github_webhooks", func() telegraf.Input { return &GithubWebhooks{} })
type GithubWebhook struct {
Path string
acc telegraf.Accumulator
}

type GithubWebhooks struct {
ServiceAddress string
// Lock for the struct
sync.Mutex
// Events buffer to store events between Gather calls
events []Event
func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
router.HandleFunc(gh.Path, gh.eventHandler).Methods("POST")
log.Printf("Started the webhooks_github on %s\n", gh.Path)
gh.acc = acc
}

func NewGithubWebhooks() *GithubWebhooks {
return &GithubWebhooks{}
}

func (gh *GithubWebhooks) SampleConfig() string {
return `
## Address and port to host Webhook listener on
service_address = ":1618"
`
}

func (gh *GithubWebhooks) Description() string {
return "A Github Webhook Event collector"
}

// Writes the points from <-gh.in to the Accumulator
func (gh *GithubWebhooks) Gather(acc telegraf.Accumulator) error {
gh.Lock()
defer gh.Unlock()
for _, event := range gh.events {
p := event.NewMetric()
acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
}
gh.events = make([]Event, 0)
return nil
}

func (gh *GithubWebhooks) Listen() {
r := mux.NewRouter()
r.HandleFunc("/", gh.eventHandler).Methods("POST")
err := http.ListenAndServe(fmt.Sprintf("%s", gh.ServiceAddress), r)
if err != nil {
log.Printf("Error starting server: %v", err)
}
}

func (gh *GithubWebhooks) Start(_ telegraf.Accumulator) error {
go gh.Listen()
log.Printf("Started the github_webhooks service on %s\n", gh.ServiceAddress)
return nil
}

func (gh *GithubWebhooks) Stop() {
log.Println("Stopping the ghWebhooks service")
}

// Handles the / route
func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
eventType := r.Header["X-Github-Event"][0]
data, err := ioutil.ReadAll(r.Body)
@@ -85,9 +34,10 @@ func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
return
}
gh.Lock()
gh.events = append(gh.events, e)
gh.Unlock()

p := e.NewMetric()
gh.acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())

w.WriteHeader(http.StatusOK)
}
@@ -1,4 +1,4 @@
package github_webhooks
package github

func CommitCommentEventJSON() string {
return `{
@@ -1,4 +1,4 @@
package github_webhooks
package github

import (
"fmt"
@@ -1,15 +1,18 @@
package github_webhooks
package github

import (
"net/http"
"net/http/httptest"
"strings"
"testing"

"github.com/influxdata/telegraf/testutil"
)

func GithubWebhookRequest(event string, jsonString string, t *testing.T) {
gh := NewGithubWebhooks()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
var acc testutil.Accumulator
gh := &GithubWebhook{Path: "/github", acc: &acc}
req, _ := http.NewRequest("POST", "/github", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", event)
w := httptest.NewRecorder()
gh.eventHandler(w, req)
plugins/inputs/webhooks/mandrill/README.md (new file, 15 lines)
@@ -0,0 +1,15 @@
# mandrill webhook

You should configure your Mandrill's Webhooks to point at the `webhooks` service. To do this go to `mandrillapp.com/` and click `Settings > Webhooks`. In the resulting page, click on `Add a Webhook`, select all events, set the `URL` to `http://<my_ip>:1619/mandrill`, and click on `Create Webhook`.

## Events

See the [webhook doc](https://mandrill.zendesk.com/hc/en-us/articles/205583307-Message-Event-Webhook-format).

Each event logs the original timestamp, the event name, and the unique identifier of the message that generated the event.

**Tags:**
* 'event' = `event.event` string

**Fields:**
* 'id' = `event._id` string
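Putting the tag and field together: assuming the `send` fixture shown later in this diff (`_id` = `id1`, `ts` = 1384954004), the stored point would look roughly like the following in line protocol. This is an illustration derived from the test fixtures, not output captured from a running agent.

```
mandrill_webhooks,event=send id="id1" 1384954004000000000
```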
plugins/inputs/webhooks/mandrill/mandrill_webhooks.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package mandrill

import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"

"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
)

type MandrillWebhook struct {
Path string
acc telegraf.Accumulator
}

func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
router.HandleFunc(md.Path, md.returnOK).Methods("HEAD")
router.HandleFunc(md.Path, md.eventHandler).Methods("POST")

log.Printf("Started the webhooks_mandrill on %s\n", md.Path)
md.acc = acc
}

func (md *MandrillWebhook) returnOK(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}

func (md *MandrillWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
data, err := url.ParseQuery(string(body))
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
var events []MandrillEvent
err = json.Unmarshal([]byte(data.Get("mandrill_events")), &events)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}

for _, event := range events {
md.acc.AddFields("mandrill_webhooks", event.Fields(), event.Tags(), time.Unix(event.TimeStamp, 0))
}

w.WriteHeader(http.StatusOK)
}
plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package mandrill

type Event interface {
Tags() map[string]string
Fields() map[string]interface{}
}

type MandrillEvent struct {
EventName string `json:"event"`
TimeStamp int64 `json:"ts"`
Id string `json:"_id"`
}

func (me *MandrillEvent) Tags() map[string]string {
return map[string]string{
"event": me.EventName,
}
}

func (me *MandrillEvent) Fields() map[string]interface{} {
return map[string]interface{}{
"id": me.Id,
}
}
@@ -0,0 +1,58 @@
package mandrill

func SendEventJSON() string {
return `
{
"event": "send",
"msg": {
"ts": 1365109999,
"subject": "This an example webhook message",
"email": "example.webhook@mandrillapp.com",
"sender": "example.sender@mandrillapp.com",
"tags": [
"webhook-example"
],
"opens": [

],
"clicks": [

],
"state": "sent",
"metadata": {
"user_id": 111
},
"_id": "exampleaaaaaaaaaaaaaaaaaaaaaaaaa",
"_version": "exampleaaaaaaaaaaaaaaa"
},
"_id": "id1",
"ts": 1384954004
}`
}

func HardBounceEventJSON() string {
return `
{
"event": "hard_bounce",
"msg": {
"ts": 1365109999,
"subject": "This an example webhook message",
"email": "example.webhook@mandrillapp.com",
"sender": "example.sender@mandrillapp.com",
"tags": [
"webhook-example"
],
"state": "bounced",
"metadata": {
"user_id": 111
},
"_id": "exampleaaaaaaaaaaaaaaaaaaaaaaaaa2",
"_version": "exampleaaaaaaaaaaaaaaa",
"bounce_description": "bad_mailbox",
"bgtools_code": 10,
"diag": "smtp;550 5.1.1 The email account that you tried to reach does not exist. Please try double-checking the recipient's email address for typos or unnecessary spaces."
},
"_id": "id2",
"ts": 1384954004
}`
}
plugins/inputs/webhooks/mandrill/mandrill_webhooks_test.go (new file, 85 lines)
@@ -0,0 +1,85 @@
package mandrill

import (
"github.com/influxdata/telegraf/testutil"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
)

func postWebhooks(md *MandrillWebhook, eventBody string) *httptest.ResponseRecorder {
body := url.Values{}
body.Set("mandrill_events", eventBody)
req, _ := http.NewRequest("POST", "/mandrill", strings.NewReader(body.Encode()))
w := httptest.NewRecorder()

md.eventHandler(w, req)

return w
}

func headRequest(md *MandrillWebhook) *httptest.ResponseRecorder {
req, _ := http.NewRequest("HEAD", "/mandrill", strings.NewReader(""))
w := httptest.NewRecorder()

md.returnOK(w, req)

return w
}

func TestHead(t *testing.T) {
md := &MandrillWebhook{Path: "/mandrill"}
resp := headRequest(md)
if resp.Code != http.StatusOK {
t.Errorf("HEAD returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
}
}

func TestSendEvent(t *testing.T) {
var acc testutil.Accumulator
md := &MandrillWebhook{Path: "/mandrill", acc: &acc}
resp := postWebhooks(md, "["+SendEventJSON()+"]")
if resp.Code != http.StatusOK {
t.Errorf("POST send returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
}

fields := map[string]interface{}{
"id": "id1",
}

tags := map[string]string{
"event": "send",
}

acc.AssertContainsTaggedFields(t, "mandrill_webhooks", fields, tags)
}

func TestMultipleEvents(t *testing.T) {
var acc testutil.Accumulator
md := &MandrillWebhook{Path: "/mandrill", acc: &acc}
resp := postWebhooks(md, "["+SendEventJSON()+","+HardBounceEventJSON()+"]")
if resp.Code != http.StatusOK {
t.Errorf("POST send returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
}

fields := map[string]interface{}{
"id": "id1",
}

tags := map[string]string{
"event": "send",
}

acc.AssertContainsTaggedFields(t, "mandrill_webhooks", fields, tags)

fields = map[string]interface{}{
"id": "id2",
}

tags = map[string]string{
"event": "hard_bounce",
}
acc.AssertContainsTaggedFields(t, "mandrill_webhooks", fields, tags)
}
@@ -1,15 +1,6 @@
# rollbar_webhooks
# rollbar webhooks

This is a Telegraf service plugin that listens for events kicked off by Rollbar Webhooks service and persists data from them into configured outputs. To set up the listener first generate the proper configuration:
```sh
$ telegraf -sample-config -input-filter rollbar_webhooks -output-filter influxdb > config.conf.new
```
Change the config file to point to the InfluxDB server you are using and adjust the settings to match your environment. Once that is complete:
```sh
$ cp config.conf.new /etc/telegraf/telegraf.conf
$ sudo service telegraf start
```
Once the server is running you should configure your Rollbar's Webhooks to point at the `rollbar_webhooks` service. To do this go to `rollbar.com/` and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://<my_ip>:1619`, and click on `Enable Webhook Integration`.
You should configure your Rollbar's Webhooks to point at the `webhooks` service. To do this go to `rollbar.com/` and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://<my_ip>:1619/rollbar`, and click on `Enable Webhook Integration`.

## Events

plugins/inputs/webhooks/rollbar/rollbar_webhooks.go (new file, 69 lines)
@@ -0,0 +1,69 @@
package rollbar

import (
"encoding/json"
"errors"
"io/ioutil"
"log"
"net/http"
"time"

"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
)

type RollbarWebhook struct {
Path string
acc telegraf.Accumulator
}

func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
router.HandleFunc(rb.Path, rb.eventHandler).Methods("POST")
log.Printf("Started the webhooks_rollbar on %s\n", rb.Path)
rb.acc = acc
}

func (rb *RollbarWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}

dummyEvent := &DummyEvent{}
err = json.Unmarshal(data, dummyEvent)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}

event, err := NewEvent(dummyEvent, data)
if err != nil {
w.WriteHeader(http.StatusOK)
return
}

rb.acc.AddFields("rollbar_webhooks", event.Fields(), event.Tags(), time.Now())

w.WriteHeader(http.StatusOK)
}

func generateEvent(event Event, data []byte) (Event, error) {
err := json.Unmarshal(data, event)
if err != nil {
return nil, err
}
return event, nil
}

func NewEvent(dummyEvent *DummyEvent, data []byte) (Event, error) {
switch dummyEvent.EventName {
case "new_item":
return generateEvent(&NewItem{}, data)
case "deploy":
return generateEvent(&Deploy{}, data)
default:
return nil, errors.New("Not implemented type: " + dummyEvent.EventName)
}
}
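`NewEvent` above only recognizes `new_item` and `deploy`. As a hedged sketch of how a further Rollbar event could be supported (the `Occurrence` type and its field are hypothetical, not part of this changeset, and this assumes the package's `Event` interface mirrors the mandrill one with `Tags()`/`Fields()` methods), one would define another struct satisfying `Event`:

```go
// Hypothetical additional event type; the field name is illustrative only.
type Occurrence struct {
	Level string `json:"level"`
}

// Tags marks every point produced from this payload with its event name.
func (o *Occurrence) Tags() map[string]string {
	return map[string]string{"event": "occurrence"}
}

// Fields carries the payload values to store.
func (o *Occurrence) Fields() map[string]interface{} {
	return map[string]interface{}{"level": o.Level}
}
```

and add a matching case to the switch in `NewEvent`, e.g. `case "occurrence": return generateEvent(&Occurrence{}, data)`.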
@@ -1,4 +1,4 @@
package rollbar_webhooks
package rollbar

import "strconv"

@@ -1,4 +1,4 @@
package rollbar_webhooks
package rollbar

func NewItemJSON() string {
return `
@@ -1,4 +1,4 @@
package rollbar_webhooks
package rollbar

import (
"net/http"
@@ -9,7 +9,7 @@ import (
"github.com/influxdata/telegraf/testutil"
)

func postWebhooks(rb *RollbarWebhooks, eventBody string) *httptest.ResponseRecorder {
func postWebhooks(rb *RollbarWebhook, eventBody string) *httptest.ResponseRecorder {
req, _ := http.NewRequest("POST", "/", strings.NewReader(eventBody))
w := httptest.NewRecorder()
w.Code = 500
@@ -21,12 +21,11 @@ func postWebhooks(rb *RollbarWebhooks, eventBody string) *httptest.ResponseRecor

func TestNewItem(t *testing.T) {
var acc testutil.Accumulator
rb := NewRollbarWebhooks()
rb := &RollbarWebhook{Path: "/rollbar", acc: &acc}
resp := postWebhooks(rb, NewItemJSON())
if resp.Code != http.StatusOK {
t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
}
rb.Gather(&acc)

fields := map[string]interface{}{
"id": 272716944,
@@ -45,12 +44,11 @@ func TestNewItem(t *testing.T) {

func TestDeploy(t *testing.T) {
var acc testutil.Accumulator
rb := NewRollbarWebhooks()
rb := &RollbarWebhook{Path: "/rollbar", acc: &acc}
resp := postWebhooks(rb, DeployJSON())
if resp.Code != http.StatusOK {
t.Errorf("POST deploy returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
}
rb.Gather(&acc)

fields := map[string]interface{}{
"id": 187585,
@@ -66,7 +64,7 @@ func TestDeploy(t *testing.T) {
}

func TestUnknowItem(t *testing.T) {
rb := NewRollbarWebhooks()
rb := &RollbarWebhook{Path: "/rollbar"}
resp := postWebhooks(rb, UnknowJSON())
if resp.Code != http.StatusOK {
t.Errorf("POST unknow returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
plugins/inputs/webhooks/webhooks.go (new file, 104 lines)
@@ -0,0 +1,104 @@
package webhooks

import (
"fmt"
"log"
"net/http"
"reflect"

"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"

"github.com/influxdata/telegraf/plugins/inputs/webhooks/github"
"github.com/influxdata/telegraf/plugins/inputs/webhooks/mandrill"
"github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar"
)

type Webhook interface {
Register(router *mux.Router, acc telegraf.Accumulator)
}

func init() {
inputs.Add("webhooks", func() telegraf.Input { return NewWebhooks() })
}

type Webhooks struct {
ServiceAddress string

Github *github.GithubWebhook
Mandrill *mandrill.MandrillWebhook
Rollbar *rollbar.RollbarWebhook
}

func NewWebhooks() *Webhooks {
return &Webhooks{}
}

func (wb *Webhooks) SampleConfig() string {
return `
## Address and port to host Webhook listener on
service_address = ":1619"

[inputs.webhooks.github]
path = "/github"

[inputs.webhooks.mandrill]
path = "/mandrill"

[inputs.webhooks.rollbar]
path = "/rollbar"
`
}

func (wb *Webhooks) Description() string {
return "A Webhooks Event collector"
}

func (wb *Webhooks) Gather(_ telegraf.Accumulator) error {
return nil
}

func (wb *Webhooks) Listen(acc telegraf.Accumulator) {
r := mux.NewRouter()

for _, webhook := range wb.AvailableWebhooks() {
webhook.Register(r, acc)
}

err := http.ListenAndServe(fmt.Sprintf("%s", wb.ServiceAddress), r)
if err != nil {
log.Printf("Error starting server: %v", err)
}
}

// Looks for fields which implement the Webhook interface
func (wb *Webhooks) AvailableWebhooks() []Webhook {
webhooks := make([]Webhook, 0)
s := reflect.ValueOf(wb).Elem()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)

if !f.CanInterface() {
continue
}

if wbPlugin, ok := f.Interface().(Webhook); ok {
if !reflect.ValueOf(wbPlugin).IsNil() {
webhooks = append(webhooks, wbPlugin)
}
}
}

return webhooks
}

func (wb *Webhooks) Start(acc telegraf.Accumulator) error {
go wb.Listen(acc)
log.Printf("Started the webhooks service on %s\n", wb.ServiceAddress)
return nil
}

func (wb *Webhooks) Stop() {
log.Println("Stopping the Webhooks service")
}
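To make the reflection in `AvailableWebhooks` concrete: only struct fields that are non-nil and satisfy the `Webhook` interface are registered, so unconfigured webhooks cost nothing at runtime. A small illustrative usage (this snippet is an example, not part of the changeset):

```go
wb := &Webhooks{
	ServiceAddress: ":1619",
	// Only Github is configured; Mandrill and Rollbar stay nil.
	Github: &github.GithubWebhook{Path: "/github"},
}

// AvailableWebhooks skips the non-Webhook ServiceAddress field and the
// nil Mandrill/Rollbar pointers, so exactly one webhook is registered.
hooks := wb.AvailableWebhooks() // len(hooks) == 1
```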