Compare commits: release-1....ga-version (51 commits)
Commits (SHA1):
9480b28681
c3aa43a6bd
b2ea39077e
811567a2f4
ca8fb440cc
ac58a6bb3c
9757d39240
5a9e7d77b8
e963b7f01b
e7899d4dc5
301c79e57c
67c288abda
8dd2a8527a
2fe427b3b3
6b1cc67664
1271f9d71a
49ea4e9f39
50ef3282b6
b63dedb74d
5628049440
54c9ba7639
b18d375d6c
6dbbe65897
03d8abccdd
0f6d317a8e
792682590c
2d3da343b3
094eda22c0
4886109d9c
2dc47285bd
6e33a6d62f
a8f9eb23cc
41a5ee6571
7d8de4b8e1
cc2b53abf4
32aa1cc814
38d877165a
5c5984bfe1
30cdc31a27
602a36e241
b863ee1d65
ca49babf3a
cf37b5cdcf
969f388ef2
0589a1d0a5
4e019a176d
a0e23d30fe
e931706249
2457d95262
e9d33726a9
2462e04bf2
CHANGELOG.md (43 changed lines)

@@ -1,19 +1,45 @@
-## v1.0 [unreleased]
+## v1.1 [unreleased]
+
+### Release Notes
+
+### Features
+
+- [#1694](https://github.com/influxdata/telegraf/pull/1694): Adding Gauge and Counter metric types.
+- [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carriage returns from exec plugin output on Windows
+- [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout.
+- [#1607](https://github.com/influxdata/telegraf/pull/1607): Massage metric names in Instrumental output plugin
+- [#1572](https://github.com/influxdata/telegraf/pull/1572): mesos improvements.
+- [#1513](https://github.com/influxdata/telegraf/issues/1513): Add Ceph Cluster Performance Statistics
+- [#1650](https://github.com/influxdata/telegraf/issues/1650): Ability to configure response_timeout in httpjson input.
+- [#1685](https://github.com/influxdata/telegraf/issues/1685): Add additional redis metrics.
+- [#1539](https://github.com/influxdata/telegraf/pull/1539): Added capability to send metrics through Http API for OpenTSDB.
+- [#1471](https://github.com/influxdata/telegraf/pull/1471): iptables input plugin.
+- [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin.
+- [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname for each docker measurements.
+- [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin.
+- [#1407](https://github.com/influxdata/telegraf/pull/1407): HTTP service listener input plugin.
+- [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql
+
+### Bugfixes
+
+- [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2.
+- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags
+- [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing
+- [#1716](https://github.com/influxdata/telegraf/issues/1716): Sensors plugin strconv.ParseFloat: parsing "": invalid syntax
+- [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic
+
+## v1.0 [2016-09-08]

 ### Release Notes

 **Breaking Change** The SNMP plugin is being deprecated in its current form.
 There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
 which fixes many of the issues and confusions
-of it's predecessor. For users wanting to continue to use the deprecated SNMP
+of its predecessor. For users wanting to continue to use the deprecated SNMP
 plugin, you will need to change your config file from `[[inputs.snmp]]` to
 `[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
 backwards-compatible.

-- Telegraf now supports being installed as an official windows service,
-which can be installed via
-`> C:\Program Files\Telegraf\telegraf.exe --service install`
-
 **Breaking Change**: Aerospike main server node measurements have been renamed
 aerospike_node. Aerospike namespace measurements have been renamed to
 aerospike_namespace. They will also now be tagged with the node_name

@@ -44,6 +70,10 @@ should now look like:
     path = "/"
 ```

+- Telegraf now supports being installed as an official windows service,
+which can be installed via
+`> C:\Program Files\Telegraf\telegraf.exe --service install`
+
 - `flush_jitter` behavior has been changed. The random jitter will now be
 evaluated at every flush interval, rather than once at startup. This makes it
 consistent with the behavior of `collection_jitter`.
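To make the new `flush_jitter` semantics concrete, here is a hypothetical sketch (illustration only, not code from this compare view) of drawing a fresh jitter at every flush instead of once at startup:

```go
package main

import (
	"math/rand"
	"time"
)

// nextFlushDelay returns the delay until the next flush. Because the random
// offset is drawn on every call, each flush interval gets fresh jitter,
// matching the collection_jitter-style behavior described above.
func nextFlushDelay(interval, jitter time.Duration) time.Duration {
	if jitter <= 0 {
		return interval
	}
	return interval + time.Duration(rand.Int63n(int64(jitter)))
}

func main() {
	for i := 0; i < 3; i++ {
		time.Sleep(nextFlushDelay(10*time.Millisecond, 5*time.Millisecond))
		// a flush would happen here
	}
}
```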
@@ -140,6 +170,7 @@ consistent with the behavior of `collection_jitter`.
 - [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic.
 - [#1634](https://github.com/influxdata/telegraf/issues/1634): Fix ntpq panic when field is missing.
 - [#1637](https://github.com/influxdata/telegraf/issues/1637): Sanitize graphite output field names.
+- [#1695](https://github.com/influxdata/telegraf/pull/1695): Fix MySQL plugin not sending 0 value fields.

 ## v0.13.1 [2016-05-24]
@@ -32,7 +32,7 @@ Assuming you can already build the project, run these in the telegraf directory:

 1. `go get github.com/sparrc/gdm`
 1. `gdm restore`
-1. `gdm save`
+1. `GOOS=linux gdm save`

 ## Input Plugins

@@ -84,9 +84,9 @@ func (s *Simple) SampleConfig() string {

 func (s *Simple) Gather(acc telegraf.Accumulator) error {
     if s.Ok {
-        acc.Add("state", "pretty good", nil)
+        acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
     } else {
-        acc.Add("state", "not great", nil)
+        acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
     }

     return nil

@@ -97,6 +97,13 @@ func init() {
 }
 ```

+## Adding Typed Metrics
+
+In addition to the `AddFields` function, the accumulator also supports an
+`AddGauge` and `AddCounter` function. These functions are for adding _typed_
+metrics. Metric types are ignored for the InfluxDB output, but can be used
+for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
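A short sketch of how a plugin might use the typed methods (hypothetical plugin code, assuming the accumulator interface introduced in this changeset; the measurement and field names are invented):

```go
package example

import "github.com/influxdata/telegraf"

// Simple is a stand-in plugin struct for this sketch.
type Simple struct{}

// Gather records one untyped metric, one gauge, and one counter.
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "example"}

	// Untyped (the default, as in the Gather example above).
	acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, tags)

	// Gauge: a sampled value that can move up or down.
	acc.AddGauge("queue_depth", map[string]interface{}{"value": 42}, tags)

	// Counter: a monotonically increasing total.
	acc.AddCounter("requests_total", map[string]interface{}{"value": 1001}, tags)

	return nil
}
```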
 ## Input Plugins Accepting Arbitrary Data Formats

 Some input plugins (such as
Godeps (7 changed lines)

@@ -1,6 +1,6 @@
 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
-github.com/aerospike/aerospike-client-go 45863b7fd8640dc12f7fdd397104d97e1986f25a
+github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
 github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4

@@ -37,8 +37,8 @@ github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3
 github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
-github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
-github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
+github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
+github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
 github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
 github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831

@@ -48,7 +48,6 @@ github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
 github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
 github.com/soniah/gosnmp eb32571c2410868d85849ad67d1e51d01273eb84
-github.com/sparrc/aerospike-client-go d4bb42d2c2d39dae68e054116f4538af189e05d5
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
 github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
Makefile (12 changed lines)

@@ -42,6 +42,7 @@ prepare-windows:

 # Run all docker containers necessary for unit tests
 docker-run:
+	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
 	docker run --name kafka \
 		-e ADVERTISED_HOST=localhost \
 		-e ADVERTISED_PORT=9092 \

@@ -52,29 +53,28 @@ docker-run:
 	docker run --name postgres -p "5432:5432" -d postgres
 	docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
 	docker run --name redis -p "6379:6379" -d redis
-	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
 	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
 	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
 	docker run --name riemann -p "5555:5555" -d blalor/riemann
 	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
+	docker run --name nats -p "4222:4222" -d nats

 # Run docker containers necessary for CircleCI unit tests
 docker-run-circle:
+	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
 	docker run --name kafka \
 		-e ADVERTISED_HOST=localhost \
 		-e ADVERTISED_PORT=9092 \
 		-p "2181:2181" -p "9092:9092" \
 		-d spotify/kafka
-	docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
 	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
 	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
 	docker run --name riemann -p "5555:5555" -d blalor/riemann
 	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
+	docker run --name nats -p "4222:4222" -d nats

 # Kill all docker containers, ignore errors
 docker-kill:
-	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp
-	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp
+	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
+	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats

 # Run full unit tests using docker containers (includes setup and teardown)
 test: vet docker-kill docker-run
README.md (33 changed lines)

@@ -20,12 +20,12 @@ new plugins.

 ### Linux deb and rpm Packages:

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_amd64.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.x86_64.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_amd64.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.x86_64.rpm

 Latest (arm):
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_armhf.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.armhf.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_armhf.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.armhf.rpm

 ##### Package Instructions:

@@ -46,14 +46,14 @@ to use this repo to install & update telegraf.
 ### Linux tarballs:

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_amd64.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_i386.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_armhf.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_i386.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_armhf.tar.gz

 ### FreeBSD tarball:

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_freebsd_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_freebsd_amd64.tar.gz

 ### Ansible Role:

@@ -69,7 +69,7 @@ brew install telegraf
 ### Windows Binaries (EXPERIMENTAL)

 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_windows_amd64.zip
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_windows_amd64.zip

 ### From Source:

@@ -161,6 +161,7 @@ Currently implemented sources:
 * [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
 * [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
 * [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
+* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
 * [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
 * [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
 * [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)

@@ -212,18 +213,21 @@ Currently implemented sources:

 Telegraf can also collect metrics via the following service plugins:

+* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
+* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
+* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
+* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
+* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
 * [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
 * [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
 * [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
-* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
 * [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
-* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
-* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
-* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
+* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
 * [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
   * [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
   * [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
   * [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
   * [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
-* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)

 We'll be adding support for many more over the coming months. Read on if you
 want to add support for another service or third-party API.

@@ -243,6 +247,7 @@ want to add support for another service or third-party API.
 * [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
 * [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
 * [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
+* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
 * [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
 * [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
 * [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
@@ -2,16 +2,29 @@ package telegraf

 import "time"

 // Accumulator is an interface for "accumulating" metrics from input plugin(s).
 // The metrics are sent down a channel shared between all input plugins and then
 // flushed on the configured flush_interval.
 type Accumulator interface {
+    // AddFields adds a metric to the accumulator with the given measurement
+    // name, fields, and tags (and timestamp). If a timestamp is not provided,
+    // then the accumulator sets it to "now".
     // Create a point with a value, decorating it with tags
     // NOTE: tags is expected to be owned by the caller, don't mutate
     // it after passing to Add.
-    Add(measurement string,
-        value interface{},
+    AddFields(measurement string,
+        fields map[string]interface{},
         tags map[string]string,
         t ...time.Time)

-    AddFields(measurement string,
+    // AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
+    AddGauge(measurement string,
         fields map[string]interface{},
         tags map[string]string,
         t ...time.Time)

+    // AddCounter is the same as AddFields, but will add the metric as a "Counter" type
+    AddCounter(measurement string,
+        fields map[string]interface{},
+        tags map[string]string,
+        t ...time.Time)
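The trailing `t ...time.Time` makes the timestamp optional; per the doc comment above, the accumulator falls back to "now" when none is given. A minimal sketch of that resolution pattern (illustrative helper, not code from this diff):

```go
package example

import "time"

// resolveTime mirrors the documented behavior of the optional trailing
// timestamp: callers pass zero or one time.Time, and the accumulator uses
// time.Now() when none is supplied.
func resolveTime(t ...time.Time) time.Time {
	if len(t) > 0 {
		return t[0]
	}
	return time.Now()
}
```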
@@ -38,38 +38,53 @@ type accumulator struct {
     errCount uint64
 }

-func (ac *accumulator) Add(
-    measurement string,
-    value interface{},
-    tags map[string]string,
-    t ...time.Time,
-) {
-    fields := make(map[string]interface{})
-    fields["value"] = value
-
-    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
-        return
-    }
-
-    ac.AddFields(measurement, fields, tags, t...)
-}
-
 func (ac *accumulator) AddFields(
     measurement string,
     fields map[string]interface{},
     tags map[string]string,
     t ...time.Time,
 ) {
+    if m := ac.makeMetric(measurement, fields, tags, telegraf.Untyped, t...); m != nil {
+        ac.metrics <- m
+    }
+}
+
+func (ac *accumulator) AddGauge(
+    measurement string,
+    fields map[string]interface{},
+    tags map[string]string,
+    t ...time.Time,
+) {
+    if m := ac.makeMetric(measurement, fields, tags, telegraf.Gauge, t...); m != nil {
+        ac.metrics <- m
+    }
+}
+
+func (ac *accumulator) AddCounter(
+    measurement string,
+    fields map[string]interface{},
+    tags map[string]string,
+    t ...time.Time,
+) {
+    if m := ac.makeMetric(measurement, fields, tags, telegraf.Counter, t...); m != nil {
+        ac.metrics <- m
+    }
+}
+
+// makeMetric either returns a metric, or returns nil if the metric doesn't
+// need to be created (because of filtering, an error, etc.)
+func (ac *accumulator) makeMetric(
+    measurement string,
+    fields map[string]interface{},
+    tags map[string]string,
+    mType telegraf.ValueType,
+    t ...time.Time,
+) telegraf.Metric {
     if len(fields) == 0 || len(measurement) == 0 {
-        return
+        return nil
     }
-
-    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
-        return
-    }
-
-    if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
-        return
+    if tags == nil {
+        tags = make(map[string]string)
     }

     // Override measurement name if set

@@ -84,9 +99,6 @@ func (ac *accumulator) AddFields(
         measurement = measurement + ac.inputConfig.MeasurementSuffix
     }

-    if tags == nil {
-        tags = make(map[string]string)
-    }
     // Apply plugin-wide tags if set
     for k, v := range ac.inputConfig.Tags {
         if _, ok := tags[k]; !ok {

@@ -99,25 +111,21 @@ func (ac *accumulator) AddFields(
             tags[k] = v
         }
     }
-    ac.inputConfig.Filter.FilterTags(tags)

-    result := make(map[string]interface{})
+    // Apply the metric filter(s)
+    if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok {
+        return nil
+    }

     for k, v := range fields {
-        // Filter out any filtered fields
-        if ac.inputConfig != nil {
-            if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
-                continue
-            }
-        }
-
         // Validate uint64 and float64 fields
         switch val := v.(type) {
         case uint64:
             // InfluxDB does not support writing uint64
             if val < uint64(9223372036854775808) {
-                result[k] = int64(val)
+                fields[k] = int64(val)
             } else {
-                result[k] = int64(9223372036854775807)
+                fields[k] = int64(9223372036854775807)
             }
             continue
         case float64:

@@ -128,15 +136,12 @@ func (ac *accumulator) AddFields(
                     "field, skipping",
                     measurement, k)
             }
+            delete(fields, k)
             continue
         }
     }

-        result[k] = v
-    }
-    fields = nil
-    if len(result) == 0 {
-        return
+        fields[k] = v
     }

     var timestamp time.Time

@@ -147,15 +152,26 @@ func (ac *accumulator) AddFields(
     }
     timestamp = timestamp.Round(ac.precision)

-    m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
+    var m telegraf.Metric
+    var err error
+    switch mType {
+    case telegraf.Counter:
+        m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp)
+    case telegraf.Gauge:
+        m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp)
+    default:
+        m, err = telegraf.NewMetric(measurement, tags, fields, timestamp)
+    }
     if err != nil {
         log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
-        return
+        return nil
     }

     if ac.trace {
         fmt.Println("> " + m.String())
     }
-    ac.metrics <- m
+
+    return m
 }

 // AddError passes a runtime error to the accumulator.
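A side note on the uint64 branch above: InfluxDB cannot store unsigned 64-bit fields, so makeMetric converts them to int64 and saturates at the maximum. A standalone sketch of that conversion (illustration only; the constant 9223372036854775808 is 2^63, i.e. math.MaxInt64 + 1):

```go
package main

import (
	"fmt"
	"math"
)

// clampUint64 mirrors the conversion in makeMetric: values that fit in an
// int64 convert directly; anything larger saturates at math.MaxInt64
// (9223372036854775807).
func clampUint64(v uint64) int64 {
	if v < uint64(math.MaxInt64)+1 { // 9223372036854775808 == 2^63
		return int64(v)
	}
	return math.MaxInt64
}

func main() {
	fmt.Println(clampUint64(42))                     // 42
	fmt.Println(clampUint64(uint64(math.MaxUint64))) // 9223372036854775807
}
```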
@@ -23,9 +23,15 @@ func TestAdd(t *testing.T) {
     defer close(a.metrics)
     a.inputConfig = &models.InputConfig{}

-    a.Add("acctest", float64(101), map[string]string{})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -42,6 +48,76 @@ func TestAdd(t *testing.T) {
         actual)
 }

+func TestAddGauge(t *testing.T) {
+    a := accumulator{}
+    now := time.Now()
+    a.metrics = make(chan telegraf.Metric, 10)
+    defer close(a.metrics)
+    a.inputConfig = &models.InputConfig{}
+
+    a.AddGauge("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddGauge("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddGauge("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)
+
+    testm := <-a.metrics
+    actual := testm.String()
+    assert.Contains(t, actual, "acctest value=101")
+    assert.Equal(t, testm.Type(), telegraf.Gauge)
+
+    testm = <-a.metrics
+    actual = testm.String()
+    assert.Contains(t, actual, "acctest,acc=test value=101")
+    assert.Equal(t, testm.Type(), telegraf.Gauge)
+
+    testm = <-a.metrics
+    actual = testm.String()
+    assert.Equal(t,
+        fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
+        actual)
+    assert.Equal(t, testm.Type(), telegraf.Gauge)
+}
+
+func TestAddCounter(t *testing.T) {
+    a := accumulator{}
+    now := time.Now()
+    a.metrics = make(chan telegraf.Metric, 10)
+    defer close(a.metrics)
+    a.inputConfig = &models.InputConfig{}
+
+    a.AddCounter("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddCounter("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddCounter("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)
+
+    testm := <-a.metrics
+    actual := testm.String()
+    assert.Contains(t, actual, "acctest value=101")
+    assert.Equal(t, testm.Type(), telegraf.Counter)
+
+    testm = <-a.metrics
+    actual = testm.String()
+    assert.Contains(t, actual, "acctest,acc=test value=101")
+    assert.Equal(t, testm.Type(), telegraf.Counter)
+
+    testm = <-a.metrics
+    actual = testm.String()
+    assert.Equal(t,
+        fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
+        actual)
+    assert.Equal(t, testm.Type(), telegraf.Counter)
+}
+
 func TestAddNoPrecisionWithInterval(t *testing.T) {
     a := accumulator{}
     now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)

@@ -50,9 +126,15 @@ func TestAddNoPrecisionWithInterval(t *testing.T) {
     a.inputConfig = &models.InputConfig{}

     a.SetPrecision(0, time.Second)
-    a.Add("acctest", float64(101), map[string]string{})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -77,9 +159,15 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
     a.inputConfig = &models.InputConfig{}

     a.SetPrecision(time.Second, time.Millisecond)
-    a.Add("acctest", float64(101), map[string]string{})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -105,9 +193,15 @@ func TestAddDisablePrecision(t *testing.T) {

     a.SetPrecision(time.Second, time.Millisecond)
     a.DisablePrecision()
-    a.Add("acctest", float64(101), map[string]string{})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -132,7 +226,9 @@ func TestDifferentPrecisions(t *testing.T) {
     a.inputConfig = &models.InputConfig{}

     a.SetPrecision(0, time.Second)
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)
     testm := <-a.metrics
     actual := testm.String()
     assert.Equal(t,

@@ -140,7 +236,9 @@ func TestDifferentPrecisions(t *testing.T) {
         actual)

     a.SetPrecision(0, time.Millisecond)
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)
     testm = <-a.metrics
     actual = testm.String()
     assert.Equal(t,

@@ -148,7 +246,9 @@ func TestDifferentPrecisions(t *testing.T) {
         actual)

     a.SetPrecision(0, time.Microsecond)
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)
     testm = <-a.metrics
     actual = testm.String()
     assert.Equal(t,

@@ -156,7 +256,9 @@ func TestDifferentPrecisions(t *testing.T) {
         actual)

     a.SetPrecision(0, time.Nanosecond)
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)
     testm = <-a.metrics
     actual = testm.String()
     assert.Equal(t,

@@ -172,9 +274,15 @@ func TestAddDefaultTags(t *testing.T) {
     defer close(a.metrics)
     a.inputConfig = &models.InputConfig{}

-    a.Add("acctest", float64(101), map[string]string{})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -342,9 +450,15 @@ func TestAddInts(t *testing.T) {
     defer close(a.metrics)
     a.inputConfig = &models.InputConfig{}

-    a.Add("acctest", int(101), map[string]string{})
-    a.Add("acctest", int32(101), map[string]string{"acc": "test"})
-    a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": int(101)},
+        map[string]string{})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": int32(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": int64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -369,8 +483,12 @@ func TestAddFloats(t *testing.T) {
     defer close(a.metrics)
     a.inputConfig = &models.InputConfig{}

-    a.Add("acctest", float32(101), map[string]string{"acc": "test"})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float32(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -391,8 +509,12 @@ func TestAddStrings(t *testing.T) {
     defer close(a.metrics)
     a.inputConfig = &models.InputConfig{}

-    a.Add("acctest", "test", map[string]string{"acc": "test"})
-    a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": "test"},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": "foo"},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -413,8 +535,10 @@ func TestAddBools(t *testing.T) {
     defer close(a.metrics)
     a.inputConfig = &models.InputConfig{}

-    a.Add("acctest", true, map[string]string{"acc": "test"})
-    a.Add("acctest", false, map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": true}, map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": false}, map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()

@@ -436,13 +560,19 @@ func TestAccFilterTags(t *testing.T) {
     filter := models.Filter{
         TagExclude: []string{"acc"},
     }
-    assert.NoError(t, filter.CompileFilter())
+    assert.NoError(t, filter.Compile())
     a.inputConfig = &models.InputConfig{}
     a.inputConfig.Filter = filter

-    a.Add("acctest", float64(101), map[string]string{})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"})
+    a.AddFields("acctest",
+        map[string]interface{}{"value": float64(101)},
+        map[string]string{"acc": "test"}, now)

     testm := <-a.metrics
     actual := testm.String()
@@ -4,9 +4,9 @@ machine:
   post:
     - sudo service zookeeper stop
     - go version
-    - go version | grep 1.6.2 || sudo rm -rf /usr/local/go
-    - wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz
-    - sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz
+    - go version | grep 1.7.1 || sudo rm -rf /usr/local/go
+    - wget https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz
+    - sudo tar -C /usr/local -xzf go1.7.1.linux-amd64.tar.gz
     - go version

 dependencies:
@@ -52,6 +52,16 @@ var (
     branch string
 )

+func init() {
+    // If commit or branch are not set, make that clear.
+    if commit == "" {
+        commit = "unknown"
+    }
+    if branch == "" {
+        branch = "unknown"
+    }
+}
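These package-level variables are conventionally injected at build time with linker flags; this compare view does not show that wiring, so the following is an assumed, self-contained sketch of the fallback behavior:

```go
// Standalone sketch (assumption: mirrors the init() above; the -X values in
// the comments stand in for what a real build would inject).
package main

import "fmt"

var (
	version string // e.g. go build -ldflags "-X main.version=1.0.0"
	commit  string // e.g. -X main.commit=$(git rev-parse --short HEAD)
	branch  string // e.g. -X main.branch=$(git rev-parse --abbrev-ref HEAD)
)

func init() {
	if commit == "" {
		commit = "unknown"
	}
	if branch == "" {
		branch = "unknown"
	}
}

func main() {
	// Without ldflags this prints: Telegraf v (git: unknown unknown)
	fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
}
```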
 const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

 Usage:

@@ -137,8 +147,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
     if len(args) > 0 {
         switch args[0] {
         case "version":
-            v := fmt.Sprintf("Telegraf - version %s", version)
-            fmt.Println(v)
+            fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
             return
         case "config":
             config.PrintSampleConfig(inputFilters, outputFilters)
@@ -86,6 +86,10 @@ as it is more efficient to filter out tags at the ingestion point.
 * **taginclude**: taginclude is the inverse of tagexclude. It will only include
 the tag keys in the final measurement.

+**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
+the plugin definition, otherwise subsequent plugin config options will be
+interpreted as part of the tagpass/tagdrop map.
+
 ## Input Configuration

 Some configuration options are configurable per input:

@@ -129,6 +133,10 @@ fields which begin with `time_`.

 #### Input Config: tagpass and tagdrop

+**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
+the plugin definition, otherwise subsequent plugin config options will be
+interpreted as part of the tagpass/tagdrop map.
+
 ```toml
 [[inputs.cpu]]
   percpu = true
@@ -6,19 +6,18 @@ the general steps to set it up.

 1. Obtain the telegraf windows distribution
 2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
    location simply specify the `-config` parameter with the desired location)
-3. Place the telegraf.exe and the config file into `C:\Program Files\Telegraf`
-4. To install the service into the Windows Service Manager, run (as an
-   administrator):
+3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
+4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):

    ```
-   > C:\Program Files\Telegraf\telegraf.exe --service install
+   > C:\"Program Files"\Telegraf\telegraf.exe --service install
    ```

 5. Edit the configuration file to meet your needs
 6. To check that it works, run:

    ```
-   > C:\Program Files\Telegraf\telegraf.exe --config C:\Program Files\Telegraf\telegraf.conf --test
+   > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
    ```

 7. To start collecting data, run:
@@ -511,6 +511,10 @@

 # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
 # [[inputs.ceph]]
+#   ## This is the recommended interval to poll. Too frequent and you will lose
+#   ## data points due to timeouts during rebalancing and recovery
+#   interval = '1m'
+#
 #   ## All configuration values are optional, defaults are shown below
 #
 #   ## location of ceph binary

@@ -525,6 +529,26 @@
 #
 #   ## suffix used to identify socket files
 #   socket_suffix = "asok"
+#
+#   ## Ceph user to authenticate as, ceph will search for the corresponding keyring
+#   ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
+#   ## client section of ceph.conf for example:
+#   ##
+#   ##     [client.telegraf]
+#   ##         keyring = /etc/ceph/client.telegraf.keyring
+#   ##
+#   ## Consult the ceph documentation for more detail on keyring generation.
+#   ceph_user = "client.admin"
+#
+#   ## Ceph configuration to use to locate the cluster
+#   ceph_config = "/etc/ceph/ceph.conf"
+#
+#   ## Whether to gather statistics via the admin socket
+#   gather_admin_socket_stats = true
+#
+#   ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
+#   ## to be specified
+#   gather_cluster_stats = true


 # # Read specific statistics per cgroup

@@ -886,6 +910,18 @@
 #   ##
 #   servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]

+# # Gather packets and bytes throughput from iptables
+# [[inputs.iptables]]
+#   ## iptables require root access on most systems.
+#   ## Setting 'use_sudo' to true will make use of sudo to run iptables.
+#   ## Users must configure sudo to allow telegraf user to run iptables.
+#   ## iptables can be restricted to only use list command "iptables -nvL"
+#   use_sudo = false
+#   ## define the table to monitor:
+#   table = "filter"
+#   ## Defines the chains to monitor:
+#   chains = [ "INPUT" ]
+

 # # Read JMX metrics through Jolokia
 # [[inputs.jolokia]]
@@ -10,16 +10,16 @@ type Filter interface {
     Match(string) bool
 }

-// CompileFilter takes a list of string filters and returns a Filter interface
+// Compile takes a list of string filters and returns a Filter interface
 // for matching a given string against the filter list. The filter list
 // supports glob matching too, ie:
 //
-//   f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
+//   f, _ := Compile([]string{"cpu", "mem", "net*"})
 //   f.Match("cpu")     // true
 //   f.Match("network") // true
 //   f.Match("memory")  // false
 //
-func CompileFilter(filters []string) (Filter, error) {
+func Compile(filters []string) (Filter, error) {
     // return if there is nothing to compile
     if len(filters) == 0 {
         return nil, nil
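One subtlety the body above shows: `Compile` returns a nil `Filter` (with a nil error) for an empty list, so callers must nil-check before calling `Match`. A usage sketch built from the function's own doc-comment example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Compile returns (nil, nil) for an empty filter list, so check for nil
	// before calling Match.
	f, err := filter.Compile([]string{"cpu", "mem", "net*"})
	if err != nil {
		log.Fatal(err)
	}
	if f != nil {
		fmt.Println(f.Match("network")) // true: matches the "net*" glob
		fmt.Println(f.Match("memory"))  // false: "mem" is an exact match only
	}
}
```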
@@ -6,30 +6,30 @@ import (
     "github.com/stretchr/testify/assert"
 )

-func TestCompileFilter(t *testing.T) {
-    f, err := CompileFilter([]string{})
+func TestCompile(t *testing.T) {
+    f, err := Compile([]string{})
     assert.NoError(t, err)
     assert.Nil(t, f)

-    f, err = CompileFilter([]string{"cpu"})
+    f, err = Compile([]string{"cpu"})
     assert.NoError(t, err)
     assert.True(t, f.Match("cpu"))
     assert.False(t, f.Match("cpu0"))
     assert.False(t, f.Match("mem"))

-    f, err = CompileFilter([]string{"cpu*"})
+    f, err = Compile([]string{"cpu*"})
     assert.NoError(t, err)
     assert.True(t, f.Match("cpu"))
     assert.True(t, f.Match("cpu0"))
     assert.False(t, f.Match("mem"))

-    f, err = CompileFilter([]string{"cpu", "mem"})
+    f, err = Compile([]string{"cpu", "mem"})
     assert.NoError(t, err)
     assert.True(t, f.Match("cpu"))
     assert.False(t, f.Match("cpu0"))
     assert.True(t, f.Match("mem"))

-    f, err = CompileFilter([]string{"cpu", "mem", "net*"})
+    f, err = Compile([]string{"cpu", "mem", "net*"})
     assert.NoError(t, err)
     assert.True(t, f.Match("cpu"))
     assert.False(t, f.Match("cpu0"))

@@ -40,7 +40,7 @@ func TestCompileFilter(t *testing.T) {
 var benchbool bool

 func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
-    f, _ := CompileFilter([]string{"cpu"})
+    f, _ := Compile([]string{"cpu"})
     var tmp bool
     for n := 0; n < b.N; n++ {
         tmp = f.Match("network")

@@ -49,7 +49,7 @@ func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
 }

 func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
-    f, _ := CompileFilter([]string{"cpu"})
+    f, _ := Compile([]string{"cpu"})
     var tmp bool
     for n := 0; n < b.N; n++ {
         tmp = f.Match("cpu")

@@ -58,7 +58,7 @@ func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
 }

 func BenchmarkFilter(b *testing.B) {
-    f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
+    f, _ := Compile([]string{"cpu", "mem", "net*"})
     var tmp bool
     for n := 0; n < b.N; n++ {
         tmp = f.Match("network")

@@ -67,7 +67,7 @@ func BenchmarkFilter(b *testing.B) {
 }

 func BenchmarkFilterNoGlob(b *testing.B) {
-    f, _ := CompileFilter([]string{"cpu", "mem", "net"})
+    f, _ := Compile([]string{"cpu", "mem", "net"})
     var tmp bool
     for n := 0; n < b.N; n++ {
         tmp = f.Match("net")

@@ -76,7 +76,7 @@ func BenchmarkFilterNoGlob(b *testing.B) {
 }

 func BenchmarkFilter2(b *testing.B) {
-    f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
+    f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
         "aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
     var tmp bool
     for n := 0; n < b.N; n++ {

@@ -86,7 +86,7 @@ func BenchmarkFilter2(b *testing.B) {
 }

 func BenchmarkFilter2NoGlob(b *testing.B) {
-    f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
+    f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
         "aw", "az", "axxx", "ab", "cpu", "mem", "net"})
     var tmp bool
     for n := 0; n < b.N; n++ {
@@ -665,7 +665,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
             for _, elem := range ary.Value {
                 if str, ok := elem.(*ast.String); ok {
                     f.NamePass = append(f.NamePass, str.Value)
-                    f.IsActive = true
                 }
             }
         }

@@ -678,7 +677,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
             for _, elem := range ary.Value {
                 if str, ok := elem.(*ast.String); ok {
                     f.NameDrop = append(f.NameDrop, str.Value)
-                    f.IsActive = true
                 }
             }
         }

@@ -693,7 +691,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
             for _, elem := range ary.Value {
                 if str, ok := elem.(*ast.String); ok {
                     f.FieldPass = append(f.FieldPass, str.Value)
-                    f.IsActive = true
                 }
             }
         }

@@ -709,7 +706,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
             for _, elem := range ary.Value {
                 if str, ok := elem.(*ast.String); ok {
                     f.FieldDrop = append(f.FieldDrop, str.Value)
-                    f.IsActive = true
                 }
             }
         }

@@ -730,7 +726,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
                 }
             }
             f.TagPass = append(f.TagPass, *tagfilter)
-            f.IsActive = true
         }
     }
 }

@@ -749,7 +744,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
                 }
             }
             f.TagDrop = append(f.TagDrop, *tagfilter)
-            f.IsActive = true
         }
     }
 }

@@ -778,7 +772,7 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
         }
     }
 }
-    if err := f.CompileFilter(); err != nil {
+    if err := f.Compile(); err != nil {
         return f, err
     }
@@ -43,9 +43,8 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
                 Filter: []string{"mytag"},
             },
         },
-        IsActive: true,
     }
-    assert.NoError(t, filter.CompileFilter())
+    assert.NoError(t, filter.Compile())
     mConfig := &models.InputConfig{
         Name:   "memcached",
         Filter: filter,

@@ -83,9 +82,8 @@ func TestConfig_LoadSingleInput(t *testing.T) {
                 Filter: []string{"mytag"},
             },
         },
-        IsActive: true,
     }
-    assert.NoError(t, filter.CompileFilter())
+    assert.NoError(t, filter.Compile())
     mConfig := &models.InputConfig{
         Name:   "memcached",
         Filter: filter,

@@ -130,9 +128,8 @@ func TestConfig_LoadDirectory(t *testing.T) {
                 Filter: []string{"mytag"},
             },
         },
-        IsActive: true,
     }
-    assert.NoError(t, filter.CompileFilter())
+    assert.NoError(t, filter.Compile())
     mConfig := &models.InputConfig{
         Name:   "memcached",
         Filter: filter,
@@ -118,7 +118,7 @@ func TestRandomSleep(t *testing.T) {
     s = time.Now()
     RandomSleep(time.Millisecond*50, make(chan struct{}))
     elapsed = time.Since(s)
-    assert.True(t, elapsed < time.Millisecond*50)
+    assert.True(t, elapsed < time.Millisecond*100)

     // test that shutdown is respected
     s = time.Now()
@@ -3,7 +3,6 @@ package models
 import (
     "fmt"

-    "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/filter"
 )

@@ -34,47 +33,59 @@ type Filter struct {
     TagInclude []string
     tagInclude filter.Filter

-    IsActive bool
+    isActive bool
 }

 // Compile all Filter lists into filter.Filter objects.
-func (f *Filter) CompileFilter() error {
+func (f *Filter) Compile() error {
+    if len(f.NameDrop) == 0 &&
+        len(f.NamePass) == 0 &&
+        len(f.FieldDrop) == 0 &&
+        len(f.FieldPass) == 0 &&
+        len(f.TagInclude) == 0 &&
+        len(f.TagExclude) == 0 &&
+        len(f.TagPass) == 0 &&
+        len(f.TagDrop) == 0 {
+        return nil
+    }
+
+    f.isActive = true
     var err error
-    f.nameDrop, err = filter.CompileFilter(f.NameDrop)
+    f.nameDrop, err = filter.Compile(f.NameDrop)
     if err != nil {
         return fmt.Errorf("Error compiling 'namedrop', %s", err)
     }
-    f.namePass, err = filter.CompileFilter(f.NamePass)
+    f.namePass, err = filter.Compile(f.NamePass)
     if err != nil {
         return fmt.Errorf("Error compiling 'namepass', %s", err)
     }

-    f.fieldDrop, err = filter.CompileFilter(f.FieldDrop)
+    f.fieldDrop, err = filter.Compile(f.FieldDrop)
     if err != nil {
         return fmt.Errorf("Error compiling 'fielddrop', %s", err)
     }
-    f.fieldPass, err = filter.CompileFilter(f.FieldPass)
+    f.fieldPass, err = filter.Compile(f.FieldPass)
     if err != nil {
         return fmt.Errorf("Error compiling 'fieldpass', %s", err)
     }

-    f.tagExclude, err = filter.CompileFilter(f.TagExclude)
+    f.tagExclude, err = filter.Compile(f.TagExclude)
     if err != nil {
         return fmt.Errorf("Error compiling 'tagexclude', %s", err)
     }
-    f.tagInclude, err = filter.CompileFilter(f.TagInclude)
+    f.tagInclude, err = filter.Compile(f.TagInclude)
     if err != nil {
         return fmt.Errorf("Error compiling 'taginclude', %s", err)
     }

     for i, _ := range f.TagDrop {
-        f.TagDrop[i].filter, err = filter.CompileFilter(f.TagDrop[i].Filter)
+        f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
         if err != nil {
             return fmt.Errorf("Error compiling 'tagdrop', %s", err)
         }
     }
     for i, _ := range f.TagPass {
-        f.TagPass[i].filter, err = filter.CompileFilter(f.TagPass[i].Filter)
+        f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
         if err != nil {
             return fmt.Errorf("Error compiling 'tagpass', %s", err)
         }

@@ -82,16 +93,52 @@ func (f *Filter) CompileFilter() error {
     return nil
 }

-func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool {
-    if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
+// Apply applies the filter to the given measurement name, fields map, and
+// tags map. It will return false if the metric should be "filtered out", and
+// true if the metric should "pass".
+// It will modify tags in-place if they need to be deleted.
+func (f *Filter) Apply(
+    measurement string,
+    fields map[string]interface{},
+    tags map[string]string,
+) bool {
+    if !f.isActive {
         return true
     }
-    return false
+
+    // check if the measurement name should pass
+    if !f.shouldNamePass(measurement) {
+        return false
+    }
+
+    // check if the tags should pass
+    if !f.shouldTagsPass(tags) {
+        return false
+    }
+
+    // filter fields
+    for fieldkey, _ := range fields {
+        if !f.shouldFieldPass(fieldkey) {
+            delete(fields, fieldkey)
+        }
+    }
+    if len(fields) == 0 {
+        return false
+    }
+
+    // filter tags
+    f.filterTags(tags)
+
+    return true
 }

-// ShouldFieldsPass returns true if the metric should pass, false if should drop
+func (f *Filter) IsActive() bool {
+    return f.isActive
+}
+
+// shouldNamePass returns true if the metric should pass, false if should drop
 // based on the drop/pass filter parameters
-func (f *Filter) ShouldNamePass(key string) bool {
+func (f *Filter) shouldNamePass(key string) bool {
     if f.namePass != nil {
         if f.namePass.Match(key) {
             return true

@@ -107,9 +154,9 @@ func (f *Filter) ShouldNamePass(key string) bool {
     return true
 }

-// ShouldFieldsPass returns true if the metric should pass, false if should drop
+// shouldFieldPass returns true if the metric should pass, false if should drop
 // based on the drop/pass filter parameters
-func (f *Filter) ShouldFieldsPass(key string) bool {
+func (f *Filter) shouldFieldPass(key string) bool {
     if f.fieldPass != nil {
         if f.fieldPass.Match(key) {
             return true

@@ -125,9 +172,9 @@ func (f *Filter) ShouldFieldsPass(key string) bool {
     return true
 }

-// ShouldTagsPass returns true if the metric should pass, false if should drop
+// shouldTagsPass returns true if the metric should pass, false if should drop
 // based on the tagdrop/tagpass filter parameters
-func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
+func (f *Filter) shouldTagsPass(tags map[string]string) bool {
     if f.TagPass != nil {
         for _, pat := range f.TagPass {
             if pat.filter == nil {

@@ -161,7 +208,7 @@ func (f *Filter) ShouldTagsPass(tags map[string]string) bool {

 // Apply TagInclude and TagExclude filters.
 // modifies the tags map in-place.
-func (f *Filter) FilterTags(tags map[string]string) {
+func (f *Filter) filterTags(tags map[string]string) {
     if f.tagInclude != nil {
         for k, _ := range tags {
             if !f.tagInclude.Match(k) {
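To tie the renamed pieces together, here is a sketch of the new call pattern from a caller's point of view (hypothetical driver code; the field and tag values are invented, and since models is an internal package this would only compile inside the telegraf tree):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/models"
)

func main() {
	f := models.Filter{
		NamePass:   []string{"cpu*"},
		TagExclude: []string{"host"},
	}
	if err := f.Compile(); err != nil {
		panic(err)
	}

	fields := map[string]interface{}{"usage_idle": 99.5}
	tags := map[string]string{"host": "node-1", "cpu": "cpu0"}

	// Apply returns false if the metric is filtered out entirely; otherwise
	// it prunes fields and tags in place (here, the "host" tag is deleted
	// by the TagExclude rule).
	if f.Apply("cpu_usage", fields, tags) {
		fmt.Println(fields, tags) // map[usage_idle:99.5] map[cpu:cpu0]
	}
}
```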
@@ -3,12 +3,62 @@ package models

import (
    "testing"

    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

+func TestFilter_ApplyEmpty(t *testing.T) {
+    f := Filter{}
+    require.NoError(t, f.Compile())
+    assert.False(t, f.IsActive())
+
+    assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
+}
+
+func TestFilter_ApplyTagsDontPass(t *testing.T) {
+    filters := []TagFilter{
+        TagFilter{
+            Name:   "cpu",
+            Filter: []string{"cpu-*"},
+        },
+    }
+    f := Filter{
+        TagDrop: filters,
+    }
+    require.NoError(t, f.Compile())
+    require.NoError(t, f.Compile())
+    assert.True(t, f.IsActive())
+
+    assert.False(t, f.Apply("m",
+        map[string]interface{}{"value": int64(1)},
+        map[string]string{"cpu": "cpu-total"}))
+}
+
+func TestFilter_ApplyDeleteFields(t *testing.T) {
+    f := Filter{
+        FieldDrop: []string{"value"},
+    }
+    require.NoError(t, f.Compile())
+    require.NoError(t, f.Compile())
+    assert.True(t, f.IsActive())
+
+    fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
+    assert.True(t, f.Apply("m", fields, nil))
+    assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
+}
+
+func TestFilter_ApplyDeleteAllFields(t *testing.T) {
+    f := Filter{
+        FieldDrop: []string{"value*"},
+    }
+    require.NoError(t, f.Compile())
+    require.NoError(t, f.Compile())
+    assert.True(t, f.IsActive())
+
+    fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
+    assert.False(t, f.Apply("m", fields, nil))
+}
+
func TestFilter_Empty(t *testing.T) {
    f := Filter{}

@@ -23,7 +73,7 @@ func TestFilter_Empty(t *testing.T) {
    }

    for _, measurement := range measurements {
-        if !f.ShouldFieldsPass(measurement) {
+        if !f.shouldFieldPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }
@@ -33,7 +83,7 @@ func TestFilter_NamePass(t *testing.T) {
    f := Filter{
        NamePass: []string{"foo*", "cpu_usage_idle"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

    passes := []string{
        "foo",
@@ -51,13 +101,13 @@ func TestFilter_NamePass(t *testing.T) {
    }

    for _, measurement := range passes {
-        if !f.ShouldNamePass(measurement) {
+        if !f.shouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
-        if f.ShouldNamePass(measurement) {
+        if f.shouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
@@ -67,7 +117,7 @@ func TestFilter_NameDrop(t *testing.T) {
    f := Filter{
        NameDrop: []string{"foo*", "cpu_usage_idle"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

    drops := []string{
        "foo",
@@ -85,13 +135,13 @@ func TestFilter_NameDrop(t *testing.T) {
    }

    for _, measurement := range passes {
-        if !f.ShouldNamePass(measurement) {
+        if !f.shouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
-        if f.ShouldNamePass(measurement) {
+        if f.shouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
@@ -101,7 +151,7 @@ func TestFilter_FieldPass(t *testing.T) {
    f := Filter{
        FieldPass: []string{"foo*", "cpu_usage_idle"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

    passes := []string{
        "foo",
@@ -119,13 +169,13 @@ func TestFilter_FieldPass(t *testing.T) {
    }

    for _, measurement := range passes {
-        if !f.ShouldFieldsPass(measurement) {
+        if !f.shouldFieldPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
-        if f.ShouldFieldsPass(measurement) {
+        if f.shouldFieldPass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
@@ -135,7 +185,7 @@ func TestFilter_FieldDrop(t *testing.T) {
    f := Filter{
        FieldDrop: []string{"foo*", "cpu_usage_idle"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

    drops := []string{
        "foo",
@@ -153,13 +203,13 @@ func TestFilter_FieldDrop(t *testing.T) {
    }

    for _, measurement := range passes {
-        if !f.ShouldFieldsPass(measurement) {
+        if !f.shouldFieldPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
-        if f.ShouldFieldsPass(measurement) {
+        if f.shouldFieldPass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
@@ -178,7 +228,7 @@ func TestFilter_TagPass(t *testing.T) {
    f := Filter{
        TagPass: filters,
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

    passes := []map[string]string{
        {"cpu": "cpu-total"},
@@ -197,13 +247,13 @@ func TestFilter_TagPass(t *testing.T) {
    }

    for _, tags := range passes {
-        if !f.ShouldTagsPass(tags) {
+        if !f.shouldTagsPass(tags) {
            t.Errorf("Expected tags %v to pass", tags)
        }
    }

    for _, tags := range drops {
-        if f.ShouldTagsPass(tags) {
+        if f.shouldTagsPass(tags) {
            t.Errorf("Expected tags %v to drop", tags)
        }
    }
@@ -222,7 +272,7 @@ func TestFilter_TagDrop(t *testing.T) {
    f := Filter{
        TagDrop: filters,
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

    drops := []map[string]string{
        {"cpu": "cpu-total"},
@@ -241,30 +291,18 @@ func TestFilter_TagDrop(t *testing.T) {
    }

    for _, tags := range passes {
-        if !f.ShouldTagsPass(tags) {
+        if !f.shouldTagsPass(tags) {
            t.Errorf("Expected tags %v to pass", tags)
        }
    }

    for _, tags := range drops {
-        if f.ShouldTagsPass(tags) {
+        if f.shouldTagsPass(tags) {
            t.Errorf("Expected tags %v to drop", tags)
        }
    }
}

-func TestFilter_ShouldMetricsPass(t *testing.T) {
-    m := testutil.TestMetric(1, "testmetric")
-    f := Filter{
-        NameDrop: []string{"foobar"},
-    }
-    require.NoError(t, f.CompileFilter())
-    require.True(t, f.ShouldMetricPass(m))
-
-    m = testutil.TestMetric(1, "foobar")
-    require.False(t, f.ShouldMetricPass(m))
-}
-
func TestFilter_FilterTagsNoMatches(t *testing.T) {
    pretags := map[string]string{
        "host": "localhost",
@@ -273,9 +311,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
    f := Filter{
        TagExclude: []string{"nomatch"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

-    f.FilterTags(pretags)
+    f.filterTags(pretags)
    assert.Equal(t, map[string]string{
        "host":  "localhost",
        "mytag": "foobar",
@@ -284,9 +322,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
    f = Filter{
        TagInclude: []string{"nomatch"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

-    f.FilterTags(pretags)
+    f.filterTags(pretags)
    assert.Equal(t, map[string]string{}, pretags)
}

@@ -298,9 +336,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
    f := Filter{
        TagExclude: []string{"ho*"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

-    f.FilterTags(pretags)
+    f.filterTags(pretags)
    assert.Equal(t, map[string]string{
        "mytag": "foobar",
    }, pretags)
@@ -312,9 +350,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
    f = Filter{
        TagInclude: []string{"my*"},
    }
-    require.NoError(t, f.CompileFilter())
+    require.NoError(t, f.Compile())

-    f.FilterTags(pretags)
+    f.filterTags(pretags)
    assert.Equal(t, map[string]string{
        "mytag": "foobar",
    }, pretags)
@@ -57,21 +57,17 @@ func NewRunningOutput(
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
-    if ro.Config.Filter.IsActive {
-        if !ro.Config.Filter.ShouldMetricPass(metric) {
-            return
-        }
-    }
-
-    // Filter any tagexclude/taginclude parameters before adding metric
-    if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
+    if ro.Config.Filter.IsActive() {
        // In order to filter out tags, we need to create a new metric, since
        // metrics are immutable once created.
-        name := metric.Name()
        tags := metric.Tags()
        fields := metric.Fields()
        t := metric.Time()
+        name := metric.Name()
-        ro.Config.Filter.FilterTags(tags)
+        if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
+            return
+        }
        // error is not possible if creating from another metric, so ignore.
        metric, _ = telegraf.NewMetric(name, tags, fields, t)
    }
@@ -31,9 +31,7 @@ var next5 = []telegraf.Metric{
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &perfOutput{}
@@ -49,9 +47,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &perfOutput{}
@@ -69,9 +65,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
// Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &perfOutput{}
@@ -88,11 +82,10 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
func TestRunningOutput_DropFilter(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
-            IsActive: true,
            NameDrop: []string{"metric1", "metric2"},
        },
    }
-    assert.NoError(t, conf.Filter.CompileFilter())
+    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -114,11 +107,10 @@ func TestRunningOutput_DropFilter(t *testing.T) {
func TestRunningOutput_PassFilter(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
-            IsActive: true,
            NameDrop: []string{"metric1000", "foo*"},
        },
    }
-    assert.NoError(t, conf.Filter.CompileFilter())
+    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -140,11 +132,11 @@ func TestRunningOutput_PassFilter(t *testing.T) {
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
-            IsActive: true,
-
            TagInclude: []string{"nothing*"},
        },
    }
-    assert.NoError(t, conf.Filter.CompileFilter())
+    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -162,11 +154,11 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
-            IsActive: true,
-
            TagExclude: []string{"tag*"},
        },
    }
-    assert.NoError(t, conf.Filter.CompileFilter())
+    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -184,11 +176,11 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
-            IsActive: true,
-
            TagExclude: []string{"nothing*"},
        },
    }
-    assert.NoError(t, conf.Filter.CompileFilter())
+    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -206,11 +198,11 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
-            IsActive: true,
-
            TagInclude: []string{"tag*"},
        },
    }
-    assert.NoError(t, conf.Filter.CompileFilter())
+    assert.NoError(t, conf.Filter.Compile())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -227,9 +219,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &mockOutput{}
@@ -252,9 +242,7 @@ func TestRunningOutputDefault(t *testing.T) {
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &mockOutput{}
@@ -283,9 +271,7 @@ func TestRunningOutputFlushWhenFull(t *testing.T) {
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &mockOutput{}
@@ -304,9 +290,7 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {

func TestRunningOutputWriteFail(t *testing.T) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &mockOutput{}
@@ -339,9 +323,7 @@ func TestRunningOutputWriteFail(t *testing.T) {
// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &mockOutput{}
@@ -379,9 +361,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &mockOutput{}
@@ -452,9 +432,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
    conf := &OutputConfig{
-        Filter: Filter{
-            IsActive: false,
-        },
+        Filter: Filter{},
    }

    m := &mockOutput{}
66 metric.go
@@ -6,6 +6,17 @@ import (
    "github.com/influxdata/influxdb/client/v2"
)

+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+    _ ValueType = iota
+    Counter
+    Gauge
+    Untyped
+)
+
type Metric interface {
    // Name returns the measurement name of the metric
    Name() string
@@ -16,6 +27,9 @@ type Metric interface {
    // Time returns the timestamp for the metric
    Time() time.Time

+    // Type returns the metric type. Can be either telegraf.Gauge or telegraf.Counter
+    Type() ValueType
+
    // UnixNano returns the unix nano time of the metric
    UnixNano() int64

@@ -35,12 +49,11 @@ type Metric interface {
// metric is a wrapper of the influxdb client.Point struct
type metric struct {
    pt *client.Point
+
+    mType ValueType
}

-// NewMetric returns a metric with the given timestamp. If a timestamp is not
-// given, then data is sent to the database without a timestamp, in which case
-// the server will assign local time upon reception. NOTE: it is recommended to
-// send data with a timestamp.
+// NewMetric returns an untyped metric.
func NewMetric(
    name string,
    tags map[string]string,
@@ -52,7 +65,46 @@ func NewMetric(
        return nil, err
    }
    return &metric{
-        pt: pt,
+        pt:    pt,
+        mType: Untyped,
    }, nil
}

+// NewGaugeMetric returns a gauge metric.
+// Gauge metrics should be used when the metric can arbitrarily go up and
+// down. ie, temperature, memory usage, cpu usage, etc.
+func NewGaugeMetric(
+    name string,
+    tags map[string]string,
+    fields map[string]interface{},
+    t time.Time,
+) (Metric, error) {
+    pt, err := client.NewPoint(name, tags, fields, t)
+    if err != nil {
+        return nil, err
+    }
+    return &metric{
+        pt:    pt,
+        mType: Gauge,
+    }, nil
+}
+
+// NewCounterMetric returns a Counter metric.
+// Counter metrics should be used when the metric being created is an
+// always-increasing counter. ie, net bytes received, requests served, errors, etc.
+func NewCounterMetric(
+    name string,
+    tags map[string]string,
+    fields map[string]interface{},
+    t time.Time,
+) (Metric, error) {
+    pt, err := client.NewPoint(name, tags, fields, t)
+    if err != nil {
+        return nil, err
+    }
+    return &metric{
+        pt:    pt,
+        mType: Counter,
+    }, nil
+}
+
@@ -68,6 +120,10 @@ func (m *metric) Time() time.Time {
    return m.pt.Time()
}

+func (m *metric) Type() ValueType {
+    return m.mType
+}
+
func (m *metric) UnixNano() int64 {
    return m.pt.UnixNano()
}
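As an editorial aside (not part of the diff): a minimal sketch of how the new typed constructors and the `Type()` accessor fit together, assuming the `telegraf` package as patched above; the measurement name and field are illustrative only.

```go
package main

import (
    "fmt"
    "time"

    "github.com/influxdata/telegraf"
)

func main() {
    tags := map[string]string{"host": "localhost"}
    fields := map[string]interface{}{"requests_served": int64(42)}

    // Counters are for always-increasing values; gauges may go up or down.
    counter, err := telegraf.NewCounterMetric("nginx", tags, fields, time.Now())
    if err != nil {
        panic(err)
    }
    fmt.Println(counter.Type() == telegraf.Counter) // true

    // Metrics built with plain NewMetric now report Untyped.
    plain, _ := telegraf.NewMetric("nginx", tags, fields, time.Now())
    fmt.Println(plain.Type() == telegraf.Untyped) // true
}
```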
@@ -23,6 +23,51 @@ func TestNewMetric(t *testing.T) {
    m, err := NewMetric("cpu", tags, fields, now)
    assert.NoError(t, err)

+    assert.Equal(t, Untyped, m.Type())
    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, now, m.Time())
    assert.Equal(t, now.UnixNano(), m.UnixNano())
}

+func TestNewGaugeMetric(t *testing.T) {
+    now := time.Now()
+
+    tags := map[string]string{
+        "host":       "localhost",
+        "datacenter": "us-east-1",
+    }
+    fields := map[string]interface{}{
+        "usage_idle": float64(99),
+        "usage_busy": float64(1),
+    }
+    m, err := NewGaugeMetric("cpu", tags, fields, now)
+    assert.NoError(t, err)
+
+    assert.Equal(t, Gauge, m.Type())
+    assert.Equal(t, tags, m.Tags())
+    assert.Equal(t, fields, m.Fields())
+    assert.Equal(t, "cpu", m.Name())
+    assert.Equal(t, now, m.Time())
+    assert.Equal(t, now.UnixNano(), m.UnixNano())
+}
+
+func TestNewCounterMetric(t *testing.T) {
+    now := time.Now()
+
+    tags := map[string]string{
+        "host":       "localhost",
+        "datacenter": "us-east-1",
+    }
+    fields := map[string]interface{}{
+        "usage_idle": float64(99),
+        "usage_busy": float64(1),
+    }
+    m, err := NewCounterMetric("cpu", tags, fields, now)
+    assert.NoError(t, err)
+
+    assert.Equal(t, Counter, m.Type())
+    assert.Equal(t, tags, m.Tags())
+    assert.Equal(t, fields, m.Fields())
+    assert.Equal(t, "cpu", m.Name())
@@ -1,6 +1,8 @@
package aerospike

import (
+    "errors"
+    "log"
    "net"
    "strconv"
    "strings"
@@ -11,7 +13,7 @@ import (
    "github.com/influxdata/telegraf/internal/errchan"
    "github.com/influxdata/telegraf/plugins/inputs"

-    as "github.com/sparrc/aerospike-client-go"
+    as "github.com/aerospike/aerospike-client-go"
)

type Aerospike struct {
@@ -82,7 +84,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
        return err
    }
    for k, v := range stats {
-        fields[strings.Replace(k, "-", "_", -1)] = parseValue(v)
+        val, err := parseValue(v)
+        if err == nil {
+            fields[strings.Replace(k, "-", "_", -1)] = val
+        } else {
+            log.Printf("skipping aerospike field %v with int64 overflow", k)
+        }
    }
    acc.AddFields("aerospike_node", fields, tags, time.Now())

@@ -110,7 +117,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
        if len(parts) < 2 {
            continue
        }
-        nFields[strings.Replace(parts[0], "-", "_", -1)] = parseValue(parts[1])
+        val, err := parseValue(parts[1])
+        if err == nil {
+            nFields[strings.Replace(parts[0], "-", "_", -1)] = val
+        } else {
+            log.Printf("skipping aerospike field %v with int64 overflow", parts[0])
+        }
    }
    acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
}
@@ -118,13 +130,16 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
    return nil
}

-func parseValue(v string) interface{} {
+func parseValue(v string) (interface{}, error) {
    if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
-        return parsed
+        return parsed, nil
+    } else if _, err := strconv.ParseUint(v, 10, 64); err == nil {
+        // int64 overflow, yet valid uint64
+        return nil, errors.New("Number is too large")
    } else if parsed, err := strconv.ParseBool(v); err == nil {
-        return parsed
+        return parsed, nil
    } else {
-        return v
+        return v, nil
    }
}
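An editorial aside (not part of the diff): a self-contained sketch of the standard-library behavior the new `parseValue` relies on — values above math.MaxInt64 fail `strconv.ParseInt` but still parse as uint64, which is exactly the case the plugin now reports as an error and skips with a log message.

```go
package main

import (
    "fmt"
    "strconv"
)

func main() {
    // The same out-of-range sample used by the test below.
    s := "18446744041841121751" // larger than math.MaxInt64

    // ParseInt fails with a range error...
    _, err := strconv.ParseInt(s, 10, 64)
    fmt.Println(err) // value out of range

    // ...but the value is still a valid uint64.
    u, err := strconv.ParseUint(s, 10, 64)
    fmt.Println(u, err) // 18446744041841121751 <nil>
}
```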
@@ -10,7 +10,7 @@ import (

func TestAerospikeStatistics(t *testing.T) {
    if testing.Short() {
-        t.Skip("Skipping integration test in short mode")
+        t.Skip("Skipping aerospike integration tests.")
    }

    a := &Aerospike{
@@ -29,7 +29,7 @@ func TestAerospikeStatistics(t *testing.T) {

func TestAerospikeStatisticsPartialErr(t *testing.T) {
    if testing.Short() {
-        t.Skip("Skipping integration test in short mode")
+        t.Skip("Skipping aerospike integration tests.")
    }

    a := &Aerospike{
@@ -48,3 +48,20 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
    assert.True(t, acc.HasMeasurement("aerospike_namespace"))
    assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
}

+func TestAerospikeParseValue(t *testing.T) {
+    // uint64 with value bigger than int64 max
+    val, err := parseValue("18446744041841121751")
+    assert.Nil(t, val)
+    assert.Error(t, err)
+
+    // int values
+    val, err = parseValue("42")
+    assert.NoError(t, err)
+    assert.Equal(t, val, int64(42), "must be parsed as int")
+
+    // string values
+    val, err = parseValue("BB977942A2CA502")
+    assert.NoError(t, err)
+    assert.Equal(t, val, `BB977942A2CA502`, "must be left as string")
+}
@@ -23,10 +23,12 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/graylog"
    _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
    _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
+    _ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
    _ "github.com/influxdata/telegraf/plugins/inputs/http_response"
    _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
    _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
    _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
+    _ "github.com/influxdata/telegraf/plugins/inputs/iptables"
    _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
    _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
    _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
@@ -2,7 +2,9 @@

Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.

-The plugin works by scanning the configured SocketDir for OSD and MON socket files. When it finds
+*Admin Socket Stats*
+
+This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**

The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are
@@ -27,11 +29,26 @@ Would be parsed into the following metrics, all of which would be tagged with co
- refresh_latency.sum: 5378.794002000


+*Cluster Stats*
+
+This gatherer works by invoking ceph commands against the cluster, and thus only requires the ceph
+client, a valid ceph configuration and an access key to function (the ceph_config and ceph_user
+configuration variables work in conjunction to specify these prerequisites). It may be run on any
+server with access to the cluster. The currently supported commands, each invoked as shown in the
+example below, are:
+
+* ceph status
+* ceph df
+* ceph osd pool stats
+
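An editorial note, not part of the committed README: judging from the plugin's `exec` helper shown later in this diff, each of these commands is run through the configured binary with the config file, user and JSON output flags, roughly like this (shown with the plugin defaults; the exact invocation is an illustration):

```
/usr/bin/ceph --conf /etc/ceph/ceph.conf --name client.admin --format json status
```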
### Configuration:

```
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]]
  ## This is the recommended interval to poll. Too frequent and you will lose
  ## data points due to timeouts during rebalancing and recovery
  interval = '1m'

  ## All configuration values are optional, defaults are shown below

  ## location of ceph binary
@@ -46,15 +63,86 @@ Would be parsed into the following metrics, all of which would be tagged with co

  ## suffix used to identify socket files
  socket_suffix = "asok"

+  ## Ceph user to authenticate as, ceph will search for the corresponding keyring
+  ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
+  ## client section of ceph.conf for example:
+  ##
+  ##     [client.telegraf]
+  ##         keyring = /etc/ceph/client.telegraf.keyring
+  ##
+  ## Consult the ceph documentation for more detail on keyring generation.
+  ceph_user = "client.admin"
+
+  ## Ceph configuration to use to locate the cluster
+  ceph_config = "/etc/ceph/ceph.conf"
+
+  ## Whether to gather statistics via the admin socket
+  gather_admin_socket_stats = true
+
+  ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
+  ## to be specified
+  gather_cluster_stats = true
```

### Measurements & Fields:

+*Admin Socket Stats*
+
All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.

+*Cluster Stats*
+
+* ceph\_osdmap
+  * epoch (float)
+  * full (boolean)
+  * nearfull (boolean)
+  * num\_in\_osds (float)
+  * num\_osds (float)
+  * num\_remapped\_pgs (float)
+  * num\_up\_osds (float)
+
+* ceph\_pgmap
+  * bytes\_avail (float)
+  * bytes\_total (float)
+  * bytes\_used (float)
+  * data\_bytes (float)
+  * num\_pgs (float)
+  * op\_per\_sec (float)
+  * read\_bytes\_sec (float)
+  * version (float)
+  * write\_bytes\_sec (float)
+  * recovering\_bytes\_per\_sec (float)
+  * recovering\_keys\_per\_sec (float)
+  * recovering\_objects\_per\_sec (float)
+
+* ceph\_pgmap\_state
+  * state name e.g. active+clean (float)
+
+* ceph\_usage
+  * bytes\_used (float)
+  * kb\_used (float)
+  * max\_avail (float)
+  * objects (float)
+
+* ceph\_pool\_usage
+  * bytes\_used (float)
+  * kb\_used (float)
+  * max\_avail (float)
+  * objects (float)
+
+* ceph\_pool\_stats
+  * op\_per\_sec (float)
+  * read\_bytes\_sec (float)
+  * write\_bytes\_sec (float)
+  * recovering\_object\_per\_sec (float)
+  * recovering\_bytes\_per\_sec (float)
+  * recovering\_keys\_per\_sec (float)

### Tags:

+*Admin Socket Stats*
+
All measurements will have the following tags:

- type: either 'osd' or 'mon' to indicate which type of node was queried
@@ -96,9 +184,21 @@ All measurements will have the following tags:
- throttle-osd_client_bytes
- throttle-osd_client_messages

+*Cluster Stats*
+
+* ceph\_pg\_state has the following tags:
+  * state (state for which the value applies e.g. active+clean, active+remapped+backfill)
+* ceph\_pool\_usage has the following tags:
+  * id
+  * name
+* ceph\_pool\_stats has the following tags:
+  * id
+  * name

### Example Output:

+*Admin Socket Stats*
+
<pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
* Plugin: ceph, Collection 1
@@ -107,3 +207,16 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
</pre>

+*Cluster Stats*
+
+<pre>
+> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
+> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
+> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
+> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
+> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
+> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
+> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
+> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
+</pre>
@@ -23,33 +23,15 @@ const (
)

type Ceph struct {
-    CephBinary   string
-    OsdPrefix    string
-    MonPrefix    string
-    SocketDir    string
-    SocketSuffix string
-}
-
-func (c *Ceph) setDefaults() {
-    if c.CephBinary == "" {
-        c.CephBinary = "/usr/bin/ceph"
-    }
-
-    if c.OsdPrefix == "" {
-        c.OsdPrefix = osdPrefix
-    }
-
-    if c.MonPrefix == "" {
-        c.MonPrefix = monPrefix
-    }
-
-    if c.SocketDir == "" {
-        c.SocketDir = "/var/run/ceph"
-    }
-
-    if c.SocketSuffix == "" {
-        c.SocketSuffix = sockSuffix
-    }
+    CephBinary             string
+    OsdPrefix              string
+    MonPrefix              string
+    SocketDir              string
+    SocketSuffix           string
+    CephUser               string
+    CephConfig             string
+    GatherAdminSocketStats bool
+    GatherClusterStats     bool
}

func (c *Ceph) Description() string {
@@ -57,6 +39,10 @@ func (c *Ceph) Description() string {
}

var sampleConfig = `
+  ## This is the recommended interval to poll. Too frequent and you will lose
+  ## data points due to timeouts during rebalancing and recovery
+  interval = '1m'
+
  ## All configuration values are optional, defaults are shown below

  ## location of ceph binary
@@ -71,6 +57,18 @@ var sampleConfig = `

  ## suffix used to identify socket files
  socket_suffix = "asok"
+
+  ## Ceph user to authenticate as
+  ceph_user = "client.admin"
+
+  ## Ceph configuration to use to locate the cluster
+  ceph_config = "/etc/ceph/ceph.conf"
+
+  ## Whether to gather statistics via the admin socket
+  gather_admin_socket_stats = true
+
+  ## Whether to gather statistics via ceph commands
+  gather_cluster_stats = true
`

func (c *Ceph) SampleConfig() string {
@@ -78,7 +76,22 @@ func (c *Ceph) SampleConfig() string {
}

func (c *Ceph) Gather(acc telegraf.Accumulator) error {
-    c.setDefaults()
+    if c.GatherAdminSocketStats {
+        if err := c.gatherAdminSocketStats(acc); err != nil {
+            return err
+        }
+    }
+
+    if c.GatherClusterStats {
+        if err := c.gatherClusterStats(acc); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
    sockets, err := findSockets(c)
    if err != nil {
        return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
@@ -104,8 +117,46 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error {
    return nil
}

+func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error {
+    jobs := []struct {
+        command string
+        parser  func(telegraf.Accumulator, string) error
+    }{
+        {"status", decodeStatus},
+        {"df", decodeDf},
+        {"osd pool stats", decodeOsdPoolStats},
+    }
+
+    // For each job, execute against the cluster, parse and accumulate the data points
+    for _, job := range jobs {
+        output, err := c.exec(job.command)
+        if err != nil {
+            return fmt.Errorf("error executing command: %v", err)
+        }
+        err = job.parser(acc, output)
+        if err != nil {
+            return fmt.Errorf("error parsing output: %v", err)
+        }
+    }
+
+    return nil
+}
+
func init() {
-    inputs.Add(measurement, func() telegraf.Input { return &Ceph{} })
+    c := Ceph{
+        CephBinary:             "/usr/bin/ceph",
+        OsdPrefix:              osdPrefix,
+        MonPrefix:              monPrefix,
+        SocketDir:              "/var/run/ceph",
+        SocketSuffix:           sockSuffix,
+        CephUser:               "client.admin",
+        CephConfig:             "/etc/ceph/ceph.conf",
+        GatherAdminSocketStats: true,
+        GatherClusterStats:     false,
+    }
+
+    inputs.Add(measurement, func() telegraf.Input { return &c })
}

var perfDump = func(binary string, socket *socket) (string, error) {
@@ -247,3 +298,192 @@ func flatten(data interface{}) []*metric {

    return metrics
}

+func (c *Ceph) exec(command string) (string, error) {
+    cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"}
+    cmdArgs = append(cmdArgs, strings.Split(command, " ")...)
+
+    cmd := exec.Command(c.CephBinary, cmdArgs...)
+
+    var out bytes.Buffer
+    cmd.Stdout = &out
+    err := cmd.Run()
+    if err != nil {
+        return "", fmt.Errorf("error running ceph %v: %s", command, err)
+    }
+
+    output := out.String()
+
+    // Ceph doesn't sanitize its output, and may return invalid JSON. Patch this
+    // up for them, as having some inaccurate data is better than none.
+    output = strings.Replace(output, "-inf", "0", -1)
+    output = strings.Replace(output, "inf", "0", -1)
+
+    return output, nil
+}
+
+func decodeStatus(acc telegraf.Accumulator, input string) error {
+    data := make(map[string]interface{})
+    err := json.Unmarshal([]byte(input), &data)
+    if err != nil {
+        return fmt.Errorf("failed to parse json: '%s': %v", input, err)
+    }
+
+    err = decodeStatusOsdmap(acc, data)
+    if err != nil {
+        return err
+    }
+
+    err = decodeStatusPgmap(acc, data)
+    if err != nil {
+        return err
+    }
+
+    err = decodeStatusPgmapState(acc, data)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error {
+    osdmap, ok := data["osdmap"].(map[string]interface{})
+    if !ok {
+        return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
+    }
+    fields, ok := osdmap["osdmap"].(map[string]interface{})
+    if !ok {
+        return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
+    }
+    acc.AddFields("ceph_osdmap", fields, map[string]string{})
+    return nil
+}
+
+func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error {
+    pgmap, ok := data["pgmap"].(map[string]interface{})
+    if !ok {
+        return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
+    }
+    fields := make(map[string]interface{})
+    for key, value := range pgmap {
+        switch value.(type) {
+        case float64:
+            fields[key] = value
+        }
+    }
+    acc.AddFields("ceph_pgmap", fields, map[string]string{})
+    return nil
+}
+
+func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
+    pgmap, ok := data["pgmap"].(map[string]interface{})
+    if !ok {
+        return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
+    }
+    fields := make(map[string]interface{})
+    for key, value := range pgmap {
+        switch value.(type) {
+        case []interface{}:
+            if key != "pgs_by_state" {
+                continue
+            }
+            for _, state := range value.([]interface{}) {
+                state_map, ok := state.(map[string]interface{})
+                if !ok {
+                    return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
+                }
+                state_name, ok := state_map["state_name"].(string)
+                if !ok {
+                    return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
+                }
+                state_count, ok := state_map["count"].(float64)
+                if !ok {
+                    return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
+                }
+                fields[state_name] = state_count
+            }
+        }
+    }
+    acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
+    return nil
+}
+
+func decodeDf(acc telegraf.Accumulator, input string) error {
+    data := make(map[string]interface{})
+    err := json.Unmarshal([]byte(input), &data)
+    if err != nil {
+        return fmt.Errorf("failed to parse json: '%s': %v", input, err)
+    }
+
+    // ceph.usage: records global utilization and number of objects
+    stats_fields, ok := data["stats"].(map[string]interface{})
+    if !ok {
+        return fmt.Errorf("WARNING %s - unable to decode df stats", measurement)
+    }
+    acc.AddFields("ceph_usage", stats_fields, map[string]string{})
+
+    // ceph.pool.usage: records per pool utilization and number of objects
+    pools, ok := data["pools"].([]interface{})
+    if !ok {
+        return fmt.Errorf("WARNING %s - unable to decode df pools", measurement)
+    }
+
+    for _, pool := range pools {
+        pool_map, ok := pool.(map[string]interface{})
+        if !ok {
+            return fmt.Errorf("WARNING %s - unable to decode df pool", measurement)
+        }
+        pool_name, ok := pool_map["name"].(string)
+        if !ok {
+            return fmt.Errorf("WARNING %s - unable to decode df pool name", measurement)
+        }
+        fields, ok := pool_map["stats"].(map[string]interface{})
+        if !ok {
+            return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement)
+        }
+        tags := map[string]string{
+            "name": pool_name,
+        }
+        acc.AddFields("ceph_pool_usage", fields, tags)
+    }
+
+    return nil
+}
+
+func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error {
+    data := make([]map[string]interface{}, 0)
+    err := json.Unmarshal([]byte(input), &data)
+    if err != nil {
+        return fmt.Errorf("failed to parse json: '%s': %v", input, err)
+    }
+
+    // ceph.pool.stats: records per pool IO and recovery throughput
+    for _, pool := range data {
+        pool_name, ok := pool["pool_name"].(string)
+        if !ok {
+            return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement)
+        }
+        // Note: the 'recovery' object looks broken (in hammer), so it's omitted
+        objects := []string{
+            "client_io_rate",
+            "recovery_rate",
+        }
+        fields := make(map[string]interface{})
+        for _, object := range objects {
+            perfdata, ok := pool[object].(map[string]interface{})
+            if !ok {
+                return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement)
+            }
+            for key, value := range perfdata {
+                fields[key] = value
+            }
+        }
+        tags := map[string]string{
+            "name": pool_name,
+        }
+        acc.AddFields("ceph_pool_stats", fields, tags)
+    }
+
+    return nil
+}
@@ -65,12 +65,17 @@ func TestFindSockets(t *testing.T) {
        assert.NoError(t, err)
    }()
    c := &Ceph{
-        CephBinary: "foo",
-        SocketDir:  tmpdir,
+        CephBinary:             "foo",
+        OsdPrefix:              "ceph-osd",
+        MonPrefix:              "ceph-mon",
+        SocketDir:              tmpdir,
+        SocketSuffix:           "asok",
+        CephUser:               "client.admin",
+        CephConfig:             "/etc/ceph/ceph.conf",
+        GatherAdminSocketStats: true,
+        GatherClusterStats:     false,
    }

-    c.setDefaults()
-
    for _, st := range sockTestParams {
        createTestFiles(tmpdir, st)

@@ -34,6 +34,11 @@ API endpoint. In the following order the plugin will attempt to authenticate.
  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'

+  ## Maximum requests per second. Note that the global default AWS rate limit is
+  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
+  ## maximum of 10. Optional - default value is 10.
+  ratelimit = 10
+
  ## Metrics to Pull (optional)
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
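An editorial aside (not part of the diff): since each plugin instance now rate-limits itself, a hypothetical way to honor the 10 req/s account-wide default with two namespaces is to split the budget between them; the namespaces below are illustrative.

```toml
[[inputs.cloudwatch]]
  namespace = 'AWS/ELB'
  ratelimit = 5

[[inputs.cloudwatch]]
  namespace = 'AWS/EC2'
  ratelimit = 5
```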
@@ -33,6 +33,7 @@ type (
        Namespace   string            `toml:"namespace"`
        Metrics     []*Metric         `toml:"metrics"`
        CacheTTL    internal.Duration `toml:"cache_ttl"`
+        RateLimit   int               `toml:"ratelimit"`
        client      cloudwatchClient
        metricCache *MetricCache
    }
@@ -96,6 +97,11 @@ func (c *CloudWatch) SampleConfig() string {
  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'

+  ## Maximum requests per second. Note that the global default AWS rate limit is
+  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
+  ## maximum of 10. Optional - default value is 10.
+  ratelimit = 10
+
  ## Metrics to Pull (optional)
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
@@ -175,7 +181,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
    // limit concurrency or we can easily exhaust user connection limit
    // see cloudwatch API request limits:
    // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
-    lmtr := limiter.NewRateLimiter(10, time.Second)
+    lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
    defer lmtr.Stop()
    var wg sync.WaitGroup
    wg.Add(len(metrics))
@@ -195,7 +201,8 @@ func init() {
    inputs.Add("cloudwatch", func() telegraf.Input {
        ttl, _ := time.ParseDuration("1hr")
        return &CloudWatch{
-            CacheTTL: internal.Duration{Duration: ttl},
+            CacheTTL:  internal.Duration{Duration: ttl},
+            RateLimit: 10,
        }
    })
}
@@ -58,6 +58,7 @@ func TestGather(t *testing.T) {
        Namespace: "AWS/ELB",
        Delay:     internalDuration,
        Period:    internalDuration,
+        RateLimit: 10,
    }

    var acc testutil.Accumulator

File diff suppressed because one or more lines are too long
@@ -28,7 +28,8 @@ type Docker struct {
    PerDevice bool `toml:"perdevice"`
    Total     bool `toml:"total"`

-    client DockerClient
+    client      DockerClient
+    engine_host string
}

// DockerClient interface, useful for testing
@@ -147,6 +148,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
    if err != nil {
        return err
    }
+    d.engine_host = info.Name

    fields := map[string]interface{}{
        "n_cpus": info.NCPU,
@@ -159,11 +161,11 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
    // Add metrics
    acc.AddFields("docker",
        fields,
-        nil,
+        map[string]string{"engine_host": d.engine_host},
        now)
    acc.AddFields("docker",
        map[string]interface{}{"memory_total": info.MemTotal},
-        map[string]string{"unit": "bytes"},
+        map[string]string{"unit": "bytes", "engine_host": d.engine_host},
        now)
    // Get storage metrics
    for _, rawData := range info.DriverStatus {
@@ -177,7 +179,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
            // pool blocksize
            acc.AddFields("docker",
                map[string]interface{}{"pool_blocksize": value},
-                map[string]string{"unit": "bytes"},
+                map[string]string{"unit": "bytes", "engine_host": d.engine_host},
                now)
        } else if strings.HasPrefix(name, "data_space_") {
            // data space
@@ -192,13 +194,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
    if len(dataFields) > 0 {
        acc.AddFields("docker_data",
            dataFields,
-            map[string]string{"unit": "bytes"},
+            map[string]string{"unit": "bytes", "engine_host": d.engine_host},
            now)
    }
    if len(metadataFields) > 0 {
        acc.AddFields("docker_metadata",
            metadataFields,
-            map[string]string{"unit": "bytes"},
+            map[string]string{"unit": "bytes", "engine_host": d.engine_host},
            now)
    }
    return nil
@@ -225,6 +227,7 @@ func (d *Docker) gatherContainer(
        imageVersion = imageParts[1]
    }
    tags := map[string]string{
+        "engine_host":       d.engine_host,
        "container_name":    cname,
        "container_image":   imageName,
        "container_version": imageVersion,

@@ -400,7 +400,7 @@ func TestDockerGatherInfo(t *testing.T) {
            "n_images":     int(199),
            "n_goroutines": int(39),
        },
-        map[string]string{},
+        map[string]string{"engine_host": "absol"},
    )

    acc.AssertContainsTaggedFields(t,
@@ -411,7 +411,8 @@ func TestDockerGatherInfo(t *testing.T) {
            "available": int64(36530000000),
        },
        map[string]string{
-            "unit": "bytes",
+            "unit":        "bytes",
+            "engine_host": "absol",
        },
    )
    acc.AssertContainsTaggedFields(t,
@@ -425,6 +426,7 @@ func TestDockerGatherInfo(t *testing.T) {
            "container_image":   "quay.io/coreos/etcd",
            "cpu":               "cpu3",
            "container_version": "v2.2.2",
+            "engine_host":       "absol",
        },
    )
    acc.AssertContainsTaggedFields(t,
@@ -467,6 +469,7 @@ func TestDockerGatherInfo(t *testing.T) {
            "container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
        },
        map[string]string{
+            "engine_host":       "absol",
            "container_name":    "etcd2",
            "container_image":   "quay.io/coreos/etcd",
            "container_version": "v2.2.2",
@@ -8,9 +8,18 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference

```
[[inputs.elasticsearch]]
  ## specify a list of one or more Elasticsearch servers
  servers = ["http://localhost:9200"]

+  ## Timeout for HTTP requests to the elastic search server(s)
+  http_timeout = "5s"
+
  ## set local to false when you want to read the indices stats from all nodes
  ## within the cluster
  local = true
-  cluster_health = true

+  ## set cluster_health to true when you want to also obtain cluster level stats
+  cluster_health = false
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"

@@ -62,6 +62,9 @@ const sampleConfig = `
  ## specify a list of one or more Elasticsearch servers
  servers = ["http://localhost:9200"]

+  ## Timeout for HTTP requests to the elastic search server(s)
+  http_timeout = "5s"
+
  ## set local to false when you want to read the indices stats from all nodes
  ## within the cluster
  local = true
@@ -82,6 +85,7 @@ const sampleConfig = `
type Elasticsearch struct {
    Local         bool
    Servers       []string
+    HttpTimeout   internal.Duration
    ClusterHealth bool
    SSLCA         string `toml:"ssl_ca"`   // Path to CA file
    SSLCert       string `toml:"ssl_cert"` // Path to host cert file
@@ -92,7 +96,9 @@ type Elasticsearch struct {

// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
-    return &Elasticsearch{}
+    return &Elasticsearch{
+        HttpTimeout: internal.Duration{Duration: time.Second * 5},
+    }
}

// SampleConfig returns sample configuration for this plugin.
@@ -150,12 +156,12 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
        return nil, err
    }
    tr := &http.Transport{
-        ResponseHeaderTimeout: time.Duration(3 * time.Second),
+        ResponseHeaderTimeout: e.HttpTimeout.Duration,
        TLSClientConfig:       tlsCfg,
    }
    client := &http.Client{
        Transport: tr,
-        Timeout:   time.Duration(4 * time.Second),
+        Timeout:   e.HttpTimeout.Duration,
    }

    return client, nil
@@ -5,6 +5,7 @@ import (
    "fmt"
    "os/exec"
    "path/filepath"
+    "runtime"
    "strings"
    "sync"
    "syscall"
@@ -114,9 +115,36 @@ func (c CommandRunner) Run(
        }
    }

+    out = removeCarriageReturns(out)
    return out.Bytes(), nil
}

+// removeCarriageReturns removes all carriage returns from the input if the
+// OS is Windows. It does not return any errors.
+func removeCarriageReturns(b bytes.Buffer) bytes.Buffer {
+    if runtime.GOOS == "windows" {
+        var buf bytes.Buffer
+        for {
+            byt, er := b.ReadBytes(0x0D)
+            end := len(byt)
+            if nil == er {
+                end -= 1
+            }
+            if nil != byt {
+                buf.Write(byt[:end])
+            } else {
+                break
+            }
+            if nil != er {
+                break
+            }
+        }
+        b = buf
+    }
+    return b
+
+}
+
func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
    defer wg.Done()

@@ -1,7 +1,9 @@
package exec

import (
+    "bytes"
    "fmt"
+    "runtime"
    "testing"

    "github.com/influxdata/telegraf"
@@ -46,6 +48,29 @@ cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

+type CarriageReturnTest struct {
+    input  []byte
+    output []byte
+}
+
+var crTests = []CarriageReturnTest{
+    {[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0d, 0x0a, 0x4c, 0x69,
+        0x6e, 0x65, 0x20, 0x32, 0x0d, 0x0a, 0x4c, 0x69, 0x6e, 0x65,
+        0x20, 0x33},
+        []byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
+            0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}},
+    {[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
+        0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33},
+        []byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
+            0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}},
+    {[]byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c,
+        0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20,
+        0x6c, 0x69, 0x6e, 0x65},
+        []byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c,
+            0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20,
+            0x6c, 0x69, 0x6e, 0x65}},
+}
+
type runnerMock struct {
    out []byte
    err error
@@ -217,3 +242,21 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) {
    }
    acc.AssertContainsFields(t, "metric", fields)
}

+func TestRemoveCarriageReturns(t *testing.T) {
+    if runtime.GOOS == "windows" {
+        // Test that all carriage returns are removed
+        for _, test := range crTests {
+            b := bytes.NewBuffer(test.input)
+            out := removeCarriageReturns(*b)
+            assert.True(t, bytes.Equal(test.output, out.Bytes()))
+        }
+    } else {
+        // Test that the buffer is returned unaltered
+        for _, test := range crTests {
+            b := bytes.NewBuffer(test.input)
+            out := removeCarriageReturns(*b)
+            assert.True(t, bytes.Equal(test.input, out.Bytes()))
+        }
+    }
+}
24 plugins/inputs/http_listener/README.md Normal file
@@ -0,0 +1,24 @@
# HTTP listener service input plugin

The HTTP listener is a service input plugin that listens for messages sent via HTTP POST.
The plugin expects messages in the InfluxDB line-protocol ONLY; other Telegraf input data formats are not supported.
The intent of the plugin is to allow Telegraf to serve as a proxy/router for the /write endpoint of the InfluxDB HTTP API.
When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}`, but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.

See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
Example: curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'

### Configuration:

This is a sample configuration for the plugin.

```toml
# # Influx HTTP write listener
[[inputs.http_listener]]
  ## Address and port to host HTTP listener on
  service_address = ":8186"

  ## timeouts
  read_timeout = "10s"
  write_timeout = "10s"
```
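An editorial aside (not part of the new file): a hypothetical upstream Telegraf instance can relay its metrics to this listener by pointing its InfluxDB output at the listener's address; the hostname below is a placeholder.

```toml
# Upstream instance: write to the downstream listener instead of InfluxDB directly.
[[outputs.influxdb]]
  urls = ["http://downstream-host:8186"]
```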
154 plugins/inputs/http_listener/http_listener.go Normal file
@@ -0,0 +1,154 @@
|
||||
package http_listener

import (
    "io/ioutil"
    "log"
    "net"
    "net/http"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener"
    "github.com/influxdata/telegraf/plugins/parsers"
)

type HttpListener struct {
    ServiceAddress string
    ReadTimeout    internal.Duration
    WriteTimeout   internal.Duration

    sync.Mutex
    wg sync.WaitGroup

    listener *stoppableListener.StoppableListener

    parser parsers.Parser
    acc    telegraf.Accumulator
}

const sampleConfig = `
  ## Address and port to host HTTP listener on
  service_address = ":8186"

  ## timeouts
  read_timeout = "10s"
  write_timeout = "10s"
`

func (t *HttpListener) SampleConfig() string {
    return sampleConfig
}

func (t *HttpListener) Description() string {
    return "Influx HTTP write listener"
}

func (t *HttpListener) Gather(_ telegraf.Accumulator) error {
    return nil
}

func (t *HttpListener) SetParser(parser parsers.Parser) {
    t.parser = parser
}

// Start starts the http listener service.
func (t *HttpListener) Start(acc telegraf.Accumulator) error {
    t.Lock()
    defer t.Unlock()

    t.acc = acc

    var rawListener, err = net.Listen("tcp", t.ServiceAddress)
    if err != nil {
        return err
    }
    t.listener, err = stoppableListener.New(rawListener)
    if err != nil {
        return err
    }

    go t.httpListen()

    log.Printf("Started HTTP listener service on %s\n", t.ServiceAddress)

    return nil
}

// Stop cleans up all resources
func (t *HttpListener) Stop() {
    t.Lock()
    defer t.Unlock()

    t.listener.Stop()
    t.listener.Close()

    t.wg.Wait()

    log.Println("Stopped HTTP listener service on ", t.ServiceAddress)
}

// httpListen listens for HTTP requests.
func (t *HttpListener) httpListen() error {
    if t.ReadTimeout.Duration < time.Second {
        t.ReadTimeout.Duration = time.Second * 10
    }
    if t.WriteTimeout.Duration < time.Second {
        t.WriteTimeout.Duration = time.Second * 10
    }

    var server = http.Server{
        Handler:      t,
        ReadTimeout:  t.ReadTimeout.Duration,
        WriteTimeout: t.WriteTimeout.Duration,
    }

    return server.Serve(t.listener)
}

func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
    t.wg.Add(1)
    defer t.wg.Done()

    body, err := ioutil.ReadAll(req.Body)
    if err != nil {
        log.Printf("Problem reading request: [%s], Error: %s\n", string(body), err)
        http.Error(res, "ERROR reading request", http.StatusInternalServerError)
        return
    }

    switch req.URL.Path {
    case "/write":
        var metrics []telegraf.Metric
        metrics, err = t.parser.Parse(body)
        if err == nil {
            for _, m := range metrics {
                t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
            }
            res.WriteHeader(http.StatusNoContent)
        } else {
            log.Printf("Problem parsing body: [%s], Error: %s\n", string(body), err)
            http.Error(res, "ERROR parsing metrics", http.StatusInternalServerError)
        }
    case "/query":
        // Deliver a dummy response to the query endpoint, as some InfluxDB
        // clients test endpoint availability with a query
        res.Header().Set("Content-Type", "application/json")
        res.Header().Set("X-Influxdb-Version", "1.0")
        res.WriteHeader(http.StatusOK)
        res.Write([]byte("{\"results\":[]}"))
    case "/ping":
        // respond to ping requests
        res.WriteHeader(http.StatusNoContent)
    default:
        // Don't know how to respond to calls to other endpoints
        http.NotFound(res, req)
    }
}

func init() {
    inputs.Add("http_listener", func() telegraf.Input {
        return &HttpListener{}
    })
}
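A quick way to exercise the three handled paths against a locally running instance; the expected status codes follow directly from the ServeHTTP switch above, and the `db` query parameter is accepted but ignored (the output configuration picks the destination database, per the README):

```
curl -i -XPOST 'http://localhost:8186/write?db=mydb' --data-binary 'cpu value=1'   # 204 No Content
curl -i -XPOST 'http://localhost:8186/query' --data-urlencode 'q=SHOW DATABASES'   # 200 OK, body {"results":[]}
curl -i 'http://localhost:8186/ping'                                               # 204 No Content
curl -i 'http://localhost:8186/anything-else'                                      # 404 Not Found
```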
181
plugins/inputs/http_listener/http_listener_test.go
Normal file
@@ -0,0 +1,181 @@
package http_listener

import (
    "sync"
    "testing"
    "time"

    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/influxdata/telegraf/testutil"

    "bytes"
    "github.com/stretchr/testify/require"
    "net/http"
)

const (
    testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"

    testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
cpu_load_short,host=server03 value=12.0 1422568543702900257
cpu_load_short,host=server04 value=12.0 1422568543702900257
cpu_load_short,host=server05 value=12.0 1422568543702900257
cpu_load_short,host=server06 value=12.0 1422568543702900257
`
    badMsg = "blahblahblah: 42\n"

    emptyMsg = ""
)

func newTestHttpListener() *HttpListener {
    listener := &HttpListener{
        ServiceAddress: ":8186",
    }
    return listener
}

func TestWriteHTTP(t *testing.T) {
    listener := newTestHttpListener()
    parser, _ := parsers.NewInfluxParser()
    listener.SetParser(parser)

    acc := &testutil.Accumulator{}
    require.NoError(t, listener.Start(acc))
    defer listener.Stop()

    time.Sleep(time.Millisecond * 25)

    // post single message to listener
    resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsg)))
    require.NoError(t, err)
    require.EqualValues(t, 204, resp.StatusCode)

    time.Sleep(time.Millisecond * 15)
    acc.AssertContainsTaggedFields(t, "cpu_load_short",
        map[string]interface{}{"value": float64(12)},
        map[string]string{"host": "server01"},
    )

    // post multiple messages to listener
    resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs)))
    require.NoError(t, err)
    require.EqualValues(t, 204, resp.StatusCode)

    time.Sleep(time.Millisecond * 15)
    hostTags := []string{"server02", "server03",
        "server04", "server05", "server06"}
    for _, hostTag := range hostTags {
        acc.AssertContainsTaggedFields(t, "cpu_load_short",
            map[string]interface{}{"value": float64(12)},
            map[string]string{"host": hostTag},
        )
    }
}

// writes 25,000 metrics to the listener with 10 different writers
func TestWriteHTTPHighTraffic(t *testing.T) {
    listener := &HttpListener{ServiceAddress: ":8286"}
    parser, _ := parsers.NewInfluxParser()
    listener.SetParser(parser)

    acc := &testutil.Accumulator{}
    require.NoError(t, listener.Start(acc))
    defer listener.Stop()

    time.Sleep(time.Millisecond * 25)

    // post many messages to listener
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            for i := 0; i < 500; i++ {
                resp, err := http.Post("http://localhost:8286/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs)))
                require.NoError(t, err)
                require.EqualValues(t, 204, resp.StatusCode)
            }
            wg.Done()
        }()
    }

    wg.Wait()
    time.Sleep(time.Millisecond * 50)
    listener.Gather(acc)

    require.Equal(t, int64(25000), int64(acc.NMetrics()))
}

func TestReceive404ForInvalidEndpoint(t *testing.T) {
    listener := newTestHttpListener()
    listener.parser, _ = parsers.NewInfluxParser()

    acc := &testutil.Accumulator{}
    require.NoError(t, listener.Start(acc))
    defer listener.Stop()

    time.Sleep(time.Millisecond * 25)

    // post single message to listener
    resp, err := http.Post("http://localhost:8186/foobar", "", bytes.NewBuffer([]byte(testMsg)))
    require.NoError(t, err)
    require.EqualValues(t, 404, resp.StatusCode)
}

func TestWriteHTTPInvalid(t *testing.T) {
    time.Sleep(time.Millisecond * 250)

    listener := newTestHttpListener()
    listener.parser, _ = parsers.NewInfluxParser()

    acc := &testutil.Accumulator{}
    require.NoError(t, listener.Start(acc))
    defer listener.Stop()

    time.Sleep(time.Millisecond * 25)

    // post single message to listener
    resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(badMsg)))
    require.NoError(t, err)
    require.EqualValues(t, 500, resp.StatusCode)
}

func TestWriteHTTPEmpty(t *testing.T) {
    time.Sleep(time.Millisecond * 250)

    listener := newTestHttpListener()
    listener.parser, _ = parsers.NewInfluxParser()

    acc := &testutil.Accumulator{}
    require.NoError(t, listener.Start(acc))
    defer listener.Stop()

    time.Sleep(time.Millisecond * 25)

    // post single message to listener
    resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(emptyMsg)))
    require.NoError(t, err)
    require.EqualValues(t, 204, resp.StatusCode)
}

func TestQueryAndPingHTTP(t *testing.T) {
    time.Sleep(time.Millisecond * 250)

    listener := newTestHttpListener()
    listener.parser, _ = parsers.NewInfluxParser()

    acc := &testutil.Accumulator{}
    require.NoError(t, listener.Start(acc))
    defer listener.Stop()

    time.Sleep(time.Millisecond * 25)

    // post query to listener
    resp, err := http.Post("http://localhost:8186/query?db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22", "", nil)
    require.NoError(t, err)
    require.EqualValues(t, 200, resp.StatusCode)

    // post ping to listener
    resp, err = http.Post("http://localhost:8186/ping", "", nil)
    require.NoError(t, err)
    require.EqualValues(t, 204, resp.StatusCode)
}
10
plugins/inputs/http_listener/stoppableListener/LICENSE
Normal file
@@ -0,0 +1,10 @@
Copyright (c) 2014, Eric Urban
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62
plugins/inputs/http_listener/stoppableListener/listener.go
Normal file
@@ -0,0 +1,62 @@
package stoppableListener

import (
    "errors"
    "net"
    "time"
)

type StoppableListener struct {
    *net.TCPListener          // Wrapped listener
    stop             chan int // Channel used only to indicate listener should shutdown
}

func New(l net.Listener) (*StoppableListener, error) {
    tcpL, ok := l.(*net.TCPListener)

    if !ok {
        return nil, errors.New("Cannot wrap listener")
    }

    retval := &StoppableListener{}
    retval.TCPListener = tcpL
    retval.stop = make(chan int)

    return retval, nil
}

var StoppedError = errors.New("Listener stopped")

func (sl *StoppableListener) Accept() (net.Conn, error) {
    for {
        // Wait up to one second for a new connection
        sl.SetDeadline(time.Now().Add(time.Second))

        newConn, err := sl.TCPListener.Accept()

        // Check for the channel being closed
        select {
        case <-sl.stop:
            return nil, StoppedError
        default:
            // If the channel is still open, continue as normal
        }

        if err != nil {
            netErr, ok := err.(net.Error)

            // If this is a timeout, then continue to wait for
            // new connections
            if ok && netErr.Timeout() && netErr.Temporary() {
                continue
            }
        }

        return newConn, err
    }
}

func (sl *StoppableListener) Stop() {
    close(sl.stop)
}
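A minimal usage sketch of this wrapper, assuming nothing beyond the API shown above: wrap a plain TCP listener, accept in a loop, and unblock the loop from another goroutine by calling Stop. Because Accept polls with a one-second deadline, Stop takes effect within roughly a second.

```go
package main

import (
    "log"
    "net"
    "time"

    "github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener"
)

func main() {
    raw, err := net.Listen("tcp", ":8186")
    if err != nil {
        log.Fatal(err)
    }
    sl, err := stoppableListener.New(raw)
    if err != nil {
        log.Fatal(err)
    }

    // Unblock the Accept loop after five seconds.
    go func() {
        time.Sleep(5 * time.Second)
        sl.Stop()
    }()

    for {
        conn, err := sl.Accept()
        if err == stoppableListener.StoppedError {
            break // clean shutdown requested via Stop()
        }
        if err != nil {
            log.Println(err)
            continue
        }
        conn.Close() // a real server would hand the connection off here
    }
}
```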
@@ -2,8 +2,7 @@

The httpjson plugin can collect data from remote URLs which respond with JSON. Then it flattens JSON and finds all numeric values, treating them as floats.

-For example, if you have a service called _mycollector_, which has HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON
-plugin like this:
+For example, if you have a service called _mycollector_, which has an HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON plugin like this:

```
[[inputs.httpjson]]
@@ -15,12 +14,17 @@ plugin like this:

  # HTTP method to use (case-sensitive)
  method = "GET"

+  # Set response_timeout (default 5 seconds)
+  response_timeout = "5s"
```

`name` is used as a prefix for the measurements.

`method` specifies the HTTP method to use for requests.

+`response_timeout` specifies the timeout to wait for the response.

You can also specify which keys from server response should be considered tags:

```
@@ -94,8 +98,7 @@ httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stat

# Example 2, Multiple Services:

-There is also the option to collect JSON from multiple services, here is an
-example doing that.
+There is also the option to collect JSON from multiple services; here is an example doing that.

```
[[inputs.httpjson]]
@@ -16,13 +16,15 @@ import (
    "github.com/influxdata/telegraf/plugins/parsers"
)

// HttpJson struct
type HttpJson struct {
-    Name       string
-    Servers    []string
-    Method     string
-    TagKeys    []string
-    Parameters map[string]string
-    Headers    map[string]string
+    Name            string
+    Servers         []string
+    Method          string
+    TagKeys         []string
+    ResponseTimeout internal.Duration
+    Parameters      map[string]string
+    Headers         map[string]string

    // Path to CA file
    SSLCA string `toml:"ssl_ca"`
@@ -79,6 +81,8 @@ var sampleConfig = `
    "http://localhost:9999/stats/",
    "http://localhost:9998/stats/",
  ]
+  ## Set response_timeout (default 5 seconds)
+  response_timeout = "5s"

  ## HTTP method to use: GET or POST (case-sensitive)
  method = "GET"
@@ -126,12 +130,12 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
            return err
        }
        tr := &http.Transport{
-            ResponseHeaderTimeout: time.Duration(3 * time.Second),
+            ResponseHeaderTimeout: h.ResponseTimeout.Duration,
            TLSClientConfig:       tlsCfg,
        }
        client := &http.Client{
            Transport: tr,
-            Timeout:   time.Duration(4 * time.Second),
+            Timeout:   h.ResponseTimeout.Duration,
        }
        h.client.SetHTTPClient(client)
    }
@@ -291,6 +295,9 @@ func init() {
    inputs.Add("httpjson", func() telegraf.Input {
        return &HttpJson{
            client: &RealHTTPClient{},
+            ResponseTimeout: internal.Duration{
+                Duration: 5 * time.Second,
+            },
        }
    })
}
@@ -210,9 +210,13 @@ func (i *InfluxDB) gatherURL(
            continue
        }

+        if p.Tags == nil {
+            p.Tags = make(map[string]string)
+        }
+
        // If the object was a point, but was not fully initialized,
        // ignore it and move on.
-        if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 {
+        if p.Name == "" || p.Values == nil || len(p.Values) == 0 {
            continue
        }
@@ -116,6 +116,31 @@ func TestInfluxDB(t *testing.T) {
        }, map[string]string{})
}

func TestInfluxDB2(t *testing.T) {
    fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/endpoint" {
            _, _ = w.Write([]byte(influxReturn2))
        } else {
            w.WriteHeader(http.StatusNotFound)
        }
    }))
    defer fakeInfluxServer.Close()

    plugin := &influxdb.InfluxDB{
        URLs: []string{fakeInfluxServer.URL + "/endpoint"},
    }

    var acc testutil.Accumulator
    require.NoError(t, plugin.Gather(&acc))

    require.Len(t, acc.Metrics, 34)

    acc.AssertContainsTaggedFields(t, "influxdb",
        map[string]interface{}{
            "n_shards": 1,
        }, map[string]string{})
}

func TestErrorHandling(t *testing.T) {
    badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/endpoint" {
@@ -241,3 +266,49 @@ const influxReturn = `
"tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": "default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}},
"write": {"name": "write", "tags": {}, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}}
}`

// InfluxDB 1.0+ with tags: null instead of tags: {}.
const influxReturn2 = `
{
"cluster": {"name": "cluster", "tags": null, "values": {}},
"cmdline": ["influxd"],
"cq": {"name": "cq", "tags": null, "values": {}},
"database:_internal": {"name": "database", "tags": {"database": "_internal"}, "values": {"numMeasurements": 8, "numSeries": 12}},
"database:udp": {"name": "database", "tags": {"database": "udp"}, "values": {"numMeasurements": 14, "numSeries": 38}},
"hh:/Users/csparr/.influxdb/hh": {"name": "hh", "tags": {"path": "/Users/csparr/.influxdb/hh"}, "values": {}},
"httpd::8086": {"name": "httpd", "tags": {"bind": ":8086"}, "values": {"req": 7, "reqActive": 1, "reqDurationNs": 4488799}},
"measurement:cpu_idle.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_idle"}, "values": {"numSeries": 1}},
"measurement:cpu_usage.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_usage"}, "values": {"numSeries": 1}},
"measurement:database._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "database"}, "values": {"numSeries": 2}},
"measurement:database.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "database"}, "values": {"numSeries": 2}},
"measurement:httpd.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "httpd"}, "values": {"numSeries": 1}},
"measurement:measurement.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "measurement"}, "values": {"numSeries": 22}},
"measurement:mem.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "mem"}, "values": {"numSeries": 1}},
"measurement:net.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "net"}, "values": {"numSeries": 1}},
"measurement:runtime._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "runtime"}, "values": {"numSeries": 1}},
"measurement:runtime.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "runtime"}, "values": {"numSeries": 1}},
"measurement:shard._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "shard"}, "values": {"numSeries": 2}},
"measurement:shard.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "shard"}, "values": {"numSeries": 1}},
"measurement:subscriber._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "subscriber"}, "values": {"numSeries": 1}},
"measurement:subscriber.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "subscriber"}, "values": {"numSeries": 1}},
"measurement:swap_used.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "swap_used"}, "values": {"numSeries": 1}},
"measurement:tsm1_cache._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}},
"measurement:tsm1_cache.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}},
"measurement:tsm1_wal._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}},
"measurement:tsm1_wal.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}},
"measurement:udp._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "udp"}, "values": {"numSeries": 1}},
"measurement:write._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "write"}, "values": {"numSeries": 1}},
"measurement:write.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "write"}, "values": {"numSeries": 1}},
"memstats": {"Alloc":17034016,"TotalAlloc":201739016,"Sys":38537464,"Lookups":77,"Mallocs":570251,"Frees":381008,"HeapAlloc":17034016,"HeapSys":33849344,"HeapIdle":15802368,"HeapInuse":18046976,"HeapReleased":3473408,"HeapObjects":189243,"StackInuse":753664,"StackSys":753664,"MSpanInuse":97440,"MSpanSys":114688,"MCacheInuse":4800,"MCacheSys":16384,"BuckHashSys":1461583,"GCSys":1112064,"OtherSys":1229737,"NextGC":20843042,"LastGC":1460434886475114239,"PauseTotalNs":5132914,"PauseNs":[195052,117751,139370,156933,263089,165249,713747,103904,122015,294408,213753,170864,175845,114221,121563,122409,113098,162219,229257,126726,250774,254235,117206,293588,144279,124306,127053,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"PauseEnd":[1460433856394860455,1460433856398162739,1460433856405888337,1460433856411784017,1460433856417924684,1460433856428385687,1460433856443782908,1460433856456522851,1460433857392743223,1460433866484394564,1460433866494076235,1460433896472438632,1460433957839825106,1460433976473440328,1460434016473413006,1460434096471892794,1460434126470792929,1460434246480428250,1460434366554468369,1460434396471249528,1460434456471205885,1460434476479487292,1460434536471435965,1460434616469784776,1460434736482078216,1460434856544251733,1460434886475114239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"NumGC":27,"GCCPUFraction":4.287178819113636e-05,"EnableGC":true,"DebugGC":false,"BySize":[{"Size":0,"Mallocs":0,"Frees":0},{"Size":8,"Mallocs":1031,"Frees":955},{"Size":16,"Mallocs":308485,"Frees":142064},{"Size":32,"Mallocs":64937,"Frees":54321},{"Size":48,"Mallocs":33012,"Frees":29754},{"Size":64,"Mallocs":20299,"Frees":18173},{"Size":80,"Mallocs":8186,"Frees":7597},{"Size":96,"Mallocs":9806,"Frees":8982},{"Size":112,"Mallocs":5671,"Frees":4850},{"Size":128,"Mallocs":2972,"Frees":2684},{"Size":144,"Mallocs":4106,"Frees":3719},{"Size":160,"Mallocs":1324,"Frees":911},{"Size":176,"Mallocs":2574,"Frees":2391},{"Size":192,"Mallocs":4053,"Frees":3863},{"Size":208,"Mallocs":442,"Frees":307},{"Size":224,"Mallocs":336,"Frees":172},{"Size":240,"Mallocs":143,"Frees":125},{"Size":256,"Mallocs":542,"Frees":497},{"Size":288,"Mallocs":15971,"Frees":14761},{"Size":320,"Mallocs":245,"Frees":30},{"Size":352,"Mallocs":1299,"Frees":1065},{"Size":384,"Mallocs":138,"Frees":2},{"Size":416,"Mallocs":54,"Frees":47},{"Size":448,"Mallocs":75,"Frees":29},{"Size":480,"Mallocs":6,"Frees":4},{"Size":512,"Mallocs":452,"Frees":422},{"Size":576,"Mallocs":486,"Frees":395},{"Size":640,"Mallocs":81,"Frees":67},{"Size":704,"Mallocs":421,"Frees":397},{"Size":768,"Mallocs":469,"Frees":468},{"Size":896,"Mallocs":1049,"Frees":1010},{"Size":1024,"Mallocs":1078,"Frees":960},{"Size":1152,"Mallocs":750,"Frees":498},{"Size":1280
,"Mallocs":84,"Frees":72},{"Size":1408,"Mallocs":218,"Frees":187},{"Size":1536,"Mallocs":73,"Frees":48},{"Size":1664,"Mallocs":43,"Frees":30},{"Size":2048,"Mallocs":153,"Frees":57},{"Size":2304,"Mallocs":41,"Frees":30},{"Size":2560,"Mallocs":18,"Frees":15},{"Size":2816,"Mallocs":164,"Frees":157},{"Size":3072,"Mallocs":0,"Frees":0},{"Size":3328,"Mallocs":13,"Frees":6},{"Size":4096,"Mallocs":101,"Frees":82},{"Size":4608,"Mallocs":32,"Frees":26},{"Size":5376,"Mallocs":165,"Frees":151},{"Size":6144,"Mallocs":15,"Frees":9},{"Size":6400,"Mallocs":1,"Frees":1},{"Size":6656,"Mallocs":1,"Frees":0},{"Size":6912,"Mallocs":0,"Frees":0},{"Size":8192,"Mallocs":13,"Frees":13},{"Size":8448,"Mallocs":0,"Frees":0},{"Size":8704,"Mallocs":1,"Frees":1},{"Size":9472,"Mallocs":6,"Frees":4},{"Size":10496,"Mallocs":0,"Frees":0},{"Size":12288,"Mallocs":41,"Frees":35},{"Size":13568,"Mallocs":0,"Frees":0},{"Size":14080,"Mallocs":0,"Frees":0},{"Size":16384,"Mallocs":4,"Frees":4},{"Size":16640,"Mallocs":0,"Frees":0},{"Size":17664,"Mallocs":0,"Frees":0}]},
"queryExecutor": {"name": "queryExecutor", "tags": null, "values": {}},
"shard:/Users/csparr/.influxdb/data/_internal/monitor/2:2": {"name": "shard", "tags": {"database": "_internal", "engine": "tsm1", "id": "2", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}},
"shard:/Users/csparr/.influxdb/data/udp/default/1:1": {"name": "shard", "tags": {"database": "udp", "engine": "tsm1", "id": "1", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"fieldsCreate": 61, "seriesCreate": 33, "writePointsOk": 3613, "writeReq": 110}},
"subscriber": {"name": "subscriber", "tags": null, "values": {"pointsWritten": 3613}},
"tsm1_cache:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_cache", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103932, "cachedBytes": 0, "diskBytes": 0, "memBytes": 40480, "snapshotCount": 0}},
"tsm1_cache:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_cache", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103029, "cachedBytes": 0, "diskBytes": 0, "memBytes": 2359472, "snapshotCount": 0}},
"tsm1_filestore:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_filestore", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}},
"tsm1_filestore:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_filestore", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {}},
"tsm1_wal:/Users/csparr/.influxdb/wal/_internal/monitor/2": {"name": "tsm1_wal", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/wal/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"currentSegmentDiskBytes": 0, "oldSegmentsDiskBytes": 69532}},
"tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": "default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}},
"write": {"name": "write", "tags": null, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}}
}`
74
plugins/inputs/iptables/README.md
Normal file
@@ -0,0 +1,74 @@
# Iptables Plugin

The iptables plugin gathers packets and bytes counters for rules within a set of tables and chains from the Linux iptables firewall.

Rules are identified through their associated comment. Rules without a comment are ignored.

The iptables command requires the CAP_NET_ADMIN and CAP_NET_RAW capabilities. You have several options to grant telegraf permission to run iptables:

* Run telegraf as root. This is strongly discouraged.
* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is the simplest and recommended option.
* Configure sudo to allow telegraf to run iptables. This is the most restrictive option, but requires sudo to be set up.

### Using systemd capabilities

You may run `systemctl edit telegraf.service` and add the following:

```
[Service]
CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN
AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN
```

Since telegraf will fork a process to run iptables, `AmbientCapabilities` is required to transmit the capabilities bounding set to the forked process.

### Using sudo

You may edit your sudo configuration with the following:

```sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/iptables -nvL *
```

### Configuration:

```toml
# use sudo to run iptables
use_sudo = false
# defines the table to monitor:
table = "filter"
# defines the chains to monitor:
chains = [ "INPUT" ]
```

### Measurements & Fields:

- iptables
    - pkts (integer, count)
    - bytes (integer, bytes)

### Tags:

- All measurements have the following tags:
    - table
    - chain
    - ruleid

The `ruleid` is the comment associated with the rule.

### Example Output:

```
$ iptables -nvL INPUT
Chain INPUT (policy DROP 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
100 1024 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:22 /* ssh */
42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */
```

```
$ ./telegraf -config telegraf.conf -input-filter iptables -test
iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455
iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455
```
128
plugins/inputs/iptables/iptables.go
Normal file
@@ -0,0 +1,128 @@
// +build linux

package iptables

import (
    "errors"
    "os/exec"
    "regexp"
    "strconv"
    "strings"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Iptables is a telegraf plugin to gather packets and bytes throughput from Linux's iptables packet filter.
type Iptables struct {
    UseSudo bool
    Table   string
    Chains  []string
    lister  chainLister
}

// Description returns a short description of the plugin.
func (ipt *Iptables) Description() string {
    return "Gather packets and bytes throughput from iptables"
}

// SampleConfig returns sample configuration options.
func (ipt *Iptables) SampleConfig() string {
    return `
  ## iptables requires root access on most systems.
  ## Setting 'use_sudo' to true will make use of sudo to run iptables.
  ## Users must configure sudo to allow the telegraf user to run iptables with no password.
  ## iptables can be restricted to only the list command "iptables -nvL".
  use_sudo = false
  ## defines the table to monitor:
  table = "filter"
  ## defines the chains to monitor:
  chains = [ "INPUT" ]
`
}

// Gather gathers iptables packets and bytes throughput from the configured tables and chains.
func (ipt *Iptables) Gather(acc telegraf.Accumulator) error {
    if ipt.Table == "" || len(ipt.Chains) == 0 {
        return nil
    }
    // best effort: we continue through the chains even if an error is encountered,
    // but we keep track of the last error.
    var err error
    for _, chain := range ipt.Chains {
        data, e := ipt.lister(ipt.Table, chain)
        if e != nil {
            err = e
            continue
        }
        e = ipt.parseAndGather(data, acc)
        if e != nil {
            err = e
            continue
        }
    }
    return err
}

func (ipt *Iptables) chainList(table, chain string) (string, error) {
    iptablePath, err := exec.LookPath("iptables")
    if err != nil {
        return "", err
    }
    var args []string
    name := iptablePath
    if ipt.UseSudo {
        name = "sudo"
        args = append(args, iptablePath)
    }
    args = append(args, "-nvL", chain, "-t", table, "-x")
    c := exec.Command(name, args...)
    out, err := c.Output()
    return string(out), err
}

const measurement = "iptables"

var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`)

func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
    lines := strings.Split(data, "\n")
    if len(lines) < 3 {
        return nil
    }
    mchain := chainNameRe.FindStringSubmatch(lines[0])
    if mchain == nil {
        return errParse
    }
    if !fieldsHeaderRe.MatchString(lines[1]) {
        return errParse
    }
    for _, line := range lines[2:] {
        mv := valuesRe.FindAllStringSubmatch(line, -1)
        // best effort: if the line does not match or the rule is not commented, skip it
        if len(mv) == 0 || len(mv[0]) != 5 || mv[0][4] == "" {
            continue
        }
        tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": mv[0][4]}
        fields := make(map[string]interface{})
        // since a parse error is already caught by the regexp,
        // we never enter the error case here => no error check (but still need a test to cover the case)
        fields["pkts"], _ = strconv.ParseUint(mv[0][1], 10, 64)
        fields["bytes"], _ = strconv.ParseUint(mv[0][2], 10, 64)
        acc.AddFields(measurement, fields, tags)
    }
    return nil
}

type chainLister func(table, chain string) (string, error)

func init() {
    inputs.Add("iptables", func() telegraf.Input {
        ipt := new(Iptables)
        ipt.lister = ipt.chainList
        return ipt
    })
}
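To make the gnarly `valuesRe` pattern concrete, here is a small standalone program applying the same regex to a rule line from the README example; the submatch indices correspond exactly to the `mv[0]` accesses in parseAndGather:

```go
package main

import (
    "fmt"
    "regexp"
)

// Same pattern as the plugin: pkts, bytes, then an optional /* comment */.
var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`)

func main() {
    line := "  100 1024 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:22 /* ssh */"
    mv := valuesRe.FindAllStringSubmatch(line, -1)
    // mv[0][1] = pkts, mv[0][2] = bytes, mv[0][4] = the comment used as ruleid
    fmt.Println(mv[0][1], mv[0][2], mv[0][4]) // prints: 100 1024 ssh
}
```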
3
plugins/inputs/iptables/iptables_nocompile.go
Normal file
@@ -0,0 +1,3 @@
// +build !linux

package iptables
206
plugins/inputs/iptables/iptables_test.go
Normal file
@@ -0,0 +1,206 @@
// +build linux

package iptables

import (
    "errors"
    "reflect"
    "testing"

    "github.com/influxdata/telegraf/testutil"
)

func TestIptables_Gather(t *testing.T) {
    tests := []struct {
        table  string
        chains []string
        values []string
        tags   []map[string]string
        fields [][]map[string]interface{}
        err    error
    }{
        { // 1 - no configured table => no results
            values: []string{
                `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
        },
        { // 2 - no configured chains => no results
            table: "filter",
            values: []string{
                `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
        },
        { // 3 - pkts and bytes are gathered as integers
            table:  "filter",
            chains: []string{"INPUT"},
            values: []string{
                `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
`},
            tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}},
            fields: [][]map[string]interface{}{
                {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
            },
        },
        { // 4 - missing fields header => no results
            table:  "filter",
            chains: []string{"INPUT"},
            values: []string{`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)`},
        },
        { // 5 - invalid chain header => error
            table:  "filter",
            chains: []string{"INPUT"},
            values: []string{
                `INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
            err: errParse,
        },
        { // 6 - invalid fields header => error
            table:  "filter",
            chains: []string{"INPUT"},
            values: []string{
                `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)

57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
            err: errParse,
        },
        { // 7 - invalid integer value => best effort, no error
            table:  "filter",
            chains: []string{"INPUT"},
            values: []string{
                `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
        },
        { // 8 - multiple rows, multiple chains => no error
            table:  "filter",
            chains: []string{"INPUT", "FORWARD"},
            values: []string{
                `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
200 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foo */
`,
                `Chain FORWARD (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
300 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* bar */
400 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
500 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
`,
            },
            tags: []map[string]string{
                map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"},
                map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"},
                map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"},
            },
            fields: [][]map[string]interface{}{
                {map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}},
                {map[string]interface{}{"pkts": uint64(300), "bytes": uint64(4520)}},
                {map[string]interface{}{"pkts": uint64(500), "bytes": uint64(4520)}},
            },
        },
        { // 9 - comments are used as ruleid if any
            table:  "filter",
            chains: []string{"INPUT"},
            values: []string{
                `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22 /* foobar */
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80
`},
            tags: []map[string]string{
                map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"},
            },
            fields: [][]map[string]interface{}{
                {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
            },
        },
    }

    for i, tt := range tests {
        i++
        ipt := &Iptables{
            Table:  tt.table,
            Chains: tt.chains,
            lister: func(table, chain string) (string, error) {
                if len(tt.values) > 0 {
                    v := tt.values[0]
                    tt.values = tt.values[1:]
                    return v, nil
                }
                return "", nil
            },
        }
        acc := new(testutil.Accumulator)
        err := ipt.Gather(acc)
        if !reflect.DeepEqual(tt.err, err) {
            t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
        }
        if tt.table == "" {
            n := acc.NFields()
            if n != 0 {
                t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
            }
            continue
        }
        if len(tt.chains) == 0 {
            n := acc.NFields()
            if n != 0 {
                t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
            }
            continue
        }
        if len(tt.tags) == 0 {
            n := acc.NFields()
            if n != 0 {
                t.Errorf("%d: expected 0 values got %d", i, n)
            }
            continue
        }
        n := 0
        for j, tags := range tt.tags {
            for k, fields := range tt.fields[j] {
                if len(acc.Metrics) < n+1 {
                    t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
                    break
                }
                m := acc.Metrics[n]
                if !reflect.DeepEqual(m.Measurement, measurement) {
                    t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
                }
                if !reflect.DeepEqual(m.Tags, tags) {
                    t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
                }
                if !reflect.DeepEqual(m.Fields, fields) {
                    t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
                }
                n++
            }
        }
    }
}

func TestIptables_Gather_listerError(t *testing.T) {
    errFoo := errors.New("error foobar")
    ipt := &Iptables{
        Table:  "nat",
        Chains: []string{"foo", "bar"},
        lister: func(table, chain string) (string, error) {
            return "", errFoo
        },
    }
    acc := new(testutil.Accumulator)
    err := ipt.Gather(acc)
    if !reflect.DeepEqual(err, errFoo) {
        t.Errorf("Expected error %#v got\n%#v\n", errFoo, err)
    }
}
@@ -52,6 +52,7 @@ type Jolokia struct {

const sampleConfig = `
  ## This is the context root used to compose the jolokia url
+  ## NOTE that your jolokia security policy must allow for POST requests.
  context = "/jolokia"

  ## This specifies the mode used
@@ -104,7 +105,6 @@ func (j *Jolokia) Description() string {
}

func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
-
    resp, err := j.jClient.MakeRequest(req)
    if err != nil {
        return nil, err
@@ -43,7 +43,7 @@ func TestRunParser(t *testing.T) {
    k.parser, _ = parsers.NewInfluxParser()
    go k.receiver()
    in <- saramaMsg(testMsg)
-    time.Sleep(time.Millisecond)
+    time.Sleep(time.Millisecond * 5)

    assert.Equal(t, acc.NFields(), 1)
}
@@ -58,7 +58,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
    k.parser, _ = parsers.NewInfluxParser()
    go k.receiver()
    in <- saramaMsg(invalidMsg)
-    time.Sleep(time.Millisecond)
+    time.Sleep(time.Millisecond * 5)

    assert.Equal(t, acc.NFields(), 0)
}
@@ -73,7 +73,7 @@ func TestRunParserAndGather(t *testing.T) {
    k.parser, _ = parsers.NewInfluxParser()
    go k.receiver()
    in <- saramaMsg(testMsg)
-    time.Sleep(time.Millisecond)
+    time.Sleep(time.Millisecond * 5)

    k.Gather(&acc)

@@ -92,7 +92,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
    k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
    go k.receiver()
    in <- saramaMsg(testMsgGraphite)
-    time.Sleep(time.Millisecond)
+    time.Sleep(time.Millisecond * 5)

    k.Gather(&acc)

@@ -111,7 +111,7 @@ func TestRunParserAndGatherJSON(t *testing.T) {
    k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil)
    go k.receiver()
    in <- saramaMsg(testMsgJSON)
-    time.Sleep(time.Millisecond)
+    time.Sleep(time.Millisecond * 5)

    k.Gather(&acc)
@@ -241,7 +241,7 @@ Mesos tasks metric groups
- executor_name
- framework_id
- source
-- statistics (all metrics below will have `statistics_` prefix included in their names
+- statistics
    - cpus_limit
    - cpus_system_time_secs
    - cpus_user_time_secs
@@ -266,14 +266,20 @@ Mesos tasks metric groups
    - server
    - role (master/slave)

-- Tasks measurements have the following tags:
+- All master measurements have the extra tags:
+    - state (leader/follower)
+
+- Tasks measurements have the following tags:
    - server
+    - framework_id
    - task_id

### Example Output:
```
$ telegraf -config ~/mesos.conf -input-filter mesos -test
* Plugin: mesos, Collection 1
-mesos,host=172.17.8.102,server=172.17.8.101 allocator/event_queue_dispatches=0,master/cpus_percent=0,
+mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101
+allocator/event_queue_dispatches=0,master/cpus_percent=0,
master/cpus_revocable_percent=0,master/cpus_revocable_total=0,
master/cpus_revocable_used=0,master/cpus_total=2,
master/cpus_used=0,master/disk_percent=0,master/disk_revocable_percent=0,
@@ -293,13 +299,13 @@ master/messages_deactivate_framework=0 ...

Mesos tasks metrics (if enabled):
```
-mesos-tasks,host=172.17.8.102,server=172.17.8.101,task_id=hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2
-statistics_cpus_limit=0.2,statistics_cpus_system_time_secs=142.49,statistics_cpus_user_time_secs=388.14,
-statistics_mem_anon_bytes=359129088,statistics_mem_cache_bytes=3964928,
-statistics_mem_critical_pressure_counter=0,statistics_mem_file_bytes=3964928,
-statistics_mem_limit_bytes=767557632,statistics_mem_low_pressure_counter=0,
-statistics_mem_mapped_file_bytes=114688,statistics_mem_medium_pressure_counter=0,
-statistics_mem_rss_bytes=359129088,statistics_mem_swap_bytes=0,statistics_mem_total_bytes=363094016,
-statistics_mem_total_memsw_bytes=363094016,statistics_mem_unevictable_bytes=0,
-statistics_timestamp=1465486052.70525 1465486053052811792...
+mesos-tasks,host=172.17.8.102,server=172.17.8.101,framework_id=e3060235-c4ed-4765-9d36-784e3beca07f-0000,task_id=hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2
+cpus_limit=0.2,cpus_system_time_secs=142.49,cpus_user_time_secs=388.14,
+mem_anon_bytes=359129088,mem_cache_bytes=3964928,
+mem_critical_pressure_counter=0,mem_file_bytes=3964928,
+mem_limit_bytes=767557632,mem_low_pressure_counter=0,
+mem_mapped_file_bytes=114688,mem_medium_pressure_counter=0,
+mem_rss_bytes=359129088,mem_swap_bytes=0,mem_total_bytes=363094016,
+mem_total_memsw_bytes=363094016,mem_unevictable_bytes=0,
+timestamp=1465486052.70525 1465486053052811792...
```
@@ -116,7 +116,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
    for _, v := range m.Slaves {
        wg.Add(1)
        go func(c string) {
-            errorChannel <- m.gatherMainMetrics(c, ":5051", MASTER, acc)
+            errorChannel <- m.gatherMainMetrics(c, ":5051", SLAVE, acc)
            wg.Done()
            return
        }(v)
@@ -420,8 +420,15 @@ var client = &http.Client{
    Timeout: time.Duration(4 * time.Second),
}

+// TaskStats struct for JSON API output /monitor/statistics
+type TaskStats struct {
+    ExecutorID  string                 `json:"executor_id"`
+    FrameworkID string                 `json:"framework_id"`
+    Statistics  map[string]interface{} `json:"statistics"`
+}
+
func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc telegraf.Accumulator) error {
-    var metrics []map[string]interface{}
+    var metrics []TaskStats

    host, _, err := net.SplitHostPort(address)
    if err != nil {
@@ -452,16 +459,18 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t
    }

    for _, task := range metrics {
-        tags["task_id"] = task["executor_id"].(string)
+        tags["task_id"] = task.ExecutorID
+        tags["framework_id"] = task.FrameworkID

        jf := jsonparser.JSONFlattener{}
-        err = jf.FlattenJSON("", task)
+        err = jf.FlattenJSON("", task.Statistics)

        if err != nil {
            return err
        }
+        timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0)

-        acc.AddFields("mesos-tasks", jf.Fields, tags)
+        acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp)
    }

    return nil
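For reference, the `/monitor/statistics` payload that `TaskStats` decodes is a JSON array shaped roughly like this; the field names come from the struct tags above and the values are abridged from the README example, so treat the exact layout as illustrative:

```json
[
  {
    "executor_id": "hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2",
    "framework_id": "e3060235-c4ed-4765-9d36-784e3beca07f-0000",
    "statistics": {
      "cpus_limit": 0.2,
      "mem_rss_bytes": 359129088,
      "timestamp": 1465486052.70525
    }
  }
]
```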
@@ -510,6 +519,14 @@ func (m *Mesos) gatherMainMetrics(a string, defaultPort string, role Role, acc t
        return err
    }

+    if role == MASTER {
+        if jf.Fields["master/elected"] != 0.0 {
+            tags["state"] = "leader"
+        } else {
+            tags["state"] = "standby"
+        }
+    }
+
    acc.AddFields("mesos", jf.Fields, tags)

    return nil
@@ -345,7 +345,10 @@ func TestMesosSlave(t *testing.T) {
        t.Errorf(err.Error())
    }

-    acc.AssertContainsFields(t, "mesos-tasks", jf.Fields)
+    acc.AssertContainsFields(
+        t,
+        "mesos_tasks",
+        slaveTaskMetrics["statistics"].(map[string]interface{}))
}

func TestSlaveFilter(t *testing.T) {
@@ -26,12 +26,26 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
    s.Session.SetMode(mgo.Eventual, true)
    s.Session.SetSocketTimeout(0)
    result_server := &ServerStatus{}
-    err := s.Session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 0}}, result_server)
+    err := s.Session.DB("admin").Run(bson.D{
+        {
+            Name:  "serverStatus",
+            Value: 1,
+        },
+        {
+            Name:  "recordStats",
+            Value: 0,
+        },
+    }, result_server)
    if err != nil {
        return err
    }
    result_repl := &ReplSetStatus{}
-    err = s.Session.DB("admin").Run(bson.D{{"replSetGetStatus", 1}}, result_repl)
+    err = s.Session.DB("admin").Run(bson.D{
+        {
+            Name:  "replSetGetStatus",
+            Value: 1,
+        },
+    }, result_repl)
    if err != nil {
        log.Println("Not gathering replica set status, member not in replica set (" + err.Error() + ")")
    }
@@ -52,7 +66,12 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
    }
    for _, db_name := range names {
        db_stat_line := &DbStatsData{}
-        err = s.Session.DB(db_name).Run(bson.D{{"dbStats", 1}}, db_stat_line)
+        err = s.Session.DB(db_name).Run(bson.D{
+            {
+                Name:  "dbStats",
+                Value: 1,
+            },
+        }, db_stat_line)
        if err != nil {
            log.Println("Error getting db stats from " + db_name + "(" + err.Error() + ")")
        }
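The change above swaps positional `bson.D` literals for keyed fields; in mgo's bson package, `bson.D` is a slice of `bson.DocElem`, so both spellings build the same ordered command document. A minimal side-by-side illustration, assuming gopkg.in/mgo.v2/bson:

```go
package main

import (
    "fmt"

    "gopkg.in/mgo.v2/bson"
)

func main() {
    // Positional form: compiles, but go vet flags the unkeyed fields.
    positional := bson.D{{"serverStatus", 1}, {"recordStats", 0}}

    // Keyed form, as used in the patch above; identical document.
    named := bson.D{
        {Name: "serverStatus", Value: 1},
        {Name: "recordStats", Value: 0},
    }

    fmt.Println(positional, named)
}
```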
@@ -514,7 +514,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
        returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
    }

-    if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
+    if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
        returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
        returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
    }
@@ -1478,19 +1478,23 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula
            tags["schema"] = tableSchema
            tags["table"] = tableName

-            acc.Add(newNamespace("info_schema", "table_rows"), tableRows, tags)
+            acc.AddFields(newNamespace("info_schema", "table_rows"),
+                map[string]interface{}{"value": tableRows}, tags)

            dlTags := copyTags(tags)
            dlTags["component"] = "data_length"
-            acc.Add(newNamespace("info_schema", "table_size", "data_length"), dataLength, dlTags)
+            acc.AddFields(newNamespace("info_schema", "table_size", "data_length"),
+                map[string]interface{}{"value": dataLength}, dlTags)

            ilTags := copyTags(tags)
            ilTags["component"] = "index_length"
-            acc.Add(newNamespace("info_schema", "table_size", "index_length"), indexLength, ilTags)
+            acc.AddFields(newNamespace("info_schema", "table_size", "index_length"),
+                map[string]interface{}{"value": indexLength}, ilTags)

            dfTags := copyTags(tags)
            dfTags["component"] = "data_free"
-            acc.Add(newNamespace("info_schema", "table_size", "data_free"), dataFree, dfTags)
+            acc.AddFields(newNamespace("info_schema", "table_size", "data_free"),
+                map[string]interface{}{"value": dataFree}, dfTags)

            versionTags := copyTags(tags)
            versionTags["type"] = tableType
@@ -1498,7 +1502,8 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula
            versionTags["row_format"] = rowFormat
            versionTags["create_options"] = createOptions

-            acc.Add(newNamespace("info_schema", "table_version"), version, versionTags)
+            acc.AddFields(newNamespace("info_schema", "table_version"),
+                map[string]interface{}{"value": version}, versionTags)
        }
    }
    return nil
@@ -1511,7 +1516,7 @@ func parseValue(value sql.RawBytes) (float64, bool) {
    }

    if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 {
-        return 0, false
+        return 0, true
    }
    n, err := strconv.ParseFloat(string(value), 64)
    return n, err == nil
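The one-character fix above changes how boolean-ish status variables are reported: "No"/"OFF" now parse as 0 with ok=true, so the field is recorded instead of silently dropped. A standalone illustration follows; note that the "Yes"/"ON" branch is assumed for symmetry, since only the "No"/"OFF" branch appears in this diff:

```go
package main

import (
    "bytes"
    "database/sql"
    "fmt"
    "strconv"
)

// Local copy of the patched parseValue, for illustration only.
func parseValue(value sql.RawBytes) (float64, bool) {
    if bytes.Compare(value, []byte("Yes")) == 0 || bytes.Compare(value, []byte("ON")) == 0 {
        return 1, true // assumed branch, mirrors the No/OFF case below
    }
    if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 {
        return 0, true // was `return 0, false`, which dropped the field
    }
    n, err := strconv.ParseFloat(string(value), 64)
    return n, err == nil
}

func main() {
    for _, v := range []string{"OFF", "42.5", "garbage"} {
        n, ok := parseValue(sql.RawBytes(v))
        fmt.Printf("%-8s -> value=%v recorded=%v\n", v, n, ok)
    }
}
```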
@@ -17,6 +17,7 @@ import (
type Postgresql struct {
    Address          string
    Databases        []string
+    IgnoredDatabases []string
    OrderedColumns   []string
    AllColumns       []string
    sanitizedAddress string
@@ -40,8 +41,12 @@ var sampleConfig = `
  ##
  address = "host=localhost user=postgres sslmode=disable"

+  ## A list of databases to explicitly ignore. If not specified, metrics for all
+  ## databases are gathered. Do NOT use with the 'databases' option.
+  # ignored_databases = ["postgres", "template0", "template1"]
+
  ## A list of databases to pull metrics about. If not specified, metrics for all
-  ## databases are gathered.
+  ## databases are gathered. Do NOT use with the 'ignored_databases' option.
  # databases = ["app_production", "testing"]
`
@@ -73,8 +78,11 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {

    defer db.Close()

-    if len(p.Databases) == 0 {
+    if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 {
        query = `SELECT * FROM pg_stat_database`
+    } else if len(p.IgnoredDatabases) != 0 {
+        query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname NOT IN ('%s')`,
+            strings.Join(p.IgnoredDatabases, "','"))
    } else {
        query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`,
            strings.Join(p.Databases, "','"))
|
||||
|
||||
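The new `ignored_databases` option turns query construction into a three-way switch: no filter, a NOT IN blacklist, or an IN whitelist. A self-contained sketch of just that selection logic, with the config reduced to the two fields involved (the struct name and scaffolding here are illustrative, not the plugin's actual types):

```go
package main

import (
	"fmt"
	"strings"
)

// pgConfig is a stripped-down stand-in for the plugin config.
type pgConfig struct {
	Databases        []string
	IgnoredDatabases []string
}

// buildQuery reproduces the branch order from the hunk above:
// the blacklist wins whenever it is non-empty.
func buildQuery(p pgConfig) string {
	switch {
	case len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0:
		return `SELECT * FROM pg_stat_database`
	case len(p.IgnoredDatabases) != 0:
		return fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname NOT IN ('%s')`,
			strings.Join(p.IgnoredDatabases, "','"))
	default:
		return fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`,
			strings.Join(p.Databases, "','"))
	}
}

func main() {
	fmt.Println(buildQuery(pgConfig{}))
	fmt.Println(buildQuery(pgConfig{IgnoredDatabases: []string{"template0", "template1"}}))
	fmt.Println(buildQuery(pgConfig{Databases: []string{"app_production"}}))
}
```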
@@ -150,3 +150,75 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {

assert.False(t, acc.HasMeasurement(col))
}
}

func TestPostgresqlDatabaseWhitelistTest(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}

p := &Postgresql{
Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
testutil.GetLocalHost()),
Databases: []string{"template0"},
}

var acc testutil.Accumulator

err := p.Gather(&acc)
require.NoError(t, err)

var foundTemplate0 = false
var foundTemplate1 = false

for _, pnt := range acc.Metrics {
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template0" {
foundTemplate0 = true
}
}
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template1" {
foundTemplate1 = true
}
}
}

assert.True(t, foundTemplate0)
assert.False(t, foundTemplate1)
}

func TestPostgresqlDatabaseBlacklistTest(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}

p := &Postgresql{
Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
testutil.GetLocalHost()),
IgnoredDatabases: []string{"template0"},
}

var acc testutil.Accumulator

err := p.Gather(&acc)
require.NoError(t, err)

var foundTemplate0 = false
var foundTemplate1 = false

for _, pnt := range acc.Metrics {
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template0" {
foundTemplate0 = true
}
}
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template1" {
foundTemplate1 = true
}
}
}

assert.False(t, foundTemplate0)
assert.True(t, foundTemplate1)
}
@@ -44,40 +44,9 @@ func (r *Redis) Description() string {
}

var Tracking = map[string]string{
"uptime_in_seconds": "uptime",
"connected_clients": "clients",
"used_memory": "used_memory",
"used_memory_rss": "used_memory_rss",
"used_memory_peak": "used_memory_peak",
"used_memory_lua": "used_memory_lua",
"rdb_changes_since_last_save": "rdb_changes_since_last_save",
"total_connections_received": "total_connections_received",
"total_commands_processed": "total_commands_processed",
"instantaneous_ops_per_sec": "instantaneous_ops_per_sec",
"instantaneous_input_kbps": "instantaneous_input_kbps",
"instantaneous_output_kbps": "instantaneous_output_kbps",
"sync_full": "sync_full",
"sync_partial_ok": "sync_partial_ok",
"sync_partial_err": "sync_partial_err",
"expired_keys": "expired_keys",
"evicted_keys": "evicted_keys",
"keyspace_hits": "keyspace_hits",
"keyspace_misses": "keyspace_misses",
"pubsub_channels": "pubsub_channels",
"pubsub_patterns": "pubsub_patterns",
"latest_fork_usec": "latest_fork_usec",
"connected_slaves": "connected_slaves",
"master_repl_offset": "master_repl_offset",
"master_last_io_seconds_ago": "master_last_io_seconds_ago",
"repl_backlog_active": "repl_backlog_active",
"repl_backlog_size": "repl_backlog_size",
"repl_backlog_histlen": "repl_backlog_histlen",
"mem_fragmentation_ratio": "mem_fragmentation_ratio",
"used_cpu_sys": "used_cpu_sys",
"used_cpu_user": "used_cpu_user",
"used_cpu_sys_children": "used_cpu_sys_children",
"used_cpu_user_children": "used_cpu_user_children",
"role": "replication_role",
"uptime_in_seconds": "uptime",
"connected_clients": "clients",
"role": "replication_role",
}

var ErrProtocolError = errors.New("redis protocol error")
@@ -188,6 +157,7 @@ func gatherInfoOutput(
acc telegraf.Accumulator,
tags map[string]string,
) error {
var section string
var keyspace_hits, keyspace_misses uint64 = 0, 0

scanner := bufio.NewScanner(rdr)
@@ -198,7 +168,13 @@ func gatherInfoOutput(
break
}

if len(line) == 0 || line[0] == '#' {
if len(line) == 0 {
continue
}
if line[0] == '#' {
if len(line) > 2 {
section = line[2:]
}
continue
}

@@ -206,42 +182,69 @@ func gatherInfoOutput(
if len(parts) < 2 {
continue
}

name := string(parts[0])
metric, ok := Tracking[name]
if !ok {
kline := strings.TrimSpace(string(parts[1]))
gatherKeyspaceLine(name, kline, acc, tags)

if section == "Server" {
if name != "lru_clock" && name != "uptime_in_seconds" {
continue
}
}

if name == "mem_allocator" {
continue
}

if strings.HasSuffix(name, "_human") {
continue
}

metric, ok := Tracking[name]
if !ok {
if section == "Keyspace" {
kline := strings.TrimSpace(string(parts[1]))
gatherKeyspaceLine(name, kline, acc, tags)
continue
}
metric = name
}

val := strings.TrimSpace(parts[1])
ival, err := strconv.ParseUint(val, 10, 64)

if name == "keyspace_hits" {
keyspace_hits = ival
// Try parsing as a uint
if ival, err := strconv.ParseUint(val, 10, 64); err == nil {
switch name {
case "keyspace_hits":
keyspace_hits = ival
case "keyspace_misses":
keyspace_misses = ival
case "rdb_last_save_time":
// influxdb can't calculate this, so we have to do it
fields["rdb_last_save_time_elapsed"] = uint64(time.Now().Unix()) - ival
}
fields[metric] = ival
continue
}

if name == "keyspace_misses" {
keyspace_misses = ival
// Try parsing as an int
if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
fields[metric] = ival
continue
}

// Try parsing as a float
if fval, err := strconv.ParseFloat(val, 64); err == nil {
fields[metric] = fval
continue
}

// Treat it as a string

if name == "role" {
tags["replication_role"] = val
continue
}

if err == nil {
fields[metric] = ival
continue
}

fval, err := strconv.ParseFloat(val, 64)
if err != nil {
return err
}

fields[metric] = fval
fields[metric] = val
}
var keyspace_hitrate float64 = 0.0
if keyspace_hits != 0 || keyspace_misses != 0 {
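The rewritten value handling replaces "uint or fail" with a graceful fallback chain. A self-contained sketch of just that cascade, with the expected types taken from the test fixtures further down (everything around it is scaffolding):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseInfoValue mirrors the fallback order the new gatherInfoOutput
// uses for INFO values: unsigned integer first, then signed integer,
// then float, and finally the raw string.
func parseInfoValue(val string) interface{} {
	if ival, err := strconv.ParseUint(val, 10, 64); err == nil {
		return ival // e.g. repl_backlog_size: 1048576
	}
	if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
		return ival // e.g. rdb_last_bgsave_time_sec: -1
	}
	if fval, err := strconv.ParseFloat(val, 64); err == nil {
		return fval // e.g. mem_fragmentation_ratio: 0.81
	}
	return val // e.g. rdb_last_bgsave_status: ok
}

func main() {
	for _, v := range []string{"1048576", "-1", "0.81", "ok"} {
		parsed := parseInfoValue(v)
		fmt.Printf("%-8s -> %T(%v)\n", v, parsed, parsed)
	}
}
```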
@@ -5,8 +5,10 @@ import (
"fmt"
"strings"
"testing"
"time"

"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

@@ -37,40 +39,73 @@ func TestRedis_ParseMetrics(t *testing.T) {

tags = map[string]string{"host": "redis.net", "replication_role": "master"}
fields := map[string]interface{}{
"uptime": uint64(238),
"clients": uint64(1),
"used_memory": uint64(1003936),
"used_memory_rss": uint64(811008),
"used_memory_peak": uint64(1003936),
"used_memory_lua": uint64(33792),
"rdb_changes_since_last_save": uint64(0),
"total_connections_received": uint64(2),
"total_commands_processed": uint64(1),
"instantaneous_ops_per_sec": uint64(0),
"sync_full": uint64(0),
"sync_partial_ok": uint64(0),
"sync_partial_err": uint64(0),
"expired_keys": uint64(0),
"evicted_keys": uint64(0),
"keyspace_hits": uint64(1),
"keyspace_misses": uint64(1),
"pubsub_channels": uint64(0),
"pubsub_patterns": uint64(0),
"latest_fork_usec": uint64(0),
"connected_slaves": uint64(0),
"master_repl_offset": uint64(0),
"repl_backlog_active": uint64(0),
"repl_backlog_size": uint64(1048576),
"repl_backlog_histlen": uint64(0),
"mem_fragmentation_ratio": float64(0.81),
"instantaneous_input_kbps": float64(876.16),
"instantaneous_output_kbps": float64(3010.23),
"used_cpu_sys": float64(0.14),
"used_cpu_user": float64(0.05),
"used_cpu_sys_children": float64(0.00),
"used_cpu_user_children": float64(0.00),
"keyspace_hitrate": float64(0.50),
"uptime": uint64(238),
"lru_clock": uint64(2364819),
"clients": uint64(1),
"client_longest_output_list": uint64(0),
"client_biggest_input_buf": uint64(0),
"blocked_clients": uint64(0),
"used_memory": uint64(1003936),
"used_memory_rss": uint64(811008),
"used_memory_peak": uint64(1003936),
"used_memory_lua": uint64(33792),
"mem_fragmentation_ratio": float64(0.81),
"loading": uint64(0),
"rdb_changes_since_last_save": uint64(0),
"rdb_bgsave_in_progress": uint64(0),
"rdb_last_save_time": uint64(1428427941),
"rdb_last_bgsave_status": "ok",
"rdb_last_bgsave_time_sec": int64(-1),
"rdb_current_bgsave_time_sec": int64(-1),
"aof_enabled": uint64(0),
"aof_rewrite_in_progress": uint64(0),
"aof_rewrite_scheduled": uint64(0),
"aof_last_rewrite_time_sec": int64(-1),
"aof_current_rewrite_time_sec": int64(-1),
"aof_last_bgrewrite_status": "ok",
"aof_last_write_status": "ok",
"total_connections_received": uint64(2),
"total_commands_processed": uint64(1),
"instantaneous_ops_per_sec": uint64(0),
"instantaneous_input_kbps": float64(876.16),
"instantaneous_output_kbps": float64(3010.23),
"rejected_connections": uint64(0),
"sync_full": uint64(0),
"sync_partial_ok": uint64(0),
"sync_partial_err": uint64(0),
"expired_keys": uint64(0),
"evicted_keys": uint64(0),
"keyspace_hits": uint64(1),
"keyspace_misses": uint64(1),
"pubsub_channels": uint64(0),
"pubsub_patterns": uint64(0),
"latest_fork_usec": uint64(0),
"connected_slaves": uint64(0),
"master_repl_offset": uint64(0),
"repl_backlog_active": uint64(0),
"repl_backlog_size": uint64(1048576),
"repl_backlog_first_byte_offset": uint64(0),
"repl_backlog_histlen": uint64(0),
"used_cpu_sys": float64(0.14),
"used_cpu_user": float64(0.05),
"used_cpu_sys_children": float64(0.00),
"used_cpu_user_children": float64(0.00),
"keyspace_hitrate": float64(0.50),
}

// We have to test rdb_last_save_time_offset manually because the value is based on the time when gathered
for _, m := range acc.Metrics {
for k, v := range m.Fields {
if k == "rdb_last_save_time_elapsed" {
fields[k] = v
}
}
}
assert.InDelta(t,
uint64(time.Now().Unix())-fields["rdb_last_save_time"].(uint64),
fields["rdb_last_save_time_elapsed"].(uint64),
2) // allow for 2 seconds worth of offset

keyspaceTags := map[string]string{"host": "redis.net", "replication_role": "master", "database": "db0"}
keyspaceFields := map[string]interface{}{
"avg_ttl": uint64(0),
@@ -73,7 +73,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error {
tags["chip"] = chip
continue
}
if !strings.HasPrefix(line, " ") {
if !strings.HasPrefix(line, " ") {
if len(tags) > 1 {
acc.AddFields("sensors", fields, tags)
}
@@ -114,5 +114,5 @@ func init() {

// snake converts string to snake case
func snake(input string) string {
return strings.ToLower(strings.Replace(input, " ", "_", -1))
return strings.ToLower(strings.Replace(strings.TrimSpace(input), " ", "_", -1))
}
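The `snake` change is small but consequential for tag values: trimming before replacing spaces prevents stray leading or trailing underscores when a label arrives with surrounding whitespace. A minimal demonstration using the feature names from the tests below:

```go
package main

import (
	"fmt"
	"strings"
)

// snake as patched above: TrimSpace first, then spaces -> underscores,
// then lowercase.
func snake(input string) string {
	return strings.ToLower(strings.Replace(strings.TrimSpace(input), " ", "_", -1))
}

func main() {
	fmt.Println(snake("Vcore Voltage"))  // vcore_voltage
	fmt.Println(snake(" +3.3 Voltage ")) // +3.3_voltage (no leading "_")
}
```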
@@ -122,6 +122,28 @@ func TestGatherDefault(t *testing.T) {
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "atk0110-acpi-0",
"feature": "vcore_voltage",
},
map[string]interface{}{
"in_input": 1.136,
"in_min": 0.800,
"in_max": 1.600,
},
},
{
map[string]string{
"chip": "atk0110-acpi-0",
"feature": "+3.3_voltage",
},
map[string]interface{}{
"in_input": 3.360,
"in_min": 2.970,
"in_max": 3.630,
},
},
}

for _, test := range tests {
@@ -240,8 +262,29 @@ func TestGatherNotRemoveNumbers(t *testing.T) {
"temp3_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "atk0110-acpi-0",
"feature": "vcore_voltage",
},
map[string]interface{}{
"in0_input": 1.136,
"in0_min": 0.800,
"in0_max": 1.600,
},
},
{
map[string]string{
"chip": "atk0110-acpi-0",
"feature": "+3.3_voltage",
},
map[string]interface{}{
"in1_input": 3.360,
"in1_min": 2.970,
"in1_max": 3.630,
},
},
}

for _, test := range tests {
acc.AssertContainsTaggedFields(t, "sensors", test.fields, test.tags)
}
@@ -309,6 +352,16 @@ Core 1:
temp3_max: 82.000
temp3_crit: 92.000
temp3_crit_alarm: 0.000

atk0110-acpi-0
Vcore Voltage:
in0_input: 1.136
in0_min: 0.800
in0_max: 1.600
+3.3 Voltage:
in1_input: 3.360
in1_min: 2.970
in1_max: 3.630
`

args := os.Args
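For context on what the fixture above exercises, here is a rough, hypothetical sketch of how a `sensors -u`-style block maps onto chip/feature tags and fields. It follows the indentation and suffix checks visible in the parse hunk and reuses the patched `snake()`, but it simplifies everything else and is not the plugin's actual parser:

```go
package main

import (
	"fmt"
	"strings"
)

func snake(input string) string {
	return strings.ToLower(strings.Replace(strings.TrimSpace(input), " ", "_", -1))
}

func main() {
	output := `atk0110-acpi-0
Vcore Voltage:
  in0_input: 1.136
  in0_min: 0.800
+3.3 Voltage:
  in1_input: 3.360
`
	chip, feature := "", ""
	for _, line := range strings.Split(output, "\n") {
		switch {
		case line == "":
			// skip blank lines
		case !strings.HasPrefix(line, " ") && strings.HasSuffix(line, ":"):
			feature = snake(strings.TrimSuffix(line, ":")) // e.g. "+3.3_voltage"
		case !strings.HasPrefix(line, " "):
			chip = line // unindented, no colon suffix: new chip
		default:
			parts := strings.SplitN(strings.TrimSpace(line), ":", 2)
			fmt.Printf("chip=%s feature=%s %s=%s\n",
				chip, feature, parts[0], strings.TrimSpace(parts[1]))
		}
	}
}
```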
@@ -1,482 +0,0 @@
package snmp_legacy

import (
"testing"

"github.com/influxdata/telegraf/testutil"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestSNMPErrorGet1(t *testing.T) {
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16.1",
}
h := Host{
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "bad_oid.txt",
Host: []Host{h},
Get: []Data{get1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.Error(t, err)
}

func TestSNMPErrorGet2(t *testing.T) {
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16.1",
}
h := Host{
Collect: []string{"oid1"},
}
s := Snmp{
Host: []Host{h},
Get: []Data{get1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
assert.Equal(t, 0, len(acc.Metrics))
}

func TestSNMPErrorBulk(t *testing.T) {
bulk1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16",
}
h := Host{
Address: testutil.GetLocalHost(),
Collect: []string{"oid1"},
}
s := Snmp{
Host: []Host{h},
Bulk: []Data{bulk1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
assert.Equal(t, 0, len(acc.Metrics))
}

func TestSNMPGet1(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16.1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
Host: []Host{h},
Get: []Data{get1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"oid1",
map[string]interface{}{
"oid1": uint(543846),
},
map[string]string{
"unit": "octets",
"snmp_host": testutil.GetLocalHost(),
},
)
}

func TestSNMPGet2(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Oid: "ifNumber",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}

func TestSNMPGet3(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifSpeed",
Instance: "1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"ifSpeed",
map[string]interface{}{
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
}

func TestSNMPEasyGet4(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifSpeed",
Instance: "1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
GetOids: []string{"ifNumber"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"ifSpeed",
map[string]interface{}{
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}

func TestSNMPEasyGet5(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifSpeed",
Instance: "1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
GetOids: []string{".1.3.6.1.2.1.2.1.0"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"ifSpeed",
map[string]interface{}{
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}

func TestSNMPEasyGet6(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
GetOids: []string{"1.3.6.1.2.1.2.1.0"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}

func TestSNMPBulk1(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
bulk1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16",
MaxRepetition: 2,
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Bulk: []Data{bulk1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(543846),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(26475179),
},
map[string]string{
"unit": "octets",
"instance": "2",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(108963968),
},
map[string]string{
"unit": "octets",
"instance": "3",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(12991453),
},
map[string]string{
"unit": "octets",
"instance": "36",
"snmp_host": testutil.GetLocalHost(),
},
)
}

// TODO find why, if this test is active
// Circle CI stops with the following error...
// bash scripts/circle-test.sh died unexpectedly
// Maybe the test is too long ??
func dTestSNMPBulk2(t *testing.T) {
bulk1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifOutOctets",
MaxRepetition: 2,
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Bulk: []Data{bulk1},
}

var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(543846),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(26475179),
},
map[string]string{
"unit": "octets",
"instance": "2",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(108963968),
},
map[string]string{
"unit": "octets",
"instance": "3",
"snmp_host": testutil.GetLocalHost(),
},
)

acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(12991453),
},
map[string]string{
"unit": "octets",
"instance": "36",
"snmp_host": testutil.GetLocalHost(),
},
)
}
plugins/inputs/snmp_legacy/testdata/oids.txt
@@ -1,32 +0,0 @@
org 1.3
dod 1.3.6
internet 1.3.6.1
directory 1.3.6.1.1
mgmt 1.3.6.1.2
mib-2 1.3.6.1.2.1
interfaces 1.3.6.1.2.1.2
ifNumber 1.3.6.1.2.1.2.1
ifTable 1.3.6.1.2.1.2.2
ifEntry 1.3.6.1.2.1.2.2.1
ifIndex 1.3.6.1.2.1.2.2.1.1
ifDescr 1.3.6.1.2.1.2.2.1.2
ifType 1.3.6.1.2.1.2.2.1.3
ifMtu 1.3.6.1.2.1.2.2.1.4
ifSpeed 1.3.6.1.2.1.2.2.1.5
ifPhysAddress 1.3.6.1.2.1.2.2.1.6
ifAdminStatus 1.3.6.1.2.1.2.2.1.7
ifOperStatus 1.3.6.1.2.1.2.2.1.8
ifLastChange 1.3.6.1.2.1.2.2.1.9
ifInOctets 1.3.6.1.2.1.2.2.1.10
ifInUcastPkts 1.3.6.1.2.1.2.2.1.11
ifInNUcastPkts 1.3.6.1.2.1.2.2.1.12
ifInDiscards 1.3.6.1.2.1.2.2.1.13
ifInErrors 1.3.6.1.2.1.2.2.1.14
ifInUnknownProtos 1.3.6.1.2.1.2.2.1.15
ifOutOctets 1.3.6.1.2.1.2.2.1.16
ifOutUcastPkts 1.3.6.1.2.1.2.2.1.17
ifOutNUcastPkts 1.3.6.1.2.1.2.2.1.18
ifOutDiscards 1.3.6.1.2.1.2.2.1.19
ifOutErrors 1.3.6.1.2.1.2.2.1.20
ifOutQLen 1.3.6.1.2.1.2.2.1.21
ifSpecific 1.3.6.1.2.1.2.2.1.22
@@ -166,7 +166,9 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e

if query.ResultByRow {
// add measurement to Accumulator
acc.Add(measurement, *columnMap["value"], tags, time.Now())
acc.AddFields(measurement,
map[string]interface{}{"value": *columnMap["value"]},
tags, time.Now())
} else {
// values
for header, val := range columnMap {
@@ -290,8 +292,8 @@ IF OBJECT_ID('tempdb..#clerk') IS NOT NULL
DROP TABLE #clerk;

CREATE TABLE #clerk (
ClerkCategory nvarchar(64) NOT NULL,
UsedPercent decimal(9,2),
ClerkCategory nvarchar(64) NOT NULL,
UsedPercent decimal(9,2),
UsedBytes bigint
);

@@ -53,7 +53,9 @@ func TestSqlServer_ParseMetrics(t *testing.T) {
require.NoError(t, err)

// add value to Accumulator
acc.Add(measurement, value, tags, time.Now())
acc.AddFields(measurement,
map[string]interface{}{"value": value},
tags, time.Now())
// assert
acc.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"value": value}, tags)

@@ -416,7 +416,7 @@ func (s *Statsd) parseStatsdLine(line string) error {
}

// Parse the value
if strings.ContainsAny(pipesplit[0], "-+") {
if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") {
if m.mtype != "g" {
log.Printf("Error: +- values are only supported for gauges: %s\n", line)
return errors.New("Error Parsing statsd line")
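This one-line change is the heart of the scientific-notation fix: `ContainsAny` flagged any value containing a `+` or `-` anywhere, so a value like `4.696E+5` was misclassified as a gauge delta (and rejected for non-gauge types). `HasPrefix` only treats a *leading* sign as a delta. A minimal demonstration of the difference:

```go
package main

import (
	"fmt"
	"strings"
)

// isGaugeDelta applies the corrected check: only a leading sign
// marks a statsd gauge delta.
func isGaugeDelta(raw string) bool {
	return strings.HasPrefix(raw, "-") || strings.HasPrefix(raw, "+")
}

func main() {
	for _, v := range []string{"+30", "-10", "4.696E+5", "100"} {
		fmt.Printf("%-9s delta=%-5v (old ContainsAny check: %v)\n",
			v, isGaugeDelta(v), strings.ContainsAny(v, "-+"))
	}
}
```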
@@ -24,6 +24,267 @@ func NewTestStatsd() *Statsd {
return &s
}

// Valid lines should be parsed and their values should be cached
func TestParse_ValidLines(t *testing.T) {
s := NewTestStatsd()
valid_lines := []string{
"valid:45|c",
"valid:45|s",
"valid:45|g",
"valid.timer:45|ms",
"valid.timer:45|h",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}
}

// Tests low-level functionality of gauges
func TestParse_Gauges(t *testing.T) {
s := NewTestStatsd()

// Test that gauge +- values work
valid_lines := []string{
"plus.minus:100|g",
"plus.minus:-10|g",
"plus.minus:+30|g",
"plus.plus:100|g",
"plus.plus:+100|g",
"plus.plus:+100|g",
"minus.minus:100|g",
"minus.minus:-100|g",
"minus.minus:-100|g",
"lone.plus:+100|g",
"lone.minus:-100|g",
"overwrite:100|g",
"overwrite:300|g",
"scientific.notation:4.696E+5|g",
"scientific.notation.minus:4.7E-5|g",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

validations := []struct {
name string
value float64
}{
{
"scientific_notation",
469600,
},
{
"scientific_notation_minus",
0.000047,
},
{
"plus_minus",
120,
},
{
"plus_plus",
300,
},
{
"minus_minus",
-100,
},
{
"lone_plus",
100,
},
{
"lone_minus",
-100,
},
{
"overwrite",
300,
},
}

for _, test := range validations {
err := test_validate_gauge(test.name, test.value, s.gauges)
if err != nil {
t.Error(err.Error())
}
}
}

// Tests low-level functionality of sets
func TestParse_Sets(t *testing.T) {
s := NewTestStatsd()

// Test that sets work
valid_lines := []string{
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:101|s",
"unique.user.ids:102|s",
"unique.user.ids:102|s",
"unique.user.ids:123456789|s",
"oneuser.id:100|s",
"oneuser.id:100|s",
"scientific.notation.sets:4.696E+5|s",
"scientific.notation.sets:4.696E+5|s",
"scientific.notation.sets:4.697E+5|s",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

validations := []struct {
name string
value int64
}{
{
"scientific_notation_sets",
2,
},
{
"unique_user_ids",
4,
},
{
"oneuser_id",
1,
},
}

for _, test := range validations {
err := test_validate_set(test.name, test.value, s.sets)
if err != nil {
t.Error(err.Error())
}
}
}

// Tests low-level functionality of counters
func TestParse_Counters(t *testing.T) {
s := NewTestStatsd()

// Test that counters work
valid_lines := []string{
"small.inc:1|c",
"big.inc:100|c",
"big.inc:1|c",
"big.inc:100000|c",
"big.inc:1000000|c",
"small.inc:1|c",
"zero.init:0|c",
"sample.rate:1|c|@0.1",
"sample.rate:1|c",
"scientific.notation:4.696E+5|c",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

validations := []struct {
name string
value int64
}{
{
"scientific_notation",
469600,
},
{
"small_inc",
2,
},
{
"big_inc",
1100101,
},
{
"zero_init",
0,
},
{
"sample_rate",
11,
},
}

for _, test := range validations {
err := test_validate_counter(test.name, test.value, s.counters)
if err != nil {
t.Error(err.Error())
}
}
}

// Tests low-level functionality of timings
func TestParse_Timings(t *testing.T) {
s := NewTestStatsd()
s.Percentiles = []int{90}
acc := &testutil.Accumulator{}

// Test that counters work
valid_lines := []string{
"test.timing:1|ms",
"test.timing:11|ms",
"test.timing:1|ms",
"test.timing:1|ms",
"test.timing:1|ms",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

s.Gather(acc)

valid := map[string]interface{}{
"90_percentile": float64(11),
"count": int64(5),
"lower": float64(1),
"mean": float64(3),
"stddev": float64(4),
"upper": float64(11),
}

acc.AssertContainsFields(t, "test_timing", valid)
}

func TestParseScientificNotation(t *testing.T) {
s := NewTestStatsd()
sciNotationLines := []string{
"scientific.notation:4.6968460083008E-5|ms",
"scientific.notation:4.6968460083008E-5|g",
"scientific.notation:4.6968460083008E-5|c",
"scientific.notation:4.6968460083008E-5|h",
}
for _, line := range sciNotationLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line [%s] should not have resulted in error: %s\n", line, err)
}
}
}

// Invalid lines should return an error
func TestParse_InvalidLines(t *testing.T) {
s := NewTestStatsd()
@@ -715,229 +976,6 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
}
}

// Valid lines should be parsed and their values should be cached
func TestParse_ValidLines(t *testing.T) {
s := NewTestStatsd()
valid_lines := []string{
"valid:45|c",
"valid:45|s",
"valid:45|g",
"valid.timer:45|ms",
"valid.timer:45|h",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}
}

// Tests low-level functionality of gauges
func TestParse_Gauges(t *testing.T) {
s := NewTestStatsd()

// Test that gauge +- values work
valid_lines := []string{
"plus.minus:100|g",
"plus.minus:-10|g",
"plus.minus:+30|g",
"plus.plus:100|g",
"plus.plus:+100|g",
"plus.plus:+100|g",
"minus.minus:100|g",
"minus.minus:-100|g",
"minus.minus:-100|g",
"lone.plus:+100|g",
"lone.minus:-100|g",
"overwrite:100|g",
"overwrite:300|g",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

validations := []struct {
name string
value float64
}{
{
"plus_minus",
120,
},
{
"plus_plus",
300,
},
{
"minus_minus",
-100,
},
{
"lone_plus",
100,
},
{
"lone_minus",
-100,
},
{
"overwrite",
300,
},
}

for _, test := range validations {
err := test_validate_gauge(test.name, test.value, s.gauges)
if err != nil {
t.Error(err.Error())
}
}
}

// Tests low-level functionality of sets
func TestParse_Sets(t *testing.T) {
s := NewTestStatsd()

// Test that sets work
valid_lines := []string{
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:100|s",
"unique.user.ids:101|s",
"unique.user.ids:102|s",
"unique.user.ids:102|s",
"unique.user.ids:123456789|s",
"oneuser.id:100|s",
"oneuser.id:100|s",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

validations := []struct {
name string
value int64
}{
{
"unique_user_ids",
4,
},
{
"oneuser_id",
1,
},
}

for _, test := range validations {
err := test_validate_set(test.name, test.value, s.sets)
if err != nil {
t.Error(err.Error())
}
}
}

// Tests low-level functionality of counters
func TestParse_Counters(t *testing.T) {
s := NewTestStatsd()

// Test that counters work
valid_lines := []string{
"small.inc:1|c",
"big.inc:100|c",
"big.inc:1|c",
"big.inc:100000|c",
"big.inc:1000000|c",
"small.inc:1|c",
"zero.init:0|c",
"sample.rate:1|c|@0.1",
"sample.rate:1|c",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

validations := []struct {
name string
value int64
}{
{
"small_inc",
2,
},
{
"big_inc",
1100101,
},
{
"zero_init",
0,
},
{
"sample_rate",
11,
},
}

for _, test := range validations {
err := test_validate_counter(test.name, test.value, s.counters)
if err != nil {
t.Error(err.Error())
}
}
}

// Tests low-level functionality of timings
func TestParse_Timings(t *testing.T) {
s := NewTestStatsd()
s.Percentiles = []int{90}
acc := &testutil.Accumulator{}

// Test that counters work
valid_lines := []string{
"test.timing:1|ms",
"test.timing:11|ms",
"test.timing:1|ms",
"test.timing:1|ms",
"test.timing:1|ms",
}

for _, line := range valid_lines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}

s.Gather(acc)

valid := map[string]interface{}{
"90_percentile": float64(11),
"count": int64(5),
"lower": float64(1),
"mean": float64(3),
"stddev": float64(4),
"upper": float64(11),
}

acc.AssertContainsFields(t, "test_timing", valid)
}

// Tests low-level functionality of timings when multiple fields is enabled
// and a measurement template has been defined which can parse field names
func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) {
@@ -13,13 +13,15 @@ type CPUStats struct {
ps PS
lastStats []cpu.TimesStat

PerCPU bool `toml:"percpu"`
TotalCPU bool `toml:"totalcpu"`
PerCPU bool `toml:"percpu"`
TotalCPU bool `toml:"totalcpu"`
CollectCPUTime bool `toml:"collect_cpu_time"`
}

func NewCPUStats(ps PS) *CPUStats {
return &CPUStats{
ps: ps,
ps: ps,
CollectCPUTime: true,
}
}

@@ -32,8 +34,8 @@ var sampleConfig = `
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
## If true, collect raw CPU time metrics.
collect_cpu_time = false
`

func (_ *CPUStats) SampleConfig() string {
@@ -54,23 +56,25 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {

total := totalCpuTime(cts)

// Add cpu time metrics
fields := map[string]interface{}{
"time_user": cts.User,
"time_system": cts.System,
"time_idle": cts.Idle,
"time_nice": cts.Nice,
"time_iowait": cts.Iowait,
"time_irq": cts.Irq,
"time_softirq": cts.Softirq,
"time_steal": cts.Steal,
"time_guest": cts.Guest,
"time_guest_nice": cts.GuestNice,
if s.CollectCPUTime {
// Add cpu time metrics
fieldsC := map[string]interface{}{
"time_user": cts.User,
"time_system": cts.System,
"time_idle": cts.Idle,
"time_nice": cts.Nice,
"time_iowait": cts.Iowait,
"time_irq": cts.Irq,
"time_softirq": cts.Softirq,
"time_steal": cts.Steal,
"time_guest": cts.Guest,
"time_guest_nice": cts.GuestNice,
}
acc.AddCounter("cpu", fieldsC, tags, now)
}

// Add in percentage
if len(s.lastStats) == 0 {
acc.AddFields("cpu", fields, tags, now)
// If it's the 1st gather, can't get CPU Usage stats yet
continue
}
@@ -86,18 +90,19 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
if totalDelta == 0 {
continue
}

fields["usage_user"] = 100 * (cts.User - lastCts.User) / totalDelta
fields["usage_system"] = 100 * (cts.System - lastCts.System) / totalDelta
fields["usage_idle"] = 100 * (cts.Idle - lastCts.Idle) / totalDelta
fields["usage_nice"] = 100 * (cts.Nice - lastCts.Nice) / totalDelta
fields["usage_iowait"] = 100 * (cts.Iowait - lastCts.Iowait) / totalDelta
fields["usage_irq"] = 100 * (cts.Irq - lastCts.Irq) / totalDelta
fields["usage_softirq"] = 100 * (cts.Softirq - lastCts.Softirq) / totalDelta
fields["usage_steal"] = 100 * (cts.Steal - lastCts.Steal) / totalDelta
fields["usage_guest"] = 100 * (cts.Guest - lastCts.Guest) / totalDelta
fields["usage_guest_nice"] = 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta
acc.AddFields("cpu", fields, tags, now)
fieldsG := map[string]interface{}{
"usage_user": 100 * (cts.User - lastCts.User) / totalDelta,
"usage_system": 100 * (cts.System - lastCts.System) / totalDelta,
"usage_idle": 100 * (cts.Idle - lastCts.Idle) / totalDelta,
"usage_nice": 100 * (cts.Nice - lastCts.Nice) / totalDelta,
"usage_iowait": 100 * (cts.Iowait - lastCts.Iowait) / totalDelta,
"usage_irq": 100 * (cts.Irq - lastCts.Irq) / totalDelta,
"usage_softirq": 100 * (cts.Softirq - lastCts.Softirq) / totalDelta,
"usage_steal": 100 * (cts.Steal - lastCts.Steal) / totalDelta,
"usage_guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta,
"usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta,
}
acc.AddGauge("cpu", fieldsG, tags, now)
}

s.lastStats = times
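The CPU hunk and the system-plugin hunks that follow all move from plain `AddFields` to type-aware `AddCounter`/`AddGauge` calls: raw time accumulators become counters, percentages and point-in-time values become gauges. A toy stand-in for the accumulator, with method signatures inferred from the call sites in these hunks rather than copied from the library, shows the shape of the split:

```go
package main

import (
	"fmt"
	"time"
)

// accumulator is an illustrative subset of what these plugins call;
// it is not the real telegraf.Accumulator interface.
type accumulator interface {
	AddGauge(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time)
	AddCounter(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time)
}

// printAcc just prints what it is given, labeled by metric type.
type printAcc struct{}

func (printAcc) AddGauge(m string, f map[string]interface{}, _ map[string]string, _ ...time.Time) {
	fmt.Println("gauge  ", m, f)
}
func (printAcc) AddCounter(m string, f map[string]interface{}, _ map[string]string, _ ...time.Time) {
	fmt.Println("counter", m, f)
}

func main() {
	var acc accumulator = printAcc{}
	// Monotonic CPU time goes out as a counter...
	acc.AddCounter("cpu", map[string]interface{}{"time_user": 1.5}, nil)
	// ...while derived usage percentages go out as a gauge.
	acc.AddGauge("cpu", map[string]interface{}{"usage_user": 12.0}, nil)
}
```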
@@ -70,7 +70,7 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
"inodes_free": du.InodesFree,
"inodes_used": du.InodesUsed,
}
acc.AddFields("disk", fields, tags)
acc.AddGauge("disk", fields, tags)
}

return nil
@@ -139,7 +139,7 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error {
"write_time": io.WriteTime,
"io_time": io.IoTime,
}
acc.AddFields("diskio", fields, tags)
acc.AddCounter("diskio", fields, tags)
}

return nil

@@ -81,7 +81,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error {
}
}

acc.AddFields("kernel", fields, map[string]string{})
acc.AddCounter("kernel", fields, map[string]string{})

return nil
}

@@ -35,7 +35,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error {
"used_percent": 100 * float64(vm.Used) / float64(vm.Total),
"available_percent": 100 * float64(vm.Available) / float64(vm.Total),
}
acc.AddFields("mem", fields, nil)
acc.AddCounter("mem", fields, nil)

return nil
}
@@ -56,15 +56,18 @@ func (s *SwapStats) Gather(acc telegraf.Accumulator) error {
return fmt.Errorf("error getting swap memory info: %s", err)
}

fields := map[string]interface{}{
fieldsG := map[string]interface{}{
"total": swap.Total,
"used": swap.Used,
"free": swap.Free,
"used_percent": swap.UsedPercent,
"in": swap.Sin,
"out": swap.Sout,
}
acc.AddFields("swap", fields, nil)
fieldsC := map[string]interface{}{
"in": swap.Sin,
"out": swap.Sout,
}
acc.AddGauge("swap", fieldsG, nil)
acc.AddCounter("swap", fieldsC, nil)

return nil
}

@@ -67,8 +67,6 @@ func TestMemStats(t *testing.T) {
"used": uint64(1232),
"used_percent": float64(12.2),
"free": uint64(6412),
"in": uint64(7),
"out": uint64(830),
}
acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string))
}

@@ -81,7 +81,7 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error {
"drop_in": io.Dropin,
"drop_out": io.Dropout,
}
acc.AddFields("net", fields, tags)
acc.AddCounter("net", fields, tags)
}

// Get system wide stats for different network protocols

@@ -57,7 +57,7 @@ func (p *Processes) Gather(acc telegraf.Accumulator) error {
}
}

acc.AddFields("processes", fields, nil)
acc.AddGauge("processes", fields, nil)
return nil
}

@@ -26,7 +26,7 @@ type PS interface {
func add(acc telegraf.Accumulator,
name string, val float64, tags map[string]string) {
if val >= 0 {
acc.Add(name, val, tags)
acc.AddFields(name, map[string]interface{}{"value": val}, tags)
}
}

@@ -37,16 +37,17 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
return err
}

fields := map[string]interface{}{
"load1": loadavg.Load1,
"load5": loadavg.Load5,
"load15": loadavg.Load15,
acc.AddGauge("system", map[string]interface{}{
"load1": loadavg.Load1,
"load5": loadavg.Load5,
"load15": loadavg.Load15,
"n_users": len(users),
"n_cpus": runtime.NumCPU(),
}, nil)
acc.AddCounter("system", map[string]interface{}{
"uptime": hostinfo.Uptime,
"n_users": len(users),
"uptime_format": format_uptime(hostinfo.Uptime),
"n_cpus": runtime.NumCPU(),
}
acc.AddFields("system", fields, nil)
}, nil)

return nil
}
@@ -77,13 +77,13 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
if s.filter == nil {
var err error
if len(s.Stats) == 0 {
s.filter, err = filter.CompileFilter(defaultStats)
s.filter, err = filter.Compile(defaultStats)
} else {
// legacy support, change "all" -> "*":
if s.Stats[0] == "all" {
s.Stats[0] = "*"
}
s.filter, err = filter.CompileFilter(s.Stats)
s.filter, err = filter.Compile(s.Stats)
}
if err != nil {
return err
@@ -15,6 +15,7 @@ $ sudo service telegraf start

## Available webhooks

- [Filestack](filestack/)
- [Github](github/)
- [Mandrill](mandrill/)
- [Rollbar](rollbar/)
plugins/inputs/webhooks/filestack/README.md
@@ -0,0 +1,17 @@
# Filestack webhook

You should configure your Filestack webhooks to point at the `webhooks` service. To do this, go to `filestack.com/`, select your app, and click `Credentials > Webhooks`. On the resulting page, set the `URL` to `http://<my_ip>:1619/filestack` and click `Add`.

## Events

See the [webhook doc](https://www.filestack.com/docs/webhooks).

*Limitations*: it stores all events except video conversion events.

Every event logs the original timestamp, the action, and the id.

**Tags:**
* 'action' = `event.action` string

**Fields:**
* 'id' = `event.id` string
plugins/inputs/webhooks/filestack/filestack_webhooks.go
@@ -0,0 +1,44 @@
package filestack

import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"time"

"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
)

type FilestackWebhook struct {
Path string
acc telegraf.Accumulator
}

func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
router.HandleFunc(fs.Path, fs.eventHandler).Methods("POST")

log.Printf("Started the webhooks_filestack on %s\n", fs.Path)
fs.acc = acc
}

func (fs *FilestackWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}

event := &FilestackEvent{}
err = json.Unmarshal(body, event)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}

fs.acc.AddFields("filestack_webhooks", event.Fields(), event.Tags(), time.Unix(event.TimeStamp, 0))

w.WriteHeader(http.StatusOK)
}
@@ -0,0 +1,21 @@
package filestack

import "strconv"

type FilestackEvent struct {
Action string `json:"action"`
TimeStamp int64 `json:"timestamp"`
Id int `json:"id"`
}

func (fe *FilestackEvent) Tags() map[string]string {
return map[string]string{
"action": fe.Action,
}
}

func (fe *FilestackEvent) Fields() map[string]interface{} {
return map[string]interface{}{
"id": strconv.Itoa(fe.Id),
}
}
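Taken together, the handler and the event type extract only three keys from whatever Filestack posts; every other payload field is ignored by `json.Unmarshal`. A self-contained sketch of that mapping, using values from the `UploadJSON` fixture below (the lowercase struct here is a local copy for illustration, not the plugin's exported type):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

// filestackEvent mirrors the three JSON keys the plugin reads.
type filestackEvent struct {
	Action    string `json:"action"`
	TimeStamp int64  `json:"timestamp"`
	Id        int    `json:"id"`
}

func main() {
	body := []byte(`{"action":"fp.upload","timestamp":1443444905,"id":100946}`)
	var e filestackEvent
	if err := json.Unmarshal(body, &e); err != nil {
		panic(err)
	}
	// The same shape the plugin hands to the accumulator:
	fmt.Println("tags:  ", map[string]string{"action": e.Action})
	fmt.Println("fields:", map[string]interface{}{"id": strconv.Itoa(e.Id)})
	fmt.Println("time:  ", time.Unix(e.TimeStamp, 0).UTC())
}
```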
@@ -0,0 +1,110 @@
package filestack

func DialogOpenJSON() string {
return `{
"action": "fp.dialog",
"timestamp": 1435584646,
"id": 102,
"text": {
"mimetypes": ["*/*"],
"iframe": false,
"language": "en",
"id": "1435584650723",
"mobile": false,
"app":{
"upsell": "false",
"apikey": "YOUR_API_KEY",
"customization":{
"saveas_subheader": "Save it down to your local device or onto the Cloud",
"folder_subheader": "Choose a folder to share with this application",
"open_subheader": "Choose from the files on your local device or the ones you have online",
"folder_header": "Select a folder",
"help_text": "",
"saveas_header": "Save your file",
"open_header": "Upload a file"
}
},
"dialogType": "open",
"auth": false,
"welcome_header": "Upload a file",
"welcome_subheader": "Choose from the files on your local device or the ones you have online",
"help_text": "",
"recent_path": "/",
"extensions": null,
"maxSize": 0,
"signature": null,
"policy": null,
"custom_providers": "imgur,cloudapp",
"intra": false
}
}`
}

func UploadJSON() string {
return `{
"action":"fp.upload",
"timestamp":1443444905,
"id":100946,
"text":{
"url":"https://www.filestackapi.com/api/file/WAunDTTqQfCNWwUUyf6n",
"client":"Facebook",
"type":"image/jpeg",
"filename":"1579337399020824.jpg",
"size":139154
}
}`
}

func VideoConversionJSON() string {
return `{
"status":"completed",
"message":"Done",
"data":{
"thumb":"https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W",
"thumb100x100":"https://process.filestackapi.com/AhTgLagciQByzXpFGRI0Az/resize=w:100,h:100,f:crop/output=f:jpg,q:66/https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W",
"thumb200x200":"https://process.filestackapi.com/AhTgLagciQByzXpFGRI0Az/resize=w:200,h:200,f:crop/output=f:jpg,q:66/https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W",
"thumb300x300":"https://process.filestackapi.com/AhTgLagciQByzXpFGRI0Az/resize=w:300,h:300,f:crop/output=f:jpg,q:66/https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W",
"url":"https://cdn.filestackcontent.com/VgvFVdvvTkml0WXPIoGn"
},
"metadata":{
"result":{
"audio_channels":2,
"audio_codec":"vorbis",
"audio_sample_rate":44100,
"created_at":"2015/12/21 20:45:19 +0000",
"duration":10587,
"encoding_progress":100,
"encoding_time":8,
"extname":".webm",
"file_size":293459,
"fps":24,
"height":260,
"mime_type":"video/webm",
"started_encoding_at":"2015/12/21 20:45:22 +0000",
"updated_at":"2015/12/21 20:45:32 +0000",
"video_bitrate":221,
"video_codec":"vp8",
"width":300
},
"source":{
"audio_bitrate":125,
"audio_channels":2,
"audio_codec":"aac",
"audio_sample_rate":44100,
"created_at":"2015/12/21 20:45:19 +0000",
"duration":10564,
"extname":".mp4",
"file_size":875797,
"fps":24,
"height":360,
"mime_type":"video/mp4",
"updated_at":"2015/12/21 20:45:32 +0000",
"video_bitrate":196,
"video_codec":"h264",
"width":480
}
},
"timestamp":"1453850583",
"uuid":"638311d89d2bc849563a674a45809b7c"
}`
}
plugins/inputs/webhooks/filestack/filestack_webhooks_test.go
@@ -0,0 +1,74 @@
package filestack

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

func postWebhooks(md *FilestackWebhook, eventBody string) *httptest.ResponseRecorder {
	req, _ := http.NewRequest("POST", "/filestack", strings.NewReader(eventBody))
	w := httptest.NewRecorder()

	md.eventHandler(w, req)

	return w
}

func TestDialogEvent(t *testing.T) {
	var acc testutil.Accumulator
	fs := &FilestackWebhook{Path: "/filestack", acc: &acc}
	resp := postWebhooks(fs, DialogOpenJSON())
	if resp.Code != http.StatusOK {
		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
	}

	fields := map[string]interface{}{
		"id": "102",
	}

	tags := map[string]string{
		"action": "fp.dialog",
	}

	acc.AssertContainsTaggedFields(t, "filestack_webhooks", fields, tags)
}

func TestParseError(t *testing.T) {
	fs := &FilestackWebhook{Path: "/filestack"}
	resp := postWebhooks(fs, "")
	if resp.Code != http.StatusBadRequest {
		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusBadRequest)
	}
}

func TestUploadEvent(t *testing.T) {
	var acc testutil.Accumulator
	fs := &FilestackWebhook{Path: "/filestack", acc: &acc}
	resp := postWebhooks(fs, UploadJSON())
	if resp.Code != http.StatusOK {
		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
	}

	fields := map[string]interface{}{
		"id": "100946",
	}

	tags := map[string]string{
		"action": "fp.upload",
	}

	acc.AssertContainsTaggedFields(t, "filestack_webhooks", fields, tags)
}

func TestVideoConversionEvent(t *testing.T) {
	var acc testutil.Accumulator
	fs := &FilestackWebhook{Path: "/filestack", acc: &acc}
	resp := postWebhooks(fs, VideoConversionJSON())
	// the conversion payload has no "action" field, so the handler is
	// expected to reject it rather than emit a measurement
	if resp.Code != http.StatusBadRequest {
		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusBadRequest)
	}
}
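These tests drive an eventHandler whose implementation is not part of this diff. As a rough sketch of the behaviour they pin down (every name below other than FilestackWebhook and eventHandler is hypothetical): decode the POSTed JSON, answer 400 for anything that is not a filestack event, and record the event id as a field tagged with its action.

```go
package filestack

// Hypothetical sketch, not the plugin's actual source; the real handler
// lives in filestack_webhooks.go, which this diff omits.
import (
	"encoding/json"
	"net/http"
	"strconv"
)

type filestackEvent struct { // hypothetical type
	Action string `json:"action"`
	ID     int64  `json:"id"`
}

func (fs *FilestackWebhook) eventHandlerSketch(w http.ResponseWriter, r *http.Request) {
	event := &filestackEvent{}
	if err := json.NewDecoder(r.Body).Decode(event); err != nil {
		// unparsable bodies (see TestParseError) get a 400
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if event.Action == "" {
		// payloads without an "action", like the video conversion
		// notification, are rejected too (see TestVideoConversionEvent)
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	fields := map[string]interface{}{"id": strconv.FormatInt(event.ID, 10)}
	tags := map[string]string{"action": event.Action}
	fs.acc.AddFields("filestack_webhooks", fields, tags)

	w.WriteHeader(http.StatusOK)
}
```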
plugins/inputs/webhooks/webhooks.go
@@ -10,6 +10,7 @@ import (
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"

+	"github.com/influxdata/telegraf/plugins/inputs/webhooks/filestack"
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/github"
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/mandrill"
 	"github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar"
@@ -26,9 +27,10 @@ func init() {
 type Webhooks struct {
 	ServiceAddress string

-	Github   *github.GithubWebhook
-	Mandrill *mandrill.MandrillWebhook
-	Rollbar  *rollbar.RollbarWebhook
+	Github    *github.GithubWebhook
+	Filestack *filestack.FilestackWebhook
+	Mandrill  *mandrill.MandrillWebhook
+	Rollbar   *rollbar.RollbarWebhook
 }

 func NewWebhooks() *Webhooks {
@@ -40,6 +42,9 @@ func (wb *Webhooks) SampleConfig() string {
 	## Address and port to host Webhook listener on
 	service_address = ":1619"

+	[inputs.webhooks.filestack]
+	  path = "/filestack"
+
 	[inputs.webhooks.github]
 	  path = "/github"
plugins/outputs/all/all.go
@@ -14,6 +14,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/kinesis"
 	_ "github.com/influxdata/telegraf/plugins/outputs/librato"
 	_ "github.com/influxdata/telegraf/plugins/outputs/mqtt"
+	_ "github.com/influxdata/telegraf/plugins/outputs/nats"
 	_ "github.com/influxdata/telegraf/plugins/outputs/nsq"
 	_ "github.com/influxdata/telegraf/plugins/outputs/opentsdb"
 	_ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
plugins/outputs/instrumental/instrumental.go
@@ -35,7 +35,8 @@ const (
 )

 var (
-	StatIncludesBadChar = regexp.MustCompile("[^[:alnum:][:blank:]-_.]")
+	ValueIncludesBadChar = regexp.MustCompile("[^[:digit:].]")
+	MetricNameReplacer   = regexp.MustCompile("[^-[:alnum:]_.]+")
 )

 var sampleConfig = `
@@ -131,8 +132,17 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 	}

 	for _, stat := range stats {
-		if !StatIncludesBadChar.MatchString(stat) {
-			points = append(points, fmt.Sprintf("%s %s", metricType, stat))
+		// decompose "metric.name value time"
+		splitStat := strings.SplitN(stat, " ", 3)
+		metric := splitStat[0]
+		value := splitStat[1]
+		time := splitStat[2]
+
+		// replace invalid components of metric name with underscore
+		clean_metric := MetricNameReplacer.ReplaceAllString(metric, "_")
+
+		if !ValueIncludesBadChar.MatchString(value) {
+			points = append(points, fmt.Sprintf("%s %s %s %s", metricType, clean_metric, value, time))
 		} else if i.Debug {
 			log.Printf("Unable to send bad stat: %s", stat)
 		}
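The two new patterns split the work: metric names get repaired, values get vetoed. A small standalone sketch (the sample inputs are made up) shows MetricNameReplacer collapsing each illegal run to a single underscore and ValueIncludesBadChar flagging any value that is not purely numeric:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	ValueIncludesBadChar = regexp.MustCompile("[^[:digit:].]")
	MetricNameReplacer   = regexp.MustCompile("[^-[:alnum:]_.]+")
)

func main() {
	// runs of characters outside [-a-zA-Z0-9_.] collapse to one "_"
	fmt.Println(MetricNameReplacer.ReplaceAllString("disk i/o%total", "_"))
	// -> disk_i_o_total

	// values may only contain digits and dots; anything else is dropped
	fmt.Println(ValueIncludesBadChar.MatchString("3.14"))      // false: sent
	fmt.Println(ValueIncludesBadChar.MatchString("\" 3:30\"")) // true: dropped
}
```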
plugins/outputs/instrumental/instrumental_test.go
@@ -49,21 +49,28 @@ func TestWrite(t *testing.T) {
 		map[string]interface{}{"value": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	// We will drop metrics that simply won't be accepted by Instrumental
+	// We will modify metric names that won't be accepted by Instrumental
 	m4, _ := telegraf.NewMetric(
 		"bad_metric_name",
 		map[string]string{"host": "192.168.0.1:8888::123", "metric_type": "counter"},
 		map[string]interface{}{"value": 1},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	m5, _ := telegraf.NewMetric(
+	// We will drop metric values that won't be accepted by Instrumental
+	m5, _ := telegraf.NewMetric(
+		"bad_values",
+		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
+		map[string]interface{}{"value": "\" 3:30\""},
+		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+	)
+	m6, _ := telegraf.NewMetric(
 		"my_counter",
 		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
 		map[string]interface{}{"value": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)

-	metrics = []telegraf.Metric{m3, m4, m5}
+	metrics = []telegraf.Metric{m3, m4, m5, m6}
 	i.Write(metrics)

 	wg.Wait()
@@ -101,8 +108,15 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup) {
 	data3, _ := tp.ReadLine()
 	assert.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3)

 	data4, _ := tp.ReadLine()
-	assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data4)
+	assert.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", data4)
+
+	data5, _ := tp.ReadLine()
+	assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5)
+
+	data6, _ := tp.ReadLine()
+	assert.Equal(t, "", data6)

 	conn.Close()
 }
plugins/outputs/nats/README.md (new file, 37 lines)
@@ -0,0 +1,37 @@
# NATS Output Plugin

This plugin writes to one or more specified NATS instances.

```
[[outputs.nats]]
  ## URLs of NATS servers
  servers = ["nats://localhost:4222"]
  ## Optional credentials
  # username = ""
  # password = ""
  ## NATS subject for producer messages
  subject = "telegraf"
  ## Optional SSL Config
  ## Optional CA certificate used to verify the NATS server(s) TLS certificate(s)
  # ssl_ca = "/etc/telegraf/ca.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to output.
  ## Each data format has its own unique set of configuration options; read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```

### Required parameters:

* `servers`: List of strings; this is for NATS clustering support. Each URL should start with `nats://`.
* `subject`: The NATS subject to publish to.

### Optional parameters:

* `username`: Username for NATS
* `password`: Password for NATS
* `ssl_ca`: Path to the CA certificate used to verify the server's TLS certificate
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
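To verify the output end to end, a throwaway subscriber on the configured subject does the trick. This is a sketch using the same NATS client package the plugin imports; the server URL and subject mirror the sample config above:

```go
package main

import (
	"fmt"

	"github.com/nats-io/nats"
)

func main() {
	// connect to the server the plugin writes to
	nc, err := nats.Connect("nats://localhost:4222")
	if err != nil {
		panic(err)
	}
	defer nc.Close()

	// print every metric telegraf publishes on the "telegraf" subject
	nc.Subscribe("telegraf", func(m *nats.Msg) {
		fmt.Printf("%s", m.Data)
	})

	select {} // block forever
}
```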
plugins/outputs/nats/nats.go (new file, 133 lines)
@@ -0,0 +1,133 @@
package nats

import (
	"fmt"

	nats_client "github.com/nats-io/nats"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/outputs"
	"github.com/influxdata/telegraf/plugins/serializers"
)

type NATS struct {
	// Servers is the NATS server pool to connect to
	Servers []string
	// Credentials
	Username string
	Password string
	// NATS subject to publish metrics to
	Subject string

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	conn       *nats_client.Conn
	serializer serializers.Serializer
}

var sampleConfig = `
  ## URLs of NATS servers
  servers = ["nats://localhost:4222"]
  ## Optional credentials
  # username = ""
  # password = ""
  ## NATS subject for producer messages
  subject = "telegraf"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to output.
  ## Each data format has its own unique set of configuration options; read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
`

func (n *NATS) SetSerializer(serializer serializers.Serializer) {
	n.serializer = serializer
}

func (n *NATS) Connect() error {
	var err error
	// set NATS connection options
	opts := nats_client.DefaultOptions
	opts.Servers = n.Servers
	if n.Username != "" {
		opts.User = n.Username
		opts.Password = n.Password
	}

	tlsConfig, err := internal.GetTLSConfig(
		n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
	if err != nil {
		return err
	}
	if tlsConfig != nil {
		// set NATS connection TLS options
		opts.Secure = true
		opts.TLSConfig = tlsConfig
	}

	// try to connect
	n.conn, err = opts.Connect()

	return err
}

func (n *NATS) Close() error {
	n.conn.Close()
	return nil
}

func (n *NATS) SampleConfig() string {
	return sampleConfig
}

func (n *NATS) Description() string {
	return "Send telegraf measurements to NATS"
}

func (n *NATS) Write(metrics []telegraf.Metric) error {
	if len(metrics) == 0 {
		return nil
	}

	for _, metric := range metrics {
		values, err := n.serializer.Serialize(metric)
		if err != nil {
			return err
		}

		var pubErr error
		for _, value := range values {
			err = n.conn.Publish(n.Subject, []byte(value))
			if err != nil {
				pubErr = err
			}
		}

		if pubErr != nil {
			return fmt.Errorf("FAILED to send NATS message: %s", pubErr)
		}
	}
	return nil
}

func init() {
	outputs.Add("nats", func() telegraf.Output {
		return &NATS{}
	})
}
plugins/outputs/nats/nats_test.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package nats

import (
	"testing"

	"github.com/influxdata/telegraf/plugins/serializers"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestConnectAndWrite(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	server := []string{"nats://" + testutil.GetLocalHost() + ":4222"}
	s, _ := serializers.NewInfluxSerializer()
	n := &NATS{
		Servers:    server,
		Subject:    "telegraf",
		serializer: s,
	}

	// Verify that we can connect to the NATS daemon
	err := n.Connect()
	require.NoError(t, err)

	// Verify that we can successfully write data to the NATS daemon
	err = n.Write(testutil.MockMetrics())
	require.NoError(t, err)
}
plugins/outputs/opentsdb/README.md
@@ -1,6 +1,12 @@
 # OpenTSDB Output Plugin

-This plugin writes to a OpenTSDB instance using the "telnet" mode
+This plugin writes to an OpenTSDB instance using either the "telnet" or Http mode.
+
+Using the Http API is the recommended way of writing metrics since OpenTSDB 2.0.
+To use Http mode, give `host` an `http://` scheme in the config. You can also
+control how many metrics are sent in each Http request by setting `httpBatchSize`
+in the config.
+
+See http://opentsdb.net/docs/build/html/api_http/put.html for details.

 ## Transfer "Protocol" in the telnet mode

@@ -10,14 +16,14 @@ The expected input from OpenTSDB is specified in the following way:
 put <metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
 ```

 The telegraf output plugin adds an optional prefix to the metric keys so
 that a subset can be selected.

 ```
 put <[prefix.]metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
 ```

 ### Example

 ```
 put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame scope=green
@@ -38,12 +44,12 @@ put nine.telegraf.ping_average_response_ms 1441910366 24.006000 dc=homeoffice ho
 ...
 ```

 ## Simulating the telnet interface

-The OpenTSDB interface can be simulated with this reader:
+The OpenTSDB telnet interface can be simulated with this reader:

 ```
 // opentsdb_telnet_mode_mock.go
 package main

 import (
@@ -75,4 +81,4 @@ func main() {

 ## Allowed values for metrics

 OpenTSDB allows `integers` and `floats` as input values
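Http mode posts gzipped JSON arrays to OpenTSDB's /api/put endpoint (the format documented at the link above). As a rough sketch of the payload shape, here is one of the example telnet lines expressed through the HttpMetric struct that opentsdb_http.go defines later in this diff:

```go
package main

import (
	"encoding/json"
	"os"
)

// HttpMetric mirrors the struct defined in opentsdb_http.go below.
type HttpMetric struct {
	Metric    string            `json:"metric"`
	Timestamp int64             `json:"timestamp"`
	Value     string            `json:"value"`
	Tags      map[string]string `json:"tags"`
}

func main() {
	// the telnet line
	//   put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame
	// becomes this element of the JSON array POSTed to /api/put
	batch := []HttpMetric{{
		Metric:    "nine.telegraf.system_load1",
		Timestamp: 1441910356,
		Value:     "0.430000",
		Tags:      map[string]string{"dc": "homeoffice", "host": "irimame"},
	}}
	json.NewEncoder(os.Stdout).Encode(batch)
}
```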
plugins/outputs/opentsdb/opentsdb.go
@@ -3,10 +3,10 @@ package opentsdb
 import (
 	"fmt"
 	"net"
+	"net/url"
 	"sort"
 	"strconv"
 	"strings"
 	"time"

 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/outputs"
@@ -18,6 +18,8 @@ type OpenTSDB struct {
 	Host string
 	Port int

+	HttpBatchSize int
+
 	Debug bool
 }

@@ -28,27 +30,41 @@ var sampleConfig = `
 	## prefix for metrics keys
 	prefix = "my.specific.prefix."

-	## Telnet Mode ##
-	## DNS name of the OpenTSDB server in telnet mode
+	## DNS name of the OpenTSDB server
+	## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
+	## telnet API. "http://opentsdb.example.com" will use the Http API.
 	host = "opentsdb.example.com"

-	## Port of the OpenTSDB server in telnet mode
+	## Port of the OpenTSDB server
 	port = 4242

+	## Number of data points to send to OpenTSDB in Http requests.
+	## Not used with telnet API.
+	httpBatchSize = 50
+
 	## Debug true - Prints OpenTSDB communication
 	debug = false
`

-type MetricLine struct {
-	Metric    string
-	Timestamp int64
-	Value     string
-	Tags      string
+func ToLineFormat(tags map[string]string) string {
+	tagsArray := make([]string, len(tags))
+	index := 0
+	for k, v := range tags {
+		tagsArray[index] = fmt.Sprintf("%s=%s", k, v)
+		index++
+	}
+	sort.Strings(tagsArray)
+	return strings.Join(tagsArray, " ")
 }

 func (o *OpenTSDB) Connect() error {
 	// Test Connection to OpenTSDB Server
-	uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
+	u, err := url.Parse(o.Host)
+	if err != nil {
+		return fmt.Errorf("Error in parsing host url: %s", err.Error())
+	}
+
+	uri := fmt.Sprintf("%s:%d", u.Host, o.Port)
 	tcpAddr, err := net.ResolveTCPAddr("tcp", uri)
 	if err != nil {
 		return fmt.Errorf("OpenTSDB: TCP address cannot be resolved")
@@ -65,10 +81,64 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error {
 	if len(metrics) == 0 {
 		return nil
 	}
-	now := time.Now()

+	u, err := url.Parse(o.Host)
+	if err != nil {
+		return fmt.Errorf("Error in parsing host url: %s", err.Error())
+	}
+
+	if u.Scheme == "" || u.Scheme == "tcp" {
+		return o.WriteTelnet(metrics, u)
+	} else if u.Scheme == "http" {
+		return o.WriteHttp(metrics, u)
+	} else {
+		return fmt.Errorf("Unknown scheme in host parameter.")
+	}
+}
+
+func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
+	http := openTSDBHttp{
+		Host:      u.Host,
+		Port:      o.Port,
+		BatchSize: o.HttpBatchSize,
+		Debug:     o.Debug,
+	}
+
+	for _, m := range metrics {
+		now := m.UnixNano() / 1000000000
+		tags := cleanTags(m.Tags())
+
+		for fieldName, value := range m.Fields() {
+			metricValue, buildError := buildValue(value)
+			if buildError != nil {
+				fmt.Printf("OpenTSDB: %s\n", buildError.Error())
+				continue
+			}
+
+			metric := &HttpMetric{
+				Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s",
+					o.Prefix, m.Name(), fieldName)),
+				Tags:      tags,
+				Timestamp: now,
+				Value:     metricValue,
+			}
+
+			if err := http.sendDataPoint(metric); err != nil {
+				return err
+			}
+		}
+	}
+
+	if err := http.flush(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
 	// Send Data with telnet / socket communication
-	uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
+	uri := fmt.Sprintf("%s:%d", u.Host, o.Port)
 	tcpAddr, _ := net.ResolveTCPAddr("tcp", uri)
 	connection, err := net.DialTCP("tcp", nil, tcpAddr)
 	if err != nil {
@@ -77,9 +147,20 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error {
 	defer connection.Close()

 	for _, m := range metrics {
-		for _, metric := range buildMetrics(m, now, o.Prefix) {
+		now := m.UnixNano() / 1000000000
+		tags := ToLineFormat(cleanTags(m.Tags()))
+
+		for fieldName, value := range m.Fields() {
+			metricValue, buildError := buildValue(value)
+			if buildError != nil {
+				fmt.Printf("OpenTSDB: %s\n", buildError.Error())
+				continue
+			}
+
 			messageLine := fmt.Sprintf("put %s %v %s %s\n",
-				metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
+				sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
+				now, metricValue, tags)

 			if o.Debug {
 				fmt.Print(messageLine)
 			}
@@ -93,37 +174,12 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error {
 	return nil
 }

-func buildTags(mTags map[string]string) []string {
-	tags := make([]string, len(mTags))
-	index := 0
-	for k, v := range mTags {
-		tags[index] = sanitizedChars.Replace(fmt.Sprintf("%s=%s", k, v))
-		index++
-	}
-	sort.Strings(tags)
-	return tags
-}
-
-func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine {
-	ret := []*MetricLine{}
-	for fieldName, value := range m.Fields() {
-		metric := &MetricLine{
-			Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s",
-				prefix, m.Name(), fieldName)),
-			Timestamp: now.Unix(),
-		}
-
-		metricValue, buildError := buildValue(value)
-		if buildError != nil {
-			fmt.Printf("OpenTSDB: %s\n", buildError.Error())
-			continue
-		}
-		metric.Value = metricValue
-		tagsSlice := buildTags(m.Tags())
-		metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
-		ret = append(ret, metric)
-	}
-	return ret
+func cleanTags(tags map[string]string) map[string]string {
+	tagSet := make(map[string]string, len(tags))
+	for k, v := range tags {
+		tagSet[sanitizedChars.Replace(k)] = sanitizedChars.Replace(v)
+	}
+	return tagSet
 }

 func buildValue(v interface{}) (string, error) {
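The new ToLineFormat helper renders a tag map deterministically (sorted keys, space-separated pairs), which the reworked telnet test below relies on. A standalone copy to illustrate; the sample tags mirror the test table:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// Copy of ToLineFormat from opentsdb.go, for a self-contained demo.
func ToLineFormat(tags map[string]string) string {
	tagsArray := make([]string, len(tags))
	index := 0
	for k, v := range tags {
		tagsArray[index] = fmt.Sprintf("%s=%s", k, v)
		index++
	}
	sort.Strings(tagsArray)
	return strings.Join(tagsArray, " ")
}

func main() {
	// map iteration order is random, but the output is stable
	fmt.Println(ToLineFormat(map[string]string{"one": "two", "aaa": "bbb"}))
	// -> aaa=bbb one=two
}
```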
plugins/outputs/opentsdb/opentsdb_http.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package opentsdb

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

type HttpMetric struct {
	Metric    string            `json:"metric"`
	Timestamp int64             `json:"timestamp"`
	Value     string            `json:"value"`
	Tags      map[string]string `json:"tags"`
}

type openTSDBHttp struct {
	Host      string
	Port      int
	BatchSize int
	Debug     bool

	metricCounter int
	body          requestBody
}

type requestBody struct {
	b bytes.Buffer
	g *gzip.Writer

	dbgB bytes.Buffer

	w   io.Writer
	enc *json.Encoder

	empty bool
}

func (r *requestBody) reset(debug bool) {
	r.b.Reset()
	r.dbgB.Reset()

	if r.g == nil {
		r.g = gzip.NewWriter(&r.b)
	} else {
		r.g.Reset(&r.b)
	}

	if debug {
		r.w = io.MultiWriter(r.g, &r.dbgB)
	} else {
		r.w = r.g
	}

	r.enc = json.NewEncoder(r.w)

	io.WriteString(r.w, "[")

	r.empty = true
}

func (r *requestBody) addMetric(metric *HttpMetric) error {
	if !r.empty {
		io.WriteString(r.w, ",")
	}

	if err := r.enc.Encode(metric); err != nil {
		return fmt.Errorf("Metric serialization error %s", err.Error())
	}

	r.empty = false

	return nil
}

func (r *requestBody) close() error {
	io.WriteString(r.w, "]")

	if err := r.g.Close(); err != nil {
		return fmt.Errorf("Error when closing gzip writer: %s", err.Error())
	}

	return nil
}

func (o *openTSDBHttp) sendDataPoint(metric *HttpMetric) error {
	if o.metricCounter == 0 {
		o.body.reset(o.Debug)
	}

	if err := o.body.addMetric(metric); err != nil {
		return err
	}

	o.metricCounter++
	if o.metricCounter == o.BatchSize {
		if err := o.flush(); err != nil {
			return err
		}

		o.metricCounter = 0
	}

	return nil
}

func (o *openTSDBHttp) flush() error {
	if o.metricCounter == 0 {
		return nil
	}

	o.body.close()

	u := url.URL{
		Scheme: "http",
		Host:   fmt.Sprintf("%s:%d", o.Host, o.Port),
		Path:   "/api/put",
	}

	if o.Debug {
		u.RawQuery = "details"
	}

	req, err := http.NewRequest("POST", u.String(), &o.body.b)
	if err != nil {
		return fmt.Errorf("Error when building request: %s", err.Error())
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Content-Encoding", "gzip")

	if o.Debug {
		dump, err := httputil.DumpRequestOut(req, false)
		if err != nil {
			return fmt.Errorf("Error when dumping request: %s", err.Error())
		}

		fmt.Printf("Sending metrics:\n%s", dump)
		fmt.Printf("Body:\n%s\n\n", o.body.dbgB.String())
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("Error when sending metrics: %s", err.Error())
	}
	defer resp.Body.Close()

	if o.Debug {
		dump, err := httputil.DumpResponse(resp, true)
		if err != nil {
			return fmt.Errorf("Error when dumping response: %s", err.Error())
		}

		fmt.Printf("Received response\n%s\n\n", dump)
	} else {
		// Important so the http client reuses the connection for the next request if need be.
		io.Copy(ioutil.Discard, resp.Body)
	}

	if resp.StatusCode/100 != 2 {
		if resp.StatusCode/100 == 4 {
			log.Printf("WARNING: Received %d status code. Dropping metrics to avoid overflowing buffer.", resp.StatusCode)
		} else {
			return fmt.Errorf("Error when sending metrics. Received status %d", resp.StatusCode)
		}
	}

	return nil
}
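The requestBody type above streams a JSON array through a gzip.Writer, so each batch is compressed as it is built rather than serialized first and compressed afterwards. Here is the same pattern in a self-contained sketch (sample data made up); like the plugin code, it tolerates the newline json.Encoder emits after each element, since that is just insignificant whitespace inside the array:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
)

func main() {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	enc := json.NewEncoder(gz)

	// open the array, encode elements separated by commas, close the array
	io.WriteString(gz, "[")
	for i, point := range []map[string]interface{}{
		{"metric": "sys.load", "value": "0.43"},
		{"metric": "sys.mem", "value": "1024"},
	} {
		if i > 0 {
			io.WriteString(gz, ",")
		}
		enc.Encode(point)
	}
	io.WriteString(gz, "]")

	// flush all compressed bytes into buf; buf now holds a valid gzipped body
	if err := gz.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d bytes\n", buf.Len())

	// decompress to show the JSON survived the round trip
	r, _ := gzip.NewReader(&buf)
	io.Copy(os.Stdout, r)
}
```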
plugins/outputs/opentsdb/opentsdb_test.go
@@ -1,46 +1,119 @@
 package opentsdb

 import (
 	"fmt"
 	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
 	"reflect"
+	"strconv"
 	"testing"
-	// "github.com/influxdata/telegraf/testutil"
-	// "github.com/stretchr/testify/require"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/testutil"
 	//"github.com/stretchr/testify/require"
 )

+func TestCleanTags(t *testing.T) {
+	var tagtests = []struct {
+		ptIn    map[string]string
+		outTags map[string]string
+	}{
+		{
+			map[string]string{"one": "two", "three": "four"},
+			map[string]string{"one": "two", "three": "four"},
+		},
+		{
+			map[string]string{"aaa": "bbb"},
+			map[string]string{"aaa": "bbb"},
+		},
+		{
+			map[string]string{"Sp%ci@l Chars": "g$t repl#ced"},
+			map[string]string{"Sp-ci-l_Chars": "g-t_repl-ced"},
+		},
+		{
+			map[string]string{},
+			map[string]string{},
+		},
+	}
+	for _, tt := range tagtests {
+		tags := cleanTags(tt.ptIn)
+		if !reflect.DeepEqual(tags, tt.outTags) {
+			t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
+		}
+	}
+}
+
 func TestBuildTagsTelnet(t *testing.T) {
 	var tagtests = []struct {
 		ptIn    map[string]string
-		outTags []string
+		outTags string
 	}{
 		{
 			map[string]string{"one": "two", "three": "four"},
-			[]string{"one=two", "three=four"},
+			"one=two three=four",
 		},
 		{
 			map[string]string{"aaa": "bbb"},
-			[]string{"aaa=bbb"},
+			"aaa=bbb",
 		},
 		{
 			map[string]string{"one": "two", "aaa": "bbb"},
-			[]string{"aaa=bbb", "one=two"},
+			"aaa=bbb one=two",
 		},
-		{
-			map[string]string{"Sp%ci@l Chars": "g$t repl#ced"},
-			[]string{"Sp-ci-l_Chars=g-t_repl-ced"},
-		},
 		{
 			map[string]string{},
-			[]string{},
+			"",
 		},
 	}
 	for _, tt := range tagtests {
-		tags := buildTags(tt.ptIn)
+		tags := ToLineFormat(tt.ptIn)
 		if !reflect.DeepEqual(tags, tt.outTags) {
 			t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
 		}
 	}
 }

+func BenchmarkHttpSend(b *testing.B) {
+	const BatchSize = 50
+	const MetricsCount = 4 * BatchSize
+	metrics := make([]telegraf.Metric, MetricsCount)
+	for i := 0; i < MetricsCount; i++ {
+		metrics[i] = testutil.TestMetric(1.0)
+	}
+
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		fmt.Fprintln(w, "{}")
+	}))
+	defer ts.Close()
+
+	u, err := url.Parse(ts.URL)
+	if err != nil {
+		panic(err)
+	}
+
+	_, p, _ := net.SplitHostPort(u.Host)
+
+	port, err := strconv.Atoi(p)
+	if err != nil {
+		panic(err)
+	}
+
+	o := &OpenTSDB{
+		Host:          ts.URL,
+		Port:          port,
+		Prefix:        "",
+		HttpBatchSize: BatchSize,
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		o.Write(metrics)
+	}
+}
+
 // func TestWrite(t *testing.T) {
 // 	if testing.Short() {
 // 		t.Skip("Skipping integration test in short mode")
Some files were not shown because too many files have changed in this diff.