Compare commits

43 Commits

| SHA1 |
|---|
| 67318cb032 |
| 4f6087a99d |
| 6b0e863556 |
| 11bc82379c |
| a093ec1eaa |
| d71a42cd1b |
| d518d7d806 |
| 1d1afe6481 |
| 5a3f2e61f3 |
| 504f4e69db |
| 9f6666beb3 |
| af6e7b9531 |
| 6fd7361364 |
| e5c7a71d8e |
| db7a4b24b6 |
| 332f678afb |
| 04a2b36a52 |
| f862c6585d |
| 5c32521a07 |
| 9db30250c3 |
| 2b0cd2037b |
| 536dbfb724 |
| b77398c4d3 |
| fbf5bee051 |
| 81004c808f |
| 196509cc53 |
| 94ce67cc67 |
| 33ed528afe |
| 2435e47926 |
| ff67a4b96c |
| f816b952cf |
| b905bc1b5d |
| 0ecbf9e349 |
| 1c7715780e |
| 5d3850c44e |
| e84b356a12 |
| b349800f7a |
| 47de43abf3 |
| 7a9fef80f5 |
| dc28875437 |
| a6ed4d4c3a |
| fe6162b2a1 |
| 34182d9c9f |
CHANGELOG.md (31 changes)
@@ -1,4 +1,32 @@
-## v1.1 [unreleased]
+## v1.2 [unreleased]
+
+### Release Notes
+
+### Features
+
+- [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages
+- [#1564](https://github.com/influxdata/telegraf/issues/1564): Use RFC3339 timestamps in log output.
+- [#1997](https://github.com/influxdata/telegraf/issues/1997): Non-default HTTP timeouts for RabbitMQ plugin.
+- [#2074](https://github.com/influxdata/telegraf/pull/2074): "discard" output plugin added, primarily for testing purposes.
+- [#1965](https://github.com/influxdata/telegraf/pull/1965): The JSON parser can now parse an array of objects using the same configuration.
+- [#1807](https://github.com/influxdata/telegraf/pull/1807): Option to use device name rather than path for reporting disk stats.
+- [#1348](https://github.com/influxdata/telegraf/issues/1348): Telegraf "internal" plugin for collecting stats on itself.
+- [#2127](https://github.com/influxdata/telegraf/pull/2127): Update Go version to 1.7.4.
+- [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function.
+
+### Bugfixes
+
+- [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input.
+- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
+- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus
+
+## v1.1.1 [2016-11-14]
+
+### Bugfixes
+
+- [#2023](https://github.com/influxdata/telegraf/issues/2023): Fix issue parsing toml durations with single quotes.
+
+## v1.1.0 [2016-11-07]
 
 ### Release Notes
 
@@ -73,6 +101,7 @@ continue sending logs to /var/log/telegraf/telegraf.log.
 - [#1771](https://github.com/influxdata/telegraf/issues/1771): Delete nil fields in the metric maker.
 - [#870](https://github.com/influxdata/telegraf/issues/870): Fix MySQL special characters in DSN parsing.
 - [#1742](https://github.com/influxdata/telegraf/issues/1742): Ping input odd timeout behavior.
+- [#1950](https://github.com/influxdata/telegraf/pull/1950): Switch to github.com/kballard/go-shellquote.
 
 ## v1.0.1 [2016-09-26]
Godeps (4 changes)
@@ -20,7 +20,6 @@ github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
 github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
 github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
 github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
-github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
 github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
 github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
@@ -32,6 +31,7 @@ github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
 github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
 github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
 github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
+github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
@@ -47,7 +47,7 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
+github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8
 github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
Makefile (2 changes)
@@ -1,6 +1,6 @@
 VERSION := $(shell sh -c 'git describe --always --tags')
 BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
-COMMIT := $(shell sh -c 'git rev-parse HEAD')
+COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
 ifdef GOBIN
 PATH := $(GOBIN):$(PATH)
 else
README.md (249 changes)
@@ -1,15 +1,23 @@
 # Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)
 
-Telegraf is an agent written in Go for collecting metrics from the system it's
-running on, or from other services, and writing them into InfluxDB or other
-[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
+Telegraf is an agent written in Go for collecting, processing, aggregating,
+and writing metrics.
 
 Design goals are to have a minimal memory footprint with a plugin system so
 that developers in the community can easily add support for collecting metrics
 from well known services (like Hadoop, Postgres, or Redis) and third party
 APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
 
-New input and output plugins are designed to be easy to contribute,
+Telegraf is plugin-driven and has the concept of 4 distinct plugins:
+
+1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
+2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
+3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
+4. [Output Plugins](#output-plugins) write metrics to various destinations
+
+For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).
+
+New plugins are designed to be easy to contribute,
 we'll eagerly accept pull
 requests and will manage the set of plugins that Telegraf supports.
 See the [contributing guide](CONTRIBUTING.md) for instructions on writing
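Taken together, the four plugin types introduced in this hunk map onto a single `telegraf.conf`. A minimal sketch wiring one of each, using plugins that appear elsewhere in this compare (cpu, printer, minmax, influxdb); the option values are assumptions based on the 1.1-era defaults, not part of this diff:

```toml
# Input: collect CPU metrics.
[[inputs.cpu]]

# Processor: print every metric as it passes through (useful for debugging).
[[processors.printer]]

# Aggregator: emit min/max of each field over a rolling window.
[[aggregators.minmax]]
  period = "30s"
  drop_original = false

# Output: write everything to InfluxDB.
[[outputs.influxdb]]
  urls = ["http://localhost:8086"]
  database = "telegraf"
```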
@@ -20,12 +28,12 @@ new plugins.
 ### Linux deb and rpm Packages:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_amd64.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.x86_64.rpm
 
 Latest (arm):
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_armhf.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.armhf.rpm
 
 ##### Package Instructions:
@@ -39,21 +47,21 @@ controlled via `systemctl [action] telegraf`
 ### yum/apt Repositories:
 
 There is a yum/apt repo available for the whole InfluxData stack, see
-[here](https://docs.influxdata.com/influxdb/v0.10/introduction/installation/#installation)
+[here](https://docs.influxdata.com/influxdb/latest/introduction/installation/#installation)
 for instructions on setting up the repo. Once it is configured, you will be able
 to use this repo to install & update telegraf.
 
 ### Linux tarballs:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_i386.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_armhf.tar.gz
 
 ### FreeBSD tarball:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_freebsd_amd64.tar.gz
 
 ### Ansible Role:
@@ -69,7 +77,7 @@ brew install telegraf
 ### Windows Binaries (EXPERIMENTAL)
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_windows_amd64.zip
 
 ### From Source:
@@ -127,77 +135,72 @@ telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
 See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
 configuration options.
 
-## Supported Input Plugins
+## Input Plugins
 
 Telegraf currently has support for collecting metrics from many sources. For
 more information on each, please look at the directory of the same name in
 `plugins/inputs`.
 
 Currently implemented sources:
 
-* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch)
-* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike)
-* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
-* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
-* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
-* [ceph](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ceph)
-* [chrony](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/chrony)
-* [consul](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul)
-* [conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack)
-* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
-* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
-* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
-* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query)
-* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
-* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
-* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
-* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
-* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
-* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
-* [hddtemp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hddtemp)
-* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
-* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
-* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
-* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
-* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
-* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
-* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
-* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
-* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp)
-* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached)
-* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos)
-* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb)
-* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql)
-* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
-* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
-* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
-* [nstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nstat)
-* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
-* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
-* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
-* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping)
-* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql)
-* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible)
-* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns)
-* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat)
-* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus)
-* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent)
-* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq)
-* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops)
-* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
-* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
-* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
-* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
-* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
-* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy)
-* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
-* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
-* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
-* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
-* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
-* [win_perf_counters ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
-* [sysstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sysstat)
-* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system)
+* [aws cloudwatch](./plugins/inputs/cloudwatch)
+* [aerospike](./plugins/inputs/aerospike)
+* [apache](./plugins/inputs/apache)
+* [bcache](./plugins/inputs/bcache)
+* [cassandra](./plugins/inputs/cassandra)
+* [ceph](./plugins/inputs/ceph)
+* [chrony](./plugins/inputs/chrony)
+* [consul](./plugins/inputs/consul)
+* [conntrack](./plugins/inputs/conntrack)
+* [couchbase](./plugins/inputs/couchbase)
+* [couchdb](./plugins/inputs/couchdb)
+* [disque](./plugins/inputs/disque)
+* [dns query time](./plugins/inputs/dns_query)
+* [docker](./plugins/inputs/docker)
+* [dovecot](./plugins/inputs/dovecot)
+* [elasticsearch](./plugins/inputs/elasticsearch)
+* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
+* [filestat](./plugins/inputs/filestat)
+* [haproxy](./plugins/inputs/haproxy)
+* [hddtemp](./plugins/inputs/hddtemp)
+* [http_response](./plugins/inputs/http_response)
+* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
+* [internal](./plugins/inputs/internal)
+* [influxdb](./plugins/inputs/influxdb)
+* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
+* [iptables](./plugins/inputs/iptables)
+* [jolokia](./plugins/inputs/jolokia)
+* [leofs](./plugins/inputs/leofs)
+* [lustre2](./plugins/inputs/lustre2)
+* [mailchimp](./plugins/inputs/mailchimp)
+* [memcached](./plugins/inputs/memcached)
+* [mesos](./plugins/inputs/mesos)
+* [mongodb](./plugins/inputs/mongodb)
+* [mysql](./plugins/inputs/mysql)
+* [net_response](./plugins/inputs/net_response)
+* [nginx](./plugins/inputs/nginx)
+* [nsq](./plugins/inputs/nsq)
+* [nstat](./plugins/inputs/nstat)
+* [ntpq](./plugins/inputs/ntpq)
+* [phpfpm](./plugins/inputs/phpfpm)
+* [phusion passenger](./plugins/inputs/passenger)
+* [ping](./plugins/inputs/ping)
+* [postgresql](./plugins/inputs/postgresql)
+* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
+* [powerdns](./plugins/inputs/powerdns)
+* [procstat](./plugins/inputs/procstat)
+* [prometheus](./plugins/inputs/prometheus)
+* [puppetagent](./plugins/inputs/puppetagent)
+* [rabbitmq](./plugins/inputs/rabbitmq)
+* [raindrops](./plugins/inputs/raindrops)
+* [redis](./plugins/inputs/redis)
+* [rethinkdb](./plugins/inputs/rethinkdb)
+* [riak](./plugins/inputs/riak)
+* [sensors](./plugins/inputs/sensors)
+* [snmp](./plugins/inputs/snmp)
+* [snmp_legacy](./plugins/inputs/snmp_legacy)
+* [sql server](./plugins/inputs/sqlserver) (microsoft)
+* [twemproxy](./plugins/inputs/twemproxy)
+* [varnish](./plugins/inputs/varnish)
+* [zfs](./plugins/inputs/zfs)
+* [zookeeper](./plugins/inputs/zookeeper)
+* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
+* [sysstat](./plugins/inputs/sysstat)
+* [system](./plugins/inputs/system)
   * cpu
   * mem
   * net
@@ -211,45 +214,51 @@ Currently implemented sources:
 
 Telegraf can also collect metrics via the following service plugins:
 
-* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
-* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
-* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
-* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
-* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
-* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
-* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
-* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
-* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
-* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
-* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
-  * [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
-  * [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
-  * [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
-  * [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
+* [http_listener](./plugins/inputs/http_listener)
+* [kafka_consumer](./plugins/inputs/kafka_consumer)
+* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
+* [nats_consumer](./plugins/inputs/nats_consumer)
+* [nsq_consumer](./plugins/inputs/nsq_consumer)
+* [logparser](./plugins/inputs/logparser)
+* [statsd](./plugins/inputs/statsd)
+* [tail](./plugins/inputs/tail)
+* [tcp_listener](./plugins/inputs/tcp_listener)
+* [udp_listener](./plugins/inputs/udp_listener)
+* [webhooks](./plugins/inputs/webhooks)
+  * [filestack](./plugins/inputs/webhooks/filestack)
+  * [github](./plugins/inputs/webhooks/github)
+  * [mandrill](./plugins/inputs/webhooks/mandrill)
+  * [rollbar](./plugins/inputs/webhooks/rollbar)
 
-We'll be adding support for many more over the coming months. Read on if you
-want to add support for another service or third-party API.
+## Processor Plugins
 
-## Supported Output Plugins
+* [printer](./plugins/processors/printer)
 
-* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb)
-* [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon)
-* [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp)
-* [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis)
-* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch)
-* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
-* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
-* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
-* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
-* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
-* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
-* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
-* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
-* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
-* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
-* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
-* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
-* [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann)
+## Aggregator Plugins
+
+* [minmax](./plugins/aggregators/minmax)
+
+## Output Plugins
+
+* [influxdb](./plugins/outputs/influxdb)
+* [amon](./plugins/outputs/amon)
+* [amqp](./plugins/outputs/amqp)
+* [aws kinesis](./plugins/outputs/kinesis)
+* [aws cloudwatch](./plugins/outputs/cloudwatch)
+* [datadog](./plugins/outputs/datadog)
+* [discard](./plugins/outputs/discard)
+* [file](./plugins/outputs/file)
+* [graphite](./plugins/outputs/graphite)
+* [graylog](./plugins/outputs/graylog)
+* [instrumental](./plugins/outputs/instrumental)
+* [kafka](./plugins/outputs/kafka)
+* [librato](./plugins/outputs/librato)
+* [mqtt](./plugins/outputs/mqtt)
+* [nats](./plugins/outputs/nats)
+* [nsq](./plugins/outputs/nsq)
+* [opentsdb](./plugins/outputs/opentsdb)
+* [prometheus](./plugins/outputs/prometheus_client)
+* [riemann](./plugins/outputs/riemann)
 
 ## Contributing
@@ -2,10 +2,14 @@ package agent
 
 import (
 	"log"
-	"sync/atomic"
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/selfstat"
 )
 
+var (
+	NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
+)
+
 type MetricMaker interface {
@@ -37,8 +41,6 @@ type accumulator struct {
 	maker MetricMaker
 
 	precision time.Duration
-
-	errCount uint64
 }
 
 func (ac *accumulator) AddFields(
@@ -80,7 +82,7 @@ func (ac *accumulator) AddError(err error) {
 	if err == nil {
 		return
 	}
-	atomic.AddUint64(&ac.errCount, 1)
+	NErrors.Incr(1)
 	//TODO suppress/throttle consecutive duplicate errors?
 	log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
 }
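The `selfstat` registry used above is what the new "internal" input plugin ([#1348](https://github.com/influxdata/telegraf/issues/1348)) reports on. A minimal sketch of the API as it is exercised in this compare — `Register`/`RegisterTiming` return a `Stat` with `Incr`, `Set`, and `Get`; anything beyond those calls is an assumption:

```go
package main

import (
	"time"

	"github.com/influxdata/telegraf/selfstat"
)

func main() {
	// A plain counter: measurement "agent", field "gather_errors", no extra tags.
	errCounter := selfstat.Register("agent", "gather_errors", map[string]string{})
	errCounter.Incr(1)

	// A timing stat tagged per input, as the gatherer does with RegisterTiming.
	gatherTime := selfstat.RegisterTiming("gather", "gather_time_ns",
		map[string]string{"input": "cpu"})

	start := time.Now()
	// ... collect metrics here ...
	gatherTime.Incr(time.Since(start).Nanoseconds())

	_ = errCounter.Get() // Get reads the current value, as the updated tests do.
}
```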
@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -41,7 +42,7 @@ func TestAdd(t *testing.T) {
 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
 		actual)
 }
@@ -69,7 +70,7 @@ func TestAddFields(t *testing.T) {
 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
+		fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
 		actual)
 }
@@ -87,7 +88,7 @@ func TestAccAddError(t *testing.T) {
 	a.AddError(fmt.Errorf("baz"))
 
 	errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
-	assert.EqualValues(t, 3, a.errCount)
+	assert.EqualValues(t, int64(3), NErrors.Get())
 	require.Len(t, errs, 4) // 4 because of trailing newline
 	assert.Contains(t, string(errs[0]), "TestPlugin")
 	assert.Contains(t, string(errs[0]), "foo")
@@ -125,7 +126,7 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
 		actual)
 }
@@ -157,7 +158,7 @@ func TestAddDisablePrecision(t *testing.T) {
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
 		actual)
 }
@@ -189,7 +190,7 @@ func TestAddNoPrecisionWithInterval(t *testing.T) {
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
 		actual)
 }
@@ -206,7 +207,7 @@ func TestDifferentPrecisions(t *testing.T) {
 	testm := <-a.metrics
 	actual := testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
 		actual)
 
 	a.SetPrecision(0, time.Millisecond)
@@ -216,7 +217,7 @@ func TestDifferentPrecisions(t *testing.T) {
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
 		actual)
 
 	a.SetPrecision(0, time.Microsecond)
@@ -226,7 +227,7 @@ func TestDifferentPrecisions(t *testing.T) {
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
 		actual)
 
 	a.SetPrecision(0, time.Nanosecond)
@@ -236,7 +237,7 @@ func TestDifferentPrecisions(t *testing.T) {
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
 		actual)
 }
@@ -269,7 +270,7 @@ func TestAddGauge(t *testing.T) {
 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
 		actual)
 	assert.Equal(t, testm.Type(), telegraf.Gauge)
 }
@@ -303,7 +304,7 @@ func TestAddCounter(t *testing.T) {
 	testm = <-metrics
 	actual = testm.String()
 	assert.Equal(t,
-		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
+		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
 		actual)
 	assert.Equal(t, testm.Type(), telegraf.Counter)
 }
@@ -323,15 +324,15 @@ func (tm *TestMetricMaker) MakeMetric(
 ) telegraf.Metric {
 	switch mType {
 	case telegraf.Untyped:
-		if m, err := telegraf.NewMetric(measurement, tags, fields, t); err == nil {
+		if m, err := metric.New(measurement, tags, fields, t); err == nil {
 			return m
 		}
 	case telegraf.Counter:
-		if m, err := telegraf.NewCounterMetric(measurement, tags, fields, t); err == nil {
+		if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil {
 			return m
 		}
 	case telegraf.Gauge:
-		if m, err := telegraf.NewGaugeMetric(measurement, tags, fields, t); err == nil {
+		if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil {
 			return m
 		}
 	}
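The replacement above repeats throughout this compare: the three old constructors (`telegraf.NewMetric`, `telegraf.NewCounterMetric`, `telegraf.NewGaugeMetric`) collapse into a single `metric.New` that takes the value type as a trailing optional argument. A sketch of the new call shapes, inferred from the substitutions in this diff:

```go
package main

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

func main() {
	now := time.Now()
	tags := map[string]string{"host": "example"}
	fields := map[string]interface{}{"value": float64(42)}

	// Untyped metric: the default when no ValueType is passed.
	m1, err := metric.New("cpu", tags, fields, now)
	if err != nil {
		panic(err)
	}

	// Counter and gauge variants use the trailing variadic ValueType argument.
	m2, _ := metric.New("cpu", tags, fields, now, telegraf.Counter)
	m3, _ := metric.New("cpu", tags, fields, now, telegraf.Gauge)

	_, _, _ = m1, m2, m3
}
```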
@@ -12,6 +12,7 @@ import (
 	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/internal/config"
 	"github.com/influxdata/telegraf/internal/models"
+	"github.com/influxdata/telegraf/selfstat"
 )
 
 // Agent runs telegraf and collects data based on the given config
@@ -44,8 +45,6 @@ func NewAgent(config *config.Config) (*Agent, error) {
 // Connect connects to all configured outputs
 func (a *Agent) Connect() error {
 	for _, o := range a.Config.Outputs {
-		o.Quiet = a.Config.Agent.Quiet
-
 		switch ot := o.Output.(type) {
 		case telegraf.ServiceOutput:
 			if err := ot.Start(); err != nil {
@@ -106,24 +105,26 @@ func (a *Agent) gatherer(
 ) {
 	defer panicRecover(input)
 
+	GatherTime := selfstat.RegisterTiming("gather",
+		"gather_time_ns",
+		map[string]string{"input": input.Config.Name},
+	)
+
+	acc := NewAccumulator(input, metricC)
+	acc.SetPrecision(a.Config.Agent.Precision.Duration,
+		a.Config.Agent.Interval.Duration)
+
 	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
 
 	for {
-		acc := NewAccumulator(input, metricC)
-		acc.SetPrecision(a.Config.Agent.Precision.Duration,
-			a.Config.Agent.Interval.Duration)
-		input.SetDebug(a.Config.Agent.Debug)
-		input.SetDefaultTags(a.Config.Tags)
-
 		internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
 
 		start := time.Now()
 		gatherWithTimeout(shutdown, input, acc, interval)
 		elapsed := time.Since(start)
 
-		log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
-			input.Name(), interval, elapsed)
+		GatherTime.Incr(elapsed.Nanoseconds())
 
 		select {
 		case <-shutdown:
@@ -204,9 +205,6 @@ func (a *Agent) Test() error {
 		if err := input.Input.Gather(acc); err != nil {
 			return err
 		}
-		if acc.errCount > 0 {
-			return fmt.Errorf("Errors encountered during processing")
-		}
 
 		// Special instructions for some inputs. cpu, for example, needs to be
 		// run twice in order to return cpu usage percentages.
@@ -269,7 +267,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
 			var dropOriginal bool
 			if !m.IsAggregate() {
 				for _, agg := range a.Config.Aggregators {
-					if ok := agg.Add(copyMetric(m)); ok {
+					if ok := agg.Add(m.Copy()); ok {
 						dropOriginal = true
 					}
 				}
@@ -279,7 +277,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
 				if i == len(a.Config.Outputs)-1 {
 					o.AddMetric(m)
 				} else {
-					o.AddMetric(copyMetric(m))
+					o.AddMetric(m.Copy())
 				}
 			}
 		}
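The hand-rolled `copyMetric` helper disappears because metrics now carry their own `Copy()` method (see its removal further down). The fan-out pattern it served is worth spelling out: every consumer except the last receives a copy so that one output mutating a metric cannot affect the others, while the last consumer takes the original to save one allocation per metric. A simplified sketch — the `sink` interface here is a stand-in, not telegraf's actual output type:

```go
package main

import "github.com/influxdata/telegraf"

// sink is a stand-in for the running-output type; only AddMetric matters here.
type sink interface {
	AddMetric(telegraf.Metric)
}

// fanOut mirrors the flusher loop above: all but the last output get a
// defensive copy; the last gets the original, avoiding one redundant copy.
func fanOut(m telegraf.Metric, outputs []sink) {
	for i, o := range outputs {
		if i == len(outputs)-1 {
			o.AddMetric(m) // last consumer may take ownership
		} else {
			o.AddMetric(m.Copy()) // others get an independent copy
		}
	}
}
```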
@@ -327,13 +325,13 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 
 	// Start all ServicePlugins
 	for _, input := range a.Config.Inputs {
+		input.SetDefaultTags(a.Config.Tags)
 		switch p := input.Input.(type) {
 		case telegraf.ServiceInput:
 			acc := NewAccumulator(input, metricC)
 			// Service input plugins should set their own precision of their
 			// metrics.
 			acc.SetPrecision(time.Nanosecond, 0)
-			input.SetDefaultTags(a.Config.Tags)
 			if err := p.Start(acc); err != nil {
 				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
 					input.Name(), err.Error())
@@ -385,19 +383,3 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 	wg.Wait()
 	return nil
 }
-
-func copyMetric(m telegraf.Metric) telegraf.Metric {
-	t := time.Time(m.Time())
-
-	tags := make(map[string]string)
-	fields := make(map[string]interface{})
-	for k, v := range m.Tags() {
-		tags[k] = v
-	}
-	for k, v := range m.Fields() {
-		fields[k] = v
-	}
-
-	out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
-	return out
-}
@@ -4,9 +4,9 @@ machine:
   post:
     - sudo service zookeeper stop
    - go version
-    - go version | grep 1.7.3 || sudo rm -rf /usr/local/go
-    - wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz
-    - sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz
+    - go version | grep 1.7.4 || sudo rm -rf /usr/local/go
+    - wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz
+    - sudo tar -C /usr/local -xzf go1.7.4.linux-amd64.tar.gz
     - go version
 
 dependencies:
docs/AGGREGATORS_AND_PROCESSORS.md (new file, 59 lines)
@@ -0,0 +1,59 @@
+# Telegraf Aggregator & Processor Plugins
+
+As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugins.
+
+These plugins sit in-between Input & Output plugins, aggregating and processing
+metrics as they pass through Telegraf:
+
+```
+┌───────────┐
+│           │
+│    CPU    │───┐
+│           │   │
+└───────────┘   │
+                │
+┌───────────┐   │                                              ┌───────────┐
+│           │   │                                              │           │
+│  Memory   │───┤                                          ┌──▶│ InfluxDB  │
+│           │   │                                          │   │           │
+└───────────┘   │   ┌─────────────┐     ┌─────────────┐    │   └───────────┘
+                │   │             │     │ Aggregate   │    │
+┌───────────┐   │   │ Process     │     │  - mean     │    │   ┌───────────┐
+│           │   │   │  - transform│     │  - quantiles│    │   │           │
+│   MySQL   │───┼──▶│  - decorate │────▶│  - min/max  │────┼──▶│   File    │
+│           │   │   │  - filter   │     │  - count    │    │   │           │
+└───────────┘   │   │             │     │             │    │   └───────────┘
+                │   └─────────────┘     └─────────────┘    │
+┌───────────┐   │                                          │   ┌───────────┐
+│           │   │                                          │   │           │
+│   SNMP    │───┤                                          └──▶│   Kafka   │
+│           │   │                                              │           │
+└───────────┘   │                                              └───────────┘
+                │
+┌───────────┐   │
+│           │   │
+│  Docker   │───┘
+│           │
+└───────────┘
+```
+
+Both Aggregators and Processors analyze metrics as they pass through Telegraf.
+
+**Processor** plugins process metrics as they pass through and immediately emit
+results based on the values they process. For example, this could be printing
+all metrics or adding a tag to all metrics that pass through.
+
+**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
+are typically for emitting new _aggregate_ metrics, such as a running mean,
+minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
+plugins are configured with a `period`. The `period` is the size of the window
+of metrics that each _aggregate_ represents. In other words, the emitted
+_aggregate_ metric will be the aggregated value of the past `period` seconds.
+Since many users will only care about their aggregates and not every single metric
+gathered, there is also a `drop_original` argument, which tells Telegraf to only
+emit the aggregates and not the original metrics.
+
+**NOTE** Since aggregators only aggregate metrics within their period, historical
+data is not supported. In other words, if your metric timestamp is more than
+`now() - period` in the past, it will not be aggregated. If this is a feature
+that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992).
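Concretely, the `period` and `drop_original` knobs described above look like this for the `minmax` aggregator added in this compare (a sketch; the values shown are assumptions, not defaults taken from this diff):

```toml
[[aggregators.minmax]]
  ## Emit one aggregate metric summarizing each 30s window.
  period = "30s"
  ## Keep the raw metrics too; set to true to emit only the aggregates.
  drop_original = false
```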
@@ -147,6 +147,62 @@ Your Telegraf metrics would get tagged with "my_tag_1"
 exec_mycollector,my_tag_1=foo a=5,b_c=6
 ```
 
+If the JSON data is an array, then each element of the array is parsed with the configured settings.
+Each resulting metric will be output with the same timestamp.
+
+For example, with the following configuration:
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "json"
+
+  ## List of tag names to extract from top-level of JSON server response
+  tag_keys = [
+    "my_tag_1",
+    "my_tag_2"
+  ]
+```
+
+and this JSON output from a command:
+
+```json
+[
+    {
+        "a": 5,
+        "b": {
+            "c": 6
+        },
+        "my_tag_1": "foo",
+        "my_tag_2": "baz"
+    },
+    {
+        "a": 7,
+        "b": {
+            "c": 8
+        },
+        "my_tag_1": "bar",
+        "my_tag_2": "baz"
+    }
+]
+```
+
+your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2":
+
+```
+exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
+exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
+```
+
 # Value:
 
 The "value" data format translates single values into Telegraf metrics. This
@@ -12,11 +12,11 @@
 - github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
 - github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
 - github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
-- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
 - github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
 - github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
 - github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
 - github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
+- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
 - github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
 - github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
 - github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
@@ -160,7 +160,7 @@
 # # Configuration for AWS CloudWatch output.
 # [[outputs.cloudwatch]]
 # ## Amazon REGION
-# region = 'us-east-1'
+# region = "us-east-1"
 #
 # ## Amazon Credentials
 # ## Credentials are loaded in the following order
@@ -178,7 +178,7 @@
 # #shared_credential_file = ""
 #
 # ## Namespace for the CloudWatch MetricDatums
-# namespace = 'InfluxData/Telegraf'
+# namespace = "InfluxData/Telegraf"
 
 
 # # Configuration for DataDog API to send metrics to.
@@ -623,7 +623,7 @@
 # # Pull Metric Statistics from Amazon CloudWatch
 # [[inputs.cloudwatch]]
 # ## Amazon Region
-# region = 'us-east-1'
+# region = "us-east-1"
 #
 # ## Amazon Credentials
 # ## Credentials are loaded in the following order
@@ -641,21 +641,21 @@
 # #shared_credential_file = ""
 #
 # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
-# period = '1m'
+# period = "5m"
 #
 # ## Collection Delay (required - must account for metrics availability via CloudWatch API)
-# delay = '1m'
+# delay = "5m"
 #
 # ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
 # ## gaps or overlap in pulled data
-# interval = '1m'
+# interval = "5m"
 #
 # ## Configure the TTL for the internal cache of metrics.
 # ## Defaults to 1 hr if not specified
-# #cache_ttl = '10m'
+# #cache_ttl = "10m"
 #
 # ## Metric Statistic Namespace (required)
-# namespace = 'AWS/ELB'
+# namespace = "AWS/ELB"
 #
 # ## Maximum requests per second. Note that the global default AWS rate limit is
 # ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -666,12 +666,12 @@
 # ## Defaults to all Metrics in Namespace if nothing is provided
 # ## Refreshes Namespace available metrics every 1h
 # #[[inputs.cloudwatch.metrics]]
-# # names = ['Latency', 'RequestCount']
+# # names = ["Latency", "RequestCount"]
 # #
 # # ## Dimension filters for Metric (optional)
 # # [[inputs.cloudwatch.metrics.dimensions]]
-# # name = 'LoadBalancerName'
-# # value = 'p-example'
+# # name = "LoadBalancerName"
+# # value = "p-example"
 
 
 # # Gather health check statuses from services registered in Consul
@@ -1979,4 +1979,3 @@
 #
 # [inputs.webhooks.rollbar]
 #   path = "/rollbar"
-
@@ -130,6 +130,7 @@
     Counters = [
       "Context Switches/sec",
       "System Calls/sec",
+      "Processor Queue Length",
     ]
     Instances = ["------"]
     Measurement = "win_system"
@@ -4,15 +4,17 @@ import (
 	"sync"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/selfstat"
 )
 
+var (
+	MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
+	MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
+)
+
 // Buffer is an object for storing metrics in a circular buffer.
 type Buffer struct {
 	buf chan telegraf.Metric
-	// total dropped metrics
-	drops int
-	// total metrics added
-	total int
 
 	mu sync.Mutex
 }
@@ -36,25 +38,14 @@ func (b *Buffer) Len() int {
 	return len(b.buf)
 }
 
-// Drops returns the total number of dropped metrics that have occured in this
-// buffer since instantiation.
-func (b *Buffer) Drops() int {
-	return b.drops
-}
-
-// Total returns the total number of metrics that have been added to this buffer.
-func (b *Buffer) Total() int {
-	return b.total
-}
-
 // Add adds metrics to the buffer.
 func (b *Buffer) Add(metrics ...telegraf.Metric) {
 	for i, _ := range metrics {
-		b.total++
+		MetricsWritten.Incr(1)
 		select {
 		case b.buf <- metrics[i]:
 		default:
-			b.drops++
+			MetricsDropped.Incr(1)
 			<-b.buf
 			b.buf <- metrics[i]
 		}
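The drop-oldest behavior in `Buffer.Add` above comes from a buffered channel: when a send would block, one element is received (discarding the oldest) and the send is retried. A standalone sketch of the same trick — not the telegraf type itself, and, like it, not safe for concurrent writers without extra locking:

```go
package main

import "fmt"

// ringAdd appends v to buf, evicting the oldest element when full —
// the same buffered-channel pattern Buffer.Add uses above.
func ringAdd(buf chan int, v int) (dropped bool) {
	select {
	case buf <- v: // room available
		return false
	default: // full: evict the oldest, then insert
		<-buf
		buf <- v
		return true
	}
}

func main() {
	buf := make(chan int, 3)
	for i := 1; i <= 5; i++ {
		if ringAdd(buf, i) {
			fmt.Println("dropped oldest to fit", i)
		}
	}
	close(buf)
	for v := range buf {
		fmt.Println(v) // prints 3, 4, 5
	}
}
```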
@@ -27,47 +27,53 @@ func BenchmarkAddMetrics(b *testing.B) {
 
 func TestNewBufferBasicFuncs(t *testing.T) {
 	b := NewBuffer(10)
+	MetricsDropped.Set(0)
+	MetricsWritten.Set(0)
 
 	assert.True(t, b.IsEmpty())
 	assert.Zero(t, b.Len())
-	assert.Zero(t, b.Drops())
-	assert.Zero(t, b.Total())
+	assert.Zero(t, MetricsDropped.Get())
+	assert.Zero(t, MetricsWritten.Get())
 
 	m := testutil.TestMetric(1, "mymetric")
 	b.Add(m)
 	assert.False(t, b.IsEmpty())
 	assert.Equal(t, b.Len(), 1)
-	assert.Equal(t, b.Drops(), 0)
-	assert.Equal(t, b.Total(), 1)
+	assert.Equal(t, int64(0), MetricsDropped.Get())
+	assert.Equal(t, int64(1), MetricsWritten.Get())
 
 	b.Add(metricList...)
 	assert.False(t, b.IsEmpty())
 	assert.Equal(t, b.Len(), 6)
-	assert.Equal(t, b.Drops(), 0)
-	assert.Equal(t, b.Total(), 6)
+	assert.Equal(t, int64(0), MetricsDropped.Get())
+	assert.Equal(t, int64(6), MetricsWritten.Get())
 }
 
 func TestDroppingMetrics(t *testing.T) {
 	b := NewBuffer(10)
+	MetricsDropped.Set(0)
+	MetricsWritten.Set(0)
 
 	// Add up to the size of the buffer
 	b.Add(metricList...)
 	b.Add(metricList...)
 	assert.False(t, b.IsEmpty())
 	assert.Equal(t, b.Len(), 10)
-	assert.Equal(t, b.Drops(), 0)
-	assert.Equal(t, b.Total(), 10)
+	assert.Equal(t, int64(0), MetricsDropped.Get())
+	assert.Equal(t, int64(10), MetricsWritten.Get())
 
 	// Add 5 more and verify they were dropped
 	b.Add(metricList...)
 	assert.False(t, b.IsEmpty())
 	assert.Equal(t, b.Len(), 10)
-	assert.Equal(t, b.Drops(), 5)
-	assert.Equal(t, b.Total(), 15)
+	assert.Equal(t, int64(5), MetricsDropped.Get())
+	assert.Equal(t, int64(15), MetricsWritten.Get())
 }
 
 func TestGettingBatches(t *testing.T) {
 	b := NewBuffer(20)
+	MetricsDropped.Set(0)
+	MetricsWritten.Set(0)
 
 	// Verify that the buffer returned is smaller than requested when there are
 	// not as many items as requested.
@@ -78,8 +84,8 @@ func TestGettingBatches(t *testing.T) {
 	// Verify that the buffer is now empty
 	assert.True(t, b.IsEmpty())
 	assert.Zero(t, b.Len())
-	assert.Zero(t, b.Drops())
-	assert.Equal(t, b.Total(), 5)
+	assert.Zero(t, MetricsDropped.Get())
+	assert.Equal(t, int64(5), MetricsWritten.Get())
 
 	// Verify that the buffer returned is not more than the size requested
 	b.Add(metricList...)
@@ -89,6 +95,6 @@ func TestGettingBatches(t *testing.T) {
 	// Verify that buffer is not empty
 	assert.False(t, b.IsEmpty())
 	assert.Equal(t, b.Len(), 2)
-	assert.Equal(t, b.Drops(), 0)
-	assert.Equal(t, b.Total(), 10)
+	assert.Equal(t, int64(0), MetricsDropped.Get())
+	assert.Equal(t, int64(10), MetricsWritten.Get())
 }
@@ -821,10 +821,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
 		return err
 	}
 
-	rp := &models.RunningInput{
-		Input:  input,
-		Config: pluginConfig,
-	}
+	rp := models.NewRunningInput(input, pluginConfig)
 	c.Inputs = append(c.Inputs, rp)
 	return nil
 }
@@ -35,8 +35,9 @@ type Duration struct {
 // UnmarshalTOML parses the duration from the TOML config file
 func (d *Duration) UnmarshalTOML(b []byte) error {
 	var err error
+	b = bytes.Trim(b, `'`)
 
-	// see if we can straight convert it
+	// see if we can directly convert it
 	d.Duration, err = time.ParseDuration(string(b))
 	if err == nil {
 		return nil
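This is the fix for #2023: surrounding single quotes are trimmed before parsing, so TOML literal strings like `'1s'` now work, and (per the test below) a bare integer still falls back to a count of seconds. A self-contained sketch of that parsing logic — the integer fallback is reconstructed from the test, not shown in this hunk:

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"time"
)

// parseDuration mirrors the UnmarshalTOML logic above: trim surrounding
// single quotes, try time.ParseDuration, then fall back to treating a bare
// integer as a number of seconds.
func parseDuration(b []byte) (time.Duration, error) {
	b = bytes.Trim(b, `'`)
	if d, err := time.ParseDuration(string(b)); err == nil {
		return d, nil
	}
	n, err := strconv.Atoi(string(b))
	if err != nil {
		return 0, err
	}
	return time.Duration(n) * time.Second, nil
}

func main() {
	for _, in := range []string{"1s", "'1s'", "10"} {
		d, _ := parseDuration([]byte(in))
		fmt.Printf("%-6q -> %v\n", in, d) // 1s, 1s, 10s
	}
}
```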
@@ -142,6 +142,10 @@ func TestDuration(t *testing.T) {
 	d.UnmarshalTOML([]byte(`1s`))
 	assert.Equal(t, time.Second, d.Duration)
 
+	d = Duration{}
+	d.UnmarshalTOML([]byte(`'1s'`))
+	assert.Equal(t, time.Second, d.Duration)
+
 	d = Duration{}
 	d.UnmarshalTOML([]byte(`10`))
 	assert.Equal(t, 10*time.Second, d.Duration)
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 )
 
 // makemetric is used by both RunningAggregator & RunningInput
@@ -31,7 +32,6 @@ func makemetric(
 	daemonTags map[string]string,
 	filter Filter,
 	applyFilter bool,
-	debug bool,
 	mType telegraf.ValueType,
 	t time.Time,
 ) telegraf.Metric {
@@ -122,11 +122,9 @@ func makemetric(
 		case float64:
 			// NaNs are invalid values in influxdb, skip measurement
 			if math.IsNaN(val) || math.IsInf(val, 0) {
-				if debug {
-					log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
-						"field, skipping",
-						measurement, k)
-				}
+				log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
+					"field, skipping",
+					measurement, k)
 				delete(fields, k)
 				continue
 			}
@@ -135,16 +133,7 @@ func makemetric(
 		}
 	}
 
-	var m telegraf.Metric
-	var err error
-	switch mType {
-	case telegraf.Counter:
-		m, err = telegraf.NewCounterMetric(measurement, tags, fields, t)
-	case telegraf.Gauge:
-		m, err = telegraf.NewGaugeMetric(measurement, tags, fields, t)
-	default:
-		m, err = telegraf.NewMetric(measurement, tags, fields, t)
-	}
+	m, err := metric.New(measurement, tags, fields, t, mType)
 	if err != nil {
 		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
 		return nil
@@ -4,6 +4,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 )
 
 type RunningAggregator struct {
@@ -65,7 +66,6 @@ func (r *RunningAggregator) MakeMetric(
 		nil,
 		r.Config.Filter,
 		false,
-		false,
 		mType,
 		t,
 	)
@@ -90,7 +90,7 @@ func (r *RunningAggregator) Add(in telegraf.Metric) bool {
 			return false
 		}
 
-		in, _ = telegraf.NewMetric(name, tags, fields, t)
+		in, _ = metric.New(name, tags, fields, t)
 	}
 
 	r.metrics <- in
@@ -184,8 +184,8 @@ func TestMakeMetricA(t *testing.T) {
 	)
 	assert.Equal(
 		t,
+		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
 		m.String(),
-		fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
 	)
 	assert.Equal(
 		t,
@@ -202,8 +202,8 @@ func TestMakeMetricA(t *testing.T) {
 	)
 	assert.Equal(
 		t,
+		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
 		m.String(),
-		fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
 	)
 	assert.Equal(
 		t,
@@ -220,8 +220,8 @@ func TestMakeMetricA(t *testing.T) {
 	)
 	assert.Equal(
 		t,
+		fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
 		m.String(),
-		fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
 	)
 	assert.Equal(
 		t,
@@ -5,15 +5,34 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/selfstat"
 )
 
+var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})
+
 type RunningInput struct {
 	Input  telegraf.Input
 	Config *InputConfig
 
 	trace       bool
-	debug       bool
 	defaultTags map[string]string
+
+	MetricsGathered selfstat.Stat
+}
+
+func NewRunningInput(
+	input telegraf.Input,
+	config *InputConfig,
+) *RunningInput {
+	return &RunningInput{
+		Input:  input,
+		Config: config,
+		MetricsGathered: selfstat.Register(
+			"gather",
+			"metrics_gathered",
+			map[string]string{"input": config.Name},
+		),
+	}
 }
 
 // InputConfig containing a name, interval, and filter
@@ -51,7 +70,6 @@ func (r *RunningInput) MakeMetric(
 		r.defaultTags,
 		r.Config.Filter,
 		true,
-		r.debug,
 		mType,
 		t,
 	)
@@ -60,17 +78,11 @@ func (r *RunningInput) MakeMetric(
 		fmt.Println("> " + m.String())
 	}
 
+	r.MetricsGathered.Incr(1)
+	GlobalMetricsGathered.Incr(1)
 	return m
 }
 
-func (r *RunningInput) Debug() bool {
-	return r.debug
-}
-
-func (r *RunningInput) SetDebug(debug bool) {
-	r.debug = debug
-}
-
 func (r *RunningInput) Trace() bool {
 	return r.trace
 }
@@ -13,11 +13,9 @@ import (

func TestMakeMetricNoFields(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})

m := ri.MakeMetric(
"RITest",
@@ -32,11 +30,9 @@ func TestMakeMetricNoFields(t *testing.T) {
// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})

m := ri.MakeMetric(
"RITest",
@@ -50,7 +46,7 @@ func TestMakeMetricNilFields(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
}
@@ -58,13 +54,10 @@ func TestMakeMetricNilFields(t *testing.T) {
// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})

ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.Equal(t, "inputs.TestRunningInput", ri.Name())
@@ -78,8 +71,8 @@ func TestMakeMetric(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
@@ -96,8 +89,8 @@ func TestMakeMetric(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
@@ -114,8 +107,8 @@ func TestMakeMetric(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
@@ -126,16 +119,13 @@ func TestMakeMetric(t *testing.T) {

func TestMakeMetricWithPluginTags(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
})

ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())

@@ -148,24 +138,21 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
)
}

func TestMakeMetricFilteredOut(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
Filter: Filter{NamePass: []string{"foobar"}},
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
Filter: Filter{NamePass: []string{"foobar"}},
})

ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.NoError(t, ri.Config.Filter.Compile())
@@ -182,16 +169,13 @@ func TestMakeMetricFilteredOut(t *testing.T) {

func TestMakeMetricWithDaemonTags(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetDefaultTags(map[string]string{
"foo": "bar",
})
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())

ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())

@@ -204,8 +188,8 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
)
}

@@ -214,13 +198,10 @@ func TestMakeMetricInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})

ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())

@@ -237,20 +218,17 @@ func TestMakeMetricInfFields(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
}

func TestMakeMetricAllFieldTypes(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})

ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())

@@ -275,21 +253,28 @@ func TestMakeMetricAllFieldTypes(t *testing.T) {
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest a=10i,b=10i,c=10i,d=10i,e=10i,f=10i,g=10i,h=10i,i=10i,j=10,k=9223372036854775807i,l=\"foobar\",m=true %d", now.UnixNano()),
m.String(),
)
assert.Contains(t, m.String(), "a=10i")
assert.Contains(t, m.String(), "b=10i")
assert.Contains(t, m.String(), "c=10i")
assert.Contains(t, m.String(), "d=10i")
assert.Contains(t, m.String(), "e=10i")
assert.Contains(t, m.String(), "f=10i")
assert.Contains(t, m.String(), "g=10i")
assert.Contains(t, m.String(), "h=10i")
assert.Contains(t, m.String(), "i=10i")
assert.Contains(t, m.String(), "j=10")
assert.NotContains(t, m.String(), "j=10i")
assert.Contains(t, m.String(), "k=9223372036854775807i")
assert.Contains(t, m.String(), "l=\"foobar\"")
assert.Contains(t, m.String(), "m=true")
}

func TestMakeMetricNameOverride(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
NameOverride: "foobar",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
NameOverride: "foobar",
})

m := ri.MakeMetric(
"RITest",
@@ -300,19 +285,17 @@ func TestMakeMetricNameOverride(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("foobar value=101i %d", now.UnixNano()),
)
}

func TestMakeMetricNamePrefix(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
MeasurementPrefix: "foobar_",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
MeasurementPrefix: "foobar_",
})

m := ri.MakeMetric(
"RITest",
@@ -323,19 +306,17 @@ func TestMakeMetricNamePrefix(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("foobar_RITest value=101i %d", now.UnixNano()),
)
}

func TestMakeMetricNameSuffix(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
MeasurementSuffix: "_foobar",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
MeasurementSuffix: "_foobar",
})

m := ri.MakeMetric(
"RITest",
@@ -346,7 +327,13 @@ func TestMakeMetricNameSuffix(t *testing.T) {
)
assert.Equal(
t,
fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest_foobar value=101i %d", now.UnixNano()),
)
}

type testInput struct{}

func (t *testInput) Description() string { return "" }
func (t *testInput) SampleConfig() string { return "" }
func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil }

@@ -6,6 +6,8 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/buffer"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/selfstat"
)

const (
@@ -21,10 +23,15 @@ type RunningOutput struct {
Name string
Output telegraf.Output
Config *OutputConfig
Quiet bool
MetricBufferLimit int
MetricBatchSize int

MetricsFiltered selfstat.Stat
MetricsWritten selfstat.Stat
BufferSize selfstat.Stat
BufferLimit selfstat.Stat
WriteTime selfstat.Stat

metrics *buffer.Buffer
failMetrics *buffer.Buffer
}
@@ -50,29 +57,56 @@ func NewRunningOutput(
Config: conf,
MetricBufferLimit: bufferLimit,
MetricBatchSize: batchSize,
MetricsWritten: selfstat.Register(
"write",
"metrics_written",
map[string]string{"output": name},
),
MetricsFiltered: selfstat.Register(
"write",
"metrics_filtered",
map[string]string{"output": name},
),
BufferSize: selfstat.Register(
"write",
"buffer_size",
map[string]string{"output": name},
),
BufferLimit: selfstat.Register(
"write",
"buffer_limit",
map[string]string{"output": name},
),
WriteTime: selfstat.RegisterTiming(
"write",
"write_time_ns",
map[string]string{"output": name},
),
}
ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
return ro
}

// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
// Filter any tagexclude/taginclude parameters before adding metric
if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since
// metrics are immutable once created.
name := metric.Name()
tags := metric.Tags()
fields := metric.Fields()
t := metric.Time()
name := m.Name()
tags := m.Tags()
fields := m.Fields()
t := m.Time()
if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
ro.MetricsFiltered.Incr(1)
return
}
// error is not possible if creating from another metric, so ignore.
metric, _ = telegraf.NewMetric(name, tags, fields, t)
m, _ = metric.New(name, tags, fields, t)
}

ro.metrics.Add(metric)
ro.metrics.Add(m)
if ro.metrics.Len() == ro.MetricBatchSize {
batch := ro.metrics.Batch(ro.MetricBatchSize)
err := ro.write(batch)
@@ -84,28 +118,21 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
if !ro.Quiet {
log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
"Total gathered metrics: %d. Total dropped metrics: %d.",
ro.Name,
ro.failMetrics.Len()+ro.metrics.Len(),
ro.MetricBufferLimit,
ro.metrics.Total(),
ro.metrics.Drops()+ro.failMetrics.Drops())
}

nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
ro.BufferSize.Incr(int64(nFails + nMetrics))
var err error
if !ro.failMetrics.IsEmpty() {
bufLen := ro.failMetrics.Len()
// how many batches of failed writes we need to write.
nBatches := bufLen/ro.MetricBatchSize + 1
nBatches := nFails/ro.MetricBatchSize + 1
batchSize := ro.MetricBatchSize

for i := 0; i < nBatches; i++ {
// If it's the last batch, only grab the metrics that have not had
// a write attempt already (this is primarily to preserve order).
if i == nBatches-1 {
batchSize = bufLen % ro.MetricBatchSize
batchSize = nFails % ro.MetricBatchSize
}
batch := ro.failMetrics.Batch(batchSize)
// If we've already failed previous writes, don't bother trying to
@@ -126,6 +153,7 @@ func (ro *RunningOutput) Write() error {
if err == nil {
err = ro.write(batch)
}

if err != nil {
ro.failMetrics.Add(batch...)
return err
@@ -134,17 +162,19 @@ func (ro *RunningOutput) Write() error {
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
if metrics == nil || len(metrics) == 0 {
nMetrics := len(metrics)
if nMetrics == 0 {
return nil
}
start := time.Now()
err := ro.Output.Write(metrics)
elapsed := time.Since(start)
if err == nil {
if !ro.Quiet {
log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, len(metrics), elapsed)
}
log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, nMetrics, elapsed)
ro.MetricsWritten.Incr(int64(nMetrics))
ro.BufferSize.Incr(-int64(nMetrics))
ro.WriteTime.Incr(elapsed.Nanoseconds())
}
return err
}
@@ -36,10 +36,9 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {

m := &perfOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.Quiet = true

for n := 0; n < b.N; n++ {
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
ro.Write()
}
}
@@ -52,10 +51,9 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {

m := &perfOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.Quiet = true

for n := 0; n < b.N; n++ {
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
if n%100 == 0 {
ro.Write()
}
@@ -71,10 +69,9 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
m := &perfOutput{}
m.failWrite = true
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.Quiet = true

for n := 0; n < b.N; n++ {
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
}
}

@@ -140,7 +137,7 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)

ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)

err := ro.Write()
@@ -161,7 +158,7 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)

ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)

err := ro.Write()
@@ -182,7 +179,7 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)

ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)

err := ro.Write()
@@ -203,7 +200,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)

ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)

err := ro.Write()

@@ -4,6 +4,7 @@ import (
"io"
"log"
"os"
"time"

"github.com/influxdata/wlog"
)
@@ -19,8 +20,8 @@ type telegrafLog struct {
writer io.Writer
}

func (t *telegrafLog) Write(p []byte) (n int, err error) {
return t.writer.Write(p)
func (t *telegrafLog) Write(b []byte) (n int, err error) {
return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
}
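The rewritten Write method above is what implements the v1.2 release note "Use RFC3339 timestamps in log output": every record is prefixed with a UTC timestamp before it reaches the underlying writer. A minimal standalone sketch of the same pattern, using only the standard library:

    package main

    import (
        "io"
        "log"
        "os"
        "time"
    )

    // tsWriter mirrors telegrafLog.Write: prefix each record with an
    // RFC3339 UTC timestamp before handing it to the wrapped writer.
    type tsWriter struct{ w io.Writer }

    func (t tsWriter) Write(b []byte) (int, error) {
        return t.w.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
    }

    func main() {
        log.SetFlags(0) // as in SetupLogging: let the writer own the timestamp
        log.SetOutput(tsWriter{w: os.Stderr})
        log.Printf("I! example") // e.g. "2016-12-06T01:02:03Z I! example"
    }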

// SetupLogging configures the logging output.
@@ -30,6 +31,7 @@ func (t *telegrafLog) Write(p []byte) (n int, err error) {
// interpreted as stderr. If there is an error opening the file the
// logger will fallback to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
log.SetFlags(0)
if debug {
wlog.SetLevel(wlog.DEBUG)
}

62 logger/logger_test.go Normal file
@@ -0,0 +1,62 @@
package logger

import (
"bytes"
"io/ioutil"
"log"
"os"
"testing"

"github.com/stretchr/testify/assert"
)

func TestWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()

SetupLogging(false, false, tmpfile.Name())
log.Printf("I! TEST")
log.Printf("D! TEST") // <- should be ignored

f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func TestDebugWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()

SetupLogging(true, false, tmpfile.Name())
log.Printf("D! TEST")

f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z D! TEST\n"))
}

func TestErrorWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()

SetupLogging(false, true, tmpfile.Name())
log.Printf("E! TEST")
log.Printf("I! TEST") // <- should be ignored

f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}

func BenchmarkTelegrafLogWrite(b *testing.B) {
var msg = []byte("test")
var buf bytes.Buffer
w := newTelegrafWriter(&buf)
for i := 0; i < b.N; i++ {
buf.Reset()
w.Write(msg)
}
}
179 metric.go
@@ -3,8 +3,8 @@ package telegraf
import (
"time"

// TODO remove
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/models"
)

// ValueType is an enumeration of metric types that represent a simple value.
@@ -19,159 +19,44 @@ const (
)

type Metric interface {
// Name returns the measurement name of the metric
Serialize() []byte
String() string // convenience function for string(Serialize())
Copy() Metric
// Split will attempt to return multiple metrics with the same timestamp
// whose string representations are no longer than maxSize.
// Metrics with a single field may exceed the requested size.
Split(maxSize int) []Metric

// Tag functions
HasTag(key string) bool
AddTag(key, value string)
RemoveTag(key string)

// Field functions
HasField(key string) bool
AddField(key string, value interface{})
RemoveField(key string) error

// Name functions
SetName(name string)
SetPrefix(prefix string)
SetSuffix(suffix string)

// Getting data structure functions
Name() string

// Name returns the tags associated with the metric
Tags() map[string]string

// Time return the timestamp for the metric
Fields() map[string]interface{}
Time() time.Time

// Type returns the metric type. Can be either telegraf.Gauge or telegraf.Counter
Type() ValueType

// UnixNano returns the unix nano time of the metric
UnixNano() int64

// HashID returns a non-cryptographic hash of the metric (name + tags)
// NOTE: do not persist & depend on this value to disk.
Type() ValueType
Len() int // returns the length of the serialized metric, including newline
HashID() uint64

// Fields returns the fields for the metric
Fields() map[string]interface{}

// String returns a line-protocol string of the metric
String() string

// PrecisionString returns a line-protocol string of the metric, at precision
PrecisionString(precison string) string
// aggregator things:
SetAggregate(bool)
IsAggregate() bool

// Point returns a influxdb client.Point object
// TODO remove this function
Point() *client.Point

// SetAggregate sets the metric's aggregate status
// This is so that aggregate metrics don't get re-sent to aggregator plugins
SetAggregate(bool)
// IsAggregate returns true if the metric is an aggregate
IsAggregate() bool
}

// metric is a wrapper of the influxdb client.Point struct
type metric struct {
pt models.Point

mType ValueType

isaggregate bool
}

func NewMetricFromPoint(pt models.Point) Metric {
return &metric{
pt: pt,
mType: Untyped,
}
}

// NewMetric returns an untyped metric.
func NewMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Untyped,
}, nil
}

// NewGaugeMetric returns a gauge metric.
// Gauge metrics should be used when the metric can arbitrarily go up and
// down. ie, temperature, memory usage, cpu usage, etc.
func NewGaugeMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Gauge,
}, nil
}

// NewCounterMetric returns a Counter metric.
// Counter metrics should be used when the metric being created is an
// always-increasing counter. ie, net bytes received, requests served, errors, etc.
func NewCounterMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Counter,
}, nil
}

func (m *metric) Name() string {
return m.pt.Name()
}

func (m *metric) Tags() map[string]string {
return m.pt.Tags().Map()
}

func (m *metric) Time() time.Time {
return m.pt.Time()
}

func (m *metric) Type() ValueType {
return m.mType
}

func (m *metric) HashID() uint64 {
return m.pt.HashID()
}

func (m *metric) UnixNano() int64 {
return m.pt.UnixNano()
}

func (m *metric) Fields() map[string]interface{} {
return m.pt.Fields()
}

func (m *metric) String() string {
return m.pt.String()
}

func (m *metric) PrecisionString(precison string) string {
return m.pt.PrecisionString(precison)
}

func (m *metric) Point() *client.Point {
return client.NewPointFrom(m.pt)
}

func (m *metric) IsAggregate() bool {
return m.isaggregate
}

func (m *metric) SetAggregate(b bool) {
m.isaggregate = b
}
49 metric/escape.go Normal file
@@ -0,0 +1,49 @@
package metric

import (
"strings"
)

var (
// escaper is for escaping:
// - tag keys
// - tag values
// - field keys
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

// nameEscaper is for escaping measurement names only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
nameEscaper = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)

// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
)

func escape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return escaper.Replace(s)
case "name":
return nameEscaper.Replace(s)
case "fieldval":
return stringFieldEscaper.Replace(s)
}
return s
}

func unescape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return unEscaper.Replace(s)
case "name":
return nameUnEscaper.Replace(s)
case "fieldval":
return stringFieldUnEscaper.Replace(s)
}
return s
}
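escape and unescape are unexported, so as an illustration, here is the same replacement rule applied directly with strings.NewReplacer to show what the escaper produces for input containing line-protocol special characters:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Same replacer as the package-level escaper above.
        escaper := strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)

        fmt.Println(escaper.Replace(`us-east-1,az=a`)) // us-east-1\,az\=a
        fmt.Println(escaper.Replace(`hello world`))    // hello\ world
    }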
38 metric/inline_strconv_parse.go Normal file
@@ -0,0 +1,38 @@
package metric

import (
"reflect"
"strconv"
"unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
s := unsafeBytesToString(b)
return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
dst := reflect.StringHeader{
Data: src.Data,
Len: src.Len,
}
s := *(*string)(unsafe.Pointer(&dst))
return s
}
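The point of the unsafe conversion is to skip the copy that string(b) would allocate on every parse. The aliasing is only sound while the byte slice is neither mutated nor reclaimed, which is why the comment restricts it to short-lived calls. A sketch of the hazard, using the equivalent unsafe.String helper from Go 1.20+ (an assumption; this diff predates that helper and builds the headers via reflect):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        b := []byte("123")
        // Alias the slice's backing array as a string, no copy made.
        s := unsafe.String(&b[0], len(b))

        fmt.Println(s) // "123"
        b[0] = '9'
        fmt.Println(s) // "923" — the alias sees the mutation, hence short-lived use only
    }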
103 metric/inline_strconv_parse_test.go Normal file
@@ -0,0 +1,103 @@
package metric

import (
"strconv"
"testing"
"testing/quick"
)

func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, base int, bitSize int) bool {
exp, expErr := strconv.ParseInt(string(b), base, bitSize)
got, gotErr := parseIntBytes(b, base, bitSize)

return exp == got && checkErrs(expErr, gotErr)
}

cfg := &quick.Config{
MaxCount: 10000,
}

if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}

func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n int64) bool {
buf = strconv.AppendInt(buf[:0], n, 10)

exp, expErr := strconv.ParseInt(string(buf), 10, 64)
got, gotErr := parseIntBytes(buf, 10, 64)

return exp == got && checkErrs(expErr, gotErr)
}

cfg := &quick.Config{
MaxCount: 10000,
}

if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}

func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, bitSize int) bool {
exp, expErr := strconv.ParseFloat(string(b), bitSize)
got, gotErr := parseFloatBytes(b, bitSize)

return exp == got && checkErrs(expErr, gotErr)
}

cfg := &quick.Config{
MaxCount: 10000,
}

if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}

func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n float64) bool {
buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)

exp, expErr := strconv.ParseFloat(string(buf), 64)
got, gotErr := parseFloatBytes(buf, 64)

return exp == got && checkErrs(expErr, gotErr)
}

cfg := &quick.Config{
MaxCount: 10000,
}

if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}

func TestParseBoolBytesEquivalence(t *testing.T) {
var buf []byte
for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
buf = append(buf[:0], s...)

exp, expErr := strconv.ParseBool(s)
got, gotErr := parseBoolBytes(buf)

if got != exp || !checkErrs(expErr, gotErr) {
t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
}
}
}

func checkErrs(a, b error) bool {
if (a == nil) != (b == nil) {
return false
}

return a == nil || a.Error() == b.Error()
}
546 metric/metric.go Normal file
@@ -0,0 +1,546 @@
package metric

import (
"bytes"
"fmt"
"hash/fnv"
"sort"
"strconv"
"time"

"github.com/influxdata/telegraf"

// TODO remove
"github.com/influxdata/influxdb/client/v2"
)

const MaxInt = int(^uint(0) >> 1)

func New(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
}

var thisType telegraf.ValueType
if len(mType) > 0 {
thisType = mType[0]
} else {
thisType = telegraf.Untyped
}

m := &metric{
name: []byte(escape(name, "name")),
t: []byte(fmt.Sprint(t.UnixNano())),
nsec: t.UnixNano(),
mType: thisType,
}

// pre-allocate exact size of the tags slice
taglen := 0
for k, v := range tags {
// TODO check that length of tag key & value are > 0
taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
}
m.tags = make([]byte, taglen)

i := 0
for k, v := range tags {
m.tags[i] = ','
i++
i += copy(m.tags[i:], escape(k, "tagkey"))
m.tags[i] = '='
i++
i += copy(m.tags[i:], escape(v, "tagval"))
}

// pre-allocate capacity of the fields slice
fieldlen := 0
for k, _ := range fields {
// 10 bytes is completely arbitrary, but will at least prevent some
// amount of allocations. There's a small possibility this will create
// slightly more allocations for a metric that has many short fields.
fieldlen += len(k) + 10
}
m.fields = make([]byte, 0, fieldlen)

i = 0
for k, v := range fields {
if i != 0 {
m.fields = append(m.fields, ',')
}
m.fields = appendField(m.fields, k, v)
i++
}

return m, nil
}
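A usage sketch of the new constructor (import path as defined by this file; tag and field ordering in the output can vary because Go maps iterate in random order):

    package main

    import (
        "fmt"
        "time"

        "github.com/influxdata/telegraf/metric"
    )

    func main() {
        m, err := metric.New(
            "cpu",
            map[string]string{"host": "localhost"},
            map[string]interface{}{"usage_idle": 99.0},
            time.Unix(0, 1480614053000000000),
        )
        if err != nil {
            panic(err)
        }
        // String() ends with a newline, per the new Metric contract.
        fmt.Print(m.String()) // cpu,host=localhost usage_idle=99 1480614053000000000
    }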

// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}

// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
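The parity rule above (a byte is escaped when preceded by an odd number of backslashes) is easiest to see in isolation. This sketch reimplements the unexported helper verbatim, since it is not part of the public API:

    package main

    import (
        "bytes"
        "fmt"
    )

    // Same logic as indexUnescapedByte above: a byte preceded by an odd
    // number of backslashes is escaped and must be skipped.
    func indexUnescaped(buf []byte, b byte) int {
        var keyi int
        for {
            i := bytes.IndexByte(buf[keyi:], b)
            if i == -1 {
                return -1
            } else if i == 0 {
                break
            }
            keyi += i
            n := 0
            for j := keyi - 1; j >= 0 && buf[j] == '\\'; j-- {
                n++
            }
            if n%2 == 0 {
                break
            }
            keyi++
        }
        return keyi
    }

    func main() {
        fmt.Println(indexUnescaped([]byte(`a=1,b=2`), ','))    // 3
        fmt.Println(indexUnescaped([]byte(`a\,b=1,c=2`), ',')) // 6: the first comma is escaped
    }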

type metric struct {
name []byte
tags []byte
fields []byte
t []byte

mType telegraf.ValueType
aggregate bool

// cached values for reuse in "get" functions
hashID uint64
nsec int64
}

func (m *metric) Point() *client.Point {
c, _ := client.NewPoint(m.Name(), m.Tags(), m.Fields(), m.Time())
return c
}

func (m *metric) String() string {
return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
}

func (m *metric) SetAggregate(b bool) {
m.aggregate = b
}

func (m *metric) IsAggregate() bool {
return m.aggregate
}

func (m *metric) Type() telegraf.ValueType {
return m.mType
}

func (m *metric) Len() int {
// 3 is for 2 spaces surrounding the fields array + newline at the end.
return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}

func (m *metric) Serialize() []byte {
tmp := make([]byte, m.Len())
i := 0
i += copy(tmp[i:], m.name)
i += copy(tmp[i:], m.tags)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.fields)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.t)
tmp[i] = '\n'
return tmp
}

func (m *metric) Split(maxSize int) []telegraf.Metric {
if m.Len() < maxSize {
return []telegraf.Metric{m}
}
var out []telegraf.Metric

// constant number of bytes for each metric (in addition to field bytes)
constant := len(m.name) + len(m.tags) + len(m.t) + 3
// currently selected fields
fields := make([]byte, 0, maxSize)

i := 0
for {
if i >= len(m.fields) {
// hit the end of the field byte slice
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}
break
}

// find the end of the next field
j := indexUnescapedByte(m.fields[i:], ',')
if j == -1 {
j = len(m.fields)
} else {
j += i
}

// if true, then we need to create a metric _not_ including the currently
// selected field
if len(m.fields[i:j])+len(fields)+constant > maxSize {
// if false, then we'll create a metric including the currently
// selected field anyways. This means that the given maxSize is too
// small for a single field to fit.
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}

fields = make([]byte, 0, maxSize)
}
if len(fields) > 0 {
fields = append(fields, ',')
}
fields = append(fields, m.fields[i:j]...)

i = j + 1
}
return out
}
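A usage sketch of Split through the public constructor. With maxSize 40, the fixed overhead here (name, tags, timestamp, separators) already reaches the cap, so every field is emitted as its own metric, which also illustrates the interface note that single-field metrics may exceed the requested size:

    package main

    import (
        "fmt"
        "time"

        "github.com/influxdata/telegraf/metric"
    )

    func main() {
        m, _ := metric.New(
            "cpu",
            map[string]string{"host": "localhost"},
            map[string]interface{}{"a": int64(101), "b": int64(10), "c": int64(42)},
            time.Unix(0, 1480614053000000000),
        )

        // Prints three single-field lines, e.g.
        // cpu,host=localhost a=101i 1480614053000000000
        for _, part := range m.Split(40) {
            fmt.Print(part.String())
        }
    }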

func (m *metric) Fields() map[string]interface{} {
fieldMap := map[string]interface{}{}
i := 0
for {
if i >= len(m.fields) {
break
}
// end index of field key
i1 := indexUnescapedByte(m.fields[i:], '=')
if i1 == -1 {
break
}
// start index of field value
i2 := i1 + 1

// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
i3 += i2 + 2 // increment index to the comma
} else {
i3 = indexUnescapedByte(m.fields[i:], ',')
if i3 == -1 {
i3 = len(m.fields[i:])
}
}

switch m.fields[i:][i2] {
case '"':
// string field
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
// number field
switch m.fields[i:][i3-1] {
case 'i':
// integer field
n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
default:
// float field
n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
}
case 'T', 't':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
case 'F', 'f':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
default:
// TODO handle unsupported field type
}

i += i3 + 1
}

return fieldMap
}

func (m *metric) Tags() map[string]string {
tagMap := map[string]string{}
if len(m.tags) == 0 {
return tagMap
}

i := 0
for {
// start index of tag key
i0 := indexUnescapedByte(m.tags[i:], ',') + 1
if i0 == 0 {
// didn't find a tag start
break
}
// end index of tag key
i1 := indexUnescapedByte(m.tags[i:], '=')
// start index of tag value
i2 := i1 + 1
// end index of tag value (starting from i2)
i3 := indexUnescapedByte(m.tags[i+i2:], ',')
if i3 == -1 {
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
break
}
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
// increment start index for the next tag
i += i2 + i3
}

return tagMap
}

func (m *metric) Name() string {
return unescape(string(m.name), "name")
}

func (m *metric) Time() time.Time {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return time.Unix(0, m.nsec)
}

func (m *metric) UnixNano() int64 {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return m.nsec
}

func (m *metric) SetName(name string) {
m.hashID = 0
m.name = []byte(nameEscaper.Replace(name))
}

func (m *metric) SetPrefix(prefix string) {
m.hashID = 0
m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}

func (m *metric) SetSuffix(suffix string) {
m.hashID = 0
m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}

func (m *metric) AddTag(key, value string) {
m.RemoveTag(key)
m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}

func (m *metric) HasTag(key string) bool {
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}

func (m *metric) RemoveTag(key string) {
m.hashID = 0

i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return
}

tmp := m.tags[0 : i-1]
j := indexUnescapedByte(m.tags[i:], ',')
if j != -1 {
tmp = append(tmp, m.tags[i+j:]...)
}
m.tags = tmp
return
}

func (m *metric) AddField(key string, value interface{}) {
m.fields = append(m.fields, ',')
m.fields = appendField(m.fields, key, value)
}

func (m *metric) HasField(key string) bool {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}

func (m *metric) RemoveField(key string) error {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return nil
}

var tmp []byte
if i != 0 {
tmp = m.fields[0 : i-1]
}
j := indexUnescapedByte(m.fields[i:], ',')
if j != -1 {
tmp = append(tmp, m.fields[i+j:]...)
}

if len(tmp) == 0 {
return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
}

m.fields = tmp
return nil
}

func (m *metric) Copy() telegraf.Metric {
return copyWith(m.name, m.tags, m.fields, m.t)
}

func copyWith(name, tags, fields, t []byte) telegraf.Metric {
out := metric{
name: make([]byte, len(name)),
tags: make([]byte, len(tags)),
fields: make([]byte, len(fields)),
t: make([]byte, len(t)),
}
copy(out.name, name)
copy(out.tags, tags)
copy(out.fields, fields)
copy(out.t, t)
return &out
}

func (m *metric) HashID() uint64 {
if m.hashID == 0 {
h := fnv.New64a()
h.Write(m.name)

tags := m.Tags()
tmp := make([]string, len(tags))
i := 0
for k, v := range tags {
tmp[i] = k + v
i++
}
sort.Strings(tmp)

for _, s := range tmp {
h.Write([]byte(s))
}

m.hashID = h.Sum64()
}
return m.hashID
}
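HashID identifies a series (measurement name plus sorted tag set) rather than a point, which is why fields and the timestamp are deliberately excluded and why the tag pairs are sorted before hashing. A compact sketch of the same computation:

    package main

    import (
        "fmt"
        "hash/fnv"
        "sort"
    )

    func main() {
        name := "cpu"
        tags := map[string]string{"host": "localhost", "dc": "us-east-1"}

        // Sort concatenated key+value pairs so map iteration order
        // cannot change the hash, exactly as HashID above does.
        pairs := make([]string, 0, len(tags))
        for k, v := range tags {
            pairs = append(pairs, k+v)
        }
        sort.Strings(pairs)

        h := fnv.New64a()
        h.Write([]byte(name))
        for _, p := range pairs {
            h.Write([]byte(p))
        }
        fmt.Println(h.Sum64()) // stable across runs for the same series
    }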

func appendField(b []byte, k string, v interface{}) []byte {
if v == nil {
return b
}
b = append(b, []byte(escape(k, "tagkey")+"=")...)

// check popular types first
switch v := v.(type) {
case float64:
b = strconv.AppendFloat(b, v, 'f', -1, 64)
case int64:
b = strconv.AppendInt(b, v, 10)
b = append(b, 'i')
case string:
b = append(b, '"')
b = append(b, []byte(escape(v, "fieldval"))...)
b = append(b, '"')
case bool:
b = strconv.AppendBool(b, v)
case int32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint64:
// Cap uints above the maximum int value
var intv int64
if v <= uint64(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case uint32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint:
// Cap uints above the maximum int value
var intv int64
if v <= uint(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case float32:
b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
case []byte:
b = append(b, v...)
default:
// Can't determine the type, so convert to string
b = append(b, '"')
b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
b = append(b, '"')
}

return b
}
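The encoding rules in appendField are observable through the public constructor. This sketch (hypothetical field names; output order varies with map iteration) shows the integer 'i' suffix, plain float formatting, and the capping of uint64 values at MaxInt:

    package main

    import (
        "fmt"
        "math"
        "time"

        "github.com/influxdata/telegraf/metric"
    )

    func main() {
        m, _ := metric.New(
            "types",
            map[string]string{},
            map[string]interface{}{
                "i":   int64(42),              // -> i=42i
                "f":   float64(2.5),           // -> f=2.5
                "cap": uint64(math.MaxUint64), // -> capped to MaxInt, suffixed with i
            },
            time.Unix(0, 0),
        )
        fmt.Print(m.String())
    }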
148 metric/metric_benchmark_test.go Normal file
@@ -0,0 +1,148 @@
package metric

import (
"fmt"
"testing"
"time"

"github.com/influxdata/telegraf"
)

// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
s string
I interface{}
tags map[string]string
fields map[string]interface{}
)

func BenchmarkNewMetric(b *testing.B) {
var mt telegraf.Metric
for n := 0; n < b.N; n++ {
mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
}
s = string(mt.String())
}

func BenchmarkAddTag(b *testing.B) {
var mt telegraf.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101"),
t: []byte("1480614053000000000"),
}
for n := 0; n < b.N; n++ {
mt.AddTag("foo", "bar")
}
s = string(mt.String())
}

func BenchmarkSplit(b *testing.B) {
var mt telegraf.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
t: []byte("1480614053000000000"),
}
var metrics []telegraf.Metric
for n := 0; n < b.N; n++ {
metrics = mt.Split(60)
}
s = string(metrics[0].String())
}

func BenchmarkTags(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
tags = mt.Tags()
}
s = fmt.Sprint(tags)
}

func BenchmarkFields(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
fields = mt.Fields()
}
s = fmt.Sprint(fields)
}

func BenchmarkString(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var S string
for n := 0; n < b.N; n++ {
S = mt.String()
}
s = S
}

func BenchmarkSerialize(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var B []byte
for n := 0; n < b.N; n++ {
B = mt.Serialize()
}
s = string(B)
}
646 metric/metric_test.go Normal file
@@ -0,0 +1,646 @@
package metric

import (
"fmt"
"math"
"regexp"
"testing"
"time"

"github.com/influxdata/telegraf"

"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
now := time.Now()

tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

assert.Equal(t, telegraf.Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewErrors(t *testing.T) {
// creating a metric with an empty name produces an error:
m, err := New(
"",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)

// creating a metric with empty fields produces an error:
m, err = New(
"foobar",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)
}

func TestNewMetric_Tags(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

assert.True(t, m.HasTag("host"))
assert.True(t, m.HasTag("datacenter"))

m.AddTag("newtag", "foo")
assert.True(t, m.HasTag("newtag"))

m.RemoveTag("host")
assert.False(t, m.HasTag("host"))
assert.True(t, m.HasTag("newtag"))
assert.True(t, m.HasTag("datacenter"))

m.RemoveTag("datacenter")
assert.False(t, m.HasTag("datacenter"))
assert.True(t, m.HasTag("newtag"))
assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())

m.RemoveTag("newtag")
assert.False(t, m.HasTag("newtag"))
assert.Equal(t, map[string]string{}, m.Tags())

assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
}

func TestSerialize(t *testing.T) {
now := time.Now()
tags := map[string]string{
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

assert.Equal(t,
[]byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
m.Serialize())

m.RemoveTag("datacenter")
assert.Equal(t,
[]byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
m.Serialize())
}

func TestHashID(t *testing.T) {
m, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
hash := m.HashID()

// adding a field doesn't change the hash:
m.AddField("foo", int64(100))
assert.Equal(t, hash, m.HashID())

// removing a non-existent tag doesn't change the hash:
m.RemoveTag("no-op")
assert.Equal(t, hash, m.HashID())

// adding a tag does change it:
m.AddTag("foo", "bar")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()

// removing a tag also changes it:
m.RemoveTag("mytag")
assert.NotEqual(t, hash, m.HashID())
}

func TestHashID_Consistency(t *testing.T) {
m, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
hash := m.HashID()

for i := 0; i < 1000; i++ {
m2, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Equal(t, hash, m2.HashID())
}
}

func TestNewMetric_NameModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

hash := m.HashID()
suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
assert.Equal(t, "cpu"+suffix, m.String())

m.SetPrefix("pre_")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu"+suffix, m.String())

m.SetSuffix("_post")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu_post"+suffix, m.String())

m.SetName("mem")
assert.NotEqual(t, hash, m.HashID())
assert.Equal(t, "mem"+suffix, m.String())
}

func TestNewMetric_FieldModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

assert.True(t, m.HasField("value"))
assert.False(t, m.HasField("foo"))

m.AddField("newfield", "foo")
assert.True(t, m.HasField("newfield"))

assert.NoError(t, m.RemoveField("newfield"))
assert.False(t, m.HasField("newfield"))

// don't allow user to remove all fields:
assert.Error(t, m.RemoveField("value"))

m.AddField("value2", int64(101))
assert.NoError(t, m.RemoveField("value"))
assert.False(t, m.HasField("value"))
}

func TestNewMetric_Fields(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

assert.Equal(t, fields, m.Fields())
}

func TestNewMetric_Time(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m = m.Copy()
m2 := m.Copy()

assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m2.UnixNano())
}

func TestNewMetric_Copy(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m2 := m.Copy()

assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m.String())
m.AddTag("host", "localhost")
assert.Equal(t,
fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
m.String())

assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m2.String())
}

func TestNewMetric_AllTypes(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float64": float64(1),
"float32": float32(1),
"int64": int64(1),
"int32": int32(1),
"int16": int16(1),
"int8": int8(1),
"int": int(1),
"uint64": uint64(1),
"uint32": uint32(1),
"uint16": uint16(1),
"uint8": uint8(1),
"uint": uint(1),
"bytes": []byte("foo"),
"nil": nil,
"maxuint64": uint64(MaxInt) + 10,
"maxuint": uint(MaxInt) + 10,
"unsupported": []int{1, 2},
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

assert.Contains(t, m.String(), "float64=1")
assert.Contains(t, m.String(), "float32=1")
assert.Contains(t, m.String(), "int64=1i")
assert.Contains(t, m.String(), "int32=1i")
assert.Contains(t, m.String(), "int16=1i")
assert.Contains(t, m.String(), "int8=1i")
assert.Contains(t, m.String(), "int=1i")
assert.Contains(t, m.String(), "uint64=1i")
assert.Contains(t, m.String(), "uint32=1i")
assert.Contains(t, m.String(), "uint16=1i")
assert.Contains(t, m.String(), "uint8=1i")
assert.Contains(t, m.String(), "uint=1i")
assert.NotContains(t, m.String(), "nil")
assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}

func TestIndexUnescapedByte(t *testing.T) {
tests := []struct {
in []byte
b byte
expected int
}{
{
in: []byte(`foobar`),
b: 'b',
expected: 3,
},
{
in: []byte(`foo\bar`),
b: 'b',
expected: -1,
},
{
in: []byte(`foo\\bar`),
b: 'b',
expected: 5,
|
||||
},
|
||||
{
|
||||
in: []byte(`foobar`),
|
||||
b: 'f',
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
in: []byte(`foobar`),
|
||||
b: 'r',
|
||||
expected: 5,
|
||||
},
|
||||
{
|
||||
in: []byte(`\foobar`),
|
||||
b: 'f',
|
||||
expected: -1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := indexUnescapedByte(test.in, test.b)
|
||||
assert.Equal(t, test.expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewGaugeMetric(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"datacenter": "us-east-1",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now, telegraf.Gauge)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, telegraf.Gauge, m.Type())
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now, m.Time())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
func TestNewCounterMetric(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"datacenter": "us-east-1",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now, telegraf.Counter)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, telegraf.Counter, m.Type())
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now, m.Time())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
// test splitting metric into various max lengths
|
||||
func TestSplitMetric(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
split80 := m.Split(80)
|
||||
assert.Len(t, split80, 2)
|
||||
|
||||
split70 := m.Split(70)
|
||||
assert.Len(t, split70, 3)
|
||||
|
||||
split60 := m.Split(60)
|
||||
assert.Len(t, split60, 4)
|
||||
}
|
||||
|
||||
// test splitting metric into various max lengths
|
||||
// use a simple regex check to verify that the split metrics are valid
|
||||
func TestSplitMetric_RegexVerify(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"foo": float64(98934259085),
|
||||
"bar": float64(19385292),
|
||||
"number": float64(19385292),
|
||||
"another": float64(19385292),
|
||||
"n": float64(19385292),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// verification regex
|
||||
re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)
|
||||
|
||||
split90 := m.Split(90)
|
||||
assert.Len(t, split90, 2)
|
||||
for _, splitM := range split90 {
|
||||
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
|
||||
}
|
||||
|
||||
split70 := m.Split(70)
|
||||
assert.Len(t, split70, 3)
|
||||
for _, splitM := range split70 {
|
||||
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
|
||||
}
|
||||
|
||||
split20 := m.Split(20)
|
||||
assert.Len(t, split20, 5)
|
||||
for _, splitM := range split20 {
|
||||
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
|
||||
}
|
||||
}
|
||||
|
||||
// test splitting a metric even when the given length is shorter than the
// shortest possible length.
// Split should make each metric as short as possible, i.e., 1 field per metric
func TestSplitMetric_TooShort(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(10)
	assert.Len(t, split, 5)
	strings := make([]string, 5)
	for i, splitM := range split {
		strings[i] = splitM.String()
	}

	assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}

func TestSplitMetric_NoOp(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, m, split[0])
}

func TestSplitMetric_OneField(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float": float64(100001),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(1)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(40)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}

func TestNewMetricAggregate(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.False(t, m.IsAggregate())
	m.SetAggregate(true)
	assert.True(t, m.IsAggregate())
}

func TestNewMetricPoint(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	p := m.Point()

	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, fields, p.Fields())
	assert.Equal(t, "cpu", p.Name())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
}
627
metric/parse.go
Normal file
627
metric/parse.go
Normal file
@@ -0,0 +1,627 @@
package metric

import (
	"bytes"
	"errors"
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
)

var (
	ErrInvalidNumber = errors.New("invalid number")
)

const (
	// the number of characters for the largest possible int64 (9223372036854775807)
	maxInt64Digits = 19

	// the number of characters for the smallest possible int64 (-9223372036854775808)
	minInt64Digits = 20

	// the number of characters required for the largest float64 before a range check
	// would occur during parsing
	maxFloat64Digits = 25

	// the number of characters required for the smallest float64 before a range check
	// would occur during parsing
	minFloat64Digits = 27

	MaxKeyLength = 65535
)

// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
	tagKeyState = iota
	tagValueState
	fieldsState
)

func Parse(buf []byte) ([]telegraf.Metric, error) {
	return ParseWithDefaultTime(buf, time.Now())
}

func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
	if len(buf) <= 6 {
		return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
	}
	metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
	var errStr string
	i := 0
	for {
		j := bytes.IndexByte(buf[i:], '\n')
		if j == -1 {
			break
		}
		if len(buf[i:i+j]) < 2 {
			i += j + 1 // increment i past the previous newline
			continue
		}

		m, err := parseMetric(buf[i:i+j], t)
		if err != nil {
			i += j + 1 // increment i past the previous newline
			errStr += " " + err.Error()
			continue
		}
		i += j + 1 // increment i past the previous newline

		metrics = append(metrics, m)
	}

	if len(errStr) > 0 {
		return metrics, fmt.Errorf(errStr)
	}
	return metrics, nil
}

func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
	var dTime string
	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
	pos, key, err := scanKey(buf, 0)
	if err != nil {
		return nil, err
	}

	// measurement name is required
	if len(key) == 0 {
		return nil, fmt.Errorf("missing measurement")
	}

	if len(key) > MaxKeyLength {
		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
	}

	// scan the second block which is field1=value1[,field2=value2,...]
	pos, fields, err := scanFields(buf, pos)
	if err != nil {
		return nil, err
	}

	// at least one field is required
	if len(fields) == 0 {
		return nil, fmt.Errorf("missing fields")
	}

	// scan the last block which is an optional integer timestamp
	pos, ts, err := scanTime(buf, pos)
	if err != nil {
		return nil, err
	}

	m := &metric{
		fields: fields,
		t:      ts,
	}

	// parse out the measurement name
	// namei is the index at which the "name" ends
	namei := indexUnescapedByte(key, ',')
	if namei < 1 {
		// no tags
		m.name = key
	} else {
		m.name = key[0:namei]
		m.tags = key[namei:]
	}

	if len(m.t) == 0 {
		if len(dTime) == 0 {
			dTime = fmt.Sprint(defaultTime.UnixNano())
		}
		// use default time
		m.t = []byte(dTime)
	}

	// here we copy on return because this allows us to later call
	// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
	// modifying 'tag' bytes having an effect on 'field' bytes, for example.
	return m.Copy(), nil
}

// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of key within buf. If there
// are tags, they will be sorted if they are not already.
func scanKey(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	// First scan the Point's measurement.
	state, i, err := scanMeasurement(buf, i)
	if err != nil {
		return i, buf[start:i], err
	}

	// Optionally scan tags if needed.
	if state == tagKeyState {
		i, err = scanTags(buf, i)
		if err != nil {
			return i, buf[start:i], err
		}
	}

	return i, buf[start:i], nil
}

// scanMeasurement examines the measurement part of a Point, returning
// the next state to move to, and the current location in the buffer.
func scanMeasurement(buf []byte, i int) (int, int, error) {
	// Check first byte of measurement, anything except a comma is fine.
	// It can't be a space, since whitespace is stripped prior to this
	// function call.
	if i >= len(buf) || buf[i] == ',' {
		return -1, i, makeError("missing measurement", buf, i)
	}

	for {
		i++
		if i >= len(buf) {
			// cpu
			return -1, i, makeError("missing fields", buf, i)
		}

		if buf[i-1] == '\\' {
			// Skip character (it's escaped).
			continue
		}

		// Unescaped comma; move onto scanning the tags.
		if buf[i] == ',' {
			return tagKeyState, i + 1, nil
		}

		// Unescaped space; move onto scanning the fields.
		if buf[i] == ' ' {
			// cpu value=1.0
			return fieldsState, i, nil
		}
	}
}

// scanTags examines all the tags in a Point, keeping track of and
// returning the updated indices slice, number of commas and location
// in buf where to start examining the Point fields.
func scanTags(buf []byte, i int) (int, error) {
	var (
		err   error
		state = tagKeyState
	)

	for {
		switch state {
		case tagKeyState:
			i, err = scanTagsKey(buf, i)
			state = tagValueState // tag value always follows a tag key
		case tagValueState:
			state, i, err = scanTagsValue(buf, i)
		case fieldsState:
			return i, nil
		}

		if err != nil {
			return i, err
		}
	}
}

// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
	// First character of the key.
	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
		// cpu,{'', ' ', ',', '='}
		return i, makeError("missing tag key", buf, i)
	}

	// Examine each character in the tag key until we hit an unescaped
	// equals (the tag value), or we hit an error (i.e., unescaped
	// space or comma).
	for {
		i++

		// Either we reached the end of the buffer or we hit an
		// unescaped comma or space.
		if i >= len(buf) ||
			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
			// cpu,tag{'', ' ', ','}
			return i, makeError("missing tag value", buf, i)
		}

		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag=
			return i + 1, nil
		}
	}
}

// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
	// Tag value cannot be empty.
	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
		// cpu,tag={',', ' '}
		return -1, i, makeError("missing tag value", buf, i)
	}

	// Examine each character in the tag value until we hit an unescaped
	// comma (move onto next tag key), an unescaped space (move onto
	// fields), or we error out.
	for {
		i++
		if i >= len(buf) {
			// cpu,tag=value
			return -1, i, makeError("missing fields", buf, i)
		}

		// An unescaped equals sign is an invalid tag value.
		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag={'=', 'fo=o'}
			return -1, i, makeError("invalid tag format", buf, i)
		}

		if buf[i] == ',' && buf[i-1] != '\\' {
			// cpu,tag=foo,
			return tagKeyState, i + 1, nil
		}

		// cpu,tag=foo value=1.0
		// cpu, tag=foo\= value=1.0
		if buf[i] == ' ' && buf[i-1] != '\\' {
			return fieldsState, i, nil
		}
	}
}

// scanFields scans buf, starting at i for the fields section of a point. It returns
// the ending position and the byte slice of the fields within buf
func scanFields(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start
	quoted := false

	// tracks how many '=' we've seen
	equals := 0

	// tracks how many commas we've seen
	commas := 0

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// escaped characters?
		if buf[i] == '\\' && i+1 < len(buf) {
			i += 2
			continue
		}

		// If the value is quoted, scan until we get to the end quote
		// Only quote values in the field value since quotes are not significant
		// in the field key
		if buf[i] == '"' && equals > commas {
			quoted = !quoted
			i++
			continue
		}

		// If we see an =, ensure that there is at least one char before and after it
		if buf[i] == '=' && !quoted {
			equals++

			// check for "... =123" but allow "a\ =123"
			if buf[i-1] == ' ' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "...a=123,=456" but allow "a=123,a\,=456"
			if buf[i-1] == ',' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "... value="
			if i+1 >= len(buf) {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			// check for "... value=,value2=..."
			if buf[i+1] == ',' || buf[i+1] == ' ' {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
				var err error
				i, err = scanNumber(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
			// If next byte is not a double-quote, the value must be a boolean
			if buf[i+1] != '"' {
				var err error
				i, _, err = scanBoolean(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
		}

		if buf[i] == ',' && !quoted {
			commas++
		}

		// reached end of block?
		if buf[i] == ' ' && !quoted {
			break
		}
		i++
	}

	if quoted {
		return i, buf[start:i], makeError("unbalanced quotes", buf, i)
	}

	// check that all field sections had key and values (e.g. prevent "a=1,b")
	if equals == 0 || commas != equals-1 {
		return i, buf[start:i], makeError("invalid field format", buf, i)
	}

	return i, buf[start:i], nil
}

// scanTime scans buf, starting at i for the time section of a point. It
// returns the ending position and the byte slice of the timestamp within buf
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// Reached end of block or trailing whitespace?
		if buf[i] == '\n' || buf[i] == ' ' {
			break
		}

		// Handle negative timestamps
		if i == start && buf[i] == '-' {
			i++
			continue
		}

		// Timestamps should be integers, make sure they are so we don't need
		// to actually parse the timestamp until needed.
		if buf[i] < '0' || buf[i] > '9' {
			return i, buf[start:i], makeError("invalid timestamp", buf, i)
		}
		i++
	}
	return i, buf[start:i], nil
}

func isNumeric(b byte) bool {
	return (b >= '0' && b <= '9') || b == '.'
}

// scanNumber returns the end position within buf, starting at i after
// scanning over buf for an integer, or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
	start := i
	var isInt bool

	// Is negative number?
	if i < len(buf) && buf[i] == '-' {
		i++
		// There must be more characters now, as just '-' is illegal.
		if i == len(buf) {
			return i, ErrInvalidNumber
		}
	}

	// tracks whether we've seen a decimal point
	decimal := false

	// indicates the number is float in scientific notation
	scientific := false

	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}

		if buf[i] == 'i' && i > start && !isInt {
			isInt = true
			i++
			continue
		}

		if buf[i] == '.' {
			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
			if decimal {
				return i, ErrInvalidNumber
			}
			decimal = true
		}

		// `e` is valid for floats but not as the first char
		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
			scientific = true
			i++
			continue
		}

		// + and - are only valid at this point if they follow an e (scientific notation)
		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
			i++
			continue
		}

		// NaN is an unsupported value
		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
			return i, ErrInvalidNumber
		}

		if !isNumeric(buf[i]) {
			return i, ErrInvalidNumber
		}
		i++
	}

	if isInt && (decimal || scientific) {
		return i, ErrInvalidNumber
	}

	numericDigits := i - start
	if isInt {
		numericDigits--
	}
	if decimal {
		numericDigits--
	}
	if buf[start] == '-' {
		numericDigits--
	}

	if numericDigits == 0 {
		return i, ErrInvalidNumber
	}

	// It's more common that numbers will be within min/max range for their type but we need to prevent
	// out of range numbers from being parsed successfully. This uses some simple heuristics to decide
	// if we should parse the number to the actual type. It does not do it all the time because it incurs
	// extra allocations and we end up converting the type again when writing points to disk.
	if isInt {
		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
		if buf[i-1] != 'i' {
			return i, ErrInvalidNumber
		}
		// Parse the int to check bounds if the number of digits could be larger than the max range
		// We subtract 1 from the index to remove the `i` from our tests
		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
				return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
			}
		}
	} else {
		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
			if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
				return i, makeError("invalid float", buf, i)
			}
		}
	}

	return i, nil
}

// scanBoolean returns the end position within buf, starting at i after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid boolean
// is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
		return i, buf[start:i], makeError("invalid value", buf, i)
	}

	i++
	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}
		i++
	}

	// Single char bool (t, T, f, F) is ok
	if i-start == 1 {
		return i, buf[start:i], nil
	}

	// length must be 4 for true or TRUE
	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// length must be 5 for false or FALSE
	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// Otherwise
	valid := false
	switch buf[start] {
	case 't':
		valid = bytes.Equal(buf[start:i], []byte("true"))
	case 'f':
		valid = bytes.Equal(buf[start:i], []byte("false"))
	case 'T':
		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
	case 'F':
		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
	}

	if !valid {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	return i, buf[start:i], nil
}

// skipWhitespace returns the end position within buf, starting at i after
// scanning over spaces in tags
func skipWhitespace(buf []byte, i int) int {
	for i < len(buf) {
		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
			break
		}
		i++
	}
	return i
}

// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
	return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
		reason, buf, i)
}
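For orientation, here is a minimal usage sketch of this parser (the metric line is illustrative; the `Parse`, `Name`, `Tags`, and `Fields` calls mirror the tests in parse_test.go below):

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// One line of InfluxDB line protocol; Parse applies time.Now()
	// to any metric that omits a trailing timestamp.
	buf := []byte("cpu,host=foo,datacenter=us-east idle=99,busy=1i 1480940990034083306\n")

	metrics, err := metric.Parse(buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
```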
||||
355
metric/parse_test.go
Normal file
355
metric/parse_test.go
Normal file
@@ -0,0 +1,355 @@
package metric

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`

const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`

const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`

const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`

const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`

// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParse(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(sevenMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 7)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"idle": float64(99),
				"busy": int64(1),
				"b":    true,
				"s":    "string",
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseErrors(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(someInvalid))
	assert.Error(t, err)
	assert.Len(t, metrics, 4)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage_idle": float64(99),
				"usage_busy": float64(1),
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseWithTimestamps(t *testing.T) {
	metrics, err := Parse([]byte(withTimestamps))
	assert.NoError(t, err)
	assert.Len(t, metrics, 4)

	expectedTimestamps := []time.Time{
		time.Unix(0, 1480595849000000000),
		time.Unix(0, 1480595850000000000),
		time.Unix(0, 1480595851700030000),
		time.Unix(0, 1480595852000000300),
	}

	// each metric should carry the timestamp from its own line.
	for i, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage": float64(99),
			},
			m.Fields(),
		)
		assert.True(t, m.Time().Equal(expectedTimestamps[i]))
	}
}

func TestParseEscapes(t *testing.T) {
	metrics, err := Parse([]byte(withEscapes))
	assert.NoError(t, err)
	assert.Len(t, metrics, 6)

	tests := []struct {
		name   string
		fields map[string]interface{}
		tags   map[string]string
	}{
		{
			name:   `w, eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `w,eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{"location": `us,midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temp=rature`: float64(82)},
			tags:   map[string]string{"location": `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{`location place`: `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temperature`: `too"hot"`},
			tags:   map[string]string{"location": `us-midwest`},
		},
	}

	for i, test := range tests {
		assert.Equal(t, test.name, metrics[i].Name())
		assert.Equal(t, test.fields, metrics[i].Fields())
		assert.Equal(t, test.tags, metrics[i].Tags())
	}
}

func TestParseTrueBooleans(t *testing.T) {
	metrics, err := Parse([]byte(trues))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, true, metric.Fields()["b"])
	}
}

func TestParseFalseBooleans(t *testing.T) {
	metrics, err := Parse([]byte(falses))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, false, metric.Fields()["b"])
	}
}

func TestParsePointBadNumber(t *testing.T) {
	for _, tt := range []string{
		"cpu v=- ",
		"cpu v=-i ",
		"cpu v=-. ",
		"cpu v=. ",
		"cpu v=1.0i ",
		"cpu v=1ii ",
		"cpu v=1a ",
		"cpu v=-e-e-e ",
		"cpu v=42+3 ",
		"cpu v= ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseTagsMissingParts(t *testing.T) {
	for _, tt := range []string{
		`cpu,host`,
		`cpu,host,`,
		`cpu,host=`,
		`cpu,f=oo=bar value=1`,
		`cpu,host value=1i`,
		`cpu,host=serverA,region value=1i`,
		`cpu,host=serverA,region= value=1i`,
		`cpu,host=serverA,region=,zone=us-west value=1i`,
		`cpu, value=1`,
		`cpu, ,,`,
		`cpu,,,`,
		`cpu,host=serverA,=us-east value=1i`,
		`cpu,host=serverAa\,,=us-east value=1i`,
		`cpu,host=serverA\,,=us-east value=1i`,
		`cpu, =serverA value=1i`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointWhitespace(t *testing.T) {
	for _, tt := range []string{
		`cpu    value=1.0 1257894000000000000`,
		`cpu value=1.0    1257894000000000000`,
		`cpu    value=1.0    1257894000000000000`,
		`cpu value=1.0 1257894000000000000 `,
	} {
		m, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.Equal(t, "cpu", m[0].Name())
		assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
	}
}

func TestParsePointInvalidFields(t *testing.T) {
	for _, tt := range []string{
		"test,foo=bar a=101,=value",
		"test,foo=bar =value",
		"test,foo=bar a=101,key=",
		"test,foo=bar key=",
		`test,foo=bar a=101,b="foo`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointNoFields(t *testing.T) {
	for _, tt := range []string{
		"cpu_load_short,host=server01,region=us-west",
		"very_long_measurement_name",
		"cpu,host==",
		"============",
		"cpu",
		"cpu\n\n\n\n\n\n\n",
		" ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

// a b=1 << this is the shortest possible metric
// any shorter is just ignored
func TestParseBufTooShort(t *testing.T) {
	for _, tt := range []string{
		"",
		"a",
		"a ",
		"a b=",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidBooleans(t *testing.T) {
	for _, tt := range []string{
		"test b=tru",
		"test b=fals",
		"test b=faLse",
		"test q=foo",
		"test b=lambchops",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidNumbers(t *testing.T) {
	for _, tt := range []string{
		"test b=-",
		"test b=1.1.1",
		"test b=nan",
		"test b=9i10",
		"test b=9999999999999999999i",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseNegativeTimestamps(t *testing.T) {
	for _, tt := range []string{
		"test foo=101 -1257894000000000000",
	} {
		metrics, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
	}
}

func TestParseMaxKeyLength(t *testing.T) {
	key := ""
	for {
		if len(key) > MaxKeyLength {
			break
		}
		key += "test"
	}

	_, err := Parse([]byte(key + " value=1\n"))
	assert.Error(t, err)
}
111
metric_test.go
111
metric_test.go
@@ -1,111 +0,0 @@
package telegraf

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, Untyped, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewGaugeMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewGaugeMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, Gauge, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewCounterMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, Counter, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.Unix())
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := NewMetric("cpu", tags, fields, now)
	assert.Error(t, err)
}
42
plugins/aggregators/minmax/README.md
Normal file
42
plugins/aggregators/minmax/README.md
Normal file
@@ -0,0 +1,42 @@
# MinMax Aggregator Plugin

The minmax aggregator plugin aggregates min & max values of each field it sees,
emitting the aggregate every `period` seconds.

### Configuration:

```toml
# Keep the aggregate min/max of each metric passing through.
[[aggregators.minmax]]
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
```

### Measurements & Fields:

- measurement1
    - field1_max
    - field1_min

### Tags:

No tags are applied by this aggregator.

### Example Output:

```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1.72 1475583980000000000
system,host=tars load1=1.6 1475583990000000000
system,host=tars load1=1.66 1475584000000000000
system,host=tars load1=1.63 1475584010000000000
system,host=tars load1_max=1.72,load1_min=1.6 1475584010000000000
system,host=tars load1=1.46 1475584020000000000
system,host=tars load1=1.39 1475584030000000000
system,host=tars load1=1.41 1475584040000000000
system,host=tars load1_max=1.46,load1_min=1.39 1475584040000000000
```

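To make the aggregation concrete, here is a minimal, self-contained sketch of the min/max bookkeeping the plugin performs per field; it deliberately ignores the real aggregator plugin interface and the periodic flush machinery, and all names are illustrative:

```go
package main

import "fmt"

// minmax tracks the running min and max for each field name.
type minmax map[string]struct{ min, max float64 }

func (mm minmax) add(field string, v float64) {
	e, ok := mm[field]
	if !ok {
		mm[field] = struct{ min, max float64 }{v, v}
		return
	}
	if v < e.min {
		e.min = v
	}
	if v > e.max {
		e.max = v
	}
	mm[field] = e
}

func main() {
	mm := minmax{}
	for _, v := range []float64{1.72, 1.6, 1.66, 1.63} {
		mm.add("load1", v)
	}
	// At the end of each period the aggregator would emit these as
	// load1_max and load1_min, then clear its state.
	fmt.Printf("load1_max=%v,load1_min=%v\n", mm["load1"].max, mm["load1"].min)
}
```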
@@ -4,11 +4,11 @@ import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/testutil"
)

var m1, _ = telegraf.NewMetric("m1",
var m1, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a": int64(1),
@@ -24,7 +24,7 @@ var m1, _ = telegraf.NewMetric("m1",
	},
	time.Now(),
)
var m2, _ = telegraf.NewMetric("m1",
var m2, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"a": int64(1),

@@ -27,6 +27,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
	_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
	_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
@@ -74,6 +75,7 @@ import (
	_ "github.com/influxdata/telegraf/plugins/inputs/trig"
	_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
	_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
	_ "github.com/influxdata/telegraf/plugins/inputs/usgs"
	_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
	_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"

@@ -2,6 +2,16 @@

#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: Time that the HTTP connection will remain waiting for a response. Default is 4 seconds ("4s")

##### Optional SSL Config

- **ssl_ca** string: the full path for the SSL CA certificate
- **ssl_cert** string: the full path for the SSL certificate
- **ssl_key** string: the full path for the key file
- **insecure_skip_verify** bool: if true the HTTP client will skip all SSL verifications related to peer and host. Defaults to false

An example configuration using these arguments is sketched below.

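A hypothetical telegraf.conf fragment pulling these arguments together (values are illustrative; the option names, including `response_timeout`, follow the plugin's sample config in apache.go further down):

```toml
[[inputs.apache]]
  ## Apache status URIs to gather stats from.
  urls = ["http://localhost/server-status?auto"]
  ## Credentials for basic HTTP authentication.
  username = "myuser"
  password = "mypassword"
  ## Maximum time to wait for the connection and response.
  response_timeout = "5s"

  ## Optional SSL config.
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification.
  # insecure_skip_verify = false
```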
#### Description

@@ -11,17 +11,42 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Apache struct {
	Urls []string
	Urls            []string
	Username        string
	Password        string
	ResponseTimeout internal.Duration
	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool
}

var sampleConfig = `
  ## An array of Apache status URIs to gather stats.
  ## Default is "http://localhost/server-status?auto".
  urls = ["http://localhost/server-status?auto"]
  ## user credentials for basic HTTP authentication
  username = "myuser"
  password = "mypassword"

  ## Timeout for the complete connection and response, in seconds
  response_timeout = "25s" ## defaults to 5 seconds

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

func (n *Apache) SampleConfig() string {
@@ -36,6 +61,9 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
	if len(n.Urls) == 0 {
		n.Urls = []string{"http://localhost/server-status?auto"}
	}
	if n.ResponseTimeout.Duration < time.Second {
		n.ResponseTimeout.Duration = time.Second * 5
	}

	var outerr error
	var errch = make(chan error)
@@ -61,21 +89,46 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
	return outerr
}

var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{
	Transport: tr,
	Timeout:   time.Duration(4 * time.Second),
}

func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
	resp, err := client.Get(addr.String())

	var tr *http.Transport

	if addr.Scheme == "https" {
		tlsCfg, err := internal.GetTLSConfig(
			n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
		if err != nil {
			return err
		}
		tr = &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
			TLSClientConfig:       tlsCfg,
		}
	} else {
		tr = &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
		}
	}

	client := &http.Client{
		Transport: tr,
		Timeout:   n.ResponseTimeout.Duration,
	}

	req, err := http.NewRequest("GET", addr.String(), nil)
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
		return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
	}

	if len(n.Username) != 0 && len(n.Password) != 0 {
		req.SetBasicAuth(n.Username, n.Password)
	}

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
	}

@@ -18,21 +18,28 @@ API endpoint. In the following order the plugin will attempt to authenticate.
```toml
[[inputs.cloudwatch]]
  ## Amazon Region (required)
  region = 'us-east-1'
  region = "us-east-1"

  # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  # metrics are made available to the 1 minute period. Some are collected at
  # 3 minute and 5 minute intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # Note that if a period is configured that is smaller than the minimum for a
  # particular metric, that metric will not be returned by the Cloudwatch API
  # and will not be collected by Telegraf.
  #
  ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
  period = '1m'
  period = "5m"

  ## Collection Delay (required - must account for metrics availability via CloudWatch API)
  delay = '1m'
  delay = "5m"

  ## Override global run interval (optional - defaults to global interval)
  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = '1m'
  interval = "5m"

  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'
  namespace = "AWS/ELB"

  ## Maximum requests per second. Note that the global default AWS rate limit is
  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -43,16 +50,16 @@ API endpoint. In the following order the plugin will attempt to authenticate.
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  [[inputs.cloudwatch.metrics]]
    names = ['Latency', 'RequestCount']
    names = ["Latency", "RequestCount"]

    ## Dimension filters for Metric (optional)
    [[inputs.cloudwatch.metrics.dimensions]]
      name = 'LoadBalancerName'
      value = 'p-example'
      name = "LoadBalancerName"
      value = "p-example"

    [[inputs.cloudwatch.metrics.dimensions]]
      name = 'AvailabilityZone'
      value = '*'
      name = "AvailabilityZone"
      value = "*"
```
#### Requirements and Terminology

@@ -71,16 +78,16 @@ wildcard dimension is ignored.
Example:
```
[[inputs.cloudwatch.metrics]]
  names = ['Latency']
  names = ["Latency"]

  ## Dimension filters for Metric (optional)
  [[inputs.cloudwatch.metrics.dimensions]]
    name = 'LoadBalancerName'
    value = 'p-example'
    name = "LoadBalancerName"
    value = "p-example"

  [[inputs.cloudwatch.metrics.dimensions]]
    name = 'AvailabilityZone'
    value = '*'
    name = "AvailabilityZone"
    value = "*"
```

If the following ELBs are available:

@@ -63,7 +63,7 @@ type (
func (c *CloudWatch) SampleConfig() string {
return `
## Amazon Region
-region = 'us-east-1'
+region = "us-east-1"

## Amazon Credentials
## Credentials are loaded in the following order
@@ -80,22 +80,29 @@ func (c *CloudWatch) SampleConfig() string {
#profile = ""
#shared_credential_file = ""

+# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+# metrics are made available to the 1 minute period. Some are collected at
+# 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+# Note that if a period is configured that is smaller than the minimum for a
+# particular metric, that metric will not be returned by the Cloudwatch API
+# and will not be collected by Telegraf.
+#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
-period = '1m'
+period = "5m"

## Collection Delay (required - must account for metrics availability via CloudWatch API)
-delay = '1m'
+delay = "5m"

## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
-interval = '1m'
+interval = "5m"

## Configure the TTL for the internal cache of metrics.
## Defaults to 1 hr if not specified
-#cache_ttl = '10m'
+#cache_ttl = "10m"

## Metric Statistic Namespace (required)
-namespace = 'AWS/ELB'
+namespace = "AWS/ELB"

## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -106,12 +113,12 @@ func (c *CloudWatch) SampleConfig() string {
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
#[[inputs.cloudwatch.metrics]]
-# names = ['Latency', 'RequestCount']
+# names = ["Latency", "RequestCount"]
#
# ## Dimension filters for Metric (optional)
# [[inputs.cloudwatch.metrics.dimensions]]
-# name = 'LoadBalancerName'
-# value = 'p-example'
+# name = "LoadBalancerName"
+# value = "p-example"
`
}

@@ -133,7 +140,6 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if !hasWilcard(m.Dimensions) {
dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
for k, d := range m.Dimensions {
-fmt.Printf("Dimension [%s]:[%s]\n", d.Name, d.Value)
dimensions[k] = &cloudwatch.Dimension{
Name: aws.String(d.Name),
Value: aws.String(d.Value),
@@ -229,13 +235,12 @@ func (c *CloudWatch) initializeCloudWatch() error {
/*
* Fetch available metrics for given CloudWatch Namespace
*/
-func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err error) {
+func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
if c.metricCache != nil && c.metricCache.IsValid() {
-metrics = c.metricCache.Metrics
-return
+return c.metricCache.Metrics, nil
}

-metrics = []*cloudwatch.Metric{}
+metrics := []*cloudwatch.Metric{}

var token *string
for more := true; more; {
@@ -263,7 +268,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
TTL: c.CacheTTL.Duration,
}

-return
+return metrics, nil
}

/*
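For context, a minimal sketch of the metric cache this hunk leans on: `IsValid` gates `fetchNamespaceMetrics` on a configurable TTL. Only `Metrics` and `TTL` appear in this changeset; the `Fetched` field and element type here are assumptions for illustration.

```go
package cloudwatch

import "time"

// metricCache is a sketch of the cache the diff relies on: fetched
// metric descriptors are reused until the configured TTL elapses.
type metricCache struct {
	Metrics []string // stands in for []*cloudwatch.Metric
	Fetched time.Time
	TTL     time.Duration
}

// IsValid reports whether the cached metric list is still fresh.
func (c *metricCache) IsValid() bool {
	return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
}
```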
@@ -11,7 +11,7 @@ import (
"syscall"
"time"

-"github.com/gonuts/go-shellquote"
+"github.com/kballard/go-shellquote"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"

@@ -36,7 +36,7 @@ const malformedJson = `
"status": "green",
`

-const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1"
+const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n"

const lineProtocolMulti = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
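The import swap above keeps the same package name, and the kballard fork exposes the same splitting behavior. A small usage sketch of its `Split` function, which tokenizes a command line the way a POSIX shell would; the command string is illustrative:

```go
package main

import (
	"fmt"

	"github.com/kballard/go-shellquote"
)

func main() {
	// Split honors quoting and escaping, so quoted arguments with
	// spaces survive as single argv entries.
	args, err := shellquote.Split(`/usr/bin/collector --name="my input" -v`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", args) // ["/usr/bin/collector" "--name=my input" "-v"]
}
```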
@@ -14,6 +14,7 @@ import (
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers/influx"
+"github.com/influxdata/telegraf/selfstat"
)

const (
@@ -43,6 +44,18 @@ type HTTPListener struct {
parser influx.InfluxParser
acc telegraf.Accumulator
pool *pool
+
+BytesRecv selfstat.Stat
+RequestsServed selfstat.Stat
+WritesServed selfstat.Stat
+QueriesServed selfstat.Stat
+PingsServed selfstat.Stat
+RequestsRecv selfstat.Stat
+WritesRecv selfstat.Stat
+QueriesRecv selfstat.Stat
+PingsRecv selfstat.Stat
+NotFoundsServed selfstat.Stat
+BuffersCreated selfstat.Stat
}

const sampleConfig = `
@@ -72,7 +85,7 @@ func (h *HTTPListener) Description() string {
}

func (h *HTTPListener) Gather(_ telegraf.Accumulator) error {
-log.Printf("D! The http_listener has created %d buffers", h.pool.ncreated())
+h.BuffersCreated.Set(h.pool.ncreated())
return nil
}

@@ -81,6 +94,21 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.mu.Lock()
defer h.mu.Unlock()

+tags := map[string]string{
+"address": h.ServiceAddress,
+}
+h.BytesRecv = selfstat.Register("http_listener", "bytes_received", tags)
+h.RequestsServed = selfstat.Register("http_listener", "requests_served", tags)
+h.WritesServed = selfstat.Register("http_listener", "writes_served", tags)
+h.QueriesServed = selfstat.Register("http_listener", "queries_served", tags)
+h.PingsServed = selfstat.Register("http_listener", "pings_served", tags)
+h.RequestsRecv = selfstat.Register("http_listener", "requests_received", tags)
+h.WritesRecv = selfstat.Register("http_listener", "writes_received", tags)
+h.QueriesRecv = selfstat.Register("http_listener", "queries_received", tags)
+h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags)
+h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags)
+h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags)
+
if h.MaxBodySize == 0 {
h.MaxBodySize = DEFAULT_MAX_BODY_SIZE
}
@@ -141,10 +169,16 @@ func (h *HTTPListener) httpListen() error {
}

func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
+h.RequestsRecv.Incr(1)
+defer h.RequestsServed.Incr(1)
switch req.URL.Path {
case "/write":
+h.WritesRecv.Incr(1)
+defer h.WritesServed.Incr(1)
h.serveWrite(res, req)
case "/query":
+h.QueriesRecv.Incr(1)
+defer h.QueriesServed.Incr(1)
// Deliver a dummy response to the query endpoint, as some InfluxDB
// clients test endpoint availability with a query
res.Header().Set("Content-Type", "application/json")
@@ -152,9 +186,12 @@ func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
res.WriteHeader(http.StatusOK)
res.Write([]byte("{\"results\":[]}"))
case "/ping":
+h.PingsRecv.Incr(1)
+defer h.PingsServed.Incr(1)
// respond to ping requests
res.WriteHeader(http.StatusNoContent)
default:
+defer h.NotFoundsServed.Incr(1)
// Don't know how to respond to calls to other endpoints
http.NotFound(res, req)
}
@@ -195,6 +232,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
badRequest(res)
return
}
+h.BytesRecv.Incr(int64(n))

if err == io.EOF {
if return400 {
@@ -248,7 +286,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
bufStart = 0
continue
}
-if err := h.parse(buf[:i], now); err != nil {
+if err := h.parse(buf[:i+1], now); err != nil {
log.Println("E! " + err.Error())
return400 = true
}
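The `buf[:i]` to `buf[:i+1]` change is subtle: the influx parser expects each metric to be terminated by `\n`, so the slice passed to it must include the delimiter. A standalone sketch of that boundary logic under that assumption (the parser call itself is elided):

```go
package main

import (
	"bytes"
	"fmt"
)

// nextLine returns the first complete line including its trailing
// '\n', mirroring the buf[:i+1] fix: dropping the newline makes a
// line-protocol parser treat the final metric as incomplete.
func nextLine(buf []byte) (line, rest []byte) {
	i := bytes.IndexByte(buf, '\n')
	if i < 0 {
		return nil, buf // no complete line buffered yet
	}
	return buf[:i+1], buf[i+1:]
}

func main() {
	line, rest := nextLine([]byte("cpu idle=99\ncpu idle=98"))
	fmt.Printf("%q %q\n", line, rest) // "cpu idle=99\n" "cpu idle=98"
}
```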
@@ -37,6 +37,8 @@ You can also specify which keys from server response should be considered tags:
]
```

+If the JSON response is an array of objects, then each object will be parsed with the same configuration.
+
You can also specify additional request parameters for the service:

```
@@ -150,3 +152,53 @@ httpjson_mycollector1_b_e,server='http://my.service.com/_stats' value=5
httpjson_mycollector2_load,server='http://service.net/json/stats' value=100
httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335
```
+
+# Example 3, Multiple Metrics in Response:
+
+The response JSON can be treated as an array of data points that are all parsed with the same configuration.
+
+```
+[[inputs.httpjson]]
+name = "mycollector"
+servers = [
+"http://my.service.com/_stats"
+]
+# HTTP method to use (case-sensitive)
+method = "GET"
+tag_keys = ["service"]
+```
+
+which responds with the following JSON:
+
+```json
+[
+{
+"service": "service01",
+"a": 0.5,
+"b": {
+"c": "some text",
+"d": 0.1,
+"e": 5
+}
+},
+{
+"service": "service02",
+"a": 0.6,
+"b": {
+"c": "some text",
+"d": 0.2,
+"e": 6
+}
+}
+]
+```
+
+The collected metrics will be:
+```
+httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
+httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
+httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
+httpjson_mycollector_a,service='service02',server='http://my.service.com/_stats' value=0.6
+httpjson_mycollector_b_d,service='service02',server='http://my.service.com/_stats' value=0.2
+httpjson_mycollector_b_e,service='service02',server='http://my.service.com/_stats' value=6
+```
@@ -511,3 +511,52 @@ func TestHttpJson200Tags(t *testing.T) {
}
}
}
+
+const validJSONArrayTags = `
+[
+{
+"value": 15,
+"role": "master",
+"build": "123"
+},
+{
+"value": 17,
+"role": "slave",
+"build": "456"
+}
+]`
+
+// Test that array data is collected correctly
+func TestHttpJsonArray200Tags(t *testing.T) {
+httpjson := genMockHttpJson(validJSONArrayTags, 200)
+
+for _, service := range httpjson {
+if service.Name == "other_webapp" {
+var acc testutil.Accumulator
+err := service.Gather(&acc)
+// Set responsetime
+for _, p := range acc.Metrics {
+p.Fields["response_time"] = 1.0
+}
+require.NoError(t, err)
+assert.Equal(t, 8, acc.NFields())
+assert.Equal(t, uint64(4), acc.NMetrics())
+
+for _, m := range acc.Metrics {
+if m.Tags["role"] == "master" {
+assert.Equal(t, "123", m.Tags["build"])
+assert.Equal(t, float64(15), m.Fields["value"])
+assert.Equal(t, float64(1), m.Fields["response_time"])
+assert.Equal(t, "httpjson_"+service.Name, m.Measurement)
+} else if m.Tags["role"] == "slave" {
+assert.Equal(t, "456", m.Tags["build"])
+assert.Equal(t, float64(17), m.Fields["value"])
+assert.Equal(t, float64(1), m.Fields["response_time"])
+assert.Equal(t, "httpjson_"+service.Name, m.Measurement)
+} else {
+assert.FailNow(t, "unknown metric")
+}
+}
+}
+}
+}
83 plugins/inputs/internal/README.md (new file)
@@ -0,0 +1,83 @@
# Internal Input Plugin

The `internal` plugin collects metrics about the telegraf agent itself.

Note that some metrics are aggregates across all instances of one type of
plugin.

### Configuration:

```toml
# Collect statistics about itself
[[inputs.internal]]
## If true, collect telegraf memory stats.
# collect_memstats = true
```

### Measurements & Fields:

memstats are taken from the Go runtime: https://golang.org/pkg/runtime/#MemStats

- internal\_memstats
- alloc\_bytes
- frees
- heap\_alloc\_bytes
- heap\_idle\_bytes
- heap\_in\_use\_bytes
- heap\_objects\_bytes
- heap\_released\_bytes
- heap\_sys\_bytes
- mallocs
- num\_gc
- pointer\_lookups
- sys\_bytes
- total\_alloc\_bytes

agent stats collect aggregate stats on all telegraf plugins.

- internal\_agent
- gather\_errors
- metrics\_dropped
- metrics\_gathered
- metrics\_written

internal\_gather stats collect aggregate stats on all input plugins
that are of the same input type. They are tagged with `input=<plugin_name>`.

- internal\_gather
- gather\_time\_ns
- metrics\_gathered

internal\_write stats collect aggregate stats on all output plugins
that are of the same output type. They are tagged with `output=<plugin_name>`.

- internal\_write
- buffer\_limit
- buffer\_size
- metrics\_written
- metrics\_filtered
- write\_time\_ns

internal\_\<plugin\_name\> are metrics which are defined on a per-plugin basis, and
usually contain tags which differentiate each instance of a particular type of
plugin.

- internal\_\<plugin\_name\>
- individual plugin-specific fields, such as requests counts.

### Tags:

All measurements for specific plugins are tagged with information relevant
to each particular plugin.

### Example Output:

```
internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000
internal_agent,host=tyrion metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000
internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i,metrics_written=18i,buffer_size=0i 1480682800000000000
internal_gather,input=internal,host=tyrion metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000
internal_gather,input=http_listener,host=tyrion metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000
internal_http_listener,address=:8186,host=tyrion queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000
```
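For plugin authors, the registration pattern behind these per-plugin measurements is small. A hedged sketch using only the selfstat calls that appear in this changeset (`Register`, `Incr`); the plugin, field, and type names are illustrative:

```go
package myplugin

import "github.com/influxdata/telegraf/selfstat"

// Listener shows the self-instrumentation pattern used throughout this
// changeset: stats registered here surface through [[inputs.internal]]
// as the internal_myplugin measurement.
type Listener struct {
	BytesRecv selfstat.Stat
}

func (l *Listener) Start(addr string) {
	tags := map[string]string{"address": addr}
	l.BytesRecv = selfstat.Register("myplugin", "bytes_received", tags)
}

func (l *Listener) onPacket(n int) {
	l.BytesRecv.Incr(int64(n)) // counted once per received packet
}
```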
66 plugins/inputs/internal/internal.go (new file)
@@ -0,0 +1,66 @@
package internal

import (
	"runtime"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/selfstat"
)

type Self struct {
	CollectMemstats bool
}

func NewSelf() telegraf.Input {
	return &Self{
		CollectMemstats: true,
	}
}

var sampleConfig = `
  ## If true, collect telegraf memory stats.
  # collect_memstats = true
`

func (s *Self) Description() string {
	return "Collect statistics about itself"
}

func (s *Self) SampleConfig() string {
	return sampleConfig
}

func (s *Self) Gather(acc telegraf.Accumulator) error {
	if s.CollectMemstats {
		m := &runtime.MemStats{}
		runtime.ReadMemStats(m)
		fields := map[string]interface{}{
			"alloc_bytes":       m.Alloc,      // bytes allocated and not yet freed
			"total_alloc_bytes": m.TotalAlloc, // bytes allocated (even if freed)
			"sys_bytes":         m.Sys,        // bytes obtained from system (sum of XxxSys below)
			"pointer_lookups":   m.Lookups,    // number of pointer lookups
			"mallocs":           m.Mallocs,    // number of mallocs
			"frees":             m.Frees,      // number of frees
			// Main allocation heap statistics.
			"heap_alloc_bytes":    m.HeapAlloc,    // bytes allocated and not yet freed (same as Alloc above)
			"heap_sys_bytes":      m.HeapSys,      // bytes obtained from system
			"heap_idle_bytes":     m.HeapIdle,     // bytes in idle spans
			"heap_in_use_bytes":   m.HeapInuse,    // bytes in non-idle span
			"heap_released_bytes": m.HeapReleased, // bytes released to the OS
			"heap_objects_bytes":  m.HeapObjects,  // total number of allocated objects
			"num_gc":              m.NumGC,
		}
		acc.AddFields("internal_memstats", fields, map[string]string{})
	}

	for _, m := range selfstat.Metrics() {
		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}

	return nil
}

func init() {
	inputs.Add("internal", NewSelf)
}
62 plugins/inputs/internal/internal_test.go (new file)
@@ -0,0 +1,62 @@
package internal

import (
	"testing"

	"github.com/influxdata/telegraf/selfstat"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
)

func TestSelfPlugin(t *testing.T) {
	s := NewSelf()
	acc := &testutil.Accumulator{}

	s.Gather(acc)
	assert.True(t, acc.HasMeasurement("internal_memstats"))

	// test that a registered stat is incremented
	stat := selfstat.Register("mytest", "test", map[string]string{"test": "foo"})
	stat.Incr(1)
	stat.Incr(2)
	s.Gather(acc)
	acc.AssertContainsTaggedFields(t, "internal_mytest",
		map[string]interface{}{
			"test": int64(3),
		},
		map[string]string{
			"test": "foo",
		},
	)
	acc.ClearMetrics()

	// test that a registered stat is set properly
	stat.Set(101)
	s.Gather(acc)
	acc.AssertContainsTaggedFields(t, "internal_mytest",
		map[string]interface{}{
			"test": int64(101),
		},
		map[string]string{
			"test": "foo",
		},
	)
	acc.ClearMetrics()

	// test that regular and timing stats can share the same measurement, and
	// that timings are set properly.
	timing := selfstat.RegisterTiming("mytest", "test_ns", map[string]string{"test": "foo"})
	timing.Incr(100)
	timing.Incr(200)
	s.Gather(acc)
	acc.AssertContainsTaggedFields(t, "internal_mytest",
		map[string]interface{}{
			"test":    int64(101),
			"test_ns": int64(150),
		},
		map[string]string{
			"test": "foo",
		},
	)
}
@@ -23,7 +23,7 @@ func TestReadsMetricsFromKafka(t *testing.T) {
testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())

// Send a Kafka message to the kafka host
-msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
+msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n"
producer, err := sarama.NewSyncProducer(brokerPeers, nil)
require.NoError(t, err)
_, _, err = producer.SendMessage(

@@ -12,10 +12,10 @@ import (
)

const (
-testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"
+testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n"
testMsgGraphite = "cpu.load.short.graphite 23422 1454780029"
testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
-invalidMsg = "cpu_load_short,host=server01 1422568543702900257"
+invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n"
)

func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
@@ -13,6 +13,7 @@ import (
"github.com/vjeantet/grok"

"github.com/influxdata/telegraf"
+"github.com/influxdata/telegraf/metric"
)

var timeLayouts = map[string]string{
@@ -280,7 +281,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
}
}

-return telegraf.NewMetric(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
+return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
}

func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) {
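This is the first of several `telegraf.NewMetric` to `metric.New` migrations in this changeset; the prometheus parser and the github webhooks below follow the same pattern. A minimal sketch of the new constructor as it is used in these diffs:

```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

// build shows that metric.New keeps the old constructor's shape:
// name, tags, fields, timestamp -> (telegraf.Metric, error).
func build() (telegraf.Metric, error) {
	tags := map[string]string{"host": "server01"}
	fields := map[string]interface{}{"value": 23422.0}
	return metric.New("cpu_load_short", tags, fields, time.Now())
}
```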
@@ -13,10 +13,10 @@ import (
)

const (
-testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"
+testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n"
testMsgGraphite = "cpu.load.short.graphite 23422 1454780029"
testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
-invalidMsg = "cpu_load_short,host=server01 1422568543702900257"
+invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n"
)

func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) {
@@ -133,7 +133,7 @@ The unit of fields varies by the tags.
* file_events_total(float,number)
* file_events_seconds_total(float, milliseconds)
* file_events_bytes_total(float, bytes)
-* Perf file events statements - gathers attributes of each event
+* Perf events statements - gathers attributes of each event
* events_statements_total(float, number)
* events_statements_seconds_total(float, milliseconds)
* events_statements_errors_total(float, number)
@@ -10,10 +10,10 @@ import (
)

const (
-testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"
+testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n"
testMsgGraphite = "cpu.load.short.graphite 23422 1454780029"
testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
-invalidMsg = "cpu_load_short,host=server01 1422568543702900257"
+invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n"
metricBuffer = 5
)
@@ -20,7 +20,7 @@ import (
// This test is modeled after the kafka consumer integration test
func TestReadsMetricsFromNSQ(t *testing.T) {
msgID := nsq.MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}
-msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"))
+msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n"))

script := []instruction{
// SUB
@@ -14,6 +14,7 @@ import (
"time"

"github.com/influxdata/telegraf"
+"github.com/influxdata/telegraf/metric"

"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
@@ -85,7 +86,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
} else {
t = time.Now()
}
-metric, err := telegraf.NewMetric(metricName, tags, fields, t)
+metric, err := metric.New(metricName, tags, fields, t)
if err == nil {
metrics = append(metrics, metric)
}
@@ -111,9 +111,11 @@ func TestParseValidPrometheus(t *testing.T) {
"gauge": float64(1),
}, metrics[0].Fields())
assert.Equal(t, map[string]string{
"osVersion": "CentOS Linux 7 (Core)",
"dockerVersion": "1.8.2",
"kernelVersion": "3.10.0-229.20.1.el7.x86_64",
+"cadvisorRevision": "",
+"cadvisorVersion": "",
}, metrics[0].Tags())

// Counter value
@@ -26,6 +26,10 @@ const DefaultPassword = "guest"
// used by Rabbitmq
const DefaultURL = "http://localhost:15672"

+// Default http timeouts
+const DefaultResponseHeaderTimeout = 3
+const DefaultClientTimeout = 4
+
// RabbitMQ defines the configuration necessary for gathering metrics,
// see the sample config for further details
type RabbitMQ struct {
@@ -42,6 +46,9 @@ type RabbitMQ struct {
// Use SSL but skip chain & host verification
InsecureSkipVerify bool

+ResponseHeaderTimeout internal.Duration `toml:"header_timeout"`
+ClientTimeout internal.Duration `toml:"client_timeout"`
+
// InsecureSkipVerify bool
Nodes []string
Queues []string
@@ -146,6 +153,21 @@ var sampleConfig = `
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

+## Optional request timeouts
+##
+## ResponseHeaderTimeout, if non-zero, specifies the amount of
+## time to wait for a server's response headers after fully
+## writing the request (including its body, if any). This
+## time does not include the time to read the response body.
+## See http.Transport.ResponseHeaderTimeout
+# header_timeout = "3s"
+##
+## Timeout specifies a time limit for requests made by this
+## Client. The timeout includes connection time, any
+## redirects, and reading the response body.
+## See http.Client.Timeout
+# client_timeout = "4s"
+
## A list of nodes to pull metrics about. If not specified, metrics for
## all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
@@ -170,12 +192,12 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error {
return err
}
tr := &http.Transport{
-ResponseHeaderTimeout: time.Duration(3 * time.Second),
+ResponseHeaderTimeout: r.ResponseHeaderTimeout.Duration,
TLSClientConfig: tlsCfg,
}
r.Client = &http.Client{
Transport: tr,
-Timeout: time.Duration(4 * time.Second),
+Timeout: r.ClientTimeout.Duration,
}
}

@@ -388,6 +410,9 @@ func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool {

func init() {
inputs.Add("rabbitmq", func() telegraf.Input {
-return &RabbitMQ{}
+return &RabbitMQ{
+ResponseHeaderTimeout: internal.Duration{Duration: DefaultResponseHeaderTimeout * time.Second},
+ClientTimeout: internal.Duration{Duration: DefaultClientTimeout * time.Second},
+}
})
}
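Condensed, the timeout wiring above amounts to building the HTTP client from configuration instead of hard-coded 3s/4s values. A standard-library-only sketch; the parameter names are illustrative:

```go
package example

import (
	"net/http"
	"time"
)

// newClient mirrors the Gather change: both timeouts now come from
// plugin configuration (header_timeout, client_timeout).
func newClient(headerTimeout, clientTimeout time.Duration) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			// Wait this long for response headers after the
			// request has been fully written.
			ResponseHeaderTimeout: headerTimeout,
		},
		// Overall limit: connect, redirects, and body read.
		Timeout: clientTimeout,
	}
}
```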
@@ -91,10 +91,10 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
continue
}
fieldsG := map[string]interface{}{
-"usage_user": 100 * (cts.User - lastCts.User) / totalDelta,
+"usage_user": 100 * (cts.User - lastCts.User - (cts.Guest - lastCts.Guest)) / totalDelta,
"usage_system": 100 * (cts.System - lastCts.System) / totalDelta,
"usage_idle": 100 * (cts.Idle - lastCts.Idle) / totalDelta,
-"usage_nice": 100 * (cts.Nice - lastCts.Nice) / totalDelta,
+"usage_nice": 100 * (cts.Nice - lastCts.Nice - (cts.GuestNice - lastCts.GuestNice)) / totalDelta,
"usage_iowait": 100 * (cts.Iowait - lastCts.Iowait) / totalDelta,
"usage_irq": 100 * (cts.Irq - lastCts.Irq) / totalDelta,
"usage_softirq": 100 * (cts.Softirq - lastCts.Softirq) / totalDelta,
@@ -112,7 +112,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {

func totalCpuTime(t cpu.TimesStat) float64 {
total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal +
-t.Guest + t.GuestNice + t.Idle
+t.Idle
return total
}
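Why subtract guest time: on Linux, /proc/stat already folds guest and guest_nice into the user and nice counters, so reporting them separately double-counted CPU. A worked check with the deltas used in the test below (user +16.1, guest +8.3, total +100):

```go
package main

import "fmt"

func main() {
	deltaUser := 16.1  // includes guest time
	deltaGuest := 8.3  // reported separately as usage_guest
	totalDelta := 100.0
	// usage_user now excludes guest, so the percentages sum to 100.
	fmt.Printf("%.1f\n", 100*(deltaUser-deltaGuest)/totalDelta) // 7.8
}
```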
@@ -17,29 +17,29 @@ func TestCPUStats(t *testing.T) {

cts := cpu.TimesStat{
CPU: "cpu0",
-User: 3.1,
+User: 8.8,
System: 8.2,
Idle: 80.1,
Nice: 1.3,
-Iowait: 0.2,
-Irq: 0.1,
+Iowait: 0.8389,
+Irq: 0.6,
Softirq: 0.11,
Steal: 0.0511,
-Guest: 8.1,
+Guest: 3.1,
GuestNice: 0.324,
}

cts2 := cpu.TimesStat{
CPU: "cpu0",
-User: 11.4, // increased by 8.3
+User: 24.9, // increased by 16.1
System: 10.9, // increased by 2.7
-Idle: 158.8699, // increased by 78.7699 (for total increase of 100)
-Nice: 2.5, // increased by 1.2
-Iowait: 0.7, // increased by 0.5
-Irq: 1.2, // increased by 1.1
+Idle: 157.9798, // increased by 77.8798 (for total increase of 100)
+Nice: 3.5, // increased by 2.2
+Iowait: 0.929, // increased by 0.0901
+Irq: 1.2, // increased by 0.6
Softirq: 0.31, // increased by 0.2
-Steal: 0.2812, // increased by 0.0001
-Guest: 12.9, // increased by 4.8
+Steal: 0.2812, // increased by 0.2301
+Guest: 11.4, // increased by 8.3
GuestNice: 2.524, // increased by 2.2
}

@@ -56,15 +56,15 @@ func TestCPUStats(t *testing.T) {

// Computed values are checked with delta > 0 because of floating point
// arithmetic imprecision
-assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 3.1, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.2, 0, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.1, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.8389, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.6, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.0511, 0, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 8.1, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 3.1, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags)

mps2 := MockPS{}
@@ -75,26 +75,26 @@ func TestCPUStats(t *testing.T) {
err = cs.Gather(&acc)
require.NoError(t, err)

-assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 11.4, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 24.9, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 158.8699, 0, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 2.5, 0, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.7, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 157.9798, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 3.5, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.929, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 12.9, 0, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 11.4, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags)

-assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 8.3, 0.0005, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 7.8, 0.0005, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 78.7699, 0.0005, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 1.2, 0.0005, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.5, 0.0005, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 1.1, 0.0005, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 77.8798, 0.0005, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 0, 0.0005, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.0901, 0.0005, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 0.6, 0.0005, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags)
-assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 4.8, 0.0005, cputags)
+assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 8.3, 0.0005, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags)
}
@@ -2,6 +2,7 @@ package system

import (
"fmt"
+"strings"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -41,18 +42,19 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
s.MountPoints = s.Mountpoints
}

-disks, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS)
+disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS)
if err != nil {
return fmt.Errorf("error getting disk usage info: %s", err)
}

-for _, du := range disks {
+for i, du := range disks {
if du.Total == 0 {
// Skip dummy filesystem (procfs, cgroupfs, ...)
continue
}
tags := map[string]string{
"path": du.Path,
+"device": strings.Replace(partitions[i].Device, "/dev/", "", -1),
"fstype": du.Fstype,
}
var used_percent float64
@@ -131,13 +133,14 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error {
}

fields := map[string]interface{}{
"reads": io.ReadCount,
"writes": io.WriteCount,
"read_bytes": io.ReadBytes,
"write_bytes": io.WriteBytes,
"read_time": io.ReadTime,
"write_time": io.WriteTime,
"io_time": io.IoTime,
+"iops_in_progress": io.IopsInProgress,
}
acc.AddCounter("diskio", fields, tags)
}
@@ -50,9 +50,33 @@ func TestDiskStats(t *testing.T) {
},
}

-mps.On("DiskUsage", []string(nil), []string(nil)).Return(duAll, nil)
-mps.On("DiskUsage", []string{"/", "/dev"}, []string(nil)).Return(duFiltered, nil)
-mps.On("DiskUsage", []string{"/", "/home"}, []string(nil)).Return(duAll, nil)
+psAll := []*disk.PartitionStat{
+{
+Device: "/dev/sda",
+Mountpoint: "/",
+Fstype: "ext4",
+Opts: "",
+},
+{
+Device: "/dev/sdb",
+Mountpoint: "/home",
+Fstype: "ext4",
+Opts: "",
+},
+}
+
+psFiltered := []*disk.PartitionStat{
+{
+Device: "/dev/sda",
+Mountpoint: "/",
+Fstype: "ext4",
+Opts: "",
+},
+}
+
+mps.On("DiskUsage", []string(nil), []string(nil)).Return(duAll, psAll, nil)
+mps.On("DiskUsage", []string{"/", "/dev"}, []string(nil)).Return(duFiltered, psFiltered, nil)
+mps.On("DiskUsage", []string{"/", "/home"}, []string(nil)).Return(duAll, psAll, nil)

err = (&DiskStats{ps: &mps}).Gather(&acc)
require.NoError(t, err)
@@ -64,10 +88,12 @@ func TestDiskStats(t *testing.T) {
tags1 := map[string]string{
"path": "/",
"fstype": "ext4",
+"device": "sda",
}
tags2 := map[string]string{
"path": "/home",
"fstype": "ext4",
+"device": "sdb",
}

fields1 := map[string]interface{}{
@@ -33,13 +33,14 @@ func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) {
return r0, r1
}

-func (m *MockPS) DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, error) {
+func (m *MockPS) DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, []*disk.PartitionStat, error) {
ret := m.Called(mountPointFilter, fstypeExclude)

r0 := ret.Get(0).([]*disk.UsageStat)
-r1 := ret.Error(1)
+r1 := ret.Get(1).([]*disk.PartitionStat)
+r2 := ret.Error(2)

-return r0, r1
+return r0, r1, r2
}

func (m *MockPS) NetIO() ([]net.IOCountersStat, error) {
@@ -14,7 +14,7 @@ import (

type PS interface {
CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error)
-DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, error)
+DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, []*disk.PartitionStat, error)
NetIO() ([]net.IOCountersStat, error)
NetProto() ([]net.ProtoCountersStat, error)
DiskIO() (map[string]disk.IOCountersStat, error)
@@ -54,10 +54,10 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) {
func (s *systemPS) DiskUsage(
mountPointFilter []string,
fstypeExclude []string,
-) ([]*disk.UsageStat, error) {
+) ([]*disk.UsageStat, []*disk.PartitionStat, error) {
parts, err := disk.Partitions(true)
if err != nil {
-return nil, err
+return nil, nil, err
}

// Make a "set" out of the filter slice
@@ -71,6 +71,7 @@ func (s *systemPS) DiskUsage(
}

var usage []*disk.UsageStat
+var partitions []*disk.PartitionStat

for _, p := range parts {
if len(mountPointFilter) > 0 {
@@ -85,9 +86,10 @@ func (s *systemPS) DiskUsage(
if _, err := os.Stat(mountpoint); err == nil {
du, err := disk.Usage(mountpoint)
if err != nil {
-return nil, err
+return nil, nil, err
}
du.Path = p.Mountpoint

// If the mount point is a member of the exclude set,
// don't gather info on it.
_, ok := fstypeExcludeSet[p.Fstype]
@@ -96,10 +98,11 @@ func (s *systemPS) DiskUsage(
}
du.Fstype = p.Fstype
usage = append(usage, du)
+partitions = append(partitions, &p)
}
}

-return usage, nil
+return usage, partitions, nil
}

func (s *systemPS) NetProto() ([]net.ProtoCountersStat, error) {
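The contract change is easy to miss: `DiskUsage` now returns two index-aligned slices, and the disk input derives its new `device` tag from the partition at the same index. A small sketch of that pairing, using local stand-in types rather than the gopsutil ones:

```go
package example

import "strings"

type usageStat struct{ Path, Fstype string }
type partitionStat struct{ Device string }

// tagsFor pairs disks[i] with partitions[i], as the updated Gather does.
func tagsFor(disks []usageStat, partitions []partitionStat) []map[string]string {
	out := make([]map[string]string, 0, len(disks))
	for i, du := range disks {
		out = append(out, map[string]string{
			"path":   du.Path,
			"fstype": du.Fstype,
			// "/dev/sda" -> "sda"
			"device": strings.Replace(partitions[i].Device, "/dev/", "", -1),
		})
	}
	return out
}
```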
@@ -11,6 +11,7 @@ import (
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
+"github.com/influxdata/telegraf/selfstat"
)

type TcpListener struct {
@@ -41,6 +42,12 @@ type TcpListener struct {

parser parsers.Parser
acc telegraf.Accumulator
+
+MaxConnections selfstat.Stat
+CurrentConnections selfstat.Stat
+TotalConnections selfstat.Stat
+PacketsRecv selfstat.Stat
+BytesRecv selfstat.Stat
}

var dropwarn = "E! Error: tcp_listener message queue full. " +
@@ -91,6 +98,16 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error {
t.Lock()
defer t.Unlock()

+tags := map[string]string{
+"address": t.ServiceAddress,
+}
+t.MaxConnections = selfstat.Register("tcp_listener", "max_connections", tags)
+t.MaxConnections.Set(int64(t.MaxTCPConnections))
+t.CurrentConnections = selfstat.Register("tcp_listener", "current_connections", tags)
+t.TotalConnections = selfstat.Register("tcp_listener", "total_connections", tags)
+t.PacketsRecv = selfstat.Register("tcp_listener", "packets_received", tags)
+t.BytesRecv = selfstat.Register("tcp_listener", "bytes_received", tags)
+
t.acc = acc
t.in = make(chan []byte, t.AllowedPendingMessages)
t.done = make(chan struct{})
@@ -189,6 +206,8 @@ func (t *TcpListener) refuser(conn *net.TCPConn) {

// handler handles a single TCP Connection
func (t *TcpListener) handler(conn *net.TCPConn, id string) {
+t.CurrentConnections.Incr(1)
+t.TotalConnections.Incr(1)
// connection cleanup function
defer func() {
t.wg.Done()
@@ -196,6 +215,7 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
// Add one connection potential back to channel when this one closes
t.accept <- true
t.forget(id)
+t.CurrentConnections.Incr(-1)
}()

var n int
@@ -212,8 +232,11 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
if n == 0 {
continue
}
-bufCopy := make([]byte, n)
+t.BytesRecv.Incr(int64(n))
+t.PacketsRecv.Incr(1)
+bufCopy := make([]byte, n+1)
copy(bufCopy, scanner.Bytes())
+bufCopy[n] = '\n'

select {
case t.in <- bufCopy:
@@ -9,16 +9,29 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
+"github.com/influxdata/telegraf/selfstat"
)

// UdpListener main struct for the collector
type UdpListener struct {
ServiceAddress string

+// UDPBufferSize should only be set if you want/need the telegraf UDP socket to
+// differ from the system setting. In cases where you set the rmem_default to a lower
+// value at the host level, but need a larger buffer for UDP bursty traffic, this
+// setting enables you to configure that value ONLY for telegraf UDP sockets on this listener
+// Set this to 0 (or comment out) to take system default
+//
+// NOTE: You should ensure that your rmem_max is >= this setting to work properly!
+// (e.g. sysctl -w net.core.rmem_max=N)
+UDPBufferSize int `toml:"udp_buffer_size"`
+AllowedPendingMessages int

// UDPPacketSize is deprecated, it's only here for legacy support
// we now always create 1 max size buffer and then copy only what we need
// into the in channel
// see https://github.com/influxdata/telegraf/pull/992
-AllowedPendingMessages int
-UDPPacketSize int `toml:"udp_packet_size"`
+UDPPacketSize int `toml:"udp_packet_size"`

sync.Mutex
wg sync.WaitGroup
@@ -36,9 +49,12 @@ type UdpListener struct {
acc telegraf.Accumulator

listener *net.UDPConn
+
+PacketsRecv selfstat.Stat
+BytesRecv selfstat.Stat
}

-// UDP packet limit, see
+// UDP_MAX_PACKET_SIZE is packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
const UDP_MAX_PACKET_SIZE int = 64 * 1024

@@ -57,6 +73,10 @@ const sampleConfig = `
## UDP listener will start dropping packets.
# allowed_pending_messages = 10000

+## Set the buffer size of the UDP connection outside of OS default (in bytes)
+## If set to 0, take OS default
+udp_buffer_size = 16777216
+
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## more about them here:
@@ -86,6 +106,12 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error {
u.Lock()
defer u.Unlock()

+tags := map[string]string{
+"address": u.ServiceAddress,
+}
+u.PacketsRecv = selfstat.Register("udp_listener", "packets_received", tags)
+u.BytesRecv = selfstat.Register("udp_listener", "bytes_received", tags)
+
u.acc = acc
u.in = make(chan []byte, u.AllowedPendingMessages)
u.done = make(chan struct{})
@@ -94,7 +120,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error {
go u.udpListen()
go u.udpParser()

-log.Printf("I! Started UDP listener service on %s\n", u.ServiceAddress)
+log.Printf("I! Started UDP listener service on %s (ReadBuffer: %d)\n", u.ServiceAddress, u.UDPBufferSize)
return nil
}

@@ -111,20 +137,33 @@ func (u *UdpListener) Stop() {
func (u *UdpListener) udpListen() error {
defer u.wg.Done()
var err error

address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress)
u.listener, err = net.ListenUDP("udp", address)

if err != nil {
-log.Fatalf("ERROR: ListenUDP - %s", err)
+log.Fatalf("E! Error: ListenUDP - %s", err)
}

log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String())

buf := make([]byte, UDP_MAX_PACKET_SIZE)

+if u.UDPBufferSize > 0 {
+err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default
+if err != nil {
+log.Printf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err)
+return err
+}
+}
+
for {
select {
case <-u.done:
return nil
default:
u.listener.SetReadDeadline(time.Now().Add(time.Second))

n, _, err := u.listener.ReadFromUDP(buf)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
@@ -133,6 +172,8 @@ func (u *UdpListener) udpListen() error {
}
continue
}
+u.BytesRecv.Incr(int64(n))
+u.PacketsRecv.Incr(1)
bufCopy := make([]byte, n)
copy(bufCopy, buf[:n])
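Standalone, the new read-buffer handling looks like the sketch below; note that on Linux the kernel caps `SetReadBuffer` at `net.core.rmem_max`, which is why the struct comment insists on raising that limit first. The address and size here are illustrative:

```go
package main

import (
	"log"
	"net"
)

func main() {
	addr, err := net.ResolveUDPAddr("udp", ":8092")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Only override the OS default when explicitly configured,
	// e.g. after: sysctl -w net.core.rmem_max=16777216
	if err := conn.SetReadBuffer(16777216); err != nil {
		log.Fatal(err)
	}
}
```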
@@ -106,7 +106,7 @@ func TestConnectUDP(t *testing.T) {

func TestRunParser(t *testing.T) {
log.SetOutput(ioutil.Discard)
-var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257")
+var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n")

listener, in := newTestUdpListener()
acc := testutil.Accumulator{}
92 plugins/inputs/usgs/README.md (new file)
@@ -0,0 +1,92 @@
# USGS Telegraf plugin

This plugin gathers the recent earthquake data from the USGS and turns it into Telegraf metric format. The JSON polled from USGS looks as follows:

```json
{
"type": "FeatureCollection",
"metadata": {
"generated": 1481144380000,
"url": "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson",
"title": "USGS All Earthquakes, Past Hour",
"status": 200,
"api": "1.5.2",
"count": 4
},
"features": [
{
"type": "Feature",
"properties": {
"mag": 1.82,
"place": "15km ENE of Hawaiian Ocean View, Hawaii",
"time": 1481143731250,
"updated": 1481143943070,
"tz": -600,
"url": "http://earthquake.usgs.gov/earthquakes/eventpage/hv61510176",
"detail": "http://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/hv61510176.geojson",
"felt": null,
"cdi": null,
"mmi": null,
"alert": null,
"status": "automatic",
"tsunami": 0,
"sig": 51,
"net": "hv",
"code": "61510176",
"ids": ",hv61510176,",
"sources": ",hv,",
"types": ",general-link,geoserve,origin,phase-data,",
"nst": 32,
"dmin": 0.07161,
"rms": 0.24,
"gap": 106,
"magType": "md",
"type": "earthquake",
"title": "M 1.8 - 15km ENE of Hawaiian Ocean View, Hawaii"
},
"geometry": {
"type": "Point",
"coordinates": [
-155.6236725,
19.1058331,
0.87
]
},
"id": "hv61510176"
}
],
"bbox": [
-155.6236725,
19.1058331,
0.87,
-117.025,
64.9877,
13.47
]
}
```

Each `Feature` is then converted into a point in InfluxDB:

```yaml
measurement: "earthquakes"
tags:
- magnitude: 1.82,
- url: "http://earthquake.usgs.gov/earthquakes/eventpage/hv61510176",
- detail: "http://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/hv61510176.geojson",
- felt: null,
- cdi: null,
- mmi: null,
- alert: null,
- status: "automatic",
- tsunami: 0,
- sig: 51,
- net: "hv",
- nst: 32,
- dmin: 0.07161,
- rms: 0.24,
- gap: 106,
- magType: "md",
- type: "earthquake",
- title: "M 1.8 - 15km ENE of Hawaiian Ocean View, Hawaii"
```
0 plugins/inputs/usgs/sample.json (new file)
171 plugins/inputs/usgs/usgs.go (new file)
@@ -0,0 +1,171 @@
package usgs

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math"
	"net/http"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// USGS is the top level struct for this plugin
type USGS struct {
	Ok bool
}

// Description contains a description of the Plugin's function
func (gs *USGS) Description() string {
	return "a plugin to gather USGS earthquake data"
}

// SampleConfig returns a sample configuration for the plugin
func (gs *USGS) SampleConfig() string {
	return ""
}

// Gather makes the HTTP call and converts the data
func (gs *USGS) Gather(acc telegraf.Accumulator) error {
	resp, err := http.Get("https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson")
	if err != nil {
		return err
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	eqs := &Resp{}
	err = json.Unmarshal(body, eqs)
	if err != nil {
		return err
	}
	meas := "usgsdata"
	for _, feat := range eqs.Features {
		fields := map[string]interface{}{
			// Event latitude
			"lat": feat.Geometry.Coordinates[0],
			// Event longitude
			"lng": feat.Geometry.Coordinates[1],
			// Event depth
			"depth": feat.Geometry.Coordinates[2],
			// Earthquake intensity: http://earthquake.usgs.gov/learn/topics/mag_vs_int.php
			"intensity": feat.Properties.Cdi,
			// Link to detail for this Feature
			"detail": feat.Properties.Detail,
			// Horizontal distance from the epicenter to the nearest station (in degrees). 1 degree is approximately 111.2 kilometers.
			"dmin": feat.Properties.Dmin,
			// The total number of felt reports submitted to the DYFI? system.
			"felt": feat.Properties.Felt,
			// The largest azimuthal gap between azimuthally adjacent stations (in degrees). In general, the smaller this number, the more reliable
			"gap": int(feat.Properties.Gap),
			// The magnitude for the event
			"magnitude": feat.Properties.Mag,
			// Method of magnitude calculation: https://earthquake.usgs.gov/data/comcat/data-eventterms.php#magType
			"magnitudeType": feat.Properties.MagType,
			// The maximum estimated instrumental intensity for the event.
			"maxIntensity": feat.Properties.Mmi,
			// Human readable place name
			"place": feat.Properties.Place,
			// A number describing how significant the event is. Larger numbers indicate a more significant event.
			"significance": int(feat.Properties.Sig),
			// Link to USGS Event Page for event.
			"usgsEventPage": feat.Properties.URL,
		}
		tags := map[string]string{
			"latInt": coordToString(feat.Geometry.Coordinates[0]),
			"lngInt": coordToString(feat.Geometry.Coordinates[1]),
			// Alert is “green”, “yellow”, “orange”, “red”
			"alert": toString(feat.Properties.Alert),
			// The total number of seismic stations used to determine earthquake location.
			"numStations": toString(feat.Properties.Nst),
			// Indicates whether the event has been reviewed by a human -> “automatic”, “reviewed”, “deleted”
			"reviewStatus": toString(feat.Properties.Status),
			// This flag is set to "1" for large events in oceanic regions and "0" otherwise.
			"tsunami": toString(feat.Properties.Tsunami),
			// Type of seismic event “earthquake”, “quarry”
			"eventType": toString(feat.Properties.Type),
			// UTC offset for event Timezone
			"utcOffset": toString(feat.Properties.Tz),
		}

		var t time.Time
		// Convert interface to int64
		updated := feat.Properties.Updated
		// Convert interface to int64
		original := feat.Properties.Time
		// If the event has been more recently updated use that as the timestamp
		if updated > original {
			t = time.Unix(0, updated*int64(time.Millisecond))
		} else {
			t = time.Unix(0, original*int64(time.Millisecond))
		}
		acc.AddFields(meas, fields, tags, t)
	}
	return nil
}

func init() {
	inputs.Add("usgs", func() telegraf.Input { return &USGS{} })
}

func toString(s interface{}) string {
	return fmt.Sprintf("%v", s)
}

func coordToString(coord float64) string {
	foo := math.Floor(coord)
	return fmt.Sprintf("%d", int(foo))
}

// Resp is used to unmarshal the response body from USGS
type Resp struct {
	Type     string `json:"type"`
	Metadata struct {
		Generated int64  `json:"generated"`
		URL       string `json:"url"`
		Title     string `json:"title"`
		Status    int    `json:"status"`
		API       string `json:"api"`
		Count     int    `json:"count"`
	} `json:"metadata"`
	Features []struct {
		Type       string `json:"type"`
		Properties struct {
			Mag     float64 `json:"mag"`
			Place   string  `json:"place"`
			Time    int64   `json:"time"`
			Updated int64   `json:"updated"`
			Tz      float64 `json:"tz"`
			URL     string  `json:"url"`
			Detail  string  `json:"detail"`
			Felt    float64 `json:"felt"`
			Cdi     float64 `json:"cdi"`
			Mmi     float64 `json:"mmi"`
			Alert   string  `json:"alert"`
			Status  string  `json:"status"`
			Tsunami float64 `json:"tsunami"`
			Sig     float64 `json:"sig"`
			Net     string  `json:"net"`
			Code    string  `json:"code"`
			Ids     string  `json:"ids"`
			Sources string  `json:"sources"`
			Types   string  `json:"types"`
			Nst     float64 `json:"nst"`
			Dmin    float64 `json:"dmin"`
			Rms     float64 `json:"rms"`
			Gap     float64 `json:"gap"`
			MagType string  `json:"magType"`
			Type    string  `json:"type"`
			Title   string  `json:"title"`
		} `json:"properties"`
		Geometry struct {
			Type        string    `json:"type"`
			Coordinates []float64 `json:"coordinates"`
		} `json:"geometry"`
		ID string `json:"id"`
	} `json:"features"`
	Bbox []float64 `json:"bbox"`
}
@@ -6,6 +6,7 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 )

 const meas = "github_webhooks"
@@ -106,7 +107,7 @@ func (s CommitCommentEvent) NewMetric() telegraf.Metric {
 		"commit":  s.Comment.Commit,
 		"comment": s.Comment.Body,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -136,7 +137,7 @@ func (s CreateEvent) NewMetric() telegraf.Metric {
 		"ref":     s.Ref,
 		"refType": s.RefType,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -166,7 +167,7 @@ func (s DeleteEvent) NewMetric() telegraf.Metric {
 		"ref":     s.Ref,
 		"refType": s.RefType,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -197,7 +198,7 @@ func (s DeploymentEvent) NewMetric() telegraf.Metric {
 		"environment": s.Deployment.Environment,
 		"description": s.Deployment.Description,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -231,7 +232,7 @@ func (s DeploymentStatusEvent) NewMetric() telegraf.Metric {
 		"depState":       s.DeploymentStatus.State,
 		"depDescription": s.DeploymentStatus.Description,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -259,7 +260,7 @@ func (s ForkEvent) NewMetric() telegraf.Metric {
 		"issues": s.Repository.Issues,
 		"fork":   s.Forkee.Repository,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -287,7 +288,7 @@ func (s GollumEvent) NewMetric() telegraf.Metric {
 		"forks":  s.Repository.Forks,
 		"issues": s.Repository.Issues,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -319,7 +320,7 @@ func (s IssueCommentEvent) NewMetric() telegraf.Metric {
 		"comments": s.Issue.Comments,
 		"body":     s.Comment.Body,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -351,7 +352,7 @@ func (s IssuesEvent) NewMetric() telegraf.Metric {
 		"title":    s.Issue.Title,
 		"comments": s.Issue.Comments,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -380,7 +381,7 @@ func (s MemberEvent) NewMetric() telegraf.Metric {
 		"newMember":       s.Member.User,
 		"newMemberStatus": s.Member.Admin,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -406,7 +407,7 @@ func (s MembershipEvent) NewMetric() telegraf.Metric {
 		"newMember":       s.Member.User,
 		"newMemberStatus": s.Member.Admin,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -432,7 +433,7 @@ func (s PageBuildEvent) NewMetric() telegraf.Metric {
 		"forks":  s.Repository.Forks,
 		"issues": s.Repository.Issues,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -458,7 +459,7 @@ func (s PublicEvent) NewMetric() telegraf.Metric {
 		"forks":  s.Repository.Forks,
 		"issues": s.Repository.Issues,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -495,7 +496,7 @@ func (s PullRequestEvent) NewMetric() telegraf.Metric {
 		"deletions":    s.PullRequest.Deletions,
 		"changedFiles": s.PullRequest.ChangedFiles,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -533,7 +534,7 @@ func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric {
 		"commentFile": s.Comment.File,
 		"comment":     s.Comment.Comment,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -565,7 +566,7 @@ func (s PushEvent) NewMetric() telegraf.Metric {
 		"before": s.Before,
 		"after":  s.After,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -593,7 +594,7 @@ func (s ReleaseEvent) NewMetric() telegraf.Metric {
 		"issues":  s.Repository.Issues,
 		"tagName": s.Release.TagName,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -619,7 +620,7 @@ func (s RepositoryEvent) NewMetric() telegraf.Metric {
 		"forks":  s.Repository.Forks,
 		"issues": s.Repository.Issues,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -649,7 +650,7 @@ func (s StatusEvent) NewMetric() telegraf.Metric {
 		"commit": s.Commit,
 		"state":  s.State,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -677,7 +678,7 @@ func (s TeamAddEvent) NewMetric() telegraf.Metric {
 		"issues":   s.Repository.Issues,
 		"teamName": s.Team.Name,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
@@ -703,7 +704,7 @@ func (s WatchEvent) NewMetric() telegraf.Metric {
 		"forks":  s.Repository.Forks,
 		"issues": s.Repository.Issues,
 	}
-	m, err := telegraf.NewMetric(meas, t, f, time.Now())
+	m, err := metric.New(meas, t, f, time.Now())
 	if err != nil {
 		log.Fatalf("Failed to create %v event", event)
 	}
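The recurring change above (and throughout the plugin diffs that follow) swaps the old `telegraf.NewMetric` constructor for `metric.New` from the new `metric` package; the `(name, tags, fields, time)` argument order is unchanged. A minimal sketch of the new constructor, using only the shapes visible in these diffs:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// Same argument order the diffs show: measurement, tags, fields, timestamp.
	m, err := metric.New(
		"github_webhooks",
		map[string]string{"event": "watch"},
		map[string]interface{}{"stars": int64(42)},
		time.Now(),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.String()) // line-protocol representation
}
```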
@@ -144,7 +144,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.

     [[inputs.win_perf_counters.object]]
       ObjectName = "System"
-      Counters = ["Context Switches/sec","System Calls/sec"]
+      Counters = ["Context Switches/sec","System Calls/sec", "Processor Queue Length"]
       Instances = ["------"]
       Measurement = "win_system"
       #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
@@ -5,6 +5,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/outputs/amqp"
 	_ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch"
 	_ "github.com/influxdata/telegraf/plugins/outputs/datadog"
+	_ "github.com/influxdata/telegraf/plugins/outputs/discard"
 	_ "github.com/influxdata/telegraf/plugins/outputs/file"
 	_ "github.com/influxdata/telegraf/plugins/outputs/graphite"
 	_ "github.com/influxdata/telegraf/plugins/outputs/graylog"
@@ -1,7 +1,6 @@
 package amqp

 import (
-	"bytes"
 	"fmt"
 	"log"
 	"strings"
@@ -29,7 +28,7 @@ type AMQP struct {
 	Database string
 	// InfluxDB retention policy
 	RetentionPolicy string
-	// InfluxDB precision
+	// InfluxDB precision (DEPRECATED)
 	Precision string

 	// Path to CA file
@@ -61,7 +60,6 @@ const (
 	DefaultAuthMethod      = "PLAIN"
 	DefaultRetentionPolicy = "default"
 	DefaultDatabase        = "telegraf"
-	DefaultPrecision       = "s"
 )

 var sampleConfig = `
@@ -79,8 +77,6 @@ var sampleConfig = `
   # retention_policy = "default"
   ## InfluxDB database
   # database = "telegraf"
-  ## InfluxDB precision
-  # precision = "s"

   ## Optional SSL Config
   # ssl_ca = "/etc/telegraf/ca.pem"
@@ -105,7 +101,6 @@ func (q *AMQP) Connect() error {
 	defer q.Unlock()

 	q.headers = amqp.Table{
-		"precision":        q.Precision,
 		"database":         q.Database,
 		"retention_policy": q.RetentionPolicy,
 	}
@@ -182,7 +177,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error {
 	if len(metrics) == 0 {
 		return nil
 	}
-	var outbuf = make(map[string][][]byte)
+	outbuf := make(map[string][]byte)

 	for _, metric := range metrics {
 		var key string
@@ -192,14 +187,12 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error {
 			}
 		}

-		values, err := q.serializer.Serialize(metric)
+		buf, err := q.serializer.Serialize(metric)
 		if err != nil {
 			return err
 		}

-		for _, value := range values {
-			outbuf[key] = append(outbuf[key], []byte(value))
-		}
+		outbuf[key] = append(outbuf[key], buf...)
 	}

 	for key, buf := range outbuf {
@@ -211,7 +204,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error {
 			amqp.Publishing{
 				Headers:     q.headers,
 				ContentType: "text/plain",
-				Body:        bytes.Join(buf, []byte("\n")),
+				Body:        buf,
 			})
 		if err != nil {
 			return fmt.Errorf("FAILED to send amqp message: %s", err)
@@ -225,7 +218,6 @@ func init() {
 		return &AMQP{
 			AuthMethod:      DefaultAuthMethod,
 			Database:        DefaultDatabase,
-			Precision:       DefaultPrecision,
 			RetentionPolicy: DefaultRetentionPolicy,
 		}
 	})
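A pattern repeated across the output diffs below (file, graphite, kafka, mqtt, nats, nsq): `Serialize` now returns a single newline-terminated `[]byte` per metric instead of a `[]string`, so outputs concatenate buffers rather than joining strings. A minimal sketch of the consuming side, assuming only the `Serialize(metric) ([]byte, error)` shape visible in these diffs:

```go
package batching

import "github.com/influxdata/telegraf"

// serializer is the shape these diffs rely on: one newline-terminated
// []byte per metric (the concrete types live in plugins/serializers).
type serializer interface {
	Serialize(m telegraf.Metric) ([]byte, error)
}

// batch concatenates serialized metrics into one payload; because each
// buffer already ends in '\n', no separator needs to be inserted.
func batch(s serializer, metrics []telegraf.Metric) ([]byte, error) {
	var out []byte
	for _, m := range metrics {
		buf, err := s.Serialize(m)
		if err != nil {
			return nil, err
		}
		out = append(out, buf...)
	}
	return out, nil
}
```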
@@ -30,7 +30,7 @@ type CloudWatch struct {

 var sampleConfig = `
   ## Amazon REGION
-  region = 'us-east-1'
+  region = "us-east-1"

   ## Amazon Credentials
   ## Credentials are loaded in the following order
@@ -48,7 +48,7 @@ var sampleConfig = `
   #shared_credential_file = ""

   ## Namespace for the CloudWatch MetricDatums
-  namespace = 'InfluxData/Telegraf'
+  namespace = "InfluxData/Telegraf"
 `

 func (c *CloudWatch) SampleConfig() string {
12 plugins/outputs/discard/README.md Normal file
@@ -0,0 +1,12 @@
# discard Output Plugin

This output plugin simply drops all metrics that are sent to it. It is only
meant to be used for testing purposes.

### Configuration:

```toml
# Send metrics to nowhere at all
[[outputs.discard]]
  # no configuration
```
18 plugins/outputs/discard/discard.go Normal file
@@ -0,0 +1,18 @@
package discard

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs"
)

type Discard struct{}

func (d *Discard) Connect() error                        { return nil }
func (d *Discard) Close() error                          { return nil }
func (d *Discard) SampleConfig() string                  { return "" }
func (d *Discard) Description() string                   { return "Send metrics to nowhere at all" }
func (d *Discard) Write(metrics []telegraf.Metric) error { return nil }

func init() {
	outputs.Add("discard", func() telegraf.Output { return &Discard{} })
}
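Because every method is a no-op, discard also doubles as the smallest possible template for a new output plugin. A sketch of exercising it through the registry (the `outputs.Outputs` map name is an assumption based on the registry pattern `outputs.Add` implies, not something shown in these diffs):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/outputs"
	_ "github.com/influxdata/telegraf/plugins/outputs/discard" // runs the plugin's init()
)

func main() {
	// Look up the factory the plugin registered via outputs.Add.
	creator, ok := outputs.Outputs["discard"]
	if !ok {
		panic("discard output not registered")
	}
	out := creator()
	if err := out.Connect(); err != nil {
		panic(err)
	}
	fmt.Println(out.Write(nil)) // <nil>: metrics go nowhere, by design
}
```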
@@ -92,16 +92,13 @@ func (f *File) Write(metrics []telegraf.Metric) error {
 	}

 	for _, metric := range metrics {
-		values, err := f.serializer.Serialize(metric)
+		b, err := f.serializer.Serialize(metric)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to serialize message: %s", err)
 		}

-		for _, value := range values {
-			_, err = f.writer.Write([]byte(value + "\n"))
-			if err != nil {
-				return fmt.Errorf("FAILED to write message: %s, %s", value, err)
-			}
+		_, err = f.writer.Write(b)
+		if err != nil {
+			return fmt.Errorf("failed to write message: %s, %s", metric.Serialize(), err)
 		}
 	}
 	return nil
@@ -5,7 +5,6 @@ import (
 	"log"
 	"math/rand"
 	"net"
-	"strings"
 	"time"

 	"github.com/influxdata/telegraf"
@@ -76,20 +75,19 @@ func (g *Graphite) Description() string {
 // occurs, logging each unsuccessful. If all servers fail, return error.
 func (g *Graphite) Write(metrics []telegraf.Metric) error {
 	// Prepare data
-	var bp []string
+	var batch []byte
 	s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template)
 	if err != nil {
 		return err
 	}

 	for _, metric := range metrics {
-		gMetrics, err := s.Serialize(metric)
+		buf, err := s.Serialize(metric)
 		if err != nil {
 			log.Printf("E! Error serializing some metrics to graphite: %s", err.Error())
 		}
-		bp = append(bp, gMetrics...)
+		batch = append(batch, buf...)
 	}
-	graphitePoints := strings.Join(bp, "\n") + "\n"

 	// This will get set to nil if a successful write occurs
 	err = errors.New("Could not write to any Graphite server in cluster\n")
@@ -100,7 +98,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 		if g.Timeout > 0 {
 			g.conns[n].SetWriteDeadline(time.Now().Add(time.Duration(g.Timeout) * time.Second))
 		}
-		if _, e := g.conns[n].Write([]byte(graphitePoints)); e != nil {
+		if _, e := g.conns[n].Write(batch); e != nil {
 			// Error
 			log.Println("E! Graphite Error: " + e.Error())
 			// Let's try the next one
@@ -9,6 +9,7 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -21,7 +22,7 @@ func TestGraphiteError(t *testing.T) {
 		Prefix:  "my.prefix",
 	}
 	// Init metrics
-	m1, _ := telegraf.NewMetric(
+	m1, _ := metric.New(
 		"mymeasurement",
 		map[string]string{"host": "192.168.0.1"},
 		map[string]interface{}{"mymeasurement": float64(3.14)},
@@ -51,19 +52,19 @@ func TestGraphiteOK(t *testing.T) {
 		Prefix:  "my.prefix",
 	}
 	// Init metrics
-	m1, _ := telegraf.NewMetric(
+	m1, _ := metric.New(
 		"mymeasurement",
 		map[string]string{"host": "192.168.0.1"},
 		map[string]interface{}{"myfield": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	m2, _ := telegraf.NewMetric(
+	m2, _ := metric.New(
 		"mymeasurement",
 		map[string]string{"host": "192.168.0.1"},
 		map[string]interface{}{"value": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	m3, _ := telegraf.NewMetric(
+	m3, _ := metric.New(
 		"my_measurement",
 		map[string]string{"host": "192.168.0.1"},
 		map[string]interface{}{"value": float64(3.14)},
@@ -1,6 +1,7 @@
 package instrumental

 import (
+	"bytes"
 	"fmt"
 	"io"
 	"log"
@@ -10,11 +11,17 @@ import (

 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/metric"
 	"github.com/influxdata/telegraf/plugins/outputs"
-	"github.com/influxdata/telegraf/plugins/serializers"
+	"github.com/influxdata/telegraf/plugins/serializers/graphite"
 )

+var (
+	ValueIncludesBadChar = regexp.MustCompile("[^[:digit:].]")
+	MetricNameReplacer   = regexp.MustCompile("[^-[:alnum:]_.]+")
+)
+
 type Instrumental struct {
 	Host     string
 	ApiToken string
@@ -34,11 +41,6 @@ const (
 	HandshakeFormat = HelloMessage + AuthFormat
 )

-var (
-	ValueIncludesBadChar = regexp.MustCompile("[^[:digit:].]")
-	MetricNameReplacer   = regexp.MustCompile("[^-[:alnum:]_.]+")
-)
-
 var sampleConfig = `
   ## Project API Token (required)
   api_token = "API Token" # required
@@ -94,7 +96,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 	var toSerialize telegraf.Metric
 	var newTags map[string]string

-	for _, metric := range metrics {
+	for _, m := range metrics {
 		// Pull the metric_type out of the metric's tags. We don't want the type
 		// to show up with the other tags pulled from the system, as they go in the
 		// beginning of the line instead.
@@ -106,18 +108,18 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 		//
 		// increment some_prefix.host.tag1.tag2.tag3.counter.field value timestamp
 		//
-		newTags = metric.Tags()
+		newTags = m.Tags()
 		metricType = newTags["metric_type"]
 		delete(newTags, "metric_type")

-		toSerialize, _ = telegraf.NewMetric(
-			metric.Name(),
+		toSerialize, _ = metric.New(
+			m.Name(),
 			newTags,
-			metric.Fields(),
-			metric.Time(),
+			m.Fields(),
+			m.Time(),
 		)

-		stats, err := s.Serialize(toSerialize)
+		buf, err := s.Serialize(toSerialize)
 		if err != nil {
 			log.Printf("E! Error serializing a metric to Instrumental: %s", err)
 		}
@@ -131,29 +133,32 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 			metricType = "gauge"
 		}

-		for _, stat := range stats {
+		buffer := bytes.NewBuffer(buf)
+		for {
+			line, err := buffer.ReadBytes('\n')
+			if err != nil {
+				break
+			}
+			stat := string(line)
+
 			// decompose "metric.name value time"
 			splitStat := strings.SplitN(stat, " ", 3)
-			metric := splitStat[0]
+			name := splitStat[0]
 			value := splitStat[1]
 			time := splitStat[2]

 			// replace invalid components of metric name with underscore
-			clean_metric := MetricNameReplacer.ReplaceAllString(metric, "_")
+			clean_metric := MetricNameReplacer.ReplaceAllString(name, "_")

 			if !ValueIncludesBadChar.MatchString(value) {
 				points = append(points, fmt.Sprintf("%s %s %s %s", metricType, clean_metric, value, time))
 			} else if i.Debug {
 				log.Printf("E! Instrumental unable to send bad stat: %s", stat)
 			}
 		}
 	}

-	allPoints := strings.Join(points, "\n") + "\n"
+	allPoints := strings.Join(points, "")
 	_, err = fmt.Fprintf(i.conn, allPoints)

 	log.Println("D! Instrumental: " + allPoints)

 	if err != nil {
 		if err == io.EOF {
 			i.Close()
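The rewritten loop above walks the serialized buffer line by line with `bytes.Buffer.ReadBytes('\n')`, which returns each line with the delimiter still attached (that is why the points are later joined with `""` rather than `"\n"`). Outside the plugin, `bufio.Scanner` does the same splitting while stripping the newline; a small standalone illustration:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
)

func main() {
	// A serializer-style payload: one metric per newline-terminated line.
	payload := []byte("my.prefix.host.counter 3.14 1289430000\nmy.prefix.host.gauge 1 1289430000\n")

	// bufio.Scanner splits on '\n' and drops it, unlike ReadBytes('\n'),
	// which keeps the delimiter on every returned line.
	sc := bufio.NewScanner(bytes.NewReader(payload))
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}
```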
@@ -9,6 +9,7 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 	"github.com/stretchr/testify/assert"
 )

@@ -26,13 +27,13 @@ func TestWrite(t *testing.T) {
 	}

 	// Default to gauge
-	m1, _ := telegraf.NewMetric(
+	m1, _ := metric.New(
 		"mymeasurement",
 		map[string]string{"host": "192.168.0.1"},
 		map[string]interface{}{"myfield": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	m2, _ := telegraf.NewMetric(
+	m2, _ := metric.New(
 		"mymeasurement",
 		map[string]string{"host": "192.168.0.1", "metric_type": "set"},
 		map[string]interface{}{"value": float64(3.14)},
@@ -43,27 +44,27 @@ func TestWrite(t *testing.T) {
 	i.Write(metrics)

 	// Counter and Histogram are increments
-	m3, _ := telegraf.NewMetric(
+	m3, _ := metric.New(
 		"my_histogram",
 		map[string]string{"host": "192.168.0.1", "metric_type": "histogram"},
 		map[string]interface{}{"value": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
 	// We will modify metric names that won't be accepted by Instrumental
-	m4, _ := telegraf.NewMetric(
+	m4, _ := metric.New(
 		"bad_metric_name",
 		map[string]string{"host": "192.168.0.1:8888::123", "metric_type": "counter"},
 		map[string]interface{}{"value": 1},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
 	// We will drop metric values that won't be accepted by Instrumental
-	m5, _ := telegraf.NewMetric(
+	m5, _ := metric.New(
 		"bad_values",
 		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
 		map[string]interface{}{"value": "\" 3:30\""},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	m6, _ := telegraf.NewMetric(
+	m6, _ := metric.New(
 		"my_counter",
 		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
 		map[string]interface{}{"value": float64(3.14)},
@@ -154,26 +154,23 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error {
 	}

 	for _, metric := range metrics {
-		values, err := k.serializer.Serialize(metric)
+		buf, err := k.serializer.Serialize(metric)
 		if err != nil {
 			return err
 		}

-		var pubErr error
-		for _, value := range values {
-			m := &sarama.ProducerMessage{
-				Topic: k.Topic,
-				Value: sarama.StringEncoder(value),
-			}
-			if h, ok := metric.Tags()[k.RoutingTag]; ok {
-				m.Key = sarama.StringEncoder(h)
-			}
-
-			_, _, pubErr = k.producer.SendMessage(m)
+		m := &sarama.ProducerMessage{
+			Topic: k.Topic,
+			Value: sarama.ByteEncoder(buf),
+		}
+		if h, ok := metric.Tags()[k.RoutingTag]; ok {
+			m.Key = sarama.StringEncoder(h)
 		}

-		if pubErr != nil {
-			return fmt.Errorf("FAILED to send kafka message: %s\n", pubErr)
+		_, _, err = k.producer.SendMessage(m)
+
+		if err != nil {
+			return fmt.Errorf("FAILED to send kafka message: %s\n", err)
 		}
 	}
 	return nil
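The Kafka rewrite also swaps `sarama.StringEncoder` for `sarama.ByteEncoder`, which wraps the serializer's `[]byte` directly instead of converting each value to a string first. A tiny standalone check that the two encoders produce identical payloads (sarama's `Encoder` interface exposes `Encode() ([]byte, error)`):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	line := "cpu value=1 1257894000000000000\n"

	// ByteEncoder avoids the []byte -> string -> []byte round trip the
	// old per-value loop paid for every serialized metric.
	b := sarama.ByteEncoder([]byte(line))
	s := sarama.StringEncoder(line)

	bb, _ := b.Encode()
	sb, _ := s.Encode()
	fmt.Println(string(bb) == string(sb)) // true: same wire payload
}
```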
@@ -17,7 +17,7 @@ func TestFormatMetric(t *testing.T) {

 	p := testutil.MockMetrics()[0]

-	valid_string := "test1,tag1=value1 value=1 1257894000000000000"
+	valid_string := "test1,tag1=value1 value=1 1257894000000000000\n"
 	func_string, err := FormatMetric(k, p)

 	if func_string != valid_string {
@@ -29,7 +29,7 @@ func TestFormatMetric(t *testing.T) {
 		Format: "custom",
 	}

-	valid_custom := "test1,map[tag1:value1],test1,tag1=value1 value=1 1257894000000000000"
+	valid_custom := "test1,map[tag1:value1],test1,tag1=value1 value=1 1257894000000000000\n"
 	func_custom, err := FormatMetric(k, p)

 	if func_custom != valid_custom {
@@ -9,6 +9,7 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 	"github.com/stretchr/testify/require"
 )

@@ -162,31 +163,31 @@ func TestBuildGauge(t *testing.T) {
 	}
 }

-func newHostMetric(value interface{}, name, host string) (metric telegraf.Metric) {
-	metric, _ = telegraf.NewMetric(
+func newHostMetric(value interface{}, name, host string) telegraf.Metric {
+	m, _ := metric.New(
 		name,
 		map[string]string{"host": host},
 		map[string]interface{}{"value": value},
 		time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	return
+	return m
 }

 func TestBuildGaugeWithSource(t *testing.T) {
 	mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
-	pt1, _ := telegraf.NewMetric(
+	pt1, _ := metric.New(
 		"test1",
 		map[string]string{"hostname": "192.168.0.1", "tag1": "value1"},
 		map[string]interface{}{"value": 0.0},
 		mtime,
 	)
-	pt2, _ := telegraf.NewMetric(
+	pt2, _ := metric.New(
 		"test2",
 		map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"},
 		map[string]interface{}{"value": 1.0},
 		mtime,
 	)
-	pt3, _ := telegraf.NewMetric(
+	pt3, _ := metric.New(
 		"test3",
 		map[string]string{
 			"hostname": "192.168.0.1",
@@ -195,7 +196,7 @@ func TestBuildGaugeWithSource(t *testing.T) {
 		map[string]interface{}{"value": 1.0},
 		mtime,
 	)
-	pt4, _ := telegraf.NewMetric(
+	pt4, _ := metric.New(
 		"test4",
 		map[string]string{
 			"hostname": "192.168.0.1",
@@ -128,24 +128,22 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error {
 		t = append(t, metric.Name())
 		topic := strings.Join(t, "/")

-		values, err := m.serializer.Serialize(metric)
+		buf, err := m.serializer.Serialize(metric)
 		if err != nil {
 			return fmt.Errorf("MQTT Could not serialize metric: %s",
 				metric.String())
 		}

-		for _, value := range values {
-			err = m.publish(topic, value)
-			if err != nil {
-				return fmt.Errorf("Could not write to MQTT server, %s", err)
-			}
+		err = m.publish(topic, buf)
+		if err != nil {
+			return fmt.Errorf("Could not write to MQTT server, %s", err)
 		}
 	}

 	return nil
 }

-func (m *MQTT) publish(topic, body string) error {
+func (m *MQTT) publish(topic string, body []byte) error {
 	token := m.client.Publish(topic, byte(m.QoS), false, body)
 	token.Wait()
 	if token.Error() != nil {
@@ -115,20 +115,13 @@ func (n *NATS) Write(metrics []telegraf.Metric) error {
 	}

 	for _, metric := range metrics {
-		values, err := n.serializer.Serialize(metric)
+		buf, err := n.serializer.Serialize(metric)
 		if err != nil {
 			return err
 		}

-		var pubErr error
-		for _, value := range values {
-			err = n.conn.Publish(n.Subject, []byte(value))
-			if err != nil {
-				pubErr = err
-			}
-		}
-
-		if pubErr != nil {
+		err = n.conn.Publish(n.Subject, buf)
+		if err != nil {
 			return fmt.Errorf("FAILED to send NATS message: %s", err)
 		}
 	}
@@ -66,20 +66,13 @@ func (n *NSQ) Write(metrics []telegraf.Metric) error {
 	}

 	for _, metric := range metrics {
-		values, err := n.serializer.Serialize(metric)
+		buf, err := n.serializer.Serialize(metric)
 		if err != nil {
 			return err
 		}

-		var pubErr error
-		for _, value := range values {
-			err = n.producer.Publish(n.Topic, []byte(value))
-			if err != nil {
-				pubErr = err
-			}
-		}
-
-		if pubErr != nil {
+		err = n.producer.Publish(n.Topic, buf)
+		if err != nil {
 			return fmt.Errorf("FAILED to send NSQD message: %s", err)
 		}
 	}
@@ -6,19 +6,26 @@ import (
 	"net/http"
 	"regexp"
 	"sync"
+	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/outputs"
 	"github.com/prometheus/client_golang/prometheus"
 )

 var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)

-type PrometheusClient struct {
-	Listen string
+type MetricWithExpiration struct {
+	Metric     prometheus.Metric
+	Expiration time.Time
+}

-	metrics     map[string]prometheus.Metric
-	lastMetrics map[string]prometheus.Metric
+type PrometheusClient struct {
+	Listen             string
+	ExpirationInterval internal.Duration `toml:"expiration_interval"`
+
+	metrics map[string]*MetricWithExpiration

 	sync.Mutex
 }
@@ -26,11 +33,13 @@ type PrometheusClient struct {
 var sampleConfig = `
   ## Address to listen on
   # listen = ":9126"
+
+  ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
+  # expiration_interval = "60s"
 `

 func (p *PrometheusClient) Start() error {
-	p.metrics = make(map[string]prometheus.Metric)
-	p.lastMetrics = make(map[string]prometheus.Metric)
+	p.metrics = make(map[string]*MetricWithExpiration)
 	prometheus.Register(p)
 	defer func() {
 		if r := recover(); r != nil {
@@ -86,16 +95,11 @@ func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) {
 	p.Lock()
 	defer p.Unlock()

-	if len(p.metrics) > 0 {
-		p.lastMetrics = make(map[string]prometheus.Metric)
-		for k, m := range p.metrics {
-			ch <- m
-			p.lastMetrics[k] = m
-		}
-		p.metrics = make(map[string]prometheus.Metric)
-	} else {
-		for _, m := range p.lastMetrics {
-			ch <- m
+	for key, m := range p.metrics {
+		if p.ExpirationInterval.Duration != 0 && time.Now().After(m.Expiration) {
+			delete(p.metrics, key)
+		} else {
+			ch <- m.Metric
 		}
 	}
@@ -171,7 +175,11 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error {
 					"key: %s, labels: %v,\nerr: %s\n",
 					mname, l, err.Error())
 			}
-			p.metrics[desc.String()] = metric
+
+			p.metrics[desc.String()] = &MetricWithExpiration{
+				Metric:     metric,
+				Expiration: time.Now().Add(p.ExpirationInterval.Duration),
+			}
 		}
 	}
 	return nil
@@ -179,6 +187,8 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error {

 func init() {
 	outputs.Add("prometheus_client", func() telegraf.Output {
-		return &PrometheusClient{}
+		return &PrometheusClient{
+			ExpirationInterval: internal.Duration{Duration: time.Second * 60},
+		}
 	})
 }
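The new Collect path drops a cached metric once its deadline passes instead of re-delivering the last batch forever. A standalone sketch of that expire-on-read pattern (the names here are illustrative, not telegraf's):

```go
package main

import (
	"fmt"
	"time"
)

// entry mirrors the MetricWithExpiration idea from the diff above:
// each value carries its own deadline and is dropped lazily on read.
type entry struct {
	value      string
	expiration time.Time
}

// collect returns live values and forgets expired ones; a zero TTL
// disables expiration, matching the plugin's "0 == no expiration" rule.
func collect(m map[string]entry, ttlEnabled bool) []string {
	var out []string
	for k, e := range m {
		if ttlEnabled && time.Now().After(e.expiration) {
			delete(m, k) // deleting during range is safe in Go
			continue
		}
		out = append(out, e.value)
	}
	return out
}

func main() {
	m := map[string]entry{
		"fresh": {"cpu_usage 0.5", time.Now().Add(time.Minute)},
		"stale": {"cpu_usage 0.1", time.Now().Add(-time.Minute)},
	}
	fmt.Println(collect(m, true)) // only the fresh entry survives
}
```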
@@ -4,9 +4,12 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/metric"
 	"github.com/influxdata/telegraf/plugins/inputs/prometheus"
 	"github.com/influxdata/telegraf/testutil"
 )
@@ -17,23 +20,19 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping integration test in short mode")
 	}
-	now := time.Now()
-	pTesting = &PrometheusClient{Listen: "localhost:9127"}
-	err := pTesting.Start()
-	time.Sleep(time.Millisecond * 200)
-	require.NoError(t, err)
-	defer pTesting.Stop()
-
-	p := &prometheus.Prometheus{
-		Urls: []string{"http://localhost:9127/metrics"},
-	}
+	pClient, p, err := setupPrometheus()
+	require.NoError(t, err)
+	defer pClient.Stop()
+
+	now := time.Now()
 	tags := make(map[string]string)
-	pt1, _ := telegraf.NewMetric(
+	pt1, _ := metric.New(
 		"test_point_1",
 		tags,
 		map[string]interface{}{"value": 0.0},
 		now)
-	pt2, _ := telegraf.NewMetric(
+	pt2, _ := metric.New(
 		"test_point_2",
 		tags,
 		map[string]interface{}{"value": 1.0},
@@ -42,7 +41,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
 		pt1,
 		pt2,
 	}
-	require.NoError(t, pTesting.Write(metrics))
+	require.NoError(t, pClient.Write(metrics))

 	expected := []struct {
 		name string
@@ -63,12 +62,12 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {

 	tags = make(map[string]string)
 	tags["testtag"] = "testvalue"
-	pt3, _ := telegraf.NewMetric(
+	pt3, _ := metric.New(
 		"test_point_3",
 		tags,
 		map[string]interface{}{"value": 0.0},
 		now)
-	pt4, _ := telegraf.NewMetric(
+	pt4, _ := metric.New(
 		"test_point_4",
 		tags,
 		map[string]interface{}{"value": 1.0},
@@ -77,7 +76,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
 		pt3,
 		pt4,
 	}
-	require.NoError(t, pTesting.Write(metrics))
+	require.NoError(t, pClient.Write(metrics))

 	expected2 := []struct {
 		name string
@@ -93,3 +92,77 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
 			map[string]interface{}{"value": e.value})
 	}
 }
+
+func TestPrometheusExpireOldMetrics(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	pClient, p, err := setupPrometheus()
+	pClient.ExpirationInterval = internal.Duration{Duration: time.Second * 10}
+	require.NoError(t, err)
+	defer pClient.Stop()
+
+	now := time.Now()
+	tags := make(map[string]string)
+	pt1, _ := metric.New(
+		"test_point_1",
+		tags,
+		map[string]interface{}{"value": 0.0},
+		now)
+	var metrics = []telegraf.Metric{pt1}
+	require.NoError(t, pClient.Write(metrics))
+
+	for _, m := range pClient.metrics {
+		m.Expiration = now.Add(time.Duration(-15) * time.Second)
+	}
+
+	pt2, _ := metric.New(
+		"test_point_2",
+		tags,
+		map[string]interface{}{"value": 1.0},
+		now)
+	var metrics2 = []telegraf.Metric{pt2}
+	require.NoError(t, pClient.Write(metrics2))
+
+	expected := []struct {
+		name  string
+		value float64
+		tags  map[string]string
+	}{
+		{"test_point_2", 1.0, tags},
+	}
+
+	var acc testutil.Accumulator
+
+	require.NoError(t, p.Gather(&acc))
+	for _, e := range expected {
+		acc.AssertContainsFields(t, e.name,
+			map[string]interface{}{"value": e.value})
+	}
+
+	acc.AssertDoesNotContainMeasurement(t, "test_point_1")
+
+	// Confirm that it's not in the PrometheusClient map anymore
+	assert.Equal(t, 1, len(pClient.metrics))
+}
+
+func setupPrometheus() (*PrometheusClient, *prometheus.Prometheus, error) {
+	if pTesting == nil {
+		pTesting = &PrometheusClient{Listen: "localhost:9127"}
+		err := pTesting.Start()
+		if err != nil {
+			return nil, nil, err
+		}
+	} else {
+		pTesting.metrics = make(map[string]*MetricWithExpiration)
+	}
+
+	time.Sleep(time.Millisecond * 200)
+
+	p := &prometheus.Prometheus{
+		Urls: []string{"http://localhost:9127/metrics"},
+	}
+
+	return pTesting, p, nil
+}
@@ -12,6 +12,7 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 )

 // Minimum and maximum supported dates for timestamps.
@@ -216,7 +217,7 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) {
 		}
 	}

-	return telegraf.NewMetric(measurement, tags, fieldValues, timestamp)
+	return metric.New(measurement, tags, fieldValues, timestamp)
 }

 // ApplyTemplate extracts the template fields from the given line and
@@ -6,7 +6,7 @@ import (
 	"testing"
 	"time"

-	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"

 	"github.com/stretchr/testify/assert"
 )
@@ -369,7 +369,7 @@ func TestFilterMatchDefault(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("miss.servers.localhost.cpu_load",
+	exp, err := metric.New("miss.servers.localhost.cpu_load",
 		map[string]string{},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -387,7 +387,7 @@ func TestFilterMatchMultipleMeasurement(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu.cpu_load.10",
+	exp, err := metric.New("cpu.cpu_load.10",
 		map[string]string{"host": "localhost"},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -406,7 +406,7 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
 	)
 	assert.NoError(t, err)

-	exp, err := telegraf.NewMetric("cpu_cpu_load_10",
+	exp, err := metric.New("cpu_cpu_load_10",
 		map[string]string{"host": "localhost"},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -424,7 +424,7 @@ func TestFilterMatchSingle(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
+	exp, err := metric.New("cpu_load",
 		map[string]string{"host": "localhost"},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -441,7 +441,7 @@ func TestParseNoMatch(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("servers.localhost.memory.VmallocChunk",
+	exp, err := metric.New("servers.localhost.memory.VmallocChunk",
 		map[string]string{},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -459,7 +459,7 @@ func TestFilterMatchWildcard(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
+	exp, err := metric.New("cpu_load",
 		map[string]string{"host": "localhost"},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -479,7 +479,7 @@ func TestFilterMatchExactBeforeWildcard(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
+	exp, err := metric.New("cpu_load",
 		map[string]string{"host": "localhost"},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -504,16 +504,11 @@ func TestFilterMatchMostLongestFilter(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
-		map[string]string{"host": "localhost", "resource": "cpu"},
-		map[string]interface{}{"value": float64(11)},
-		time.Unix(1435077219, 0))
-	assert.NoError(t, err)
-
 	m, err := p.ParseLine("servers.localhost.cpu.cpu_load 11 1435077219")
 	assert.NoError(t, err)

-	assert.Equal(t, exp.String(), m.String())
+	assert.Contains(t, m.String(), ",host=localhost")
+	assert.Contains(t, m.String(), ",resource=cpu")
 }

 func TestFilterMatchMultipleWildcards(t *testing.T) {
@@ -528,7 +523,7 @@ func TestFilterMatchMultipleWildcards(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
+	exp, err := metric.New("cpu_load",
 		map[string]string{"host": "server01"},
 		map[string]interface{}{"value": float64(11)},
 		time.Unix(1435077219, 0))
@@ -550,16 +545,12 @@ func TestParseDefaultTags(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
-		map[string]string{"host": "localhost", "region": "us-east", "zone": "1c"},
-		map[string]interface{}{"value": float64(11)},
-		time.Unix(1435077219, 0))
-	assert.NoError(t, err)
-
 	m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
 	assert.NoError(t, err)

-	assert.Equal(t, exp.String(), m.String())
+	assert.Contains(t, m.String(), ",host=localhost")
+	assert.Contains(t, m.String(), ",region=us-east")
+	assert.Contains(t, m.String(), ",zone=1c")
 }

 func TestParseDefaultTemplateTags(t *testing.T) {
@@ -571,16 +562,12 @@ func TestParseDefaultTemplateTags(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
-		map[string]string{"host": "localhost", "region": "us-east", "zone": "1c"},
-		map[string]interface{}{"value": float64(11)},
-		time.Unix(1435077219, 0))
-	assert.NoError(t, err)
-
 	m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
 	assert.NoError(t, err)

-	assert.Equal(t, exp.String(), m.String())
+	assert.Contains(t, m.String(), ",host=localhost")
+	assert.Contains(t, m.String(), ",region=us-east")
+	assert.Contains(t, m.String(), ",zone=1c")
 }

 func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {
@@ -592,16 +579,12 @@ func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
-		map[string]string{"host": "localhost", "region": "us-east", "zone": "1c"},
-		map[string]interface{}{"value": float64(11)},
-		time.Unix(1435077219, 0))
-	assert.NoError(t, err)
-
 	m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
 	assert.NoError(t, err)

-	assert.Equal(t, exp.String(), m.String())
+	assert.Contains(t, m.String(), ",host=localhost")
+	assert.Contains(t, m.String(), ",region=us-east")
+	assert.Contains(t, m.String(), ",zone=1c")
 }

 func TestParseTemplateWhitespace(t *testing.T) {
@@ -615,16 +598,12 @@ func TestParseTemplateWhitespace(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	exp, err := telegraf.NewMetric("cpu_load",
-		map[string]string{"host": "localhost", "region": "us-east", "zone": "1c"},
-		map[string]interface{}{"value": float64(11)},
-		time.Unix(1435077219, 0))
-	assert.NoError(t, err)
-
 	m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219")
 	assert.NoError(t, err)

-	assert.Equal(t, exp.String(), m.String())
+	assert.Contains(t, m.String(), ",host=localhost")
+	assert.Contains(t, m.String(), ",region=us-east")
+	assert.Contains(t, m.String(), ",zone=1c")
 }

 // Test basic functionality of ApplyTemplate
500 plugins/parsers/influx/500.metrics Normal file
@@ -0,0 +1,500 @@
ctr,host=tars,some=tag-0 n=3i 1476437629342569532
ctr,host=tars,some=tag-1 n=3i 1476437629342569532
ctr,host=tars,some=tag-2 n=3i 1476437629342569532
ctr,host=tars,some=tag-3 n=3i 1476437629342569532
ctr,host=tars,some=tag-4 n=3i 1476437629342569532
ctr,host=tars,some=tag-5 n=3i 1476437629342569532
ctr,host=tars,some=tag-6 n=3i 1476437629342569532
ctr,host=tars,some=tag-7 n=3i 1476437629342569532
ctr,host=tars,some=tag-8 n=3i 1476437629342569532
ctr,host=tars,some=tag-9 n=3i 1476437629342569532
ctr,host=tars,some=tag-10 n=3i 1476437629342569532
ctr,host=tars,some=tag-11 n=3i 1476437629342569532
ctr,host=tars,some=tag-12 n=3i 1476437629342569532
ctr,host=tars,some=tag-13 n=3i 1476437629342569532
ctr,host=tars,some=tag-14 n=3i 1476437629342569532
ctr,host=tars,some=tag-15 n=3i 1476437629342569532
ctr,host=tars,some=tag-16 n=3i 1476437629342569532
ctr,host=tars,some=tag-17 n=3i 1476437629342569532
ctr,host=tars,some=tag-18 n=3i 1476437629342569532
ctr,host=tars,some=tag-19 n=3i 1476437629342569532
ctr,host=tars,some=tag-20 n=3i 1476437629342569532
ctr,host=tars,some=tag-21 n=3i 1476437629342569532
ctr,host=tars,some=tag-22 n=3i 1476437629342569532
ctr,host=tars,some=tag-23 n=3i 1476437629342569532
ctr,host=tars,some=tag-24 n=3i 1476437629342569532
ctr,host=tars,some=tag-25 n=3i 1476437629342569532
ctr,host=tars,some=tag-26 n=3i 1476437629342569532
ctr,host=tars,some=tag-27 n=3i 1476437629342569532
ctr,host=tars,some=tag-28 n=3i 1476437629342569532
ctr,host=tars,some=tag-29 n=3i 1476437629342569532
ctr,host=tars,some=tag-30 n=3i 1476437629342569532
ctr,host=tars,some=tag-31 n=2i 1476437629342569532
ctr,host=tars,some=tag-32 n=2i 1476437629342569532
ctr,host=tars,some=tag-33 n=2i 1476437629342569532
ctr,host=tars,some=tag-34 n=2i 1476437629342569532
ctr,host=tars,some=tag-35 n=2i 1476437629342569532
ctr,host=tars,some=tag-36 n=2i 1476437629342569532
ctr,host=tars,some=tag-37 n=2i 1476437629342569532
ctr,host=tars,some=tag-38 n=2i 1476437629342569532
ctr,host=tars,some=tag-39 n=2i 1476437629342569532
ctr,host=tars,some=tag-40 n=2i 1476437629342569532
ctr,host=tars,some=tag-41 n=2i 1476437629342569532
ctr,host=tars,some=tag-42 n=2i 1476437629342569532
ctr,host=tars,some=tag-43 n=2i 1476437629342569532
ctr,host=tars,some=tag-44 n=2i 1476437629342569532
ctr,host=tars,some=tag-45 n=2i 1476437629342569532
ctr,host=tars,some=tag-46 n=2i 1476437629342569532
ctr,host=tars,some=tag-47 n=2i 1476437629342569532
ctr,host=tars,some=tag-48 n=2i 1476437629342569532
ctr,host=tars,some=tag-49 n=2i 1476437629342569532
ctr,host=tars,some=tag-50 n=2i 1476437629342569532
ctr,host=tars,some=tag-51 n=2i 1476437629342569532
ctr,host=tars,some=tag-52 n=2i 1476437629342569532
ctr,host=tars,some=tag-53 n=2i 1476437629342569532
ctr,host=tars,some=tag-54 n=2i 1476437629342569532
ctr,host=tars,some=tag-55 n=2i 1476437629342569532
ctr,host=tars,some=tag-56 n=2i 1476437629342569532
ctr,host=tars,some=tag-57 n=2i 1476437629342569532
ctr,host=tars,some=tag-58 n=2i 1476437629342569532
ctr,host=tars,some=tag-59 n=2i 1476437629342569532
ctr,host=tars,some=tag-60 n=2i 1476437629342569532
ctr,host=tars,some=tag-61 n=2i 1476437629342569532
ctr,host=tars,some=tag-62 n=2i 1476437629342569532
ctr,host=tars,some=tag-63 n=2i 1476437629342569532
ctr,host=tars,some=tag-64 n=2i 1476437629342569532
ctr,host=tars,some=tag-65 n=2i 1476437629342569532
ctr,host=tars,some=tag-66 n=2i 1476437629342569532
ctr,host=tars,some=tag-67 n=2i 1476437629342569532
ctr,host=tars,some=tag-68 n=2i 1476437629342569532
ctr,host=tars,some=tag-69 n=2i 1476437629342569532
ctr,host=tars,some=tag-70 n=2i 1476437629342569532
ctr,host=tars,some=tag-71 n=2i 1476437629342569532
ctr,host=tars,some=tag-72 n=2i 1476437629342569532
ctr,host=tars,some=tag-73 n=2i 1476437629342569532
ctr,host=tars,some=tag-74 n=2i 1476437629342569532
ctr,host=tars,some=tag-75 n=2i 1476437629342569532
ctr,host=tars,some=tag-76 n=2i 1476437629342569532
ctr,host=tars,some=tag-77 n=2i 1476437629342569532
ctr,host=tars,some=tag-78 n=2i 1476437629342569532
ctr,host=tars,some=tag-79 n=2i 1476437629342569532
ctr,host=tars,some=tag-80 n=2i 1476437629342569532
ctr,host=tars,some=tag-81 n=2i 1476437629342569532
ctr,host=tars,some=tag-82 n=2i 1476437629342569532
ctr,host=tars,some=tag-83 n=2i 1476437629342569532
ctr,host=tars,some=tag-84 n=2i 1476437629342569532
ctr,host=tars,some=tag-85 n=2i 1476437629342569532
ctr,host=tars,some=tag-86 n=2i 1476437629342569532
ctr,host=tars,some=tag-87 n=2i 1476437629342569532
ctr,host=tars,some=tag-88 n=2i 1476437629342569532
ctr,host=tars,some=tag-89 n=2i 1476437629342569532
ctr,host=tars,some=tag-90 n=2i 1476437629342569532
ctr,host=tars,some=tag-91 n=2i 1476437629342569532
ctr,host=tars,some=tag-92 n=2i 1476437629342569532
ctr,host=tars,some=tag-93 n=2i 1476437629342569532
ctr,host=tars,some=tag-94 n=2i 1476437629342569532
ctr,host=tars,some=tag-95 n=2i 1476437629342569532
ctr,host=tars,some=tag-96 n=2i 1476437629342569532
ctr,host=tars,some=tag-97 n=2i 1476437629342569532
ctr,host=tars,some=tag-98 n=2i 1476437629342569532
ctr,host=tars,some=tag-99 n=2i 1476437629342569532
ctr,host=tars,some=tag-100 n=2i 1476437629342569532
ctr,host=tars,some=tag-101 n=2i 1476437629342569532
ctr,host=tars,some=tag-102 n=2i 1476437629342569532
ctr,host=tars,some=tag-103 n=2i 1476437629342569532
ctr,host=tars,some=tag-104 n=2i 1476437629342569532
ctr,host=tars,some=tag-105 n=2i 1476437629342569532
ctr,host=tars,some=tag-106 n=2i 1476437629342569532
ctr,host=tars,some=tag-107 n=2i 1476437629342569532
ctr,host=tars,some=tag-108 n=2i 1476437629342569532
ctr,host=tars,some=tag-109 n=2i 1476437629342569532
ctr,host=tars,some=tag-110 n=2i 1476437629342569532
ctr,host=tars,some=tag-111 n=2i 1476437629342569532
ctr,host=tars,some=tag-112 n=2i 1476437629342569532
ctr,host=tars,some=tag-113 n=2i 1476437629342569532
ctr,host=tars,some=tag-114 n=2i 1476437629342569532
ctr,host=tars,some=tag-115 n=2i 1476437629342569532
ctr,host=tars,some=tag-116 n=2i 1476437629342569532
ctr,host=tars,some=tag-117 n=2i 1476437629342569532
ctr,host=tars,some=tag-118 n=2i 1476437629342569532
ctr,host=tars,some=tag-119 n=2i 1476437629342569532
ctr,host=tars,some=tag-120 n=2i 1476437629342569532
ctr,host=tars,some=tag-121 n=2i 1476437629342569532
ctr,host=tars,some=tag-122 n=2i 1476437629342569532
ctr,host=tars,some=tag-123 n=2i 1476437629342569532
ctr,host=tars,some=tag-124 n=2i 1476437629342569532
ctr,host=tars,some=tag-125 n=2i 1476437629342569532
ctr,host=tars,some=tag-126 n=2i 1476437629342569532
ctr,host=tars,some=tag-127 n=2i 1476437629342569532
ctr,host=tars,some=tag-128 n=2i 1476437629342569532
ctr,host=tars,some=tag-129 n=2i 1476437629342569532
ctr,host=tars,some=tag-130 n=2i 1476437629342569532
ctr,host=tars,some=tag-131 n=2i 1476437629342569532
ctr,host=tars,some=tag-132 n=2i 1476437629342569532
ctr,host=tars,some=tag-133 n=2i 1476437629342569532
ctr,host=tars,some=tag-134 n=2i 1476437629342569532
ctr,host=tars,some=tag-135 n=2i 1476437629342569532
ctr,host=tars,some=tag-136 n=2i 1476437629342569532
ctr,host=tars,some=tag-137 n=2i 1476437629342569532
ctr,host=tars,some=tag-138 n=2i 1476437629342569532
ctr,host=tars,some=tag-139 n=2i 1476437629342569532
ctr,host=tars,some=tag-140 n=2i 1476437629342569532
ctr,host=tars,some=tag-141 n=2i 1476437629342569532
ctr,host=tars,some=tag-142 n=2i 1476437629342569532
ctr,host=tars,some=tag-143 n=2i 1476437629342569532
ctr,host=tars,some=tag-144 n=2i 1476437629342569532
ctr,host=tars,some=tag-145 n=2i 1476437629342569532
ctr,host=tars,some=tag-146 n=2i 1476437629342569532
ctr,host=tars,some=tag-147 n=2i 1476437629342569532
ctr,host=tars,some=tag-148 n=2i 1476437629342569532
ctr,host=tars,some=tag-149 n=2i 1476437629342569532
ctr,host=tars,some=tag-150 n=2i 1476437629342569532
ctr,host=tars,some=tag-151 n=2i 1476437629342569532
ctr,host=tars,some=tag-152 n=2i 1476437629342569532
ctr,host=tars,some=tag-153 n=2i 1476437629342569532
ctr,host=tars,some=tag-154 n=2i 1476437629342569532
ctr,host=tars,some=tag-155 n=2i 1476437629342569532
ctr,host=tars,some=tag-156 n=2i 1476437629342569532
ctr,host=tars,some=tag-157 n=2i 1476437629342569532
ctr,host=tars,some=tag-158 n=2i 1476437629342569532
ctr,host=tars,some=tag-159 n=2i 1476437629342569532
ctr,host=tars,some=tag-160 n=2i 1476437629342569532
ctr,host=tars,some=tag-161 n=2i 1476437629342569532
ctr,host=tars,some=tag-162 n=2i 1476437629342569532
ctr,host=tars,some=tag-163 n=2i 1476437629342569532
ctr,host=tars,some=tag-164 n=2i 1476437629342569532
ctr,host=tars,some=tag-165 n=2i 1476437629342569532
ctr,host=tars,some=tag-166 n=2i 1476437629342569532
ctr,host=tars,some=tag-167 n=2i 1476437629342569532
ctr,host=tars,some=tag-168 n=2i 1476437629342569532
ctr,host=tars,some=tag-169 n=2i 1476437629342569532
ctr,host=tars,some=tag-170 n=2i 1476437629342569532
ctr,host=tars,some=tag-171 n=2i 1476437629342569532
ctr,host=tars,some=tag-172 n=2i 1476437629342569532
ctr,host=tars,some=tag-173 n=2i 1476437629342569532
ctr,host=tars,some=tag-174 n=2i 1476437629342569532
ctr,host=tars,some=tag-175 n=2i 1476437629342569532
ctr,host=tars,some=tag-176 n=2i 1476437629342569532
ctr,host=tars,some=tag-177 n=2i 1476437629342569532
ctr,host=tars,some=tag-178 n=2i 1476437629342569532
ctr,host=tars,some=tag-179 n=2i 1476437629342569532
ctr,host=tars,some=tag-180 n=2i 1476437629342569532
ctr,host=tars,some=tag-181 n=2i 1476437629342569532
ctr,host=tars,some=tag-182 n=2i 1476437629342569532
ctr,host=tars,some=tag-183 n=2i 1476437629342569532
ctr,host=tars,some=tag-184 n=2i 1476437629342569532
ctr,host=tars,some=tag-185 n=2i 1476437629342569532
ctr,host=tars,some=tag-186 n=2i 1476437629342569532
ctr,host=tars,some=tag-187 n=2i 1476437629342569532
ctr,host=tars,some=tag-188 n=2i 1476437629342569532
ctr,host=tars,some=tag-189 n=2i 1476437629342569532
ctr,host=tars,some=tag-190 n=2i 1476437629342569532
ctr,host=tars,some=tag-191 n=2i 1476437629342569532
ctr,host=tars,some=tag-192 n=2i 1476437629342569532
ctr,host=tars,some=tag-193 n=2i 1476437629342569532
ctr,host=tars,some=tag-194 n=2i 1476437629342569532
ctr,host=tars,some=tag-195 n=2i 1476437629342569532
ctr,host=tars,some=tag-196 n=2i 1476437629342569532
ctr,host=tars,some=tag-197 n=2i 1476437629342569532
ctr,host=tars,some=tag-198 n=2i 1476437629342569532
ctr,host=tars,some=tag-199 n=2i 1476437629342569532
ctr,host=tars,some=tag-200 n=2i 1476437629342569532
ctr,host=tars,some=tag-201 n=2i 1476437629342569532
ctr,host=tars,some=tag-202 n=2i 1476437629342569532
ctr,host=tars,some=tag-203 n=2i 1476437629342569532
ctr,host=tars,some=tag-204 n=2i 1476437629342569532
ctr,host=tars,some=tag-205 n=2i 1476437629342569532
ctr,host=tars,some=tag-206 n=2i 1476437629342569532
ctr,host=tars,some=tag-207 n=2i 1476437629342569532
ctr,host=tars,some=tag-208 n=2i 1476437629342569532
ctr,host=tars,some=tag-209 n=2i 1476437629342569532
ctr,host=tars,some=tag-210 n=2i 1476437629342569532
ctr,host=tars,some=tag-211 n=2i 1476437629342569532
ctr,host=tars,some=tag-212 n=2i 1476437629342569532
ctr,host=tars,some=tag-213 n=2i 1476437629342569532
ctr,host=tars,some=tag-214 n=2i 1476437629342569532
ctr,host=tars,some=tag-215 n=2i 1476437629342569532
ctr,host=tars,some=tag-216 n=2i 1476437629342569532
ctr,host=tars,some=tag-217 n=2i 1476437629342569532
ctr,host=tars,some=tag-218 n=2i 1476437629342569532
ctr,host=tars,some=tag-219 n=2i 1476437629342569532
ctr,host=tars,some=tag-220 n=2i 1476437629342569532
ctr,host=tars,some=tag-221 n=2i 1476437629342569532
ctr,host=tars,some=tag-222 n=2i 1476437629342569532
ctr,host=tars,some=tag-223 n=2i 1476437629342569532
ctr,host=tars,some=tag-224 n=2i 1476437629342569532
ctr,host=tars,some=tag-225 n=2i 1476437629342569532
ctr,host=tars,some=tag-226 n=2i 1476437629342569532
ctr,host=tars,some=tag-227 n=2i 1476437629342569532
ctr,host=tars,some=tag-228 n=2i 1476437629342569532
ctr,host=tars,some=tag-229 n=2i 1476437629342569532
ctr,host=tars,some=tag-230 n=2i 1476437629342569532
ctr,host=tars,some=tag-231 n=2i 1476437629342569532
ctr,host=tars,some=tag-232 n=2i 1476437629342569532
ctr,host=tars,some=tag-233 n=2i 1476437629342569532
ctr,host=tars,some=tag-234 n=2i 1476437629342569532
ctr,host=tars,some=tag-235 n=2i 1476437629342569532
ctr,host=tars,some=tag-236 n=2i 1476437629342569532
ctr,host=tars,some=tag-237 n=2i 1476437629342569532
ctr,host=tars,some=tag-238 n=2i 1476437629342569532
ctr,host=tars,some=tag-239 n=2i 1476437629342569532
ctr,host=tars,some=tag-240 n=2i 1476437629342569532
ctr,host=tars,some=tag-241 n=2i 1476437629342569532
ctr,host=tars,some=tag-242 n=2i 1476437629342569532
ctr,host=tars,some=tag-243 n=2i 1476437629342569532
ctr,host=tars,some=tag-244 n=2i 1476437629342569532
ctr,host=tars,some=tag-245 n=2i 1476437629342569532
ctr,host=tars,some=tag-246 n=2i 1476437629342569532
ctr,host=tars,some=tag-247 n=2i 1476437629342569532
ctr,host=tars,some=tag-248 n=2i 1476437629342569532
ctr,host=tars,some=tag-249 n=2i 1476437629342569532
ctr,host=tars,some=tag-250 n=2i 1476437629342569532
ctr,host=tars,some=tag-251 n=1i 1476437629342569532
ctr,host=tars,some=tag-252 n=1i 1476437629342569532
ctr,host=tars,some=tag-253 n=1i 1476437629342569532
ctr,host=tars,some=tag-254 n=1i 1476437629342569532
ctr,host=tars,some=tag-255 n=1i 1476437629342569532
ctr,host=tars,some=tag-256 n=1i 1476437629342569532
ctr,host=tars,some=tag-257 n=1i 1476437629342569532
ctr,host=tars,some=tag-258 n=1i 1476437629342569532
ctr,host=tars,some=tag-259 n=1i 1476437629342569532
ctr,host=tars,some=tag-260 n=1i 1476437629342569532
ctr,host=tars,some=tag-261 n=1i 1476437629342569532
ctr,host=tars,some=tag-262 n=1i 1476437629342569532
ctr,host=tars,some=tag-263 n=1i 1476437629342569532
ctr,host=tars,some=tag-264 n=1i 1476437629342569532
ctr,host=tars,some=tag-265 n=1i 1476437629342569532
ctr,host=tars,some=tag-266 n=1i 1476437629342569532
ctr,host=tars,some=tag-267 n=1i 1476437629342569532
ctr,host=tars,some=tag-268 n=1i 1476437629342569532
ctr,host=tars,some=tag-269 n=1i 1476437629342569532
ctr,host=tars,some=tag-270 n=1i 1476437629342569532
ctr,host=tars,some=tag-271 n=1i 1476437629342569532
ctr,host=tars,some=tag-272 n=1i 1476437629342569532
ctr,host=tars,some=tag-273 n=1i 1476437629342569532
ctr,host=tars,some=tag-274 n=1i 1476437629342569532
ctr,host=tars,some=tag-275 n=1i 1476437629342569532
ctr,host=tars,some=tag-276 n=1i 1476437629342569532
ctr,host=tars,some=tag-277 n=1i 1476437629342569532
ctr,host=tars,some=tag-278 n=1i 1476437629342569532
ctr,host=tars,some=tag-279 n=1i 1476437629342569532
ctr,host=tars,some=tag-280 n=1i 1476437629342569532
ctr,host=tars,some=tag-281 n=1i 1476437629342569532
ctr,host=tars,some=tag-282 n=1i 1476437629342569532
ctr,host=tars,some=tag-283 n=1i 1476437629342569532
ctr,host=tars,some=tag-284 n=1i 1476437629342569532
ctr,host=tars,some=tag-285 n=1i 1476437629342569532
ctr,host=tars,some=tag-286 n=1i 1476437629342569532
ctr,host=tars,some=tag-287 n=1i 1476437629342569532
ctr,host=tars,some=tag-288 n=1i 1476437629342569532
ctr,host=tars,some=tag-289 n=1i 1476437629342569532
ctr,host=tars,some=tag-290 n=1i 1476437629342569532
ctr,host=tars,some=tag-291 n=1i 1476437629342569532
ctr,host=tars,some=tag-292 n=1i 1476437629342569532
ctr,host=tars,some=tag-293 n=1i 1476437629342569532
ctr,host=tars,some=tag-294 n=1i 1476437629342569532
ctr,host=tars,some=tag-295 n=1i 1476437629342569532
ctr,host=tars,some=tag-296 n=1i 1476437629342569532
ctr,host=tars,some=tag-297 n=1i 1476437629342569532
ctr,host=tars,some=tag-298 n=1i 1476437629342569532
ctr,host=tars,some=tag-299 n=1i 1476437629342569532
ctr,host=tars,some=tag-300 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-301 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-302 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-303 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-304 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-305 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-306 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-307 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-308 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-309 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-310 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-311 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-312 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-313 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-314 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-315 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-316 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-317 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-318 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-319 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-320 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-321 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-322 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-323 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-324 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-325 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-326 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-327 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-328 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-329 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-330 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-331 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-332 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-333 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-334 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-335 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-336 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-337 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-338 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-339 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-340 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-341 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-342 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-343 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-344 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-345 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-346 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-347 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-348 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-349 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-350 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-351 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-352 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-353 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-354 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-355 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-356 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-357 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-358 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-359 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-360 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-361 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-362 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-363 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-364 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-365 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-366 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-367 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-368 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-369 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-370 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-371 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-372 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-373 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-374 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-375 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-376 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-377 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-378 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-379 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-380 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-381 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-382 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-383 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-384 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-385 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-386 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-387 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-388 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-389 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-390 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-391 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-392 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-393 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-394 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-395 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-396 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-397 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-398 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-399 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-400 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-401 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-402 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-403 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-404 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-405 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-406 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-407 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-408 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-409 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-410 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-411 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-412 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-413 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-414 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-415 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-416 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-417 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-418 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-419 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-420 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-421 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-422 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-423 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-424 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-425 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-426 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-427 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-428 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-429 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-430 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-431 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-432 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-433 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-434 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-435 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-436 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-437 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-438 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-439 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-440 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-441 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-442 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-443 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-444 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-445 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-446 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-447 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-448 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-449 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-450 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-451 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-452 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-453 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-454 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-455 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-456 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-457 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-458 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-459 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-460 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-461 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-462 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-463 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-464 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-465 n=0i 1476437629342523514
|
||||
ctr,host=tars,some=tag-466 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-467 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-468 n=0i 1476437629342569532
|
||||
ctr,host=tars,some=tag-469 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-470 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-471 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-472 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-473 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-474 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-475 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-476 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-477 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-478 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-479 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-480 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-481 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-482 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-483 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-484 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-485 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-486 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-487 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-488 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-489 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-490 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-491 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-492 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-493 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-494 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-495 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-496 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-497 n=2i 1476437629342569532
|
||||
ctr,host=tars,some=tag-498 n=1i 1476437629342569532
|
||||
ctr,host=tars,some=tag-499 n=1i 1476437629342569532
|
||||
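Each fixture line above is InfluxDB line protocol: a measurement (`ctr`), comma-separated tags (`host=tars`, `some=tag-N`), an integer field (`n=2i`), and a nanosecond Unix timestamp. A minimal sketch of parsing one such line with the parser changed below — the import path `plugins/parsers/influx` is assumed from the repo's usual layout:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers/influx"
)

func main() {
	parser := influx.InfluxParser{}
	// Parse a single fixture line; expect one metric with measurement "ctr",
	// tags host/some, and the integer field n.
	metrics, err := parser.Parse([]byte("ctr,host=tars,some=tag-499 n=1i 1476437629342569532\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(metrics[0].Tags(), metrics[0].Fields())
}
```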
@@ -6,8 +6,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
-
-	"github.com/influxdata/influxdb/models"
+	"github.com/influxdata/telegraf/metric"
 )
 
 // InfluxParser is an object for Parsing incoming metrics.
@@ -19,18 +18,16 @@ type InfluxParser struct {
 func (p *InfluxParser) ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
 	// parse even if the buffer begins with a newline
 	buf = bytes.TrimPrefix(buf, []byte("\n"))
-	points, err := models.ParsePointsWithPrecision(buf, t, "n")
-	metrics := make([]telegraf.Metric, len(points))
-	for i, point := range points {
-		for k, v := range p.DefaultTags {
-			// only set the default tag if it doesn't already exist:
-			if tmp := point.Tags().GetString(k); tmp == "" {
-				point.AddTag(k, v)
+	metrics, err := metric.ParseWithDefaultTime(buf, t)
+	if len(p.DefaultTags) > 0 {
+		for _, m := range metrics {
+			for k, v := range p.DefaultTags {
+				// only set the default tag if it doesn't already exist:
+				if !m.HasTag(k) {
+					m.AddTag(k, v)
+				}
 			}
 		}
-		// Ignore error here because it's impossible that a model.Point
-		// wouldn't parse into client.Point properly
-		metrics[i] = telegraf.NewMetricFromPoint(point)
 	}
 	return metrics, err
 }
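The rewritten `ParseWithDefaultTime` above delegates low-level parsing to the telegraf `metric` package and only overlays `DefaultTags` that the point does not already carry. A minimal sketch of that behavior, again assuming the usual `plugins/parsers/influx` import path:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers/influx"
)

func main() {
	p := influx.InfluxParser{
		// "host" is already present on the point below, so only "dc" is added.
		DefaultTags: map[string]string{"host": "fallback", "dc": "us-east"},
	}
	// The point has no timestamp, so the supplied default time is used.
	metrics, err := p.ParseWithDefaultTime([]byte("ctr,host=tars n=1i\n"), time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(metrics[0].Tags()) // host stays "tars"; "dc" gets the default
}
```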
@@ -1,19 +1,27 @@
 package influx
 
 import (
+	"io/ioutil"
 	"testing"
 	"time"
 
 	"github.com/influxdata/telegraf"
 
 	"github.com/stretchr/testify/assert"
 )
 
-var exptime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
+var (
+	ms         []telegraf.Metric
+	writer     = ioutil.Discard
+	metrics500 []byte
+	exptime    = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano()
+)
 
 const (
-	validInflux        = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000"
+	validInflux        = "cpu_load_short,cpu=cpu0 value=10 1257894000000000000\n"
 	validInfluxNewline = "\ncpu_load_short,cpu=cpu0 value=10 1257894000000000000\n"
-	invalidInflux      = "I don't think this is line protocol"
-	invalidInflux2     = "{\"a\": 5, \"b\": {\"c\": 6}}"
+	invalidInflux      = "I don't think this is line protocol\n"
+	invalidInflux2     = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
 )
 
 const influxMulti = `
@@ -48,7 +56,7 @@ func TestParseValidInflux(t *testing.T) {
 	assert.Equal(t, map[string]string{
 		"cpu": "cpu0",
 	}, metrics[0].Tags())
-	assert.Equal(t, exptime, metrics[0].Time())
+	assert.Equal(t, exptime, metrics[0].Time().UnixNano())
 
 	metrics, err = parser.Parse([]byte(validInfluxNewline))
 	assert.NoError(t, err)
@@ -60,7 +68,7 @@ func TestParseValidInflux(t *testing.T) {
 	assert.Equal(t, map[string]string{
 		"cpu": "cpu0",
 	}, metrics[0].Tags())
-	assert.Equal(t, exptime, metrics[0].Time())
+	assert.Equal(t, exptime, metrics[0].Time().UnixNano())
 }
 
 func TestParseLineValidInflux(t *testing.T) {
@@ -75,7 +83,7 @@ func TestParseLineValidInflux(t *testing.T) {
 	assert.Equal(t, map[string]string{
 		"cpu": "cpu0",
 	}, metric.Tags())
-	assert.Equal(t, exptime, metric.Time())
+	assert.Equal(t, exptime, metric.Time().UnixNano())
 
 	metric, err = parser.ParseLine(validInfluxNewline)
 	assert.NoError(t, err)
@@ -86,7 +94,7 @@ func TestParseLineValidInflux(t *testing.T) {
 	assert.Equal(t, map[string]string{
 		"cpu": "cpu0",
 	}, metric.Tags())
-	assert.Equal(t, exptime, metric.Time())
+	assert.Equal(t, exptime, metric.Time().UnixNano())
 }
 
 func TestParseMultipleValid(t *testing.T) {
@@ -147,11 +155,11 @@ func TestParseDefaultTags(t *testing.T) {
 		"datacenter": "us-east",
 		"host":       "foo",
 		"tag":        "default",
-	}, metrics[0].Tags())
+	}, metric.Tags())
 	assert.Equal(t, map[string]interface{}{
 		"usage_idle": float64(99),
 		"usage_busy": float64(1),
-	}, metrics[0].Fields())
+	}, metric.Fields())
 	}
 }
@@ -192,3 +200,44 @@ func TestParseInvalidInflux(t *testing.T) {
 	_, err = parser.ParseLine(invalidInflux2)
 	assert.Error(t, err)
 }
+
+func BenchmarkParse(b *testing.B) {
+	var err error
+	parser := InfluxParser{}
+	for n := 0; n < b.N; n++ {
+		// parse:
+		ms, err = parser.Parse(metrics500)
+		if err != nil {
+			panic(err)
+		}
+		if len(ms) != 500 {
+			panic("500 metrics not parsed!!")
+		}
+	}
+}
+
+func BenchmarkParseAddTagWrite(b *testing.B) {
+	var err error
+	parser := InfluxParser{}
+	for n := 0; n < b.N; n++ {
+		ms, err = parser.Parse(metrics500)
+		if err != nil {
+			panic(err)
+		}
+		if len(ms) != 500 {
+			panic("500 metrics not parsed!!")
+		}
+		for _, tmp := range ms {
+			tmp.AddTag("host", "localhost")
+			writer.Write(tmp.Serialize())
+		}
+	}
+}
+
+func init() {
+	var err error
+	metrics500, err = ioutil.ReadFile("500.metrics")
+	if err != nil {
+		panic(err)
+	}
+}
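The new benchmarks measure raw parse throughput over the 500.metrics fixture, and parse-plus-serialize when a tag is added and each metric is written to `ioutil.Discard`. They run with the standard tooling (`go test -bench=.` in this package); below is a small sketch of driving one programmatically via the stdlib `testing.Benchmark` helper — it assumes the 500.metrics fixture file is present, since the package `init()` panics without it:

```go
package influx

import (
	"fmt"
	"testing"
)

// reportParseBench runs BenchmarkParse once through the testing harness
// and prints its timing and allocation stats.
func reportParseBench() {
	res := testing.Benchmark(BenchmarkParse)
	fmt.Printf("%s\t%s\n", res.String(), res.MemString())
}
```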
@@ -1,6 +1,7 @@
 package json
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"strconv"
@@ -8,6 +9,7 @@ import (
 	"time"
 
 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 )
 
 type JSONParser struct {
@@ -16,15 +18,22 @@ type JSONParser struct {
 	DefaultTags map[string]string
 }
 
-func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
+func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) {
 	metrics := make([]telegraf.Metric, 0)
 
-	var jsonOut map[string]interface{}
+	var jsonOut []map[string]interface{}
 	err := json.Unmarshal(buf, &jsonOut)
 	if err != nil {
-		err = fmt.Errorf("unable to parse out as JSON, %s", err)
+		err = fmt.Errorf("unable to parse out as JSON Array, %s", err)
 		return nil, err
 	}
+	for _, item := range jsonOut {
+		metrics, err = p.parseObject(metrics, item)
+	}
+	return metrics, nil
+}
+
+func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) {
 
 	tags := make(map[string]string)
 	for k, v := range p.DefaultTags {
@@ -44,12 +53,12 @@ func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
 	}
 
 	f := JSONFlattener{}
-	err = f.FlattenJSON("", jsonOut)
+	err := f.FlattenJSON("", jsonOut)
 	if err != nil {
 		return nil, err
 	}
 
-	metric, err := telegraf.NewMetric(p.MetricName, tags, f.Fields, time.Now().UTC())
+	metric, err := metric.New(p.MetricName, tags, f.Fields, time.Now().UTC())
 
 	if err != nil {
 		return nil, err
@@ -57,6 +66,21 @@ func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
 	return append(metrics, metric), nil
 }
 
+func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
+
+	if !isarray(buf) {
+		metrics := make([]telegraf.Metric, 0)
+		var jsonOut map[string]interface{}
+		err := json.Unmarshal(buf, &jsonOut)
+		if err != nil {
+			err = fmt.Errorf("unable to parse out as JSON, %s", err)
+			return nil, err
+		}
+		return p.parseObject(metrics, jsonOut)
+	}
+	return p.parseArray(buf)
+}
+
 func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) {
 	metrics, err := p.Parse([]byte(line + "\n"))
 
@@ -115,3 +139,13 @@ func (f *JSONFlattener) FlattenJSON(
 	}
 	return nil
 }
+
+func isarray(buf []byte) bool {
+	ia := bytes.IndexByte(buf, '[')
+	ib := bytes.IndexByte(buf, '{')
+	if ia > -1 && ia < ib {
+		return true
+	} else {
+		return false
+	}
+}
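With the change above, a top-level JSON array and a single JSON object are handled by the same parser configuration: `isarray` treats input whose first `[` precedes its first `{` as an array, and each element is flattened into its own metric through `parseObject`. A minimal sketch, assuming the parser keeps its usual `plugins/parsers/json` import path:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers/json"
)

func main() {
	p := json.JSONParser{MetricName: "docs"}

	// Array input: two objects become two metrics under one configuration.
	metrics, err := p.Parse([]byte(`[{"a": 5, "b": 6}, {"a": 7, "b": 8}]`))
	if err != nil {
		panic(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Fields()) // map[a:5 b:6], then map[a:7 b:8]
	}
}
```

Note that the `isarray` heuristic is purely positional: input such as `{"a": [1]}` starts with `{`, so it is still parsed as a single object.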
Some files were not shown because too many files have changed in this diff.