Compare commits: release-1.… … telegraf-r…

3 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | f4e48f9909 |  |
|  | 2eee1b84fb |  |
|  | 9726ecfec3 |  |
2  .github/ISSUE_TEMPLATE.md (vendored)
@@ -1,7 +1,7 @@
## Directions

GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).

Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.

115  CHANGELOG.md
@@ -1,60 +1,7 @@
## v1.3.5 [2017-07-26]

### Bugfixes

- [#3049](https://github.com/influxdata/telegraf/issues/3049): Fix prometheus output cannot be reloaded.
- [#3037](https://github.com/influxdata/telegraf/issues/3037): Fix filestat reporting exists when cannot list directory.
- [#2386](https://github.com/influxdata/telegraf/issues/2386): Fix ntpq parse issue when using dns_lookup.
- [#2554](https://github.com/influxdata/telegraf/issues/2554): Fix panic when agent.interval = "0s".

## v1.3.4 [2017-07-12]

### Bugfixes

- [#3001](https://github.com/influxdata/telegraf/issues/3001): Fix handling of escape characters within fields.
- [#2988](https://github.com/influxdata/telegraf/issues/2988): Fix chrony plugin does not track system time offset.
- [#3004](https://github.com/influxdata/telegraf/issues/3004): Do not allow metrics with trailing slashes.
- [#3011](https://github.com/influxdata/telegraf/issues/3011): Prevent Write from being called concurrently.

## v1.3.3 [2017-06-28]

### Bugfixes

- [#2915](https://github.com/influxdata/telegraf/issues/2915): Allow dos line endings in tail and logparser.
- [#2937](https://github.com/influxdata/telegraf/issues/2937): Remove label value sanitization in prometheus output.
- [#2948](https://github.com/influxdata/telegraf/issues/2948): Fix bug parsing default timestamps with modified precision.
- [#2954](https://github.com/influxdata/telegraf/issues/2954): Fix panic in elasticsearch input if cannot determine master.

## v1.3.2 [2017-06-14]

### Bugfixes

- [#2862](https://github.com/influxdata/telegraf/issues/2862): Fix InfluxDB UDP metric splitting.
- [#2888](https://github.com/influxdata/telegraf/issues/2888): Fix mongodb/leofs urls without scheme.
- [#2822](https://github.com/influxdata/telegraf/issues/2822): Fix inconsistent label dimensions in prometheus output.

## v1.3.1 [2017-05-31]

### Bugfixes

- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Inputs processes fails with "no such process".
- [#2851](https://github.com/influxdata/telegraf/pull/2851): Fix InfluxDB output database quoting.
- [#2856](https://github.com/influxdata/telegraf/issues/2856): Fix net input on older Linux kernels.
- [#2848](https://github.com/influxdata/telegraf/pull/2848): Fix panic in mongo input.
- [#2869](https://github.com/influxdata/telegraf/pull/2869): Fix length calculation of split metric buffer.

## v1.3 [2017-05-15]
## v1.3 [unreleased]

### Release Notes

- Users of the windows `ping` plugin will need to drop or migrate their
measurements in order to continue using the plugin. The reason for this is that
the windows plugin was outputting a different type than the linux plugin. This
made it impossible to use the `ping` plugin for both windows and linux
machines.

- Ceph: the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag.

Telegraf < 1.3:
@@ -88,9 +35,6 @@ be deprecated eventually.

### Features

- [#2721](https://github.com/influxdata/telegraf/pull/2721): Added SASL options for kafka output plugin.
- [#2723](https://github.com/influxdata/telegraf/pull/2723): Added SSL configuration for input haproxy.
- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin.
- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
- [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
@@ -104,71 +48,18 @@ be deprecated eventually.
- [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin.
- [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
- [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
- [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
- [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
- [#2512](https://github.com/influxdata/telegraf/pull/2512): Added pprof tool.
- [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
- [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
- [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
- [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
- [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
- [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
- [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
- [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input
- [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
- [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs
- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
- [#2575](https://github.com/influxdata/telegraf/issues/2575) Add diskio input for Darwin
- [#2705](https://github.com/influxdata/telegraf/pull/2705): Kinesis output: add use_random_partitionkey option
- [#2635](https://github.com/influxdata/telegraf/issues/2635): add tcp keep-alive to socket_listener & socket_writer
- [#2031](https://github.com/influxdata/telegraf/pull/2031): Add Kapacitor input plugin
- [#2732](https://github.com/influxdata/telegraf/pull/2732): Use go 1.8.1
- [#2712](https://github.com/influxdata/telegraf/issues/2712): Documentation for rabbitmq input plugin
- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files.

### Bugfixes

- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password
- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
- [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods
- [#1636](https://github.com/influxdata/telegraf/issues/1636): procstat - stop caching PIDs.
- [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
- [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
- [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
- [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
- [#2360](https://github.com/influxdata/telegraf/pull/2360): Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems
- [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
- [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
- [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
- [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
- [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
- [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
- [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
- [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
- [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
- [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
- [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
- [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
- [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
- [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin
- [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
- [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
- [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
- [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
- [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces
- [#1911](https://github.com/influxdata/telegraf/issues/1911): Sysstat plugin needs LANG=C or similar locale
- [#2528](https://github.com/influxdata/telegraf/issues/2528): File output closes standard streams on reload.
- [#2603](https://github.com/influxdata/telegraf/issues/2603): AMQP output disconnect blocks all outputs
- [#2706](https://github.com/influxdata/telegraf/issues/2706): Improve documentation for redis input plugin

## v1.2.1 [2017-02-01]

CONTRIBUTING.md
@@ -124,7 +124,7 @@ You should also add the following to your SampleConfig() return:

```toml
  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```
@@ -254,7 +254,7 @@ You should also add the following to your SampleConfig() return:

```toml
  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```

19  Godeps
@@ -1,4 +1,3 @@
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
@@ -10,7 +9,10 @@ github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker b89aff1afa1f61993ab2ba18fd62d9375a195f5d
github.com/docker/distribution fb0bebc4b64e3881cc52a2478d749845ed76d2a8
github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
github.com/docker/go-connections 9670439d95da2651d9dfc7acc5d2ed92d3f25ee6
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
@@ -22,10 +24,11 @@ github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3
github.com/influxdata/config 8ec4638a81500c20be24855812bc8498ebe2dc92
github.com/influxdata/toml ad49a5c2936f96b8f5943c3fdba47630ccf45a0d
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
@@ -45,12 +48,11 @@ github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
github.com/shirou/gopsutil 77b5d0080adb6f028e457906f1944d9fcca34442
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
@@ -61,5 +63,4 @@ golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 ee3ebceab960cf68ab9a89ee6d78c031ef5b4a4e
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6

6  Makefile
@@ -51,7 +51,6 @@ docker-run:
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
	docker run --name memcached -p "11211:11211" -d memcached
	docker run --name postgres -p "5432:5432" -d postgres
@@ -70,7 +69,6 @@ docker-run-circle:
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
@@ -78,8 +76,8 @@ docker-run-circle:

# Kill all docker containers, ignore errors
docker-kill:
	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats

# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run

24  README.md
@@ -43,7 +43,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf

Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.8+.
if you don't have it already. You also must build with golang version 1.5+.

1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
@@ -97,21 +97,18 @@ configuration options.

## Input Plugins

* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [aerospike](./plugins/inputs/aerospike)
* [apache](./plugins/inputs/apache)
* [bcache](./plugins/inputs/bcache)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
@@ -124,11 +121,9 @@ configuration options.
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
@@ -177,7 +172,6 @@ configuration options.
* processes
* kernel (/proc/stat)
* kernel (/proc/vmstat)
* linux_sysctl_fs (/proc/sys/fs)

Telegraf can also collect metrics via the following service plugins:

@@ -197,17 +191,6 @@ Telegraf can also collect metrics via the following service plugins:
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [papertrail](./plugins/inputs/webhooks/papertrail)

Telegraf is able to parse the following input data formats into metrics, these
formats may be used with input plugins supporting the `data_format` option:

* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)

## Processor Plugins

@@ -226,7 +209,6 @@ formats may be used with input plugins supporting the `data_format` option:
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)

agent/agent.go
@@ -191,12 +191,6 @@ func (a *Agent) Test() error {
	}()

	for _, input := range a.Config.Inputs {
		if _, ok := input.Input.(telegraf.ServiceInput); ok {
			fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
				input.Name())
			continue
		}

		acc := NewAccumulator(input, metricC)
		acc.SetPrecision(a.Config.Agent.Precision.Duration,
			a.Config.Agent.Interval.Duration)
@@ -215,7 +209,7 @@ func (a *Agent) Test() error {
		// Special instructions for some inputs. cpu, for example, needs to be
		// run twice in order to return cpu usage percentages.
		switch input.Name() {
		case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
		case "cpu", "mongodb", "procstat":
			time.Sleep(500 * time.Millisecond)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
			if err := input.Input.Gather(acc); err != nil {
@@ -398,6 +392,5 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	}

	wg.Wait()
	a.Close()
	return nil
}

12  circle.yml
@@ -1,11 +1,13 @@
machine:
  go:
    version: 1.8.1
  services:
    - docker
    - memcached
    - redis
    - rabbitmq-server
  post:
    - sudo service zookeeper stop
    - go version
    - sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.8rc3.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.8rc3.linux-amd64.tar.gz
    - go version

dependencies:
  override:

cmd/telegraf/telegraf.go
@@ -4,10 +4,11 @@ import (
	"flag"
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof" // Comment this line to disable pprof endpoint.
	"os"
	"os/signal"
	"path"
	"path/filepath"
	"plugin"
	"runtime"
	"strings"
	"syscall"
@@ -15,19 +16,20 @@ import (
	"github.com/influxdata/telegraf/agent"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/logger"
	"github.com/influxdata/telegraf/registry"
	"github.com/influxdata/telegraf/registry/inputs"
	"github.com/influxdata/telegraf/registry/outputs"

	_ "github.com/influxdata/telegraf/plugins/aggregators/all"
	"github.com/influxdata/telegraf/plugins/inputs"
	_ "github.com/influxdata/telegraf/plugins/inputs/all"
	"github.com/influxdata/telegraf/plugins/outputs"
	_ "github.com/influxdata/telegraf/plugins/outputs/all"
	_ "github.com/influxdata/telegraf/plugins/processors/all"

	"github.com/kardianos/service"
)

var fDebug = flag.Bool("debug", false,
	"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
	"pprof address to listen on, not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
	"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -54,6 +56,8 @@ var fUsage = flag.String("usage", "",
	"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
	"operate on the service")
var fPlugins = flag.String("plugins", "",
	"path to directory containing external plugins")

// Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
@@ -91,7 +95,6 @@ The commands & flags are:
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
  --quiet             run in quiet mode

Examples:
@@ -110,9 +113,6 @@ Examples:

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}
@@ -144,23 +144,13 @@ func reloadLoop(
			log.Fatal("E! " + err.Error())
		}
	}
	if !*fTest && len(c.Outputs) == 0 {
	if len(c.Outputs) == 0 {
		log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
	}
	if len(c.Inputs) == 0 {
		log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
	}

	if int64(c.Agent.Interval.Duration) <= 0 {
		log.Fatalf("E! Agent interval must be positive, found %s",
			c.Agent.Interval.Duration)
	}

	if int64(c.Agent.FlushInterval.Duration) <= 0 {
		log.Fatalf("E! Agent flush_interval must be positive; found %s",
			c.Agent.Interval.Duration)
	}

	ag, err := agent.NewAgent(c)
	if err != nil {
		log.Fatal("E! " + err.Error())
@@ -264,11 +254,62 @@ func (p *program) Stop(s service.Service) error {
	return nil
}

// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
// in the specified directory.
func loadExternalPlugins(rootDir string) error {
	return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error {
		// Stop if there was an error.
		if err != nil {
			return err
		}

		// Ignore directories.
		if info.IsDir() {
			return nil
		}

		// Ignore files that aren't shared libraries.
		ext := strings.ToLower(path.Ext(pth))
		if ext != ".so" && ext != ".dll" {
			return nil
		}

		// name will be the path to the plugin file beginning at the root
		// directory, minus the extension.
		// ie, if the plugin file is /opt/telegraf-plugins/group1/foo.so, name
		// will be "group1/foo"
		name := strings.TrimPrefix(strings.TrimPrefix(pth, rootDir), string(os.PathSeparator))
		name = strings.TrimSuffix(name, filepath.Ext(pth))
		registry.SetName("external" + string(os.PathSeparator) + name)
		defer registry.SetName("")

		// Load plugin.
		_, err = plugin.Open(pth)
		if err != nil {
			return fmt.Errorf("error loading [%s]: %s", pth, err)
		}

		return nil
	})
}

func main() {
	flag.Usage = func() { usageExit(0) }
	flag.Parse()
	args := flag.Args()

	// Load external plugins, if requested.
	if *fPlugins != "" {
		pluginsDir, err := filepath.Abs(*fPlugins)
		if err != nil {
			log.Fatal("E! " + err.Error())
		}
		log.Printf("I! Loading external plugins from: %s\n", pluginsDir)
		if err := loadExternalPlugins(*fPlugins); err != nil {
			log.Fatal("E! " + err.Error())
		}
	}

	inputFilters, outputFilters := []string{}, []string{}
	if *fInputFilters != "" {
		inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
@@ -285,23 +326,6 @@ func main() {
		processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
	}

	if *pprofAddr != "" {
		go func() {
			pprofHostPort := *pprofAddr
			parts := strings.Split(pprofHostPort, ":")
			if len(parts) == 2 && parts[0] == "" {
				pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
			}
			pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"

			log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)

			if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
				log.Fatal("E! " + err.Error())
			}
		}()
	}

	if len(args) > 0 {
		switch args[0] {
		case "version":

docs/CONFIGURATION.md
@@ -24,16 +24,6 @@ Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
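
As a minimal sketch of that rule (hypothetical variable names `MY_DATABASE` and `MY_QUIET`; they are illustrative, not part of the docs):

```toml
[agent]
  ## Booleans and numbers are substituted unquoted:
  quiet = $MY_QUIET

[[outputs.influxdb]]
  ## Strings must keep the surrounding quotes:
  database = "$MY_DATABASE"
```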

## Configuration file locations

The location of the configuration file can be set via the `--config` command
line flag. Telegraf will also pick up all files matching the pattern `*.conf` if
the `-config-directory` command line flag is used.

On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
configuration files.

# Global Tags

Global tags can be specified in the `[global_tags]` section of the config file
@@ -70,7 +60,7 @@ ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s
as the collection interval, with the maximum being 1s. Precision will NOT
be used for service inputs, such as logparser and statsd. Valid values are
"ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **logfile**: Specify the log file name. The empty string means to log to stdout.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
@@ -124,40 +114,31 @@ is not specified then processor execution order will be random.
Filters can be configured per input, output, processor, or aggregator,
see below for examples.

* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. Not available for outputs.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns is emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
point based on its tag, `taginclude` removes all non matching tags from the
point. This filter can be used on both inputs & outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the point.
* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and if it matches, the field is emitted.
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted. fieldpass is not available for outputs.
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
fielddrop is not available for outputs.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
As opposed to tagdrop, which will drop an entire measurement based on it's
tags, tagexclude simply strips the given tag keys from the measurement. This
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.

**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
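
A minimal sketch of why this ordering matters (illustrative plugin and values): `tagpass` opens a TOML sub-table, so anything written after it would be parsed into that table instead of being applied to the plugin.

```toml
[[inputs.disk]]
  ## Regular options must come first...
  mount_points = ["/"]

  ## ...because everything after this sub-table header would be read
  ## as part of tagpass, not as a plugin option.
  [inputs.disk.tagpass]
    fstype = ["ext4", "xfs"]
```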

#### Input Configuration Examples

@@ -370,4 +351,4 @@ to the system load metrics due to the `namepass` parameter.

[[outputs.file]]
  files = ["stdout"]
```
```

docs/DATA_FORMATS_INPUT.md
@@ -7,7 +7,6 @@ Telegraf is able to parse the following input data formats into metrics:
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -41,7 +40,7 @@ example, in the exec plugin:
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"
@@ -68,7 +67,7 @@ metrics are parsed directly into Telegraf metrics.
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
@@ -118,7 +117,7 @@ For example, if you had this configuration:
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"
@@ -162,7 +161,7 @@ For example, if the following configuration:
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"
@@ -233,7 +232,7 @@ name of the plugin.
  name_override = "entropy_available"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "value"
@@ -391,7 +390,7 @@ There are many more options available,
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "graphite"
@@ -428,54 +427,14 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
  commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "nagios"
```

# Collectd:

The collectd format parses the collectd binary network protocol. Tags are
created for host, instance, type, and type instance. All collectd values are
added as float64 fields.

For more information about the binary network protocol see
[here](https://collectd.org/wiki/index.php/Binary_protocol).

You can control the cryptographic settings with parser options. Create an
authentication file and set `collectd_auth_file` to the path of the file, then
set the desired security level in `collectd_security_level`.

Additional information including client setup can be found
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).

You can also change the path to the typesdb or add additional typesdb using
`collectd_typesdb`.

#### Collectd Configuration:

```toml
[[inputs.socket_listener]]
  service_address = "udp://127.0.0.1:25826"
  name_prefix = "collectd_"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "collectd"

  ## Authentication file for cryptographic security levels
  collectd_auth_file = "/etc/collectd/auth_file"
  ## One of none (default), sign, or encrypt
  collectd_security_level = "encrypt"
  ## Path of to TypesDB specifications
  collectd_typesdb = ["/usr/share/collectd/types.db"]
```

docs/DATA_FORMATS_OUTPUT.md
@@ -36,7 +36,7 @@ config option, for example, in the `file` output plugin:
  files = ["stdout"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
@@ -60,7 +60,7 @@ metrics are serialized directly into InfluxDB line-protocol.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
@@ -104,7 +104,7 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"
@@ -143,18 +143,8 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
  json_timestamp_units = "1ns"
```

By default, the timestamp that is output in JSON data format serialized Telegraf
metrics is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter will be truncated to the nearest power of 10, so if the `json_timestamp_units`
are set to `15ms` the timestamps for the JSON format serialized Telegraf metrics will be
output in hundredths of a second (`10ms`).
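
To make that truncation rule concrete, here is a minimal sketch (illustrative values only) of an output that requests `15ms` and therefore ends up emitting `10ms` timestamps:

```toml
[[outputs.file]]
  files = ["stdout"]
  data_format = "json"

  ## 15ms is not a power of 10, so it is truncated down to 10ms:
  ## timestamps are emitted in hundredths of a second.
  json_timestamp_units = "15ms"
```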

docs/LICENSE_OF_DEPENDENCIES.md
@@ -1,5 +1,4 @@
# List
- collectd.org [MIT LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
@@ -31,3 +30,4 @@
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)

docs/PROFILING.md
@@ -1,24 +0,0 @@
# Telegraf profiling

Telegraf uses the standard package `net/http/pprof`. This package serves via its HTTP server runtime profiling data in the format expected by the pprof visualization tool.

By default, the profiling is turned off.

To enable profiling you need to specify address to config parameter `pprof-addr`, for example:

```
telegraf --config telegraf.conf --pprof-addr localhost:6060
```

There are several paths to get different profiling information:

To look at the heap profile:

`go tool pprof http://localhost:6060/debug/pprof/heap`

or to look at a 30-second CPU profile:

`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`

To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.

etc/telegraf.conf
@@ -55,13 +55,10 @@
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ## ie, when interval = "10s", precision will be "1s"
  ## when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  ## By default, precision will be set to the same timestamp order as the
  ## collection interval, with the maximum being 1s.
  ## Precision will NOT be used for service inputs, such as logparser and statsd.
  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
@@ -84,10 +81,7 @@

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  ## The HTTP or UDP URL for your InfluxDB instance. Each item should be
  ## of the form:
  ## scheme "://" host [ ":" port]
  ##
  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
  ## Multiple urls can be specified as part of the same cluster,
  ## this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
@@ -95,8 +89,7 @@
  ## The target database for metrics (telegraf will create it if not exists).
  database = "telegraf" # required

  ## Name of existing retention policy to write to. Empty string writes to
  ## the default retention policy.
  ## Retention policy to write to. Empty string writes to the default rp.
  retention_policy = ""
  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
  write_consistency = "any"
@@ -138,11 +131,9 @@
# ## AMQP exchange
# exchange = "telegraf"
# ## Auth method. PLAIN and EXTERNAL are supported
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# ## ie, if this tag exists, it's value will be used as the routing key
# routing_tag = "host"
#
# ## InfluxDB retention policy
@@ -150,10 +141,6 @@
# ## InfluxDB database
# # database = "telegraf"
#
# ## Write timeout, formatted as a string. If not provided, will default
# ## to 5s. 0s means no timeout (not recommended).
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -162,7 +149,7 @@
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -206,52 +193,13 @@
# # no configuration


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
# ## The full HTTP endpoint URL for your Elasticsearch instance
# ## Multiple urls can be specified as part of the same cluster,
# ## this means that only ONE of the urls will be written to each interval.
# urls = [ "http://node1.es.example.com:9200" ] # required.
# ## Elasticsearch client timeout, defaults to "5s" if not set.
# timeout = "5s"
# ## Set to true to ask Elasticsearch a list of all cluster nodes,
# ## thus it is not necessary to list all nodes in the urls config option.
# enable_sniffer = false
# ## Set the interval to check if the Elasticsearch nodes are available
# ## Setting to "0s" will disable the health check (not recommended in production)
# health_check_interval = "10s"
# ## HTTP basic authentication details (eg. when using Shield)
# # username = "telegraf"
# # password = "mypassword"
#
# ## Index Config
# ## The target index for metrics (Elasticsearch will create if it not exists).
# ## You can use the date specifiers below to create indexes per time frame.
# ## The metric timestamp will be used to decide the destination index name
# # %Y - year (2016)
# # %y - last two digits of year (00..99)
# # %m - month (01..12)
# # %d - day of month (e.g., 01)
# # %H - hour (00..23)
# index_name = "telegraf-%Y.%m.%d" # required.
#
# ## Template Config
# ## Set to true if you want telegraf to manage its index template.
# ## If enabled it will create a recommended index template for telegraf indexes
# manage_template = true
# ## The template name used for telegraf indexes
# template_name = "telegraf"
# ## Set to true if you want telegraf to overwrite an existing template
# overwrite_template = false


# # Send telegraf metrics to file(s)
# [[outputs.file]]
# ## Files to write to, "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -300,7 +248,7 @@
# ## Kafka topic for producer messages
# topic = "telegraf"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# ## ie, if this tag exists, it's value will be used as the routing key
# routing_tag = "host"
#
# ## CompressionCodec represents the various compression codecs recognized by
@@ -336,12 +284,8 @@
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -371,14 +315,9 @@
# streamname = "StreamName"
# ## PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## If set the paritionKey will be a random UUID on every put.
# ## This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
#
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -430,7 +369,7 @@
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -454,7 +393,7 @@
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -468,7 +407,7 @@
# topic = "telegraf"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -504,7 +443,7 @@
# # expiration_interval = "60s"


# # Configuration for the Riemann server to send metrics to
# # Configuration for Riemann server to send metrics to
# [[outputs.riemann]]
# ## The full TCP or UDP URL of the Riemann server
# url = "tcp://localhost:5555"
@@ -533,12 +472,9 @@
#
# ## Description for Riemann event
# # description_text = "metrics collected from telegraf"
#
# ## Riemann client write timeout, defaults to "5s" if not set.
# # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# # Configuration for the legacy Riemann plugin
# [[outputs.riemann_legacy]]
# ## URL of server
# url = "localhost:5555"
@@ -548,33 +484,6 @@
# separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
# ## URL to connect to
# # address = "tcp://127.0.0.1:8094"
# # address = "tcp://example.com:http"
# # address = "tcp4://127.0.0.1:8094"
# # address = "tcp6://127.0.0.1:8094"
# # address = "tcp6://[2001:db8::1]:8094"
# # address = "udp://127.0.0.1:8094"
# # address = "udp4://127.0.0.1:8094"
# # address = "udp6://127.0.0.1:8094"
# # address = "unix:///tmp/telegraf.sock"
# # address = "unixgram:///tmp/telegraf.sock"
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## Data format to generate.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# # data_format = "influx"

###############################################################################
|
||||
# PROCESSOR PLUGINS #
|
||||
@@ -622,7 +531,7 @@
|
||||
|
||||
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
||||
## present on /run, /var/run, /dev/shm or /dev).
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
||||
ignore_fs = ["tmpfs", "devtmpfs"]
|
||||
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
@@ -633,23 +542,6 @@
|
||||
# devices = ["sda", "sdb"]
|
||||
## Uncomment the following line if you need disk serial numbers.
|
||||
# skip_serial_number = false
|
||||
#
|
||||
## On systems which support it, device metadata can be added in the form of
|
||||
## tags.
|
||||
## Currently only Linux is supported via udev properties. You can view
|
||||
## available properties for a device by running:
|
||||
## 'udevadm info -q property -n /dev/sda'
|
||||
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
|
||||
#
|
||||
## Using the same metadata source as device_tags, you can also customize the
|
||||
## name of the device via templates.
|
||||
## The 'name_templates' parameter is a list of templates to try and apply to
|
||||
## the device. The template may contain variables in the form of '$PROPERTY' or
|
||||
## '${PROPERTY}'. The first template which does not contain any variables not
|
||||
## present for the device is used as the device name tag.
|
||||
## The typical use case is for LVM volumes, to get the VG/LV name instead of
|
||||
## the near-meaningless DM-0 name.
|
||||
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
|
||||
|
||||
|
||||
# Get kernel statistics from /proc/stat
|
||||
@@ -766,7 +658,7 @@
|
||||
# gather_admin_socket_stats = true
|
||||
#
|
||||
# ## Whether to gather statistics via ceph commands
|
||||
# gather_cluster_stats = false
|
||||
# gather_cluster_stats = true
|
||||
|
||||
|
||||
# # Read specific statistics per cgroup
|
||||
@@ -785,12 +677,6 @@
|
||||
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
|
||||
|
||||
|
||||
# # Get standard chrony metrics, requires chronyc executable.
|
||||
# [[inputs.chrony]]
|
||||
# ## If true, chronyc tries to perform a DNS lookup for the time server.
|
||||
# # dns_lookup = false
|
||||
|
||||
|
||||
# # Pull Metric Statistics from Amazon CloudWatch
|
||||
# [[inputs.cloudwatch]]
|
||||
# ## Amazon Region
|
||||
@@ -836,10 +722,9 @@
|
||||
# namespace = "AWS/ELB"
|
||||
#
|
||||
# ## Maximum requests per second. Note that the global default AWS rate limit is
|
||||
# ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
|
||||
# ## maximum of 400. Optional - default value is 200.
|
||||
# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
|
||||
# ratelimit = 200
|
||||
# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
|
||||
# ## maximum of 10. Optional - default value is 10.
|
||||
# ratelimit = 10
|
||||
#
|
||||
# ## Metrics to Pull (optional)
|
||||
# ## Defaults to all Metrics in Namespace if nothing is provided
|
||||
@@ -853,22 +738,6 @@
|
||||
# # value = "p-example"
|
||||
|
||||
|
||||
# # Collects conntrack stats from the configured directories and files.
|
||||
# [[inputs.conntrack]]
|
||||
# ## The following defaults would work with multiple versions of conntrack.
|
||||
# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
|
||||
# ## kernel versions, as are the directory locations.
|
||||
#
|
||||
# ## Superset of filenames to look for within the conntrack dirs.
|
||||
# ## Missing files will be ignored.
|
||||
# files = ["ip_conntrack_count","ip_conntrack_max",
|
||||
# "nf_conntrack_count","nf_conntrack_max"]
|
||||
#
|
||||
# ## Directories to search within for the conntrack files above.
|
||||
# ## Missing directories will be ignored.
# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
|
||||
|
||||
|
||||
# # Gather health check statuses from services registered in Consul
|
||||
# [[inputs.consul]]
|
||||
# ## Most of these values defaults to the one configured on a Consul's agent level.
|
||||
@@ -916,12 +785,6 @@
|
||||
# servers = ["localhost"]
|
||||
|
||||
|
||||
# # Provide a native collection for dmsetup based statistics for dm-cache
|
||||
# [[inputs.dmcache]]
|
||||
# ## Whether to report per-device stats or not
|
||||
# per_device = true
|
||||
|
||||
|
||||
# # Query given DNS server and gives statistics
|
||||
# [[inputs.dns_query]]
|
||||
# ## servers to query
|
||||
@@ -958,10 +821,6 @@
|
||||
# ## Whether to report for each container total blkio and network stats or not
|
||||
# total = false
|
||||
#
|
||||
# ## docker labels to include and exclude as tags. Globs accepted.
|
||||
# ## Note that an empty array for both will include all labels as tags
|
||||
# docker_label_include = []
|
||||
# docker_label_exclude = []
|
||||
|
||||
|
||||
# # Read statistics from one or many dovecot servers
|
||||
@@ -1025,7 +884,7 @@
|
||||
# name_suffix = "_mycollector"
|
||||
#
|
||||
# ## Data format to consume.
|
||||
# ## Each data format has its own unique set of configuration options, read
|
||||
# ## Each data format has it's own unique set of configuration options, read
|
||||
# ## more about them here:
|
||||
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
# data_format = "influx"
|
||||
@@ -1090,39 +949,14 @@
|
||||
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
|
||||
# ## Make sure you specify the complete path to the stats endpoint
|
||||
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
|
||||
#
|
||||
# #
|
||||
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
|
||||
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
|
||||
#
|
||||
# ##
|
||||
# ## You can also use local socket with standard wildcard globbing.
|
||||
# ## Server address not starting with 'http' will be treated as a possible
|
||||
# ## socket, so both examples below are valid.
|
||||
# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
|
||||
#
|
||||
# ## By default, some of the fields are renamed from what haproxy calls them.
|
||||
# ## Setting this option to true results in the plugin keeping the original
|
||||
# ## field names.
|
||||
# # keep_field_names = true
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Monitor disks' temperatures using hddtemp
|
||||
# [[inputs.hddtemp]]
|
||||
# ## By default, telegraf gathers temps data from all disks detected by the
|
||||
# ## hddtemp.
|
||||
# ##
|
||||
# ## Only collect temps from the selected disks.
|
||||
# ##
|
||||
# ## A * as the device name will return the temperature values of all disks.
|
||||
# ##
|
||||
# # address = "127.0.0.1:7634"
|
||||
# # devices = ["sda", "*"]
|
||||
# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
|
||||
|
||||
|
||||
# # HTTP/HTTPS request given an address a method and a timeout
|
||||
@@ -1143,11 +977,6 @@
|
||||
# # {'fake':'data'}
|
||||
# # '''
|
||||
#
|
||||
# ## Optional substring or regex match in body of the response
|
||||
# ## response_string_match = "\"service_status\": \"up\""
|
||||
# ## response_string_match = "ok"
|
||||
# ## response_string_match = "\".*_status\".?:.?\"up\""
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
@@ -1161,10 +990,7 @@
|
||||
# ## NOTE This plugin only reads numerical measurements, strings and booleans
|
||||
# ## will be ignored.
|
||||
#
|
||||
# ## Name for the service being polled. Will be appended to the name of the
|
||||
# ## measurement e.g. httpjson_webserver_stats
|
||||
# ##
|
||||
# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
|
||||
# ## a name for the service being polled
|
||||
# name = "webserver_stats"
|
||||
#
|
||||
# ## URL of each server in the service's cluster
|
||||
@@ -1184,14 +1010,12 @@
|
||||
# # "my_tag_2"
|
||||
# # ]
|
||||
#
|
||||
# ## HTTP parameters (all values must be strings). For "GET" requests, data
|
||||
# ## will be included in the query. For "POST" requests, data will be included
|
||||
# ## in the request body as "x-www-form-urlencoded".
|
||||
# # [inputs.httpjson.parameters]
|
||||
# # event_type = "cpu_spike"
|
||||
# # threshold = "0.75"
|
||||
# ## HTTP parameters (all values must be strings)
|
||||
# [inputs.httpjson.parameters]
|
||||
# event_type = "cpu_spike"
|
||||
# threshold = "0.75"
|
||||
#
|
||||
# ## HTTP Headers (all values must be strings)
|
||||
# ## HTTP Header parameters (all values must be strings)
|
||||
# # [inputs.httpjson.headers]
|
||||
# # X-Auth-Token = "my-xauth-token"
|
||||
# # apiVersion = "v1"
|
||||
@@ -1226,44 +1050,14 @@
|
||||
# # collect_memstats = true
|
||||
|
||||
|
||||
# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
|
||||
# [[inputs.interrupts]]
|
||||
# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
|
||||
# # [inputs.interrupts.tagdrop]
|
||||
# # irq = [ "NET_RX", "TASKLET" ]
|
||||
|
||||
|
||||
# # Read metrics from the bare metal servers via IPMI
|
||||
# # Read metrics from one or many bare metal servers
|
||||
# [[inputs.ipmi_sensor]]
|
||||
# ## optionally specify the path to the ipmitool executable
|
||||
# # path = "/usr/bin/ipmitool"
|
||||
# #
|
||||
# ## optionally specify one or more servers via a url matching
|
||||
# ## specify servers via a url matching:
|
||||
# ## [username[:password]@][protocol[(address)]]
|
||||
# ## e.g.
|
||||
# ## root:passwd@lan(127.0.0.1)
|
||||
# ##
|
||||
# ## if no servers are specified, local machine sensor stats will be queried
|
||||
# ##
|
||||
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
|
||||
|
||||
|
||||
# # Gather packets and bytes throughput from iptables
|
||||
# [[inputs.iptables]]
|
||||
# ## iptables require root access on most systems.
|
||||
# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
|
||||
# ## Users must configure sudo to allow telegraf user to run iptables with no password.
|
||||
# ## iptables can be restricted to only list command "iptables -nvL".
|
||||
# use_sudo = false
|
||||
# ## Setting 'use_lock' to true runs iptables with the "-w" option.
|
||||
# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
|
||||
# use_lock = false
|
||||
# ## defines the table to monitor:
|
||||
# table = "filter"
|
||||
# ## defines the chains to monitor.
|
||||
# ## NOTE: iptables rules without a comment will not be monitored.
|
||||
# ## Read the plugin documentation for more information.
|
||||
# chains = [ "INPUT" ]
|
||||
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
|
||||
|
||||
|
||||
# # Read JMX metrics through Jolokia
|
||||
@@ -1293,13 +1087,6 @@
|
||||
# ## Includes connection time, any redirects, and reading the response body.
|
||||
# # client_timeout = "4s"
|
||||
#
|
||||
# ## Attribute delimiter
|
||||
# ##
|
||||
# ## When multiple attributes are returned for a single
|
||||
# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
|
||||
# ## name, and the attribute name, separated by the given delimiter.
|
||||
# # delimiter = "_"
|
||||
#
|
||||
# ## List of servers exposing jolokia read service
|
||||
# [[inputs.jolokia.servers]]
|
||||
# name = "as-server-01"
|
||||
@@ -1330,23 +1117,6 @@
|
||||
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
|
||||
|
||||
|
||||
# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
|
||||
# [[inputs.kapacitor]]
|
||||
# ## Multiple URLs from which to read Kapacitor-formatted JSON
|
||||
# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
|
||||
# urls = [
|
||||
# "http://localhost:9092/kapacitor/v1/debug/vars"
|
||||
# ]
|
||||
#
|
||||
# ## Time limit for http requests
|
||||
# timeout = "5s"
|
||||
|
||||
|
||||
# # Get kernel statistics from /proc/vmstat
|
||||
# [[inputs.kernel_vmstat]]
|
||||
# # no configuration
|
||||
|
||||
|
||||
# # Read metrics from the kubernetes kubelet api
|
||||
# [[inputs.kubernetes]]
|
||||
# ## URL for the kubelet
|
||||
@@ -1370,11 +1140,6 @@
|
||||
# servers = ["127.0.0.1:4021"]
|
||||
|
||||
|
||||
# # Provides Linux sysctl fs metrics
|
||||
# [[inputs.linux_sysctl_fs]]
|
||||
# # no configuration
|
||||
|
||||
|
||||
# # Read metrics from local Lustre service on OST, MDS
|
||||
# [[inputs.lustre2]]
|
||||
# ## An array of /proc globs to search for Lustre stats
|
||||
@@ -1451,13 +1216,6 @@
|
||||
# ## 10.0.0.1:10000, etc.
|
||||
# servers = ["127.0.0.1:27017"]
|
||||
# gather_perdb_stats = false
|
||||
#
|
||||
# ## Optional SSL Config
|
||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||
# # ssl_key = "/etc/telegraf/key.pem"
|
||||
# ## Use SSL but skip chain & host verification
|
||||
# # insecure_skip_verify = false
|
||||
|
||||
|
||||
# # Read metrics from one or many mysql servers
|
||||
@@ -1485,15 +1243,9 @@
|
||||
# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
|
||||
# gather_process_list = true
|
||||
# #
|
||||
# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
|
||||
# gather_user_statistics = true
|
||||
# #
|
||||
# ## gather auto_increment columns and max values from information schema
|
||||
# gather_info_schema_auto_inc = true
|
||||
# #
|
||||
# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
|
||||
# gather_innodb_metrics = true
|
||||
# #
|
||||
# ## gather metrics from SHOW SLAVE STATUS command output
|
||||
# gather_slave_status = true
|
||||
# #
|
||||
@@ -1631,7 +1383,7 @@
|
||||
# ## NOTE: this plugin forks the ping command. You may need to set capabilities
|
||||
# ## via setcap cap_net_raw+p /bin/ping
|
||||
# #
|
||||
# ## List of urls to ping
|
||||
# ## urls to ping
|
||||
# urls = ["www.google.com"] # required
|
||||
# ## number of pings to send per collection (ping -c <COUNT>)
|
||||
# # count = 1
|
||||
@@ -1665,7 +1417,7 @@
|
||||
# # ignored_databases = ["postgres", "template0", "template1"]
|
||||
#
|
||||
# ## A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
|
||||
# ## databases are gathered. Do NOT use with the 'ignore_databases' option.
|
||||
# # databases = ["app_production", "testing"]
|
||||
|
||||
|
||||
@@ -1847,13 +1599,6 @@
|
||||
# servers = ["http://localhost:8098"]
|
||||
|
||||
|
||||
# # Monitor sensors, requires lm-sensors package
|
||||
# [[inputs.sensors]]
|
||||
# ## Remove numbers from field names.
|
||||
# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
|
||||
# # remove_numbers = true
|
||||
|
||||
|
||||
# # Retrieves SNMP values from remote agents
|
||||
# [[inputs.snmp]]
|
||||
# agents = [ "127.0.0.1:161" ]
|
||||
@@ -2030,68 +1775,6 @@
|
||||
# # ]
|
||||
|
||||
|
||||
# # Sysstat metrics collector
|
||||
# [[inputs.sysstat]]
|
||||
# ## Path to the sadc command.
|
||||
# #
|
||||
# ## Common Defaults:
|
||||
# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
|
||||
# ## Arch: /usr/lib/sa/sadc
|
||||
# ## RHEL/CentOS: /usr/lib64/sa/sadc
|
||||
# sadc_path = "/usr/lib/sa/sadc" # required
|
||||
# #
|
||||
# #
|
||||
# ## Path to the sadf command, if it is not in PATH
|
||||
# # sadf_path = "/usr/bin/sadf"
|
||||
# #
|
||||
# #
|
||||
# ## Activities is a list of activities, that are passed as argument to the
|
||||
# ## sadc collector utility (e.g: DISK, SNMP etc...)
|
||||
# ## The more activities that are added, the more data is collected.
|
||||
# # activities = ["DISK"]
|
||||
# #
|
||||
# #
|
||||
# ## Group metrics to measurements.
|
||||
# ##
|
||||
# ## If group is false each metric will be prefixed with a description
|
||||
# ## and represents itself a measurement.
|
||||
# ##
|
||||
# ## If Group is true, corresponding metrics are grouped to a single measurement.
|
||||
# # group = true
|
||||
# #
|
||||
# #
|
||||
# ## Options for the sadf command. The values on the left represent the sadf
|
||||
# ## options and the values on the right their description (which are used for
# ## grouping and prefixing metrics).
# ##
# ## Run 'sar -h' or 'man sar' to find out the supported options for your
# ## sysstat version.
# [inputs.sysstat.options]
#   -C = "cpu"
#   -B = "paging"
#   -b = "io"
#   -d = "disk" # requires DISK activity
#   "-n ALL" = "network"
#   "-P ALL" = "per_cpu"
#   -q = "queue"
#   -R = "mem"
#   -r = "mem_util"
#   -S = "swap_util"
#   -u = "cpu_util"
#   -v = "inode"
#   -W = "swap"
#   -w = "task"
# #   -H = "hugepages" # only available for newer linux distributions
# #   "-I ALL" = "interrupts" # requires INT activity
# #
# #
# ## Device tags can be used to add additional tags for devices.
# ## For example the configuration below adds a tag vg with value rootvg for
# ## all metrics with sda devices.
# # [[inputs.sysstat.device_tags.sda]]
# #   vg = "rootvg"

# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
# ## Set the amplitude
@@ -2147,39 +1830,6 @@
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
# ## AMQP url
# url = "amqp://localhost:5672/influxdb"
# ## AMQP exchange
# exchange = "telegraf"
# ## AMQP queue name
# queue = "telegraf"
# ## Binding Key
# binding_key = "#"
#
# ## Maximum number of messages server should give to the worker.
# prefetch_count = 50
#
# ## Auth method. PLAIN and EXTERNAL are supported
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"

# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
@@ -2213,14 +1863,10 @@
#   offset = "oldest"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## Each data format has it's own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
#
#   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
#   ## larger messages are dropped
#   max_message_len = 65536

# # Stream and parse log file(s).
@@ -2232,9 +1878,7 @@
#   ##   /var/log/*/*.log -> find all .log files with a parent dir in /var/log
#   ##   /var/log/apache.log -> only tail the apache log file
#   files = ["/var/log/apache/access.log"]
#   ## Read files that currently exist from the beginning. Files that are created
#   ## while telegraf is running (and that match the "files" globs) will always
#   ## be read from the beginning.
#   ## Read file from beginning.
#   from_beginning = false
#
#   ## Parse logstash-style "grok" patterns:
@@ -2288,7 +1932,7 @@
#   # insecure_skip_verify = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## Each data format has it's own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
@@ -2311,7 +1955,7 @@
#   # pending_bytes_limit = 67108864
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## Each data format has it's own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
@@ -2326,50 +1970,12 @@
#   max_in_flight = 100
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## Each data format has it's own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"

# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
#   ## URL to listen on
#   # service_address = "tcp://:8094"
#   # service_address = "tcp://127.0.0.1:http"
#   # service_address = "tcp4://:8094"
#   # service_address = "tcp6://:8094"
#   # service_address = "tcp6://[2001:db8::1]:8094"
#   # service_address = "udp://:8094"
#   # service_address = "udp4://:8094"
#   # service_address = "udp6://:8094"
#   # service_address = "unix:///tmp/telegraf.sock"
#   # service_address = "unixgram:///tmp/telegraf.sock"
#
#   ## Maximum number of concurrent connections.
#   ## Only applies to stream sockets (e.g. TCP).
#   ## 0 (default) is unlimited.
#   # max_connections = 1024
#
#   ## Maximum socket buffer size in bytes.
#   ## For stream sockets, once the buffer fills up, the sender will start backing up.
#   ## For datagram sockets, once the buffer fills up, metrics will start dropping.
#   ## Defaults to the OS default.
#   # read_buffer_size = 65535
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"

# # Statsd Server
# [[inputs.statsd]]
#   ## Address and port to host UDP listener on
@@ -2431,7 +2037,7 @@
#   pipe = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## Each data format has it's own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
@@ -2439,16 +2045,41 @@

# # Generic TCP listener
# [[inputs.tcp_listener]]
#   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
#   ## Address and port to host TCP listener on
#   # service_address = ":8094"
#
#   ## Number of TCP messages allowed to queue up. Once filled, the
#   ## TCP listener will start dropping packets.
#   # allowed_pending_messages = 10000
#
#   ## Maximum number of concurrent TCP connections to allow
#   # max_tcp_connections = 250
#
#   ## Data format to consume.
#   ## Each data format has it's own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"

# # Generic UDP listener
# [[inputs.udp_listener]]
#   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
#   ## Address and port to host UDP listener on
#   # service_address = ":8092"
#
#   ## Number of UDP messages allowed to queue up. Once filled, the
#   ## UDP listener will start dropping packets.
#   # allowed_pending_messages = 10000
#
#   ## Set the buffer size of the UDP connection outside of OS default (in bytes)
#   ## If set to 0, take OS default
#   udp_buffer_size = 16777216
#
#   ## Data format to consume.
#   ## Each data format has it's own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"

# # A Webhooks Event collector
@@ -2461,14 +2092,10 @@
#
#   [inputs.webhooks.github]
#     path = "/github"
#     # secret = ""
#
#   [inputs.webhooks.mandrill]
#     path = "/mandrill"
#
#   [inputs.webhooks.rollbar]
#     path = "/rollbar"
#
#   [inputs.webhooks.papertrail]
#     path = "/papertrail"

@@ -117,8 +117,7 @@
    Instances = ["*"]
    Counters = [
      "% Idle Time",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Time","% Disk Read Time",
      "% Disk Write Time",
      "Current Disk Queue Length",
      "% Free Space",

@@ -45,11 +45,9 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
		select {
		case b.buf <- metrics[i]:
		default:
			b.mu.Lock()
			MetricsDropped.Incr(1)
			<-b.buf
			b.buf <- metrics[i]
			b.mu.Unlock()
		}
	}
}
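The `Add` hunk above implements a drop-oldest overflow policy: when the buffered channel is full, the oldest metric is evicted so the newest one always fits (one side of the hunk additionally takes a mutex and bumps a drop counter). A minimal sketch of the same policy, using hypothetical names (`buf`, `dropped`) rather than Telegraf's actual types:

```go
package main

import "fmt"

// add pushes m into buf; when buf is full it evicts the oldest
// element so the newest value is never lost.
func add(buf chan int, dropped *int, m int) {
	select {
	case buf <- m: // fast path: room available
	default:
		*dropped++ // count the eviction
		<-buf      // discard the oldest buffered value
		buf <- m   // store the newest
	}
}

func main() {
	buf := make(chan int, 2)
	dropped := 0
	for i := 1; i <= 4; i++ {
		add(buf, &dropped, i)
	}
	fmt.Println(len(buf), dropped) // 2 2: buffer holds 3 and 4; 1 and 2 were dropped
}
```

Under concurrent producers the eviction needs the mutex shown in one side of the hunk; otherwise two senders can race between the receive and the re-send.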

@@ -6,7 +6,6 @@ import (
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"os"
	"path/filepath"
	"regexp"
@@ -19,13 +18,14 @@ import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/models"
	"github.com/influxdata/telegraf/plugins/aggregators"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/outputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/plugins/processors"
	"github.com/influxdata/telegraf/plugins/serializers"
	"github.com/influxdata/telegraf/registry/aggregators"
	"github.com/influxdata/telegraf/registry/inputs"
	"github.com/influxdata/telegraf/registry/outputs"
	"github.com/influxdata/telegraf/registry/processors"

	"github.com/influxdata/config"
	"github.com/influxdata/toml"
	"github.com/influxdata/toml/ast"
)
@@ -40,6 +40,14 @@ var (

	// envVarRe is a regex to find environment variables in the config file
	envVarRe = regexp.MustCompile(`\$\w+`)

	// addQuoteRe is a regex for finding and adding quotes around / characters
	// when they are used for distinguishing external plugins.
	// ie, a ReplaceAll() with this pattern will be used to turn this:
	//   [[inputs.external/test/example]]
	// to
	//   [[inputs."external/test/example"]]
	addQuoteRe = regexp.MustCompile(`(\[?\[?inputs|outputs|processors|aggregators)\.(external\/[^.\]]+)`)
)
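A small sketch of how the `addQuoteRe` pattern rewrites an external plugin table name; the regex is copied from the hunk above, the surrounding program is illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as addQuoteRe above: quote the external plugin path so
// the TOML parser treats it as a single quoted key.
var addQuoteRe = regexp.MustCompile(`(\[?\[?inputs|outputs|processors|aggregators)\.(external\/[^.\]]+)`)

func main() {
	in := []byte(`[[inputs.external/test/example]]`)
	out := addQuoteRe.ReplaceAll(in, []byte(`$1."$2"`))
	fmt.Println(string(out)) // [[inputs."external/test/example"]]
}
```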

// Config specifies the URL/user/password for the database that telegraf
@@ -85,8 +93,8 @@ type AgentConfig struct {
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// By default or when set to "0s", precision will be set to the same
	// timestamp order as the collection interval, with the maximum being 1s.
	// By default, precision will be set to the same timestamp order as the
	// collection interval, with the maximum being 1s.
	//   ie, when interval = "10s", precision will be "1s"
	//       when interval = "250ms", precision will be "1ms"
	// Precision will NOT be used for service inputs. It is up to each individual
@@ -230,13 +238,10 @@ var header = `# Telegraf Configuration
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  ## By default, precision will be set to the same timestamp order as the
  ## collection interval, with the maximum being 1s.
  ## Precision will NOT be used for service inputs, such as logparser and statsd.
  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
@@ -509,10 +514,6 @@ func PrintOutputConfig(name string) error {

func (c *Config) LoadDirectory(path string) error {
	walkfn := func(thispath string, info os.FileInfo, _ error) error {
		if info == nil {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}
		if info.IsDir() {
			return nil
		}
@@ -573,7 +574,7 @@ func (c *Config) LoadConfig(path string) error {
			if !ok {
				return fmt.Errorf("%s: invalid configuration", path)
			}
			if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
			if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
				log.Printf("E! Could not parse [global_tags] config\n")
				return fmt.Errorf("Error parsing %s, %s", path, err)
			}
@@ -586,7 +587,7 @@ func (c *Config) LoadConfig(path string) error {
			if !ok {
				return fmt.Errorf("%s: invalid configuration", path)
			}
			if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
			if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
				log.Printf("E! Could not parse [agent] config\n")
				return fmt.Errorf("Error parsing %s, %s", path, err)
			}
@@ -708,6 +709,9 @@ func parseFile(fpath string) (*ast.Table, error) {
		}
	}

	// add quotes around external plugin paths.
	contents = addQuoteRe.ReplaceAll(contents, []byte(`$1."$2"`))

	return toml.Parse(contents)
}

@@ -723,7 +727,7 @@ func (c *Config) addAggregator(name string, table *ast.Table) error {
		return err
	}

	if err := toml.UnmarshalTable(table, aggregator); err != nil {
	if err := config.UnmarshalTable(table, aggregator); err != nil {
		return err
	}

@@ -743,7 +747,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
		return err
	}

	if err := toml.UnmarshalTable(table, processor); err != nil {
	if err := config.UnmarshalTable(table, processor); err != nil {
		return err
	}

@@ -783,7 +787,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
		return err
	}

	if err := toml.UnmarshalTable(table, output); err != nil {
	if err := config.UnmarshalTable(table, output); err != nil {
		return err
	}

@@ -824,7 +828,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
		return err
	}

	if err := toml.UnmarshalTable(table, input); err != nil {
	if err := config.UnmarshalTable(table, input); err != nil {
		return err
	}

@@ -916,7 +920,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
	conf.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
			if err := config.UnmarshalTable(subtbl, conf.Tags); err != nil {
				log.Printf("Could not parse tags for input %s\n", name)
			}
		}
@@ -1153,7 +1157,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
	cp.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
			if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
				log.Printf("E! Could not parse tags for input %s\n", name)
			}
		}
@@ -1233,34 +1237,6 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
		}
	}

	if node, ok := tbl.Fields["collectd_auth_file"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.CollectdAuthFile = str.Value
			}
		}
	}

	if node, ok := tbl.Fields["collectd_security_level"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.CollectdSecurityLevel = str.Value
			}
		}
	}

	if node, ok := tbl.Fields["collectd_typesdb"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if ary, ok := kv.Value.(*ast.Array); ok {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
					}
				}
			}
		}
	}

	c.MetricName = name

	delete(tbl.Fields, "data_format")
@@ -1268,9 +1244,6 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
	delete(tbl.Fields, "templates")
	delete(tbl.Fields, "tag_keys")
	delete(tbl.Fields, "data_type")
	delete(tbl.Fields, "collectd_auth_file")
	delete(tbl.Fields, "collectd_security_level")
	delete(tbl.Fields, "collectd_typesdb")

	return parsers.NewParser(c)
}
@@ -1279,7 +1252,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
	c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
	c := &serializers.Config{}

	if node, ok := tbl.Fields["data_format"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
@@ -1309,26 +1282,9 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
		}
	}

	if node, ok := tbl.Fields["json_timestamp_units"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				timestampVal, err := time.ParseDuration(str.Value)
				if err != nil {
					return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
				}
				// now that we have a duration, truncate it to the nearest
				// power of ten (just in case)
				nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
				new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
				c.TimestampUnits = time.Duration(new_nanoseconds)
			}
		}
	}

	delete(tbl.Fields, "data_format")
	delete(tbl.Fields, "prefix")
	delete(tbl.Fields, "template")
	delete(tbl.Fields, "json_timestamp_units")
	return serializers.NewSerializer(c)
}
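The `json_timestamp_units` handling above truncates the parsed duration down to the nearest power of ten in nanoseconds. A standalone sketch of that truncation, using the same math calls but a hypothetical helper name:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// truncate rounds a duration down to the nearest power of ten in
// nanoseconds, mirroring the json_timestamp_units handling above.
func truncate(d time.Duration) time.Duration {
	exp := int64(math.Log10(float64(d.Nanoseconds())))
	return time.Duration(int64(math.Pow(10, float64(exp))))
}

func main() {
	fmt.Println(truncate(15 * time.Millisecond)) // 10ms
	fmt.Println(truncate(1 * time.Second))       // 1s
}
```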

@@ -6,11 +6,11 @@ import (
	"time"

	"github.com/influxdata/telegraf/internal/models"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/inputs/exec"
	"github.com/influxdata/telegraf/plugins/inputs/memcached"
	"github.com/influxdata/telegraf/plugins/inputs/procstat"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/registry/inputs"

	"github.com/stretchr/testify/assert"
)

2 internal/config/testdata/telegraf-agent.toml vendored
@@ -60,7 +60,7 @@
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
#  ie, if this tag exists, its value will be used as the routing key
#  ie, if this tag exists, it's value will be used as the routing key
routing_tag = "host"

37 internal/errchan/errchan.go Normal file
@@ -0,0 +1,37 @@
package errchan

import (
	"fmt"
	"strings"
)

type ErrChan struct {
	C chan error
}

// New returns an error channel of max length 'n'
// errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
	return &ErrChan{
		C: make(chan error, n),
	}
}

// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
	close(e.C)

	var out string
	for err := range e.C {
		if err != nil {
			out += "[" + err.Error() + "], "
		}
	}

	if out != "" {
		return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
	}
	return nil
}
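A usage sketch for the new errchan package above: buffer one slot per job, let workers send their results, then collapse everything with `Error()`. Note that every send must complete before `Error()` closes the channel; the worker logic here is a stand-in, not part of the package:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/influxdata/telegraf/internal/errchan"
)

func main() {
	jobs := []string{"a", "b", "c"}
	errc := errchan.New(len(jobs)) // buffer one slot per job

	var wg sync.WaitGroup
	for _, j := range jobs {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			// stand-in worker; a real caller would do I/O here
			if name == "b" {
				errc.C <- fmt.Errorf("job %s failed", name)
				return
			}
			errc.C <- nil
		}(j)
	}
	wg.Wait() // every send must land before Error() closes the channel

	if err := errc.Error(); err != nil {
		fmt.Println(err) // Errors encountered: [job b failed]
	}
}
```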
@@ -45,7 +45,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
	if !g.hasMeta {
		out := make(map[string]os.FileInfo)
		info, err := os.Stat(g.path)
		if err == nil {
		if !os.IsNotExist(err) {
			out[g.path] = info
		}
		return out
@@ -55,7 +55,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
	files, _ := filepath.Glob(g.path)
	for _, file := range files {
		info, err := os.Stat(file)
		if err == nil {
		if !os.IsNotExist(err) {
			out[file] = info
		}
	}
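The two hunks above disagree on the Stat guard: `err == nil` admits only paths that could actually be stat'ed, while `!os.IsNotExist(err)` also admits paths whose Stat failed for other reasons (e.g. permissions), which is exactly what the TestMatch_ErrPermission test below exercises. A standalone sketch of the distinction:

```go
package main

import (
	"fmt"
	"os"
)

// classify shows how the two guards in the hunk above differ:
// err == nil accepts only files that could actually be stat'ed,
// while !os.IsNotExist(err) also lets permission errors through.
func classify(path string) {
	_, err := os.Stat(path)
	switch {
	case err == nil:
		fmt.Println(path, "exists and is readable")
	case os.IsNotExist(err):
		fmt.Println(path, "does not exist")
	default:
		fmt.Println(path, "stat failed:", err) // e.g. permission denied
	}
}

func main() {
	classify("/etc/hosts")
	classify("/no/such/file")
}
```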

@@ -1,7 +1,6 @@
package globpath

import (
	"os"
	"runtime"
	"strings"
	"testing"
@@ -71,20 +70,3 @@ func getTestdataDir() string {
	_, filename, _, _ := runtime.Caller(1)
	return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}

func TestMatch_ErrPermission(t *testing.T) {
	tests := []struct {
		input    string
		expected map[string]os.FileInfo
	}{
		{"/root/foo", map[string]os.FileInfo{}},
		{"/root/f*", map[string]os.FileInfo{}},
	}

	for _, test := range tests {
		glob, err := Compile(test.input)
		require.NoError(t, err)
		actual := glob.Match()
		require.Equal(t, test.expected, actual)
	}
}

@@ -3,7 +3,6 @@ package models
import (
	"log"
	"math"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
@@ -78,27 +77,7 @@ func makemetric(
		}
	}

	for k, v := range tags {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		} else if strings.HasSuffix(v, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] has a value "+
				"ending with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		}
	}

	for k, v := range fields {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] field [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(fields, k)
			continue
		}
		// Validate uint64 and float64 fields
		// convert all int & uint types to int64
		switch val := v.(type) {
@@ -149,14 +128,6 @@
				delete(fields, k)
				continue
			}
		case string:
			if strings.HasSuffix(val, `\`) {
				log.Printf("D! Measurement [%s] field [%s] has a value "+
					"ending with a backslash, skipping", measurement, k)
				delete(fields, k)
				continue
			}
			fields[k] = v
		default:
			fields[k] = v
		}
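The makemetric hunk above drops tag and field keys or values that end in a backslash, since a trailing `\` would escape the delimiter that follows in line protocol. A tiny sketch of the guard:

```go
package main

import (
	"fmt"
	"strings"
)

// endsWithBackslash mirrors the guard used in makemetric above: a
// trailing backslash would escape the delimiter that follows the
// value in line protocol, so such keys/values are dropped.
func endsWithBackslash(s string) bool {
	return strings.HasSuffix(s, `\`)
}

func main() {
	for _, v := range []string{`xyzzy`, `xyzzy\`} {
		fmt.Printf("%-8q dropped=%v\n", v, endsWithBackslash(v))
	}
}
```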

@@ -9,7 +9,6 @@ import (
	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestMakeMetricNoFields(t *testing.T) {
@@ -333,128 +332,6 @@ func TestMakeMetricNameSuffix(t *testing.T) {
	)
}

func TestMakeMetric_TrailingSlash(t *testing.T) {
	now := time.Now()

	tests := []struct {
		name                string
		measurement         string
		fields              map[string]interface{}
		tags                map[string]string
		expectedNil         bool
		expectedMeasurement string
		expectedFields      map[string]interface{}
		expectedTags        map[string]string
	}{
		{
			name:        "Measurement cannot have trailing slash",
			measurement: `cpu\`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Field key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				`bad\`:  `xyzzy`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Field value with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				"bad":   `xyzzy\`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Must have one field after dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"bad": `xyzzy\`,
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Tag key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host\`: "localhost",
				"a":     "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
		{
			name:        "Tag value with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host`: `localhost\`,
				"a":    "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
	}

	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			m := ri.MakeMetric(
				tc.measurement,
				tc.fields,
				tc.tags,
				telegraf.Untyped,
				now)

			if tc.expectedNil {
				require.Nil(t, m)
			} else {
				require.NotNil(t, m)
				require.Equal(t, tc.expectedMeasurement, m.Name())
				require.Equal(t, tc.expectedFields, m.Fields())
				require.Equal(t, tc.expectedTags, m.Tags())
			}
		})
	}
}

type testInput struct{}

func (t *testInput) Description() string { return "" }

@@ -2,7 +2,6 @@ package models

import (
	"log"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
@@ -35,9 +34,6 @@ type RunningOutput struct {

	metrics     *buffer.Buffer
	failMetrics *buffer.Buffer

	// Guards against concurrent calls to the Output as described in #3009
	sync.Mutex
}

func NewRunningOutput(
@@ -126,9 +122,9 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
	nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
	ro.BufferSize.Set(int64(nFails + nMetrics))
	log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
		ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
	ro.BufferSize.Incr(int64(nFails + nMetrics))
	var err error
	if !ro.failMetrics.IsEmpty() {
		// how many batches of failed writes we need to write.
@@ -173,8 +169,6 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
	if nMetrics == 0 {
		return nil
	}
	ro.Lock()
	defer ro.Unlock()
	start := time.Now()
	err := ro.Output.Write(metrics)
	elapsed := time.Since(start)
@@ -182,6 +176,7 @@
		log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
			ro.Name, nMetrics, elapsed)
		ro.MetricsWritten.Incr(int64(nMetrics))
		ro.BufferSize.Incr(-int64(nMetrics))
		ro.WriteTime.Incr(elapsed.Nanoseconds())
	}
	return err

@@ -4,14 +4,11 @@ import (
	"io"
	"log"
	"os"
	"regexp"
	"time"

	"github.com/influxdata/wlog"
)

var prefixRegex = regexp.MustCompile("^[DIWE]!")

// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
	return &telegrafLog{
@@ -24,13 +21,7 @@ type telegrafLog struct {
}

func (t *telegrafLog) Write(b []byte) (n int, err error) {
	var line []byte
	if !prefixRegex.Match(b) {
		line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
	} else {
		line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
	}
	return t.writer.Write(line)
	return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
}

// SetupLogging configures the logging output.

@@ -51,19 +51,6 @@ func TestErrorWriteLogToFile(t *testing.T) {
	assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}

func TestAddDefaultLogLevel(t *testing.T) {
	tmpfile, err := ioutil.TempFile("", "")
	assert.NoError(t, err)
	defer func() { os.Remove(tmpfile.Name()) }()

	SetupLogging(true, false, tmpfile.Name())
	log.Printf("TEST")

	f, err := ioutil.ReadFile(tmpfile.Name())
	assert.NoError(t, err)
	assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func BenchmarkTelegrafLogWrite(b *testing.B) {
	var msg = []byte("test")
	var buf bytes.Buffer

@@ -6,7 +6,6 @@ import (
	"hash/fnv"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
@@ -27,9 +26,6 @@ func New(
	if len(name) == 0 {
		return nil, fmt.Errorf("Metric cannot be made with an empty name")
	}
	if strings.HasSuffix(name, `\`) {
		return nil, fmt.Errorf("Metric cannot have measurement name ending with a backslash")
	}

	var thisType telegraf.ValueType
	if len(mType) > 0 {
@@ -48,25 +44,13 @@
	// pre-allocate exact size of the tags slice
	taglen := 0
	for k, v := range tags {
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("Metric cannot have tag key ending with a backslash")
		}
		if strings.HasSuffix(v, `\`) {
			return nil, fmt.Errorf("Metric cannot have tag value ending with a backslash")
		}

		if len(k) == 0 || len(v) == 0 {
			continue
		}
		// TODO check that length of tag key & value are > 0
		taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
	}
	m.tags = make([]byte, taglen)

	i := 0
	for k, v := range tags {
		if len(k) == 0 || len(v) == 0 {
			continue
		}
		m.tags[i] = ','
		i++
		i += copy(m.tags[i:], escape(k, "tagkey"))
@@ -77,17 +61,7 @@

	// pre-allocate capacity of the fields slice
	fieldlen := 0
	for k, v := range fields {
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
		}
		switch val := v.(type) {
		case string:
			if strings.HasSuffix(val, `\`) {
				return nil, fmt.Errorf("Metric cannot have field value ending with a backslash")
			}
		}

	for k, _ := range fields {
		// 10 bytes is completely arbitrary, but will at least prevent some
		// amount of allocations. There's a small possibility this will create
		// slightly more allocations for a metric that has many short fields.
@@ -119,7 +93,7 @@ func indexUnescapedByte(buf []byte, b byte) int {
			break
		}
		keyi += i
		if buf[keyi-1] != '\\' {
		if countBackslashes(buf, keyi-1)%2 == 0 {
			break
		} else {
			keyi++
@@ -128,6 +102,24 @@ func indexUnescapedByte(buf []byte, b byte) int {
	return keyi
}

// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
	var count int
	for {
		if index < 0 {
			return count
		}
		if buf[index] == '\\' {
			count++
			index--
		} else {
			break
		}
	}
	return count
}
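countBackslashes above replaces the single-byte `buf[keyi-1] != '\\'` check: only an odd run of preceding backslashes means the byte is escaped, since each backslash pair escapes itself. A standalone check against the `foo\\bar` case whose expected index changes from -1 to 5 in the test hunk further down:

```go
package main

import "fmt"

// countBackslashes, as in the hunk above: walk left from index and
// count consecutive backslashes. An even count means the byte after
// them is NOT escaped (each backslash pair escapes itself).
func countBackslashes(buf []byte, index int) int {
	var count int
	for index >= 0 && buf[index] == '\\' {
		count++
		index--
	}
	return count
}

func main() {
	buf := []byte(`foo\\bar`)
	// byte 'b' is at index 5; the two backslashes before it cancel out
	fmt.Println(countBackslashes(buf, 4)%2 == 0) // true: 'b' is unescaped
}
```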

type metric struct {
	name   []byte
	tags   []byte
@@ -221,7 +213,7 @@ func (m *metric) SerializeTo(dst []byte) int {
}

func (m *metric) Split(maxSize int) []telegraf.Metric {
	if m.Len() <= maxSize {
	if m.Len() < maxSize {
		return []telegraf.Metric{m}
	}
	var out []telegraf.Metric
@@ -251,7 +243,7 @@ func (m *metric) Split(maxSize int) []telegraf.Metric {

		// if true, then we need to create a metric _not_ including the currently
		// selected field
		if len(m.fields[i:j])+len(fields)+constant >= maxSize {
		if len(m.fields[i:j])+len(fields)+constant > maxSize {
			// if false, then we'll create a metric including the currently
			// selected field anyways. This means that the given maxSize is too
			// small for a single field to fit.
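The flipped comparisons above (`<=`/`<` and `>=`/`>`) decide whether a metric exactly at maxSize is returned whole or split; the removed TestSplitMetric_ExactSize below exercised that boundary. A contrived sketch of the off-by-one, with hypothetical names:

```go
package main

import "fmt"

// splitCount reports how many chunks a payload of length n yields
// under the two guards from the hunk above.
func splitCount(n, maxSize int, inclusive bool) int {
	fits := n < maxSize
	if inclusive {
		fits = n <= maxSize // exact-size metric stays whole
	}
	if fits {
		return 1
	}
	return 2 // simplified: a real Split may produce more chunks
}

func main() {
	fmt.Println(splitCount(60, 60, true))  // 1: <= keeps an exact-size metric intact
	fmt.Println(splitCount(60, 60, false)) // 2: < forces a split
}
```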

@@ -10,7 +10,6 @@ import (
	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNewMetric(t *testing.T) {
@@ -250,13 +249,11 @@ func TestNewMetric_Fields(t *testing.T) {
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":                  float64(1),
		"int":                    int64(1),
		"bool":                   true,
		"false":                  false,
		"string":                 "test",
		"quote_string":           `x"y`,
		"backslash_quote_string": `x\"y`,
		"float":  float64(1),
		"int":    int64(1),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
@@ -369,7 +366,7 @@ func TestIndexUnescapedByte(t *testing.T) {
		{
			in:       []byte(`foo\\bar`),
			b:        'b',
			expected: -1,
			expected: 5,
		},
		{
			in: []byte(`foobar`),
@@ -461,7 +458,7 @@ func TestSplitMetric(t *testing.T) {
	assert.Len(t, split70, 3)

	split60 := m.Split(60)
	assert.Len(t, split60, 5)
	assert.Len(t, split60, 4)
}

// test splitting metric into various max lengths
@@ -581,42 +578,6 @@ func TestSplitMetric_OneField(t *testing.T) {
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}

func TestSplitMetric_ExactSize(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	actual := m.Split(m.Len())
	// check that no copy was made
	require.Equal(t, &m, &actual[0])
}

func TestSplitMetric_NoRoomForNewline(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float": float64(100001),
		"int":   int64(100001),
		"bool":  true,
		"false": false,
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	actual := m.Split(m.Len() - 1)
	require.Equal(t, 2, len(actual))
}

func TestNewMetricAggregate(t *testing.T) {
	now := time.Now()

@@ -664,78 +625,3 @@ func TestNewMetricFailNaN(t *testing.T) {
	_, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
}

func TestEmptyTagValueOrKey(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":     "localhost",
		"emptytag": "",
		"":         "valuewithoutkey",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)

	assert.True(t, m.HasTag("host"))
	assert.False(t, m.HasTag("emptytag"))
	assert.Equal(t,
		fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
		m.String())

	assert.NoError(t, err)

}

func TestNewMetric_TrailingSlash(t *testing.T) {
	now := time.Now()

	tests := []struct {
		name   string
		tags   map[string]string
		fields map[string]interface{}
	}{
		{
			name: `cpu\`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
		},
		{
			name: "cpu",
			fields: map[string]interface{}{
				`value\`: "x",
			},
		},
		{
			name: "cpu",
			fields: map[string]interface{}{
				"value": `x\`,
			},
		},
		{
			name: "cpu",
			tags: map[string]string{
				`host\`: "localhost",
			},
			fields: map[string]interface{}{
				"value": int64(42),
			},
		},
		{
			name: "cpu",
			tags: map[string]string{
				"host": `localhost\`,
			},
			fields: map[string]interface{}{
				"value": int64(42),
			},
		},
	}

	for _, tc := range tests {
		_, err := New(tc.name, tc.tags, tc.fields, now)
		assert.Error(t, err)
	}
}

@@ -4,7 +4,6 @@ import (
	"bytes"
	"errors"
	"fmt"
	"strconv"
	"time"

	"github.com/influxdata/telegraf"
@@ -41,21 +40,10 @@
)

func Parse(buf []byte) ([]telegraf.Metric, error) {
	return ParseWithDefaultTimePrecision(buf, time.Now(), "")
	return ParseWithDefaultTime(buf, time.Now())
}

func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
	return ParseWithDefaultTimePrecision(buf, t, "")
}

func ParseWithDefaultTimePrecision(
	buf []byte,
	t time.Time,
	precision string,
) ([]telegraf.Metric, error) {
	if len(buf) == 0 {
		return []telegraf.Metric{}, nil
	}
	if len(buf) <= 6 {
		return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
	}
|
||||
@@ -72,7 +60,7 @@ func ParseWithDefaultTimePrecision(
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := parseMetric(buf[i:i+j], t, precision)
|
||||
m, err := parseMetric(buf[i:i+j], t)
|
||||
if err != nil {
|
||||
i += j + 1 // increment i past the previous newline
|
||||
errStr += " " + err.Error()
|
||||
@@ -89,10 +77,7 @@ func ParseWithDefaultTimePrecision(
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
func parseMetric(buf []byte,
|
||||
defaultTime time.Time,
|
||||
precision string,
|
||||
) (telegraf.Metric, error) {
|
||||
func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
|
||||
var dTime string
|
||||
// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
|
||||
pos, key, err := scanKey(buf, 0)
|
||||
@@ -126,23 +111,9 @@ func parseMetric(buf []byte,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// apply precision multiplier
|
||||
var nsec int64
|
||||
multiplier := getPrecisionMultiplier(precision)
|
||||
if len(ts) > 0 && multiplier > 1 {
|
||||
tsint, err := parseIntBytes(ts, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nsec := multiplier * tsint
|
||||
ts = []byte(strconv.FormatInt(nsec, 10))
|
||||
}
|
||||
|
||||
m := &metric{
|
||||
fields: fields,
|
||||
t: ts,
|
||||
nsec: nsec,
|
||||
}
|
||||
|
||||
// parse out the measurement name
|
||||
@@ -654,21 +625,3 @@ func makeError(reason string, buf []byte, i int) error {
|
||||
return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
|
||||
reason, buf, i)
|
||||
}
|
||||
|
||||
// getPrecisionMultiplier will return a multiplier for the precision specified.
|
||||
func getPrecisionMultiplier(precision string) int64 {
|
||||
d := time.Nanosecond
|
||||
switch precision {
|
||||
case "u":
|
||||
d = time.Microsecond
|
||||
case "ms":
|
||||
d = time.Millisecond
|
||||
case "s":
|
||||
d = time.Second
|
||||
case "m":
|
||||
d = time.Minute
|
||||
case "h":
|
||||
d = time.Hour
|
||||
}
|
||||
return int64(d)
|
||||
}
|
||||
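For readers tracing this revert: the multiplier above converts a line-protocol timestamp from the configured precision into nanoseconds. A minimal, self-contained sketch of that scaling step (the `main` wrapper and sample values are illustrative assumptions, mirroring the `nsec := multiplier * tsint` line in `parseMetric` above):

```go
package main

import (
	"fmt"
	"time"
)

// getPrecisionMultiplier mirrors the helper above: it maps a precision
// suffix to the number of nanoseconds in one unit of that precision.
func getPrecisionMultiplier(precision string) int64 {
	d := time.Nanosecond
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	}
	return int64(d)
}

func main() {
	ts := int64(1491847420)                  // timestamp written with "s" precision
	nsec := getPrecisionMultiplier("s") * ts // scale to nanoseconds
	fmt.Println(nsec)                        // 1491847420000000000
}
```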

@@ -364,41 +364,6 @@ func TestParseNegativeTimestamps(t *testing.T) {
}
}

func TestParsePrecision(t *testing.T) {
for _, tt := range []struct {
line string
precision string
expected int64
}{
{"test v=42 1491847420", "s", 1491847420000000000},
{"test v=42 1491847420123", "ms", 1491847420123000000},
{"test v=42 1491847420123456", "u", 1491847420123456000},
{"test v=42 1491847420123456789", "ns", 1491847420123456789},

{"test v=42 1491847420123456789", "1s", 1491847420123456789},
{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
} {
metrics, err := ParseWithDefaultTimePrecision(
[]byte(tt.line+"\n"), time.Now(), tt.precision)
assert.NoError(t, err)
assert.Equal(t, tt.expected, metrics[0].UnixNano())
}
}

func TestParsePrecisionUnsetTime(t *testing.T) {
for _, tt := range []struct {
line string
precision string
}{
{"test v=42", "s"},
{"test v=42", "ns"},
} {
_, err := ParseWithDefaultTimePrecision(
[]byte(tt.line+"\n"), time.Now(), tt.precision)
assert.NoError(t, err)
}
}

func TestParseMaxKeyLength(t *testing.T) {
key := ""
for {

@@ -57,7 +57,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
// this for-loop is the sunny-day scenario, where we are given a
// buffer that is large enough to hold at least a single metric.
// all of the cases below it are edge-cases.
if r.metrics[r.iM].Len() <= len(p[i:]) {
if r.metrics[r.iM].Len() < len(p[i:]) {
i += r.metrics[r.iM].SerializeTo(p[i:])
} else {
break
@@ -76,7 +76,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
if len(tmp) > 1 {
r.splitMetrics = tmp
r.state = split
if r.splitMetrics[0].Len() <= len(p) {
if r.splitMetrics[0].Len() < len(p) {
i += r.splitMetrics[0].SerializeTo(p)
r.iSM = 1
} else {
@@ -99,7 +99,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
}

case split:
if r.splitMetrics[r.iSM].Len() <= len(p) {
if r.splitMetrics[r.iSM].Len() < len(p) {
// write the current split metric
i += r.splitMetrics[r.iSM].SerializeTo(p)
r.iSM++
@@ -131,10 +131,6 @@ func (r *reader) Read(p []byte) (n int, err error) {
r.iSM++
if r.iSM == len(r.splitMetrics) {
r.iM++
if r.iM == len(r.metrics) {
r.state = done
return i, io.EOF
}
r.state = normal
} else {
r.state = split

@@ -8,8 +8,8 @@ import (
"time"

"github.com/influxdata/telegraf"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func BenchmarkMetricReader(b *testing.B) {
@@ -116,140 +116,6 @@ func TestMetricReader_OverflowMetric(t *testing.T) {
}
}

// Regression test for when a metric is the same size as the buffer.
//
// Previously EOF would not be set until the next call to Read.
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1)}, ts)
metrics := []telegraf.Metric{m1}

r := NewReader(metrics)
buf := make([]byte, m1.Len())

for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF, unless input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}

// Regression test for when a metric requires to be split and one of the
// split metrics is exactly the size of the buffer.
//
// Previously an empty string would be returned on the next Read without error,
// and then next Read call would panic.
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
metrics := []telegraf.Metric{m1}

r := NewReader(metrics)
buf := make([]byte, 30)

// foo a=1i,bb=2i 1481032190000000000\n // len 35
//
// Requires this specific split order:
// foo a=1i 1481032190000000000\n // len 29
// foo bb=2i 1481032190000000000\n // len 30

for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF, unless input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}

// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly causing a panic.
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"a": int64(1),
"bbb": int64(2),
}, ts)
metrics := []telegraf.Metric{m1}

r := NewReader(metrics)
buf := make([]byte, 30)

// foo a=1i,bbb=2i 1481032190000000000\n // len 36
//
// foo a=1i 1481032190000000000\n // len 29
// foo bbb=2i 1481032190000000000\n // len 31

for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF, unless input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}

// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
metrics := []telegraf.Metric{m1}

r := NewReader(metrics)
buf := make([]byte, 29)

// foo a=1i,b=2i 1481032190000000000\n // len 34
//
// foo a=1i 1481032190000000000\n // len 29
// foo b=2i 1481032190000000000\n // len 29

for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF, unless input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}

func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
ts := time.Unix(1481032190, 0)
m, _ := New("foo", map[string]string{},
@@ -619,17 +485,3 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
assert.Equal(t, test.err, err, test.expRegex)
}
}

func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
`
metrics, err := Parse([]byte(lp))
require.NoError(t, err)
r := NewReader(metrics)
buf := make([]byte, 128)
_, err = r.Read(buf)
require.NoError(t, err)
metrics, err = Parse(buf)
require.NoError(t, err)
}

@@ -2,7 +2,7 @@ package minmax

import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/registry/aggregators"
)

type MinMax struct {

@@ -1,11 +0,0 @@
package aggregators

import "github.com/influxdata/telegraf"

type Creator func() telegraf.Aggregator

var Aggregators = map[string]Creator{}

func Add(name string, creator Creator) {
Aggregators[name] = creator
}
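The file deleted above is a small register-by-name pattern: plugins call `Add` from an `init` hook and the agent looks constructors up by name. A self-contained sketch of the same idea, with a plain stand-in interface instead of `telegraf.Aggregator` (all names here are hypothetical, not Telegraf code):

```go
package main

import "fmt"

// Aggregator is a stand-in for the real plugin interface.
type Aggregator interface{ Name() string }

// Creator mirrors the registry above: a constructor keyed by plugin name.
type Creator func() Aggregator

var Aggregators = map[string]Creator{}

func Add(name string, creator Creator) {
	Aggregators[name] = creator
}

type minmax struct{}

func (minmax) Name() string { return "minmax" }

// A plugin registers itself at package-init time.
func init() { Add("minmax", func() Aggregator { return minmax{} }) }

func main() {
	agg := Aggregators["minmax"]() // look up and construct by name
	fmt.Println(agg.Name())
}
```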
@@ -1,8 +1,6 @@
# Example Input Plugin

The example plugin gathers metrics about example things. This description
explains at a high level what the plugin does and provides links to where
additional information can be found.
The example plugin gathers metrics about example things

### Configuration:

@@ -14,8 +12,7 @@ additional information can be found.

### Measurements & Fields:

Here you should add an optional description and links to where the user can
get more information about the measurements.
<optional description>

- measurement1
- field1 (type, unit)
@@ -30,14 +27,11 @@ get more information about the measurements.
- tag2
- measurement2 has the following tags:
- tag3


### Sample Queries:

This section should contain some useful InfluxDB queries that can be used to
get started with the plugin or to generate dashboards. For each query listed,
describe at a high level what data is returned.
These are some useful queries (to generate dashboards or other) to run against data from this plugin:

Get the max, mean, and min for the measurement in the last hour:
```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
```
@@ -45,7 +39,7 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A
### Example Output:

```
$ telegraf -input-filter example -test
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```

@@ -10,7 +10,8 @@ import (
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/registry/inputs"

as "github.com/aerospike/aerospike-client-go"
)
@@ -40,16 +41,17 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
}

var wg sync.WaitGroup
errChan := errchan.New(len(a.Servers))
wg.Add(len(a.Servers))
for _, server := range a.Servers {
go func(serv string) {
defer wg.Done()
acc.AddError(a.gatherServer(serv, acc))
errChan.C <- a.gatherServer(serv, acc)
}(server)
}

wg.Wait()
return nil
return errChan.Error()
}
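The `errchan` helper reintroduced above packages a common fan-out idiom: one goroutine per server, every result pushed into a buffered error channel, and the non-nil errors joined afterwards. A rough, self-contained sketch of that idiom under those assumptions (the helper's real internals may differ; the joining step mirrors the couchdb plugin further below):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// gatherAll fans out one goroutine per server and combines their errors.
func gatherAll(servers []string, gather func(string) error) error {
	errs := make(chan error, len(servers)) // buffered so no goroutine blocks
	var wg sync.WaitGroup
	wg.Add(len(servers))
	for _, s := range servers {
		go func(s string) {
			defer wg.Done()
			errs <- gather(s)
		}(s)
	}
	wg.Wait()
	close(errs)

	var msgs []string
	for err := range errs {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "\n"))
}

func main() {
	err := gatherAll([]string{"a:3000", "b:3000"}, func(s string) error {
		return fmt.Errorf("dial %s: unreachable", s)
	})
	fmt.Println(err)
}
```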

func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {

@@ -19,7 +19,7 @@ func TestAerospikeStatistics(t *testing.T) {

var acc testutil.Accumulator

err := acc.GatherError(a.Gather)
err := a.Gather(&acc)
require.NoError(t, err)

assert.True(t, acc.HasMeasurement("aerospike_node"))
@@ -41,7 +41,8 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {

var acc testutil.Accumulator

require.Error(t, acc.GatherError(a.Gather))
err := a.Gather(&acc)
require.Error(t, err)

assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))

@@ -2,7 +2,6 @@ package all

import (
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
@@ -15,7 +14,6 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
@@ -30,12 +28,10 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/internal"
_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"

@@ -1,47 +0,0 @@
# AMQP Consumer Input Plugin

This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).

Metrics are read from a topic exchange using the configured queue and binding_key.
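For a concrete picture of the flow, here is a hypothetical publisher that feeds this consumer a single line-protocol metric, using the same streadway/amqp client the plugin itself imports. The URL and exchange name follow the defaults shown in the config below; the routing key and metric body are invented for the example:

```go
package main

import (
	"log"

	"github.com/streadway/amqp"
)

func main() {
	conn, err := amqp.Dial("amqp://localhost:5672/influxdb")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}

	// The routing key just needs to match the consumer's binding_key
	// ("#" in the defaults matches everything).
	err = ch.Publish("telegraf", "telegraf.metrics", false, false, amqp.Publishing{
		ContentType: "text/plain",
		Body:        []byte("cpu,host=localhost usage_idle=99 1496460785000000000\n"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```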

Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

For an introduction to AMQP see:
- https://www.rabbitmq.com/tutorials/amqp-concepts.html
- https://www.rabbitmq.com/getstarted.html

The following defaults are known to work with RabbitMQ:

```toml
# AMQP consumer plugin
[[inputs.amqp_consumer]]
## AMQP url
url = "amqp://localhost:5672/influxdb"
## AMQP exchange
exchange = "telegraf"
## AMQP queue name
queue = "telegraf"
## Binding Key
binding_key = "#"

## Controls how many messages the server will try to keep on the network
## for consumers before receiving delivery acks.
#prefetch_count = 50

## Auth method. PLAIN and EXTERNAL are supported.
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
@@ -1,280 +0,0 @@
package amqp_consumer

import (
"fmt"
"log"
"strings"
"sync"
"time"

"github.com/streadway/amqp"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)

// AMQPConsumer is the top level struct for this plugin
type AMQPConsumer struct {
URL string
// AMQP exchange
Exchange string
// Queue Name
Queue string
// Binding Key
BindingKey string `toml:"binding_key"`

// Controls how many messages the server will try to keep on the network
// for consumers before receiving delivery acks.
PrefetchCount int

// AMQP Auth method
AuthMethod string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool

parser parsers.Parser
conn *amqp.Connection
wg *sync.WaitGroup
}

type externalAuth struct{}

func (a *externalAuth) Mechanism() string {
return "EXTERNAL"
}
func (a *externalAuth) Response() string {
return fmt.Sprintf("\000")
}

const (
DefaultAuthMethod = "PLAIN"
DefaultPrefetchCount = 50
)

func (a *AMQPConsumer) SampleConfig() string {
return `
## AMQP url
url = "amqp://localhost:5672/influxdb"
## AMQP exchange
exchange = "telegraf"
## AMQP queue name
queue = "telegraf"
## Binding Key
binding_key = "#"

## Maximum number of messages server should give to the worker.
prefetch_count = 50

## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
`
}

func (a *AMQPConsumer) Description() string {
return "AMQP consumer plugin"
}

func (a *AMQPConsumer) SetParser(parser parsers.Parser) {
a.parser = parser
}

// All gathering is done in the Start function
func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
return nil
}

func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
// make new tls config
tls, err := internal.GetTLSConfig(
a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
if err != nil {
return nil, err
}

// parse auth method
var sasl []amqp.Authentication // nil by default

if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
sasl = []amqp.Authentication{&externalAuth{}}
}

config := amqp.Config{
TLSClientConfig: tls,
SASL: sasl, // if nil, it will be PLAIN
}
return &config, nil
}

// Start satisfies the telegraf.ServiceInput interface
func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
amqpConf, err := a.createConfig()
if err != nil {
return err
}

msgs, err := a.connect(amqpConf)
if err != nil {
return err
}

a.wg = &sync.WaitGroup{}
a.wg.Add(1)
go a.process(msgs, acc)

go func() {
err := <-a.conn.NotifyClose(make(chan *amqp.Error))
if err == nil {
return
}

log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
for {
msgs, err := a.connect(amqpConf)
if err != nil {
log.Printf("E! AMQP connection failed: %s", err)
time.Sleep(10 * time.Second)
continue
}

a.wg.Add(1)
go a.process(msgs, acc)
break
}
}()

return nil
}

func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
conn, err := amqp.DialConfig(a.URL, *amqpConf)
if err != nil {
return nil, err
}
a.conn = conn

ch, err := conn.Channel()
if err != nil {
return nil, fmt.Errorf("Failed to open a channel: %s", err)
}

err = ch.ExchangeDeclare(
a.Exchange, // name
"topic", // type
true, // durable
false, // auto-deleted
false, // internal
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
}

q, err := ch.QueueDeclare(
a.Queue, // queue
true, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed to declare a queue: %s", err)
}

err = ch.QueueBind(
q.Name, // queue
a.BindingKey, // binding-key
a.Exchange, // exchange
false,
nil,
)
if err != nil {
return nil, fmt.Errorf("Failed to bind a queue: %s", err)
}

err = ch.Qos(
a.PrefetchCount,
0, // prefetch-size
false, // global
)
if err != nil {
return nil, fmt.Errorf("Failed to set QoS: %s", err)
}

msgs, err := ch.Consume(
q.Name, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed establishing connection to queue: %s", err)
}

log.Println("I! Started AMQP consumer")
return msgs, err
}

// Read messages from queue and add them to the Accumulator
func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
defer a.wg.Done()
for d := range msgs {
metrics, err := a.parser.Parse(d.Body)
if err != nil {
log.Printf("E! %v: error parsing metric - %v", err, string(d.Body))
} else {
for _, m := range metrics {
acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
}

d.Ack(false)
}
log.Printf("I! AMQP consumer queue closed")
}

func (a *AMQPConsumer) Stop() {
err := a.conn.Close()
if err != nil && err != amqp.ErrClosed {
log.Printf("E! Error closing AMQP connection: %s", err)
return
}
a.wg.Wait()
log.Println("I! Stopped AMQP service")
}

func init() {
inputs.Add("amqp_consumer", func() telegraf.Input {
return &AMQPConsumer{
AuthMethod: DefaultAuthMethod,
PrefetchCount: DefaultPrefetchCount,
}
})
}
@@ -4,7 +4,7 @@
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: time that the HTTP connection will remain waiting for response. Default 4 seconds ("4s")
- **timeout** duration: time that the HTTP connection will remain waiting for response. Defalt 4 seconds ("4s")

##### Optional SSL Config

@@ -8,12 +8,11 @@ import (
"net/url"
"strconv"
"strings"
"sync"
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/registry/inputs"
)

type Apache struct {
@@ -29,8 +28,6 @@ type Apache struct {
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool

client *http.Client
}

var sampleConfig = `
@@ -68,51 +65,55 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
n.ResponseTimeout.Duration = time.Second * 5
}

if n.client == nil {
client, err := n.createHttpClient()
if err != nil {
return err
}
n.client = client
}
var outerr error
var errch = make(chan error)

var wg sync.WaitGroup
wg.Add(len(n.Urls))
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
}

go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
errch <- n.gatherUrl(addr, acc)
}(addr)
}

wg.Wait()
return nil
}
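The change around here swaps per-request transports in `gatherUrl` for a single cached `http.Client` built by `createHttpClient`. A minimal sketch of the reuse idea, under illustrative values (the URL is taken from the README above; the timeout is an assumption): constructing the client once lets keep-alive connections survive between collection cycles instead of re-dialing on every request.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"time"
)

// newClient builds one reusable client, as createHttpClient does above;
// a shared Transport keeps TCP/TLS sessions alive between collections.
func newClient(insecure bool) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
		},
		Timeout: 4 * time.Second,
	}
}

func main() {
	client := newClient(false)
	// Reusing the same client for repeated scrapes avoids re-dialing.
	for i := 0; i < 2; i++ {
		resp, err := client.Get("http://localhost/server-status?auto")
		if err != nil {
			log.Println(err)
			continue
		}
		resp.Body.Close()
	}
}
```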
|
||||
func (n *Apache) createHttpClient() (*http.Client, error) {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Drain channel, waiting for all requests to finish and save last error.
|
||||
for range n.Urls {
|
||||
if err := <-errch; err != nil {
|
||||
outerr = err
|
||||
}
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
Timeout: n.ResponseTimeout.Duration,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
|
||||
var tr *http.Transport
|
||||
|
||||
if addr.Scheme == "https" {
|
||||
tlsCfg, err := internal.GetTLSConfig(
|
||||
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tr = &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
TLSClientConfig: tlsCfg,
|
||||
}
|
||||
} else {
|
||||
tr = &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: n.ResponseTimeout.Duration,
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", addr.String(), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
|
||||
@@ -122,7 +123,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
req.SetBasicAuth(n.Username, n.Password)
|
||||
}
|
||||
|
||||
resp, err := n.client.Do(req)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestHTTPApache(t *testing.T) {
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(a.Gather)
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
)
|
||||
|
||||
type Bcache struct {
|
||||
|
||||
@@ -5,8 +5,9 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
@@ -122,8 +123,8 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
|
||||
}
|
||||
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
|
||||
} else {
|
||||
j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
|
||||
j.metric, out))
|
||||
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
|
||||
j.metric, out)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,8 +155,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
|
||||
addCassandraMetric(k, c, v.(map[string]interface{}))
|
||||
}
|
||||
} else {
|
||||
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
|
||||
c.metric, out))
|
||||
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
|
||||
c.metric, out)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
@@ -163,8 +164,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
|
||||
addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
|
||||
c, values.(map[string]interface{}))
|
||||
} else {
|
||||
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
|
||||
c.metric, out))
|
||||
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
|
||||
c.metric, out)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -273,8 +274,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
|
||||
m = newCassandraMetric(serverTokens["host"], metric, acc)
|
||||
} else {
|
||||
// unsupported metric type
|
||||
acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping",
|
||||
metric))
|
||||
log.Printf("I! Unsupported Cassandra metric [%s], skipping",
|
||||
metric)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -282,8 +283,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
|
||||
requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
|
||||
serverTokens["port"] + context + metric)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
continue
|
||||
return err
|
||||
}
|
||||
if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
|
||||
requestUrl.User = url.UserPassword(serverTokens["user"],
|
||||
@@ -291,12 +291,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
out, err := c.getAttr(requestUrl)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
continue
|
||||
}
|
||||
if out["status"] != 200.0 {
|
||||
acc.AddError(fmt.Errorf("URL returned with status %v\n", out["status"]))
|
||||
fmt.Printf("URL returned with status %v\n", out["status"])
|
||||
continue
|
||||
}
|
||||
m.addTagsFields(out)
|
||||
|
||||
@@ -151,7 +151,7 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {
|
||||
|
||||
var acc testutil.Accumulator
|
||||
acc.SetDebug(true)
|
||||
err := acc.GatherError(cassandra.Gather)
|
||||
err := cassandra.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(acc.Metrics))
|
||||
@@ -180,7 +180,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
|
||||
|
||||
var acc testutil.Accumulator
|
||||
acc.SetDebug(true)
|
||||
err := acc.GatherError(cassandra.Gather)
|
||||
err := cassandra.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(acc.Metrics))
|
||||
@@ -197,17 +197,16 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttp404(t *testing.T) {
|
||||
func TestHttpJsonOn404(t *testing.T) {
|
||||
|
||||
jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
|
||||
jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
|
||||
[]string{HeapMetric})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(jolokia.Gather)
|
||||
err := jolokia.Gather(&acc)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(acc.Metrics))
|
||||
assert.Contains(t, err.Error(), "has status code 404")
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected for class=Cassandra
|
||||
@@ -215,7 +214,7 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
|
||||
cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(cassandra.Gather)
|
||||
err := cassandra.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, len(acc.Metrics))
|
||||
@@ -247,7 +246,7 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
|
||||
|
||||
var acc testutil.Accumulator
|
||||
acc.SetDebug(true)
|
||||
err := acc.GatherError(cassandra.Gather)
|
||||
err := cassandra.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(acc.Metrics))
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -101,12 +101,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
|
||||
for _, s := range sockets {
|
||||
dump, err := perfDump(c.CephBinary, s)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
|
||||
log.Printf("E! error reading from socket '%s': %v", s.socket, err)
|
||||
continue
|
||||
}
|
||||
data, err := parseDump(dump)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
|
||||
log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
|
||||
continue
|
||||
}
|
||||
for tag, metrics := range data {
|
||||
|
||||
@@ -2,7 +2,7 @@ package cgroup
|
||||
|
||||
import (
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
)
|
||||
|
||||
type CGroup struct {
|
||||
|
||||
@@ -22,11 +22,10 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
for dir := range list {
|
||||
if dir.err != nil {
|
||||
acc.AddError(dir.err)
|
||||
continue
|
||||
return dir.err
|
||||
}
|
||||
if err := g.gatherDir(dir.path, acc); err != nil {
|
||||
acc.AddError(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ var cg1 = &CGroup{
|
||||
func TestCgroupStatistics_1(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := acc.GatherError(cg1.Gather)
|
||||
err := cg1.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
@@ -56,7 +56,7 @@ var cg2 = &CGroup{
|
||||
func TestCgroupStatistics_2(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := acc.GatherError(cg2.Gather)
|
||||
err := cg2.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
@@ -81,7 +81,7 @@ var cg3 = &CGroup{
|
||||
func TestCgroupStatistics_3(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := acc.GatherError(cg3.Gather)
|
||||
err := cg3.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
@@ -108,7 +108,7 @@ var cg4 = &CGroup{
|
||||
func TestCgroupStatistics_4(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := acc.GatherError(cg4.Gather)
|
||||
err := cg4.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
@@ -140,7 +140,7 @@ var cg5 = &CGroup{
|
||||
func TestCgroupStatistics_5(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := acc.GatherError(cg5.Gather)
|
||||
err := cg5.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
@@ -167,7 +167,7 @@ var cg6 = &CGroup{
|
||||
func TestCgroupStatistics_6(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := acc.GatherError(cg6.Gather)
|
||||
err := cg6.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
|
||||
@@ -63,7 +63,6 @@ Delete second or Not synchronised.
|
||||
### Measurements & Fields:
|
||||
|
||||
- chrony
|
||||
- system_time (float, seconds)
|
||||
- last_offset (float, seconds)
|
||||
- rms_offset (float, seconds)
|
||||
- frequency (float, ppm)
|
||||
@@ -85,7 +84,7 @@ Delete second or Not synchronised.
|
||||
```
|
||||
$ telegraf -config telegraf.conf -input-filter chrony -test
|
||||
* Plugin: chrony, Collection 1
|
||||
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
|
||||
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -92,7 +92,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
|
||||
}
|
||||
name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
|
||||
// ignore reference time
|
||||
if strings.Contains(name, "ref_time") {
|
||||
if strings.Contains(name, "time") {
|
||||
continue
|
||||
}
|
||||
valueFields := strings.Fields(stats[1])
|
||||
|
||||
@@ -31,7 +31,6 @@ func TestGather(t *testing.T) {
|
||||
"stratum": "3",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"system_time": 0.000020390,
|
||||
"last_offset": 0.000012651,
|
||||
"rms_offset": 0.000025577,
|
||||
"frequency": -16.001,
|
||||
|
||||
@@ -42,10 +42,9 @@ API endpoint. In the following order the plugin will attempt to authenticate.
|
||||
namespace = "AWS/ELB"
|
||||
|
||||
## Maximum requests per second. Note that the global default AWS rate limit is
|
||||
## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
|
||||
## maximum of 400. Optional - default value is 200.
|
||||
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
|
||||
ratelimit = 200
|
||||
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
|
||||
## maximum of 10. Optional - default value is 10.
|
||||
ratelimit = 10
|
||||
|
||||
## Metrics to Pull (optional)
|
||||
## Defaults to all Metrics in Namespace if nothing is provided
|
||||
|
||||
@@ -13,8 +13,9 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
internalaws "github.com/influxdata/telegraf/internal/config/aws"
|
||||
"github.com/influxdata/telegraf/internal/errchan"
|
||||
"github.com/influxdata/telegraf/internal/limiter"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -104,10 +105,9 @@ func (c *CloudWatch) SampleConfig() string {
|
||||
namespace = "AWS/ELB"
|
||||
|
||||
## Maximum requests per second. Note that the global default AWS rate limit is
|
||||
## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
|
||||
## maximum of 400. Optional - default value is 200.
|
||||
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
|
||||
ratelimit = 200
|
||||
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
|
||||
## maximum of 10. Optional - default value is 10.
|
||||
ratelimit = 10
|
||||
|
||||
## Metrics to Pull (optional)
|
||||
## Defaults to all Metrics in Namespace if nothing is provided
|
||||
@@ -185,6 +185,8 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metricCount := len(metrics)
|
||||
errChan := errchan.New(metricCount)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
@@ -199,12 +201,12 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
|
||||
<-lmtr.C
|
||||
go func(inm *cloudwatch.Metric) {
|
||||
defer wg.Done()
|
||||
acc.AddError(c.gatherMetric(acc, inm, now))
|
||||
c.gatherMetric(acc, inm, now, errChan.C)
|
||||
}(m)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
return errChan.Error()
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -212,7 +214,7 @@ func init() {
|
||||
ttl, _ := time.ParseDuration("1hr")
|
||||
return &CloudWatch{
|
||||
CacheTTL: internal.Duration{Duration: ttl},
|
||||
RateLimit: 200,
|
||||
RateLimit: 10,
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -282,11 +284,13 @@ func (c *CloudWatch) gatherMetric(
|
||||
acc telegraf.Accumulator,
|
||||
metric *cloudwatch.Metric,
|
||||
now time.Time,
|
||||
) error {
|
||||
errChan chan error,
|
||||
) {
|
||||
params := c.getStatisticsInput(metric, now)
|
||||
resp, err := c.client.GetMetricStatistics(params)
|
||||
if err != nil {
|
||||
return err
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
for _, point := range resp.Datapoints {
|
||||
@@ -321,7 +325,7 @@ func (c *CloudWatch) gatherMetric(
|
||||
acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
|
||||
}
|
||||
|
||||
return nil
|
||||
errChan <- nil
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -58,13 +58,13 @@ func TestGather(t *testing.T) {
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: internalDuration,
|
||||
Period: internalDuration,
|
||||
RateLimit: 200,
|
||||
RateLimit: 10,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
c.client = &mockGatherCloudWatchClient{}
|
||||
|
||||
acc.GatherError(c.Gather)
|
||||
c.Gather(&acc)
|
||||
|
||||
fields := map[string]interface{}{}
|
||||
fields["latency_minimum"] = 0.1
|
||||
@@ -146,7 +146,7 @@ func TestSelectMetrics(t *testing.T) {
|
||||
Namespace: "AWS/ELB",
|
||||
Delay: internalDuration,
|
||||
Period: internalDuration,
|
||||
RateLimit: 200,
|
||||
RateLimit: 10,
|
||||
Metrics: []*Metric{
|
||||
&Metric{
|
||||
MetricNames: []string{"Latency", "RequestCount"},
|
||||
@@ -207,13 +207,14 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMetricsCacheTimeout(t *testing.T) {
|
||||
ttl, _ := time.ParseDuration("5ms")
|
||||
cache := &MetricCache{
|
||||
Metrics: []*cloudwatch.Metric{},
|
||||
Fetched: time.Now(),
|
||||
TTL: time.Minute,
|
||||
TTL: ttl,
|
||||
}
|
||||
|
||||
assert.True(t, cache.IsValid())
|
||||
cache.Fetched = time.Now().Add(-time.Minute)
|
||||
time.Sleep(ttl)
|
||||
assert.False(t, cache.IsValid())
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
"log"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
@@ -92,15 +93,15 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
contents, err := ioutil.ReadFile(fName)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err))
|
||||
log.Printf("E! failed to read file '%s': %v", fName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
v := strings.TrimSpace(string(contents))
|
||||
fields[metricKey], err = strconv.ParseFloat(v, 64)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("E! failed to parse metric, expected number but "+
|
||||
" found '%s': %v", v, err))
|
||||
log.Printf("E! failed to parse metric, expected number but "+
|
||||
" found '%s': %v", v, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
)
|
||||
|
||||
type Consul struct {
|
||||
|
||||
@@ -3,7 +3,7 @@ package couchbase
|
||||
import (
|
||||
couchbase "github.com/couchbase/go-couchbase"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
"sync"
|
||||
)
|
||||
|
||||
@@ -42,17 +42,19 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var outerr error
|
||||
|
||||
for _, serv := range r.Servers {
|
||||
wg.Add(1)
|
||||
go func(serv string) {
|
||||
defer wg.Done()
|
||||
acc.AddError(r.gatherServer(serv, acc, nil))
|
||||
outerr = r.gatherServer(serv, acc, nil)
|
||||
}(serv)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error {
|
||||
|
||||
@@ -2,11 +2,13 @@ package couchdb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -81,20 +83,34 @@ func (*CouchDB) SampleConfig() string {
}

func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
errorChannel := make(chan error, len(c.HOSTs))
var wg sync.WaitGroup
for _, u := range c.HOSTs {
wg.Add(1)
go func(host string) {
defer wg.Done()
if err := c.fetchAndInsertData(accumulator, host); err != nil {
accumulator.AddError(fmt.Errorf("[host=%s]: %s", host, err))
errorChannel <- fmt.Errorf("[host=%s]: %s", host, err)
}
}(u)
}

wg.Wait()
close(errorChannel)

// If there weren't any errors, we can return nil now.
if len(errorChannel) == 0 {
return nil
}

// There were errors, so join them all together as one big error.
errorStrings := make([]string, 0, len(errorChannel))
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}

return errors.New(strings.Join(errorStrings, "\n"))

return nil
}

var tr = &http.Transport{

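The old couchdb Gather above shows the buffered-error-channel fan-out that this compare repeatedly trades against acc.AddError. A self-contained sketch of that pattern, with a hypothetical fetch stub standing in for fetchAndInsertData:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// fetch is a stand-in for a per-host gather call such as fetchAndInsertData.
func fetch(host string) error {
	if strings.HasPrefix(host, "bad") {
		return fmt.Errorf("connection refused")
	}
	return nil
}

func gather(hosts []string) error {
	// Buffer the channel to len(hosts) so no goroutine can block on send.
	errorChannel := make(chan error, len(hosts))
	var wg sync.WaitGroup
	for _, u := range hosts {
		wg.Add(1)
		go func(host string) {
			defer wg.Done()
			if err := fetch(host); err != nil {
				errorChannel <- fmt.Errorf("[host=%s]: %s", host, err)
			}
		}(u)
	}
	wg.Wait()
	close(errorChannel)

	// Join whatever errors arrived into one error, or return nil.
	var errorStrings []string
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}
	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}

func main() {
	fmt.Println(gather([]string{"good-host:5984", "bad-host:5984"}))
}
```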
@@ -316,5 +316,5 @@ func TestBasic(t *testing.T) {
}

var acc testutil.Accumulator
require.NoError(t, acc.GatherError(plugin.Gather))
require.NoError(t, plugin.Gather(&acc))
}

@@ -12,7 +12,7 @@ import (
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/registry/inputs"
)

type Disque struct {
@@ -75,11 +75,12 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {

var wg sync.WaitGroup

var outerr error

for _, serv := range g.Servers {
u, err := url.Parse(serv)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
continue
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
} else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Scheme = "tcp"
@@ -89,13 +90,13 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
wg.Add(1)
go func(serv string) {
defer wg.Done()
acc.AddError(g.gatherServer(u, acc))
outerr = g.gatherServer(u, acc)
}(serv)
}

wg.Wait()

return nil
return outerr
}

const defaultPort = "7711"

@@ -51,7 +51,7 @@ func TestDisqueGeneratesMetrics(t *testing.T) {

var acc testutil.Accumulator

err = acc.GatherError(r.Gather)
err = r.Gather(&acc)
require.NoError(t, err)

fields := map[string]interface{}{
@@ -117,7 +117,7 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {

var acc testutil.Accumulator

err = acc.GatherError(r.Gather)
err = r.Gather(&acc)
require.NoError(t, err)

fields := map[string]interface{}{

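These test hunks swap direct plugin.Gather(&acc) calls for acc.GatherError(plugin.Gather). A minimal sketch of how such a helper can fold accumulator errors into one returned error — an illustration of the idea, not testutil's actual code:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Accumulator is a cut-down stand-in for testutil.Accumulator.
type Accumulator struct {
	Errors []error
}

func (a *Accumulator) AddError(err error) {
	if err != nil {
		a.Errors = append(a.Errors, err)
	}
}

// GatherError runs a Gather function and folds any accumulated
// errors into the returned error, so require.NoError sees them.
func (a *Accumulator) GatherError(gather func(*Accumulator) error) error {
	if err := gather(a); err != nil {
		return err
	}
	if len(a.Errors) == 0 {
		return nil
	}
	msgs := make([]string, 0, len(a.Errors))
	for _, err := range a.Errors {
		msgs = append(msgs, err.Error())
	}
	return errors.New(strings.Join(msgs, "; "))
}

func main() {
	var acc Accumulator
	err := acc.GatherError(func(a *Accumulator) error {
		a.AddError(errors.New("server unreachable"))
		return nil
	})
	fmt.Println(err) // server unreachable
}
```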
@@ -1,47 +0,0 @@
# DMCache Input Plugin

This plugin provides a native collection of dmsetup-based statistics for dm-cache.

This plugin requires sudo, so you should configure your system to let telegraf execute sudo without a password (see the example below).

`sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run; knowing it is useful for debugging.

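A minimal sudoers entry along these lines would do; the `telegraf` user name and the rule file location are assumptions to adjust for your system:

```
# /etc/sudoers.d/telegraf (hypothetical): allow only this exact command
telegraf ALL = (root) NOPASSWD: /sbin/dmsetup status --target cache
```
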
### Configuration

```toml
[[inputs.dmcache]]
  ## Whether to report per-device stats or not
  per_device = true
```

### Measurements & Fields:

- dmcache
  - length
  - target
  - metadata_blocksize
  - metadata_used
  - metadata_total
  - cache_blocksize
  - cache_used
  - cache_total
  - read_hits
  - read_misses
  - write_hits
  - write_misses
  - demotions
  - promotions
  - dirty

### Tags:

- All measurements have the following tags:
  - device

### Example Output:

```
$ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache
* Plugin: inputs.dmcache, Collection 1
> dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776i,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000
```
@@ -1,33 -0,0 @@
package dmcache

import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)

type DMCache struct {
PerDevice bool `toml:"per_device"`
getCurrentStatus func() ([]string, error)
}

var sampleConfig = `
## Whether to report per-device stats or not
per_device = true
`

func (c *DMCache) SampleConfig() string {
return sampleConfig
}

func (c *DMCache) Description() string {
return "Provide a native collection for dmsetup based statistics for dm-cache"
}

func init() {
inputs.Add("dmcache", func() telegraf.Input {
return &DMCache{
PerDevice: true,
getCurrentStatus: dmSetupStatus,
}
})
}
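The deleted Linux implementation that follows parses `dmsetup status --target cache` output by whitespace-separated field position. A compact sketch of that layout, using a sample line taken from this compare's own test data (error handling elided for brevity):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Sample line from the dmcache test data in this compare.
	line := "cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0"

	values := strings.Fields(line)
	device := strings.TrimRight(values[0], ":")   // device name, colon stripped
	length, _ := strconv.Atoi(values[2])          // target length in sectors
	target := values[3]                           // always "cache" here
	metadata := strings.Split(values[5], "/")     // used/total metadata blocks
	cache := strings.Split(values[7], "/")        // used/total cache blocks

	fmt.Println(device, length, target, metadata[0], metadata[1], cache[0], cache[1])
}
```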
@@ -1,190 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package dmcache
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
const metricName = "dmcache"
|
||||
|
||||
type cacheStatus struct {
|
||||
device string
|
||||
length int
|
||||
target string
|
||||
metadataBlocksize int
|
||||
metadataUsed int
|
||||
metadataTotal int
|
||||
cacheBlocksize int
|
||||
cacheUsed int
|
||||
cacheTotal int
|
||||
readHits int
|
||||
readMisses int
|
||||
writeHits int
|
||||
writeMisses int
|
||||
demotions int
|
||||
promotions int
|
||||
dirty int
|
||||
}
|
||||
|
||||
func (c *DMCache) Gather(acc telegraf.Accumulator) error {
|
||||
outputLines, err := c.getCurrentStatus()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
totalStatus := cacheStatus{}
|
||||
|
||||
for _, s := range outputLines {
|
||||
status, err := parseDMSetupStatus(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.PerDevice {
|
||||
tags := map[string]string{"device": status.device}
|
||||
acc.AddFields(metricName, toFields(status), tags)
|
||||
}
|
||||
aggregateStats(&totalStatus, status)
|
||||
}
|
||||
|
||||
acc.AddFields(metricName, toFields(totalStatus), map[string]string{"device": "all"})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseDMSetupStatus(line string) (cacheStatus, error) {
|
||||
var err error
|
||||
parseError := errors.New("Output from dmsetup could not be parsed")
|
||||
status := cacheStatus{}
|
||||
values := strings.Fields(line)
|
||||
if len(values) < 15 {
|
||||
return cacheStatus{}, parseError
|
||||
}
|
||||
|
||||
status.device = strings.TrimRight(values[0], ":")
|
||||
status.length, err = strconv.Atoi(values[2])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.target = values[3]
|
||||
status.metadataBlocksize, err = strconv.Atoi(values[4])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
metadata := strings.Split(values[5], "/")
|
||||
if len(metadata) != 2 {
|
||||
return cacheStatus{}, parseError
|
||||
}
|
||||
status.metadataUsed, err = strconv.Atoi(metadata[0])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.metadataTotal, err = strconv.Atoi(metadata[1])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.cacheBlocksize, err = strconv.Atoi(values[6])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
cache := strings.Split(values[7], "/")
|
||||
if len(cache) != 2 {
|
||||
return cacheStatus{}, parseError
|
||||
}
|
||||
status.cacheUsed, err = strconv.Atoi(cache[0])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.cacheTotal, err = strconv.Atoi(cache[1])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.readHits, err = strconv.Atoi(values[8])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.readMisses, err = strconv.Atoi(values[9])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.writeHits, err = strconv.Atoi(values[10])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.writeMisses, err = strconv.Atoi(values[11])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.demotions, err = strconv.Atoi(values[12])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.promotions, err = strconv.Atoi(values[13])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
status.dirty, err = strconv.Atoi(values[14])
|
||||
if err != nil {
|
||||
return cacheStatus{}, err
|
||||
}
|
||||
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func aggregateStats(totalStatus *cacheStatus, status cacheStatus) {
|
||||
totalStatus.length += status.length
|
||||
totalStatus.metadataBlocksize += status.metadataBlocksize
|
||||
totalStatus.metadataUsed += status.metadataUsed
|
||||
totalStatus.metadataTotal += status.metadataTotal
|
||||
totalStatus.cacheBlocksize += status.cacheBlocksize
|
||||
totalStatus.cacheUsed += status.cacheUsed
|
||||
totalStatus.cacheTotal += status.cacheTotal
|
||||
totalStatus.readHits += status.readHits
|
||||
totalStatus.readMisses += status.readMisses
|
||||
totalStatus.writeHits += status.writeHits
|
||||
totalStatus.writeMisses += status.writeMisses
|
||||
totalStatus.demotions += status.demotions
|
||||
totalStatus.promotions += status.promotions
|
||||
totalStatus.dirty += status.dirty
|
||||
}
|
||||
|
||||
func toFields(status cacheStatus) map[string]interface{} {
|
||||
fields := make(map[string]interface{})
|
||||
fields["length"] = status.length
|
||||
fields["metadata_blocksize"] = status.metadataBlocksize
|
||||
fields["metadata_used"] = status.metadataUsed
|
||||
fields["metadata_total"] = status.metadataTotal
|
||||
fields["cache_blocksize"] = status.cacheBlocksize
|
||||
fields["cache_used"] = status.cacheUsed
|
||||
fields["cache_total"] = status.cacheTotal
|
||||
fields["read_hits"] = status.readHits
|
||||
fields["read_misses"] = status.readMisses
|
||||
fields["write_hits"] = status.writeHits
|
||||
fields["write_misses"] = status.writeMisses
|
||||
fields["demotions"] = status.demotions
|
||||
fields["promotions"] = status.promotions
|
||||
fields["dirty"] = status.dirty
|
||||
return fields
|
||||
}
|
||||
|
||||
func dmSetupStatus() ([]string, error) {
|
||||
out, err := exec.Command("/bin/sh", "-c", "sudo /sbin/dmsetup status --target cache").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if string(out) == "No devices found\n" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
outString := strings.TrimRight(string(out), "\n")
|
||||
status := strings.Split(outString, "\n")
|
||||
|
||||
return status, nil
|
||||
}
|
||||
@@ -1,15 -0,0 @@
// +build !linux

package dmcache

import (
"github.com/influxdata/telegraf"
)

func (c *DMCache) Gather(acc telegraf.Accumulator) error {
return nil
}

func dmSetupStatus() ([]string, error) {
return []string{}, nil
}
@@ -1,169 +0,0 @@
|
||||
package dmcache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
measurement = "dmcache"
|
||||
badFormatOutput = []string{"cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 "}
|
||||
good2DevicesFormatOutput = []string{
|
||||
"cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0 1 writeback 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
|
||||
"cs-2: 0 4294967296 cache 8 72352/1310720 128 26/24327168 2409 286 265 524682 0 0 0 1 writethrough 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
|
||||
}
|
||||
)
|
||||
|
||||
func TestPerDeviceGoodOutput(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
var plugin = &DMCache{
|
||||
PerDevice: true,
|
||||
getCurrentStatus: func() ([]string, error) {
|
||||
return good2DevicesFormatOutput, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := plugin.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags1 := map[string]string{
|
||||
"device": "cs-1",
|
||||
}
|
||||
fields1 := map[string]interface{}{
|
||||
"length": 4883791872,
|
||||
"metadata_blocksize": 8,
|
||||
"metadata_used": 1018,
|
||||
"metadata_total": 1501122,
|
||||
"cache_blocksize": 512,
|
||||
"cache_used": 7,
|
||||
"cache_total": 464962,
|
||||
"read_hits": 139,
|
||||
"read_misses": 352643,
|
||||
"write_hits": 15,
|
||||
"write_misses": 46,
|
||||
"demotions": 0,
|
||||
"promotions": 7,
|
||||
"dirty": 0,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
tags2 := map[string]string{
|
||||
"device": "cs-2",
|
||||
}
|
||||
fields2 := map[string]interface{}{
|
||||
"length": 4294967296,
|
||||
"metadata_blocksize": 8,
|
||||
"metadata_used": 72352,
|
||||
"metadata_total": 1310720,
|
||||
"cache_blocksize": 128,
|
||||
"cache_used": 26,
|
||||
"cache_total": 24327168,
|
||||
"read_hits": 2409,
|
||||
"read_misses": 286,
|
||||
"write_hits": 265,
|
||||
"write_misses": 524682,
|
||||
"demotions": 0,
|
||||
"promotions": 0,
|
||||
"dirty": 0,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
|
||||
|
||||
tags3 := map[string]string{
|
||||
"device": "all",
|
||||
}
|
||||
|
||||
fields3 := map[string]interface{}{
|
||||
"length": 9178759168,
|
||||
"metadata_blocksize": 16,
|
||||
"metadata_used": 73370,
|
||||
"metadata_total": 2811842,
|
||||
"cache_blocksize": 640,
|
||||
"cache_used": 33,
|
||||
"cache_total": 24792130,
|
||||
"read_hits": 2548,
|
||||
"read_misses": 352929,
|
||||
"write_hits": 280,
|
||||
"write_misses": 524728,
|
||||
"demotions": 0,
|
||||
"promotions": 7,
|
||||
"dirty": 0,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
|
||||
}
|
||||
|
||||
func TestNotPerDeviceGoodOutput(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
var plugin = &DMCache{
|
||||
PerDevice: false,
|
||||
getCurrentStatus: func() ([]string, error) {
|
||||
return good2DevicesFormatOutput, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := plugin.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
"device": "all",
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"length": 9178759168,
|
||||
"metadata_blocksize": 16,
|
||||
"metadata_used": 73370,
|
||||
"metadata_total": 2811842,
|
||||
"cache_blocksize": 640,
|
||||
"cache_used": 33,
|
||||
"cache_total": 24792130,
|
||||
"read_hits": 2548,
|
||||
"read_misses": 352929,
|
||||
"write_hits": 280,
|
||||
"write_misses": 524728,
|
||||
"demotions": 0,
|
||||
"promotions": 7,
|
||||
"dirty": 0,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, measurement, fields, tags)
|
||||
}
|
||||
|
||||
func TestNoDevicesOutput(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
var plugin = &DMCache{
|
||||
PerDevice: true,
|
||||
getCurrentStatus: func() ([]string, error) {
|
||||
return []string{}, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := plugin.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestErrorDuringGettingStatus(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
var plugin = &DMCache{
|
||||
PerDevice: true,
|
||||
getCurrentStatus: func() ([]string, error) {
|
||||
return nil, errors.New("dmsetup doesn't exist")
|
||||
},
|
||||
}
|
||||
|
||||
err := plugin.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestBadFormatOfStatus(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
var plugin = &DMCache{
|
||||
PerDevice: true,
|
||||
getCurrentStatus: func() ([]string, error) {
|
||||
return badFormatOutput, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := plugin.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
}
|
||||
@@ -9,7 +9,8 @@ import (
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/registry/inputs"
)

type DnsQuery struct {
@@ -57,10 +58,11 @@ func (d *DnsQuery) Description() string {
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
d.setDefaultValues()

errChan := errchan.New(len(d.Domains) * len(d.Servers))
for _, domain := range d.Domains {
for _, server := range d.Servers {
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
acc.AddError(err)
errChan.C <- err
tags := map[string]string{
"server": server,
"domain": domain,
@@ -72,7 +74,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
}
}

return nil
return errChan.Error()
}

func (d *DnsQuery) setDefaultValues() {

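dns_query, dovecot, elasticsearch, and exec all move to telegraf's internal errchan package in these hunks: a channel sized up front plus an Error() join. A rough stand-in sketch of that shape — a simplified illustration, not the real package:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// ErrChan is a simplified stand-in for internal/errchan: a buffered
// channel sized in advance so senders never block.
type ErrChan struct {
	C chan error
}

func New(n int) *ErrChan {
	return &ErrChan{C: make(chan error, n)}
}

// Error drains whatever has been sent and joins the non-nil errors.
func (e *ErrChan) Error() error {
	close(e.C)
	var msgs []string
	for err := range e.C {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "\n"))
}

func main() {
	domains := []string{"example.com", "example.org"}
	ec := New(len(domains))
	for _, d := range domains {
		// nil sends are fine; Error() filters them out.
		ec.C <- fmt.Errorf("lookup %s: timeout", d)
	}
	fmt.Println(ec.Error())
}
```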
@@ -24,7 +24,7 @@ func TestGathering(t *testing.T) {
}
var acc testutil.Accumulator

err := acc.GatherError(dnsConfig.Gather)
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
@@ -44,7 +44,7 @@ func TestGatheringMxRecord(t *testing.T) {
var acc testutil.Accumulator
dnsConfig.RecordType = "MX"

err := acc.GatherError(dnsConfig.Gather)
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
@@ -70,7 +70,7 @@ func TestGatheringRootDomain(t *testing.T) {
}
fields := map[string]interface{}{}

err := acc.GatherError(dnsConfig.Gather)
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
@@ -96,7 +96,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
}
fields := map[string]interface{}{}

err := acc.GatherError(dnsConfig.Gather)
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
@@ -121,7 +121,7 @@ func TestGatheringTimeout(t *testing.T) {

channel := make(chan error, 1)
go func() {
channel <- acc.GatherError(dnsConfig.Gather)
channel <- dnsConfig.Gather(&acc)
}()
select {
case res := <-channel:

@@ -16,26 +16,12 @@ for the stat structure can be found
```
# Read metrics about docker containers
[[inputs.docker]]
  ## Docker Endpoint
  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
  # Docker Endpoint
  # To use TCP, set endpoint = "tcp://[ip]:[port]"
  # To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  ## Only collect metrics for these containers, collect all if empty
  # Only collect metrics for these containers, collect all if empty
  container_names = []
  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
  ## network (eth0, eth1, ...) stats or not
  perdevice = true
  ## Whether to report for each container total blkio and network stats or not
  total = false

  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []

```

### Measurements & Fields:
@@ -136,32 +122,30 @@ based on the availability of per-cpu stats on your system.


### Tags:
#### Docker Engine tags

- docker (memory_total)
  - unit=bytes
  - engine_host
- docker (pool_blocksize)
  - unit=bytes
  - engine_host
- docker_data
  - unit=bytes
  - engine_host
- docker_metadata
  - unit=bytes
  - engine_host

#### Docker Container tags
- Tags on all containers:
  - engine_host
- docker_container_mem specific:
  - container_image
  - container_name
  - container_version
- docker_container_mem specific:
- docker_container_cpu specific:
  - container_image
  - container_name
  - cpu
- docker_container_net specific:
  - container_image
  - container_name
  - network
- docker_container_blkio specific:
  - container_image
  - container_name
  - device

### Example Output:

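The docker_label_include/docker_label_exclude options above drive glob-based label filtering (the docker.go hunk below removes that newer plumbing). A sketch of include/exclude matching using the standard library's path.Match as a stand-in for telegraf's filter package:

```go
package main

import (
	"fmt"
	"path"
)

// matchAny reports whether any glob pattern matches the key.
func matchAny(patterns []string, key string) bool {
	for _, p := range patterns {
		if ok, _ := path.Match(p, key); ok {
			return true
		}
	}
	return false
}

// keepLabel mirrors the include/exclude semantics: an empty include
// list admits everything, then the exclude list can veto.
func keepLabel(include, exclude []string, key string) bool {
	if len(include) > 0 && !matchAny(include, key) {
		return false
	}
	if len(exclude) > 0 && matchAny(exclude, key) {
		return false
	}
	return true
}

func main() {
	include := []string{"com.example.*"}
	exclude := []string{"*.secret"}
	for _, label := range []string{"com.example.team", "com.example.secret", "other"} {
		fmt.Println(label, keepLabel(include, exclude, label))
	}
}
```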
@@ -1,82 +1,42 @@
|
||||
package docker
|
||||
package system
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/filter"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
"golang.org/x/net/context"
|
||||
|
||||
type DockerLabelFilter struct {
|
||||
labelInclude filter.Filter
|
||||
labelExclude filter.Filter
|
||||
}
|
||||
"github.com/docker/engine-api/client"
|
||||
"github.com/docker/engine-api/types"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/registry/inputs"
|
||||
)
|
||||
|
||||
// Docker object
|
||||
type Docker struct {
|
||||
Endpoint string
|
||||
ContainerNames []string
|
||||
Timeout internal.Duration
|
||||
PerDevice bool `toml:"perdevice"`
|
||||
Total bool `toml:"total"`
|
||||
LabelInclude []string `toml:"docker_label_include"`
|
||||
LabelExclude []string `toml:"docker_label_exclude"`
|
||||
PerDevice bool `toml:"perdevice"`
|
||||
Total bool `toml:"total"`
|
||||
|
||||
LabelFilter DockerLabelFilter
|
||||
|
||||
client *client.Client
|
||||
client DockerClient
|
||||
engine_host string
|
||||
|
||||
testing bool
|
||||
labelFiltersCreated bool
|
||||
}
|
||||
|
||||
// infoWrapper wraps client.Client.List for testing.
|
||||
func infoWrapper(c *client.Client, ctx context.Context) (types.Info, error) {
|
||||
if c != nil {
|
||||
return c.Info(ctx)
|
||||
}
|
||||
fc := FakeDockerClient{}
|
||||
return fc.Info(ctx)
|
||||
}
|
||||
|
||||
// listWrapper wraps client.Client.ContainerList for testing.
|
||||
func listWrapper(
|
||||
c *client.Client,
|
||||
ctx context.Context,
|
||||
options types.ContainerListOptions,
|
||||
) ([]types.Container, error) {
|
||||
if c != nil {
|
||||
return c.ContainerList(ctx, options)
|
||||
}
|
||||
fc := FakeDockerClient{}
|
||||
return fc.ContainerList(ctx, options)
|
||||
}
|
||||
|
||||
// statsWrapper wraps client.Client.ContainerStats for testing.
|
||||
func statsWrapper(
|
||||
c *client.Client,
|
||||
ctx context.Context,
|
||||
containerID string,
|
||||
stream bool,
|
||||
) (types.ContainerStats, error) {
|
||||
if c != nil {
|
||||
return c.ContainerStats(ctx, containerID, stream)
|
||||
}
|
||||
fc := FakeDockerClient{}
|
||||
return fc.ContainerStats(ctx, containerID, stream)
|
||||
// DockerClient interface, useful for testing
|
||||
type DockerClient interface {
|
||||
Info(ctx context.Context) (types.Info, error)
|
||||
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
|
||||
ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
// KB, MB, GB, TB, PB...human friendly
|
||||
@@ -108,10 +68,6 @@ var sampleConfig = `
|
||||
## Whether to report for each container total blkio and network stats or not
|
||||
total = false
|
||||
|
||||
## docker labels to include and exclude as tags. Globs accepted.
|
||||
## Note that an empty array for both will include all labels as tags
|
||||
docker_label_include = []
|
||||
docker_label_exclude = []
|
||||
`
|
||||
|
||||
// Description returns input description
|
||||
@@ -124,7 +80,7 @@ func (d *Docker) SampleConfig() string { return sampleConfig }
|
||||
|
||||
// Gather starts stats collection
|
||||
func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
if d.client == nil && !d.testing {
|
||||
if d.client == nil {
|
||||
var c *client.Client
|
||||
var err error
|
||||
defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
|
||||
@@ -146,26 +102,18 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
d.client = c
|
||||
}
|
||||
// Create label filters if not already created
|
||||
if !d.labelFiltersCreated {
|
||||
err := d.createLabelFilters()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.labelFiltersCreated = true
|
||||
}
|
||||
|
||||
// Get daemon info
|
||||
err := d.gatherInfo(acc)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
|
||||
// List containers
|
||||
opts := types.ContainerListOptions{}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||
defer cancel()
|
||||
containers, err := listWrapper(d.client, ctx, opts)
|
||||
containers, err := d.client.ContainerList(ctx, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -178,8 +126,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
defer wg.Done()
|
||||
err := d.gatherContainer(c, acc)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("E! Error gathering container %s stats: %s\n",
|
||||
c.Names, err.Error()))
|
||||
log.Printf("E! Error gathering container %s stats: %s\n",
|
||||
c.Names, err.Error())
|
||||
}
|
||||
}(container)
|
||||
}
|
||||
@@ -196,7 +144,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
|
||||
// Get info from docker daemon
|
||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||
defer cancel()
|
||||
info, err := infoWrapper(d.client, ctx)
|
||||
info, err := d.client.Info(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -299,12 +247,12 @@ func (d *Docker) gatherContainer(
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||
defer cancel()
|
||||
r, err := statsWrapper(d.client, ctx, container.ID, false)
|
||||
r, err := d.client.ContainerStats(ctx, container.ID, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error getting docker stats: %s", err.Error())
|
||||
}
|
||||
defer r.Body.Close()
|
||||
dec := json.NewDecoder(r.Body)
|
||||
defer r.Close()
|
||||
dec := json.NewDecoder(r)
|
||||
if err = dec.Decode(&v); err != nil {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
@@ -314,11 +262,7 @@ func (d *Docker) gatherContainer(
|
||||
|
||||
// Add labels to tags
|
||||
for k, label := range container.Labels {
|
||||
if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) {
|
||||
if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) {
|
||||
tags[k] = label
|
||||
}
|
||||
}
|
||||
tags[k] = label
|
||||
}
|
||||
|
||||
gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)
|
||||
@@ -624,32 +568,11 @@ func parseSize(sizeStr string) (int64, error) {
|
||||
return int64(size), nil
|
||||
}
|
||||
|
||||
func (d *Docker) createLabelFilters() error {
|
||||
if len(d.LabelInclude) != 0 && d.LabelFilter.labelInclude == nil {
|
||||
var err error
|
||||
d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.LabelExclude) != 0 && d.LabelFilter.labelExclude == nil {
|
||||
var err error
|
||||
d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("docker", func() telegraf.Input {
|
||||
return &Docker{
|
||||
PerDevice: true,
|
||||
Timeout: internal.Duration{Duration: time.Second * 5},
|
||||
labelFiltersCreated: false,
|
||||
PerDevice: true,
|
||||
Timeout: internal.Duration{Duration: time.Second * 5},
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,12 +1,18 @@
|
||||
package docker
|
||||
package system
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/docker/engine-api/types"
|
||||
"github.com/docker/engine-api/types/registry"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -244,65 +250,146 @@ func testStats() *types.StatsJSON {
|
||||
return stats
|
||||
}
|
||||
|
||||
var gatherLabelsTests = []struct {
|
||||
include []string
|
||||
exclude []string
|
||||
expected []string
|
||||
notexpected []string
|
||||
}{
|
||||
{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
|
||||
{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
|
||||
{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
|
||||
{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
|
||||
{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
|
||||
{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
|
||||
{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
|
||||
{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
|
||||
{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
|
||||
type FakeDockerClient struct {
|
||||
}
|
||||
|
||||
func TestDockerGatherLabels(t *testing.T) {
|
||||
for _, tt := range gatherLabelsTests {
|
||||
var acc testutil.Accumulator
|
||||
d := Docker{
|
||||
client: nil,
|
||||
testing: true,
|
||||
}
|
||||
|
||||
for _, label := range tt.include {
|
||||
d.LabelInclude = append(d.LabelInclude, label)
|
||||
}
|
||||
for _, label := range tt.exclude {
|
||||
d.LabelExclude = append(d.LabelExclude, label)
|
||||
}
|
||||
|
||||
err := d.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, label := range tt.expected {
|
||||
if !acc.HasTag("docker_container_cpu", label) {
|
||||
t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
|
||||
label, tt.include, tt.exclude)
|
||||
}
|
||||
}
|
||||
|
||||
for _, label := range tt.notexpected {
|
||||
if acc.HasTag("docker_container_cpu", label) {
|
||||
t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
|
||||
label, tt.include, tt.exclude)
|
||||
}
|
||||
}
|
||||
func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
|
||||
env := types.Info{
|
||||
Containers: 108,
|
||||
ContainersRunning: 98,
|
||||
ContainersStopped: 6,
|
||||
ContainersPaused: 3,
|
||||
OomKillDisable: false,
|
||||
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
|
||||
NEventsListener: 0,
|
||||
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
|
||||
Debug: false,
|
||||
LoggingDriver: "json-file",
|
||||
KernelVersion: "4.3.0-1-amd64",
|
||||
IndexServerAddress: "https://index.docker.io/v1/",
|
||||
MemTotal: 3840757760,
|
||||
Images: 199,
|
||||
CPUCfsQuota: true,
|
||||
Name: "absol",
|
||||
SwapLimit: false,
|
||||
IPv4Forwarding: true,
|
||||
ExperimentalBuild: false,
|
||||
CPUCfsPeriod: true,
|
||||
RegistryConfig: ®istry.ServiceConfig{
|
||||
IndexConfigs: map[string]*registry.IndexInfo{
|
||||
"docker.io": {
|
||||
Name: "docker.io",
|
||||
Mirrors: []string{},
|
||||
Official: true,
|
||||
Secure: true,
|
||||
},
|
||||
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
|
||||
OperatingSystem: "Linux Mint LMDE (containerized)",
|
||||
BridgeNfIptables: true,
|
||||
HTTPSProxy: "",
|
||||
Labels: []string{},
|
||||
MemoryLimit: false,
|
||||
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
|
||||
NFd: 19,
|
||||
HTTPProxy: "",
|
||||
Driver: "devicemapper",
|
||||
NGoroutines: 39,
|
||||
NCPU: 4,
|
||||
DockerRootDir: "/var/lib/docker",
|
||||
NoProxy: "",
|
||||
BridgeNfIP6tables: true,
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
|
||||
container1 := types.Container{
|
||||
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
|
||||
Names: []string{"/etcd"},
|
||||
Image: "quay.io/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941930,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2380,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2379,
|
||||
PublicPort: 2379,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
}
|
||||
container2 := types.Container{
|
||||
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||
Names: []string{"/etcd2"},
|
||||
Image: "quay.io:4443/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941933,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2381,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2382,
|
||||
PublicPort: 2382,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
}
|
||||
|
||||
containers := []types.Container{container1, container2}
|
||||
return containers, nil
|
||||
|
||||
//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
|
||||
var stat io.ReadCloser
|
||||
jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
|
||||
stat = ioutil.NopCloser(strings.NewReader(jsonStat))
|
||||
return stat, nil
|
||||
}
|
||||
|
||||
func TestDockerGatherInfo(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
d := Docker{
|
||||
client: nil,
|
||||
testing: true,
|
||||
}
|
||||
client := FakeDockerClient{}
|
||||
d := Docker{client: client}
|
||||
|
||||
err := d.Gather(&acc)
|
||||
|
||||
err := acc.GatherError(d.Gather)
|
||||
require.NoError(t, err)
|
||||
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -345,8 +432,6 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"cpu": "cpu3",
|
||||
"container_version": "v2.2.2",
|
||||
"engine_host": "absol",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -393,8 +478,6 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"container_name": "etcd2",
|
||||
"container_image": "quay.io:4443/coreos/etcd",
|
||||
"container_version": "v2.2.2",
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -1,151 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
)
|
||||
|
||||
type FakeDockerClient struct {
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
|
||||
env := types.Info{
|
||||
Containers: 108,
|
||||
ContainersRunning: 98,
|
||||
ContainersStopped: 6,
|
||||
ContainersPaused: 3,
|
||||
OomKillDisable: false,
|
||||
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
|
||||
NEventsListener: 0,
|
||||
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
|
||||
Debug: false,
|
||||
LoggingDriver: "json-file",
|
||||
KernelVersion: "4.3.0-1-amd64",
|
||||
IndexServerAddress: "https://index.docker.io/v1/",
|
||||
MemTotal: 3840757760,
|
||||
Images: 199,
|
||||
CPUCfsQuota: true,
|
||||
Name: "absol",
|
||||
SwapLimit: false,
|
||||
IPv4Forwarding: true,
|
||||
ExperimentalBuild: false,
|
||||
CPUCfsPeriod: true,
|
||||
RegistryConfig: ®istry.ServiceConfig{
|
||||
IndexConfigs: map[string]*registry.IndexInfo{
|
||||
"docker.io": {
|
||||
Name: "docker.io",
|
||||
Mirrors: []string{},
|
||||
Official: true,
|
||||
Secure: true,
|
||||
},
|
||||
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
|
||||
OperatingSystem: "Linux Mint LMDE (containerized)",
|
||||
BridgeNfIptables: true,
|
||||
HTTPSProxy: "",
|
||||
Labels: []string{},
|
||||
MemoryLimit: false,
|
||||
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
|
||||
NFd: 19,
|
||||
HTTPProxy: "",
|
||||
Driver: "devicemapper",
|
||||
NGoroutines: 39,
|
||||
NCPU: 4,
|
||||
DockerRootDir: "/var/lib/docker",
|
||||
NoProxy: "",
|
||||
BridgeNfIP6tables: true,
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
|
||||
container1 := types.Container{
|
||||
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
|
||||
Names: []string{"/etcd"},
|
||||
Image: "quay.io/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941930,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4001,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2380,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2379,
|
||||
PublicPort: 2379,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
}
|
||||
container2 := types.Container{
|
||||
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
|
||||
Names: []string{"/etcd2"},
|
||||
Image: "quay.io:4443/coreos/etcd:v2.2.2",
|
||||
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
|
||||
Created: 1455941933,
|
||||
Status: "Up 4 hours",
|
||||
Ports: []types.Port{
|
||||
types.Port{
|
||||
PrivatePort: 7002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 4002,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2381,
|
||||
PublicPort: 0,
|
||||
Type: "tcp",
|
||||
},
|
||||
types.Port{
|
||||
PrivatePort: 2382,
|
||||
PublicPort: 2382,
|
||||
Type: "tcp",
|
||||
IP: "0.0.0.0",
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
},
|
||||
SizeRw: 0,
|
||||
SizeRootFs: 0,
|
||||
}
|
||||
|
||||
containers := []types.Container{container1, container2}
|
||||
return containers, nil
|
||||
|
||||
//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
|
||||
}
|
||||
|
||||
func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
|
||||
var stat types.ContainerStats
|
||||
jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
|
||||
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
|
||||
return stat, nil
|
||||
}
|
||||
@@ -12,7 +12,8 @@ import (
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/registry/inputs"
)

type Dovecot struct {
@@ -65,18 +66,19 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
}

var wg sync.WaitGroup
errChan := errchan.New(len(d.Servers) * len(d.Filters))
for _, server := range d.Servers {
for _, filter := range d.Filters {
wg.Add(1)
go func(s string, f string) {
defer wg.Done()
acc.AddError(d.gatherServer(s, acc, d.Type, f))
errChan.C <- d.gatherServer(s, acc, d.Type, f)
}(server, filter)
}
}

wg.Wait()
return nil
return errChan.Error()
}

func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {

@@ -10,8 +10,9 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/internal/errchan"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"github.com/influxdata/telegraf/registry/inputs"
"io/ioutil"
"strings"
)
@@ -152,6 +153,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
e.client = client
}

errChan := errchan.New(len(e.Servers) * 3)
var wg sync.WaitGroup
wg.Add(len(e.Servers))

@@ -169,29 +171,29 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
if e.ClusterStats {
// get cat/master information here so NodeStats can determine
// whether this node is the Master
if err := e.setCatMaster(s + "/_cat/master"); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
return
}
e.setCatMaster(s + "/_cat/master")
}

// Always gather node states
if err := e.gatherNodeStats(url, acc); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}

if e.ClusterHealth {
url = s + "/_cluster/health?level=indices"
if err := e.gatherClusterHealth(url, acc); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}
}

if e.ClusterStats && e.isMaster {
if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}
}
@@ -199,7 +201,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
}

wg.Wait()
return nil
return errChan.Error()
}

func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
@@ -356,7 +358,7 @@ func (e *Elasticsearch) setCatMaster(url string) error {
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
// to let the underlying transport close the connection and re-establish a new one for
// future calls.
return fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK)
return fmt.Errorf("status-code %d, expected %d", r.StatusCode, http.StatusOK)
}
response, err := ioutil.ReadAll(r.Body)


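The elasticsearch hunk masks credentials embedded in request URLs before errors are reported. The plugin's unexported mask regexp itself is not shown in this hunk, so the pattern below is an assumption that merely reproduces the visible replacement:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Assumed pattern: strip userinfo from URLs before they reach logs.
	mask := regexp.MustCompile(`https?://\S+:\S+@`)
	err := fmt.Errorf("Get http://admin:secret@es-node:9200/_cat/master: connection refused")
	fmt.Println(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
}
```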
@@ -71,7 +71,7 @@ func TestGather(t *testing.T) {
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)

var acc testutil.Accumulator
if err := acc.GatherError(es.Gather); err != nil {
if err := es.Gather(&acc); err != nil {
t.Fatal(err)
}


@@ -15,9 +15,10 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/parsers/nagios"
"github.com/influxdata/telegraf/registry/inputs"
)

const sampleConfig = `
@@ -35,7 +36,7 @@ const sampleConfig = `
name_suffix = "_mycollector"

## Data format to consume.
## Each data format has its own unique set of configuration options, read
## Each data format has it's own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
@@ -48,7 +49,8 @@ type Exec struct {

parser parsers.Parser

runner Runner
runner Runner
errChan chan error
}

func NewExec() *Exec {
@@ -148,13 +150,13 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync

out, err := e.runner.Run(e, command, acc)
if err != nil {
acc.AddError(err)
e.errChan <- err
return
}

metrics, err := e.parser.Parse(out)
if err != nil {
acc.AddError(err)
e.errChan <- err
} else {
for _, metric := range metrics {
acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
@@ -191,8 +193,7 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {

matches, err := filepath.Glob(cmdAndArgs[0])
if err != nil {
acc.AddError(err)
continue
return err
}

if len(matches) == 0 {
@@ -213,12 +214,15 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
}
}

errChan := errchan.New(len(commands))
e.errChan = errChan.C

wg.Add(len(commands))
for _, command := range commands {
go e.ProcessCommand(command, acc, &wg)
}
wg.Wait()
return nil
return errChan.Error()
}

func init() {

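The exec hunk above expands the configured command through filepath.Glob before running it. A small sketch of that expansion step, with a hypothetical pattern:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Expand the command path the way the exec plugin does;
	// the pattern below is an example, not a real telegraf default.
	matches, err := filepath.Glob("/usr/local/bin/collect_*")
	if err != nil {
		fmt.Println("bad pattern:", err)
		return
	}
	if len(matches) == 0 {
		fmt.Println("no matching commands")
		return
	}
	for _, m := range matches {
		fmt.Println("would run:", m)
	}
}
```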
@@ -37,8 +37,6 @@ const malformedJson = `
|
||||
`
|
||||
|
||||
const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n"
|
||||
const lineProtocolEmpty = ""
|
||||
const lineProtocolShort = "ab"
|
||||
|
||||
const lineProtocolMulti = `
|
||||
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
|
||||
@@ -101,7 +99,7 @@ func TestExec(t *testing.T) {
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")
|
||||
|
||||
@@ -127,7 +125,8 @@ func TestExecMalformed(t *testing.T) {
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.Error(t, acc.GatherError(e.Gather))
|
||||
err := e.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
@@ -140,7 +139,8 @@ func TestCommandError(t *testing.T) {
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.Error(t, acc.GatherError(e.Gather))
|
||||
err := e.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
@@ -153,7 +153,8 @@ func TestLineProtocolParse(t *testing.T) {
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, acc.GatherError(e.Gather))
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
@@ -166,33 +167,6 @@ func TestLineProtocolParse(t *testing.T) {
|
||||
acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
|
||||
}
|
||||
|
||||
func TestLineProtocolEmptyParse(t *testing.T) {
|
||||
parser, _ := parsers.NewInfluxParser()
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(lineProtocolEmpty), nil),
|
||||
Commands: []string{"line-protocol"},
|
||||
parser: parser,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLineProtocolShortParse(t *testing.T) {
|
||||
parser, _ := parsers.NewInfluxParser()
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(lineProtocolShort), nil),
|
||||
Commands: []string{"line-protocol"},
|
||||
parser: parser,
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected")
|
||||
}
|
||||
|
||||
func TestLineProtocolParseMultiple(t *testing.T) {
|
||||
parser, _ := parsers.NewInfluxParser()
|
||||
e := &Exec{
|
||||
@@ -202,7 +176,7 @@ func TestLineProtocolParseMultiple(t *testing.T) {
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
@@ -228,7 +202,7 @@ func TestExecCommandWithGlob(t *testing.T) {
|
||||
e.SetParser(parser)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
@@ -244,7 +218,7 @@ func TestExecCommandWithoutGlob(t *testing.T) {
|
||||
e.SetParser(parser)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
@@ -260,7 +234,7 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) {
|
||||
e.SetParser(parser)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(e.Gather)
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
|
||||
@@ -9,7 +9,7 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/globpath"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/registry/inputs"
)

const sampleConfig = `
@@ -48,6 +48,7 @@ func (_ *FileStat) Description() string {
func (_ *FileStat) SampleConfig() string { return sampleConfig }

func (f *FileStat) Gather(acc telegraf.Accumulator) error {
var errS string
var err error

for _, filepath := range f.Files {
@@ -55,7 +56,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
g, ok := f.globs[filepath]
if !ok {
if g, err = globpath.Compile(filepath); err != nil {
acc.AddError(err)
errS += err.Error() + " "
continue
}
f.globs[filepath] = g
@@ -91,7 +92,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
if f.Md5 {
md5, err := getMd5(fileName)
if err != nil {
acc.AddError(err)
errS += err.Error() + " "
} else {
fields["md5_sum"] = md5
}
@@ -101,6 +102,9 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
}
}

if errS != "" {
return fmt.Errorf(errS)
}
return nil
}


@@ -19,7 +19,7 @@ func TestGatherNoMd5(t *testing.T) {
}

acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
fs.Gather(&acc)

tags1 := map[string]string{
"file": dir + "log1.log",
@@ -59,7 +59,7 @@ func TestGatherExplicitFiles(t *testing.T) {
}

acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
fs.Gather(&acc)

tags1 := map[string]string{
"file": dir + "log1.log",
@@ -99,7 +99,7 @@ func TestGatherGlob(t *testing.T) {
}

acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
fs.Gather(&acc)

tags1 := map[string]string{
"file": dir + "log1.log",
@@ -131,7 +131,7 @@ func TestGatherSuperAsterisk(t *testing.T) {
}

acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
fs.Gather(&acc)

tags1 := map[string]string{
"file": dir + "log1.log",

@@ -4,6 +4,7 @@ import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
@@ -15,7 +16,7 @@ import (

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/registry/inputs"
)

type ResponseMetrics struct {
@@ -148,17 +149,31 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
		h.client.SetHTTPClient(client)
	}

	errorChannel := make(chan error, len(h.Servers))

	for _, server := range h.Servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			acc.AddError(h.gatherServer(acc, server))
			if err := h.gatherServer(acc, server); err != nil {
				errorChannel <- err
			}
		}(server)
	}

	wg.Wait()
	close(errorChannel)

	return nil
	// Get all errors and return them as one giant error
	errorStrings := []string{}
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}
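Both sides of this Gather hunk fan the per-server work out to goroutines and differ only in how errors travel back: one side reports through `acc.AddError`, the other collects them on a buffered channel and joins them after `wg.Wait()`. A self-contained sketch of the channel variant, with a stub standing in for `gatherServer`:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

func gatherAll(servers []string, gather func(string) error) error {
	var wg sync.WaitGroup
	// Buffer one slot per server so no goroutine blocks on send.
	errorChannel := make(chan error, len(servers))

	for _, server := range servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			if err := gather(server); err != nil {
				errorChannel <- err
			}
		}(server)
	}

	wg.Wait()
	close(errorChannel) // safe: all senders have finished

	// Join every error into one, or return nil if there were none.
	errorStrings := []string{}
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}
	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}

func main() {
	err := gatherAll([]string{"a", "b"}, func(s string) error {
		return fmt.Errorf("[server=%s]: unreachable", s)
	})
	fmt.Println(err)
}
```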
// Gathers data from a particular server

@@ -157,7 +157,7 @@ func TestNormalResponse(t *testing.T) {

	for _, service := range graylog {
		var acc testutil.Accumulator
		err := acc.GatherError(service.Gather)
		err := service.Gather(&acc)
		require.NoError(t, err)
		for k, v := range expectedFields {
			acc.AssertContainsTaggedFields(t, k, v, validTags[k])
@@ -170,9 +170,9 @@ func TestHttpJson500(t *testing.T) {
	graylog := genMockGrayLog(validJSON, 500)

	var acc testutil.Accumulator
	err := acc.GatherError(graylog[0].Gather)
	err := graylog[0].Gather(&acc)

	assert.Error(t, err)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}

@@ -181,9 +181,9 @@ func TestHttpJsonBadJson(t *testing.T) {
	graylog := genMockGrayLog(invalidJSON, 200)

	var acc testutil.Accumulator
	err := acc.GatherError(graylog[0].Gather)
	err := graylog[0].Gather(&acc)

	assert.Error(t, err)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}

@@ -192,8 +192,8 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
	graylog := genMockGrayLog(empty, 200)

	var acc testutil.Accumulator
	err := acc.GatherError(graylog[0].Gather)
	err := graylog[0].Gather(&acc)

	assert.Error(t, err)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}

@@ -7,30 +7,7 @@

```toml
# SampleConfig
[[inputs.haproxy]]
  ## An array of addresses to gather stats about. Specify an ip or hostname
  ## with optional port. ie localhost, 10.10.3.33:1936, etc.
  ## Make sure you specify the complete path to the stats endpoint
  ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats

  ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
  servers = ["http://myhaproxy.com:1936/haproxy?stats"]

  ## You can also use local socket with standard wildcard globbing.
  ## Server address not starting with 'http' will be treated as a possible
  ## socket, so both examples below are valid.
  # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]

  ## By default, some of the fields are renamed from what haproxy calls them.
  ## Setting this option to true results in the plugin keeping the original
  ## field names.
  # keep_field_names = true

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
  servers = ["http://1.2.3.4/haproxy?stats", "/var/run/haproxy*.sock"]
```

#### `servers`

@@ -14,8 +14,7 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/registry/inputs"
)

//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
@@ -26,15 +25,6 @@ type haproxy struct {
	client *http.Client

	KeepFieldNames bool

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool
}

var sampleConfig = `
@@ -42,26 +32,19 @@ var sampleConfig = `
  ## with optional port. ie localhost, 10.10.3.33:1936, etc.
  ## Make sure you specify the complete path to the stats endpoint
  ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats

  #
  ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
  servers = ["http://myhaproxy.com:1936/haproxy?stats"]

  ##
  ## You can also use local socket with standard wildcard globbing.
  ## Server address not starting with 'http' will be treated as a possible
  ## socket, so both examples below are valid.
  # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]

  ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
  #
  ## By default, some of the fields are renamed from what haproxy calls them.
  ## Setting this option to true results in the plugin keeping the original
  ## field names.
  # keep_field_names = true

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
  ## keep_field_names = true
`

func (r *haproxy) SampleConfig() string {
@@ -144,15 +127,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
	}

	if g.client == nil {
		tlsCfg, err := internal.GetTLSConfig(
			g.SSLCert, g.SSLKey, g.SSLCA, g.InsecureSkipVerify)
		if err != nil {
			return err
		}
		tr := &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
			TLSClientConfig:       tlsCfg,
		}
		tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
		client := &http.Client{
			Transport: tr,
			Timeout:   time.Duration(4 * time.Second),

@@ -4,8 +4,8 @@ package hddtemp

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
	"github.com/influxdata/telegraf/registry/inputs"
)

const defaultAddress = "127.0.0.1:7634"

@@ -2,18 +2,11 @@

The HTTP listener is a service input plugin that listens for messages sent via HTTP POST.
The plugin expects messages in the InfluxDB line-protocol format ONLY; other Telegraf input data formats are not supported.
The intent of the plugin is to allow Telegraf to serve as a proxy/router for the `/write` endpoint of the InfluxDB HTTP API.

The `/write` endpoint supports the `precision` query parameter, which can be set to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and defer to the output plugins' configuration.

The intent of the plugin is to allow Telegraf to serve as a proxy/router for the /write endpoint of the InfluxDB HTTP API.
When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}`, but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.

See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).

**Example:**
```
curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
```
Example: curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
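To make the `precision` parameter concrete, here is a small Go client equivalent to the curl examples above, posting a seconds-precision timestamp to the listener (the 204 status noted in the comment is what InfluxDB-style write endpoints conventionally return on success; treat that as an assumption here):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Timestamp in seconds; the listener scales it because of precision=s.
	line := "cpu_load_short,host=server01,region=us-west value=0.64 1434055562"

	// Port and path mirror the curl example above; precision is the
	// query parameter discussed in this README.
	resp, err := http.Post(
		"http://localhost:8186/write?precision=s",
		"text/plain",
		strings.NewReader(line),
	)
	if err != nil {
		fmt.Println("post failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 204 No Content on success
}
```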

### Configuration:

@@ -12,8 +12,8 @@ import (

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers/influx"
	"github.com/influxdata/telegraf/registry/inputs"
	"github.com/influxdata/telegraf/selfstat"
)

@@ -35,7 +35,6 @@ type HTTPListener struct {
	WriteTimeout internal.Duration
	MaxBodySize  int64
	MaxLineSize  int
	Port         int

	mu sync.Mutex
	wg sync.WaitGroup
@@ -125,7 +124,6 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
		return err
	}
	h.listener = listener
	h.Port = listener.Addr().(*net.TCPAddr).Port

	h.wg.Add(1)
	go func() {
@@ -207,12 +205,10 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
	}
	now := time.Now()

	precision := req.URL.Query().Get("precision")

	// Handle gzip request bodies
	body := req.Body
	var err error
	if req.Header.Get("Content-Encoding") == "gzip" {
		var err error
		body, err = gzip.NewReader(req.Body)
		defer body.Close()
		if err != nil {
@@ -265,7 +261,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {

		if err == io.ErrUnexpectedEOF {
			// finished reading the request body
			if err := h.parse(buf[:n+bufStart], now, precision); err != nil {
			if err := h.parse(buf[:n+bufStart], now); err != nil {
				log.Println("E! " + err.Error())
				return400 = true
			}
@@ -290,7 +286,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
			bufStart = 0
			continue
		}
		if err := h.parse(buf[:i+1], now, precision); err != nil {
		if err := h.parse(buf[:i+1], now); err != nil {
			log.Println("E! " + err.Error())
			return400 = true
		}
@@ -303,8 +299,8 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
	}
}

func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
	metrics, err := h.parser.ParseWithDefaultTimePrecision(b, t, precision)
func (h *HTTPListener) parse(b []byte, t time.Time) error {
	metrics, err := h.parser.ParseWithDefaultTime(b, t)

	for _, m := range metrics {
		h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())

File diff suppressed because one or more lines are too long
@@ -13,7 +13,7 @@ import (

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/registry/inputs"
)

// HTTPResponse struct
@@ -25,6 +25,7 @@ type HTTPResponse struct {
	Headers             map[string]string
	FollowRedirects     bool
	ResponseStringMatch string
	compiledStringMatch *regexp.Regexp

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
@@ -34,9 +35,6 @@ type HTTPResponse struct {
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	compiledStringMatch *regexp.Regexp
	client              *http.Client
}

// Description returns the plugin Description
@@ -90,12 +88,13 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
	if err != nil {
		return nil, err
	}
	tr := &http.Transport{
		ResponseHeaderTimeout: h.ResponseTimeout.Duration,
		TLSClientConfig:       tlsCfg,
	}
	client := &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: true,
			TLSClientConfig:   tlsCfg,
		},
		Timeout: h.ResponseTimeout.Duration,
		Transport: tr,
		Timeout:   h.ResponseTimeout.Duration,
	}

	if h.FollowRedirects == false {
@@ -107,10 +106,15 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
	}

// HTTPGather gathers all fields and returns any errors it encounters
func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) {
	// Prepare fields
	fields := make(map[string]interface{})

	client, err := h.createHttpClient()
	if err != nil {
		return nil, err
	}

	var body io.Reader
	if h.Body != "" {
		body = strings.NewReader(h.Body)
@@ -129,7 +133,7 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {

	// Start Timer
	start := time.Now()
	resp, err := h.client.Do(request)
	resp, err := client.Do(request)
	if err != nil {
		if h.FollowRedirects {
			return nil, err
@@ -141,11 +145,6 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
			return nil, err
		}
	}
	defer func() {
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}()

	fields["response_time"] = time.Since(start).Seconds()
	fields["http_response_code"] = resp.StatusCode

@@ -203,17 +202,8 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
	// Prepare data
	tags := map[string]string{"server": h.Address, "method": h.Method}
	var fields map[string]interface{}

	if h.client == nil {
		client, err := h.createHttpClient()
		if err != nil {
			return err
		}
		h.client = client
	}

	// Gather data
	fields, err = h.httpGather()
	fields, err = h.HTTPGather()
	if err != nil {
		return err
	}
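The HTTPResponse struct in this hunk keeps both the raw `ResponseStringMatch` pattern and a cached `compiledStringMatch` regexp, and the tests below expect a 0/1 `response_string_match` field. A hedged sketch of that compile-once idea — illustrative only, not the plugin's exact code:

```go
package main

import (
	"fmt"
	"regexp"
)

type matcher struct {
	pattern  string
	compiled *regexp.Regexp
}

// field returns the 0/1 value reported as response_string_match,
// compiling the pattern on first use and reusing it afterwards.
func (m *matcher) field(body string) (int, error) {
	if m.compiled == nil {
		re, err := regexp.Compile(m.pattern)
		if err != nil {
			return 0, err
		}
		m.compiled = re
	}
	if m.compiled.MatchString(body) {
		return 1, nil
	}
	return 0, nil
}

func main() {
	m := &matcher{pattern: `"service_status":\s*"up"`}
	v, _ := m.field(`{"service_status": "up"}`)
	fmt.Println(v) // 1
}
```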
@@ -9,7 +9,6 @@ import (
	"time"

	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -74,13 +73,13 @@ func TestHeaders(t *testing.T) {
			"Host": "Hello",
		},
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.NotNil(t, fields["response_time"])
}

func TestFields(t *testing.T) {
@@ -98,14 +97,13 @@ func TestFields(t *testing.T) {
		},
		FollowRedirects: true,
	}

	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.NotNil(t, fields["response_time"])
}

func TestRedirects(t *testing.T) {
@@ -123,13 +121,12 @@ func TestRedirects(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	h = &HTTPResponse{
		Address: ts.URL + "/badredirect",
@@ -141,12 +138,8 @@ func TestRedirects(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	fields, err = h.HTTPGather()
	require.Error(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.False(t, ok)
}

func TestMethod(t *testing.T) {
@@ -164,13 +157,12 @@ func TestMethod(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	h = &HTTPResponse{
		Address: ts.URL + "/mustbepostmethod",
@@ -182,13 +174,12 @@ func TestMethod(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	fields, err = h.HTTPGather()
	require.NoError(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusMethodNotAllowed, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"])
	}

	//check that lowercase methods work correctly
	h = &HTTPResponse{
@@ -201,13 +192,12 @@ func TestMethod(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	fields, err = h.HTTPGather()
	require.NoError(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusMethodNotAllowed, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"])
	}
}

func TestBody(t *testing.T) {
@@ -225,13 +215,12 @@ func TestBody(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	h = &HTTPResponse{
		Address: ts.URL + "/musthaveabody",
@@ -242,13 +231,12 @@ func TestBody(t *testing.T) {
		},
		FollowRedirects: true,
	}
	acc = testutil.Accumulator{}
	err = h.Gather(&acc)
	fields, err = h.HTTPGather()
	require.NoError(t, err)

	value, ok = acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusBadRequest, value)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusBadRequest, fields["http_response_code"])
	}
}

func TestStringMatch(t *testing.T) {
@@ -267,18 +255,15 @@ func TestStringMatch(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.Equal(t, 1, fields["response_string_match"])
	assert.NotNil(t, fields["response_time"])

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	value, ok = acc.IntField("http_response", "response_string_match")
	require.True(t, ok)
	require.Equal(t, 1, value)
	_, ok = acc.FloatField("http_response", "response_time")
	require.True(t, ok)
}

func TestStringMatchJson(t *testing.T) {
@@ -297,18 +282,15 @@ func TestStringMatchJson(t *testing.T) {
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.Equal(t, 1, fields["response_string_match"])
	assert.NotNil(t, fields["response_time"])

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	value, ok = acc.IntField("http_response", "response_string_match")
	require.True(t, ok)
	require.Equal(t, 1, value)
	_, ok = acc.FloatField("http_response", "response_time")
	require.True(t, ok)
}

func TestStringMatchFail(t *testing.T) {
@@ -327,26 +309,18 @@ func TestStringMatchFail(t *testing.T) {
		},
		FollowRedirects: true,
	}

	var acc testutil.Accumulator
	err := h.Gather(&acc)
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	assert.Equal(t, 0, fields["response_string_match"])
	assert.NotNil(t, fields["response_time"])

	value, ok := acc.IntField("http_response", "http_response_code")
	require.True(t, ok)
	require.Equal(t, http.StatusOK, value)
	value, ok = acc.IntField("http_response", "response_string_match")
	require.True(t, ok)
	require.Equal(t, 0, value)
	_, ok = acc.FloatField("http_response", "response_time")
	require.True(t, ok)
}

func TestTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test with sleep in short mode.")
	}

	mux := setUpTestMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()
@@ -355,16 +329,12 @@ func TestTimeout(t *testing.T) {
		Address: ts.URL + "/twosecondnap",
		Body:    "{ 'test': 'data'}",
		Method:  "GET",
		ResponseTimeout: internal.Duration{Duration: time.Millisecond},
		ResponseTimeout: internal.Duration{Duration: time.Second * 1},
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	var acc testutil.Accumulator
	err := h.Gather(&acc)
	require.NoError(t, err)

	ok := acc.HasIntField("http_response", "http_response_code")
	require.False(t, ok)
	_, err := h.HTTPGather()
	require.Error(t, err)
}

@@ -1,79 +1,128 @@
|
||||
# HTTP JSON Input Plugin
|
||||
# HTTP JSON Plugin
|
||||
|
||||
The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats.
|
||||
The httpjson plugin can collect data from remote URLs which respond with JSON. Then it flattens JSON and finds all numeric values, treating them as floats.
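Either way the flattening works the same: nested objects are walked and every numeric leaf becomes a field whose name joins the key path with underscores (so `b.d` becomes `b_d`), while strings and booleans are dropped. A rough, self-contained sketch of that idea — not the plugin's actual parser:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks a decoded JSON value and collects numeric leaves,
// joining nested keys with underscores (e.g. "b_d").
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch val := v.(type) {
	case map[string]interface{}:
		for k, child := range val {
			name := k
			if prefix != "" {
				name = prefix + "_" + k
			}
			flatten(name, child, out)
		}
	case float64: // encoding/json decodes all JSON numbers as float64
		out[prefix] = val
	default:
		// strings, booleans, and nulls are ignored, as the README states
	}
}

func main() {
	raw := `{"service": "service01", "a": 0.5, "b": {"c": "some text", "d": 0.1, "e": 5}}`
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &doc); err != nil {
		panic(err)
	}
	fields := map[string]float64{}
	flatten("", doc, fields)
	fmt.Println(fields) // map[a:0.5 b_d:0.1 b_e:5]
}
```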

### Configuration:
For example, if you have a service called _mycollector_, which has an HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON plugin like this:

```toml
```
[[inputs.httpjson]]
  ## NOTE This plugin only reads numerical measurements, strings and booleans
  ## will be ignored.
  name = "mycollector"

  ## Name for the service being polled. Will be appended to the name of the
  ## measurement e.g. "httpjson_webserver_stats".
  ##
  ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
  name = "webserver_stats"

  ## URL of each server in the service's cluster
  servers = [
    "http://localhost:9999/stats/",
    "http://localhost:9998/stats/",
    "http://my.service.com/_stats"
  ]
  ## Set response_timeout (default 5 seconds)
  response_timeout = "5s"

  ## HTTP method to use: GET or POST (case-sensitive)
  # HTTP method to use (case-sensitive)
  method = "GET"

  ## Tags to extract from top-level of JSON server response.
  # tag_keys = [
  #   "my_tag_1",
  #   "my_tag_2"
  # ]

  ## HTTP Request Parameters (all values must be strings). For "GET" requests, data
  ## will be included in the query. For "POST" requests, data will be included
  ## in the request body as "x-www-form-urlencoded".
  # [inputs.httpjson.parameters]
  #   event_type = "cpu_spike"
  #   threshold = "0.75"

  ## HTTP Request Headers (all values must be strings).
  # [inputs.httpjson.headers]
  #   X-Auth-Token = "my-xauth-token"
  #   apiVersion = "v1"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
  # Set response_timeout (default 5 seconds)
  response_timeout = "5s"
```

### Measurements & Fields:
`name` is used as a prefix for the measurements.

- httpjson
  - response_time (float): Response time in seconds
`method` specifies the HTTP method to use for requests.

Additional fields are dependent on the response of the remote service being polled.
`response_timeout` specifies the timeout to wait for the response.

### Tags:
You can also specify which keys from the server response should be considered tags:

- All measurements have the following tags:
  - server: HTTP origin as defined in configuration as `servers`.
```
[[inputs.httpjson]]
  ...

Any top level keys listed under `tag_keys` in the configuration are added as tags. Top level keys are defined as keys in the root level of the object in a single object response, or in the root level of each object within an array of objects.
  tag_keys = [
    "role",
    "version"
  ]
```

If the JSON response is an array of objects, then each object will be parsed with the same configuration.

### Examples Output:
You can also specify additional request parameters for the service:

This plugin understands responses containing a single JSON object, or a JSON Array of Objects.
```
[[inputs.httpjson]]
  ...

**Object Output:**
  [inputs.httpjson.parameters]
    event_type = "cpu_spike"
    threshold = "0.75"

Given the following response body:
```

You can also specify additional request header parameters for the service:

```
[[inputs.httpjson]]
  ...

  [inputs.httpjson.headers]
    X-Auth-Token = "my-xauth-token"
    apiVersion = "v1"
```

# Example:

Let's say that we have a service named "mycollector" configured like this:

```
[[inputs.httpjson]]
  name = "mycollector"
  servers = [
    "http://my.service.com/_stats"
  ]
  # HTTP method to use (case-sensitive)
  method = "GET"
  tag_keys = ["service"]
```

which responds with the following JSON:

```json
{
  "service": "service01",
  "a": 0.5,
  "b": {
    "c": "some text",
    "d": 0.1,
    "e": 5
  }
}
```

The collected metrics will be:
```
httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
```

# Example 2, Multiple Services:

There is also the option to collect JSON from multiple services; here is an example doing that.

```
[[inputs.httpjson]]
  name = "mycollector1"
  servers = [
    "http://my.service1.com/_stats"
  ]
  # HTTP method to use (case-sensitive)
  method = "GET"

[[inputs.httpjson]]
  name = "mycollector2"
  servers = [
    "http://service.net/json/stats"
  ]
  # HTTP method to use (case-sensitive)
  method = "POST"
```

The services respond with the following JSON:

mycollector1:
```json
{
  "a": 0.5,
@@ -81,30 +130,45 @@ Given the following response body:
    "c": "some text",
    "d": 0.1,
    "e": 5
  },
  "service": "service01"
}
}
```
The following metric is produced:

`httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001`
mycollector2:
```json
{
  "load": 100,
  "users": 1335
}
```

Note that only numerical values are extracted and the type is float.
The collected metrics will be:

If `tag_keys` is included in the configuration:
```
httpjson_mycollector1_a,server='http://my.service.com/_stats' value=0.5
httpjson_mycollector1_b_d,server='http://my.service.com/_stats' value=0.1
httpjson_mycollector1_b_e,server='http://my.service.com/_stats' value=5

```toml
httpjson_mycollector2_load,server='http://service.net/json/stats' value=100
httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335
```

# Example 3, Multiple Metrics in Response:

The response JSON can be treated as an array of data points that are all parsed with the same configuration.

```
[[inputs.httpjson]]
  name = "mycollector"
  servers = [
    "http://my.service.com/_stats"
  ]
  # HTTP method to use (case-sensitive)
  method = "GET"
  tag_keys = ["service"]
```

Then the `service` tag will also be added:

`httpjson,server=http://localhost:9999/stats/,service=service01 b_d=0.1,a=0.5,b_e=5,response_time=0.001`

**Array Output:**

If the service returns an array of objects, one metric is created for each object:
which responds with the following JSON:

```json
[
@@ -129,5 +193,12 @@ If the service returns an array of objects, one metric is created for each ob
]
```

`httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003`
`httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003`
The collected metrics will be:
```
httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
httpjson_mycollector_a,service='service02',server='http://my.service.com/_stats' value=0.6
httpjson_mycollector_b_d,service='service02',server='http://my.service.com/_stats' value=0.2
httpjson_mycollector_b_e,service='service02',server='http://my.service.com/_stats' value=6
```

@@ -1,6 +1,7 @@
package httpjson

import (
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
@@ -11,8 +12,8 @@ import (

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/registry/inputs"
)

// HttpJson struct
@@ -72,10 +73,7 @@ var sampleConfig = `
  ## NOTE This plugin only reads numerical measurements, strings and booleans
  ## will be ignored.

  ## Name for the service being polled. Will be appended to the name of the
  ## measurement e.g. httpjson_webserver_stats
  ##
  ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
  ## a name for the service being polled
  name = "webserver_stats"

  ## URL of each server in the service's cluster
@@ -95,14 +93,12 @@ var sampleConfig = `
  #   "my_tag_2"
  # ]

  ## HTTP parameters (all values must be strings). For "GET" requests, data
  ## will be included in the query. For "POST" requests, data will be included
  ## in the request body as "x-www-form-urlencoded".
  # [inputs.httpjson.parameters]
  #   event_type = "cpu_spike"
  #   threshold = "0.75"
  ## HTTP parameters (all values must be strings)
  [inputs.httpjson.parameters]
    event_type = "cpu_spike"
    threshold = "0.75"

  ## HTTP Headers (all values must be strings)
  ## HTTP Header parameters (all values must be strings)
  # [inputs.httpjson.headers]
  #   X-Auth-Token = "my-xauth-token"
  #   apiVersion = "v1"
@@ -144,17 +140,31 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
		h.client.SetHTTPClient(client)
	}

	errorChannel := make(chan error, len(h.Servers))

	for _, server := range h.Servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			acc.AddError(h.gatherServer(acc, server))
			if err := h.gatherServer(acc, server); err != nil {
				errorChannel <- err
			}
		}(server)
	}

	wg.Wait()
	close(errorChannel)

	return nil
	// Get all errors and return them as one giant error
	errorStrings := []string{}
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}

// Gathers data from a particular server

@@ -210,7 +210,7 @@ func TestHttpJson200(t *testing.T) {

	for _, service := range httpjson {
		var acc testutil.Accumulator
		err := acc.GatherError(service.Gather)
		err := service.Gather(&acc)
		require.NoError(t, err)
		assert.Equal(t, 12, acc.NFields())
		// Set responsetime
@@ -245,7 +245,7 @@ func TestHttpJsonGET_URL(t *testing.T) {
	}

	var acc testutil.Accumulator
	err := acc.GatherError(a.Gather)
	err := a.Gather(&acc)
	require.NoError(t, err)

	// remove response_time from gathered fields because it's non-deterministic
@@ -318,7 +318,7 @@ func TestHttpJsonGET(t *testing.T) {
	}

	var acc testutil.Accumulator
	err := acc.GatherError(a.Gather)
	err := a.Gather(&acc)
	require.NoError(t, err)

	// remove response_time from gathered fields because it's non-deterministic
@@ -392,7 +392,7 @@ func TestHttpJsonPOST(t *testing.T) {
	}

	var acc testutil.Accumulator
	err := acc.GatherError(a.Gather)
	err := a.Gather(&acc)
	require.NoError(t, err)

	// remove response_time from gathered fields because it's non-deterministic
@@ -448,9 +448,9 @@ func TestHttpJson500(t *testing.T) {
	httpjson := genMockHttpJson(validJSON, 500)

	var acc testutil.Accumulator
	err := acc.GatherError(httpjson[0].Gather)
	err := httpjson[0].Gather(&acc)

	assert.Error(t, err)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}

@@ -460,9 +460,9 @@ func TestHttpJsonBadMethod(t *testing.T) {
	httpjson[0].Method = "NOT_A_REAL_METHOD"

	var acc testutil.Accumulator
	err := acc.GatherError(httpjson[0].Gather)
	err := httpjson[0].Gather(&acc)

	assert.Error(t, err)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}

@@ -471,9 +471,9 @@ func TestHttpJsonBadJson(t *testing.T) {
	httpjson := genMockHttpJson(invalidJSON, 200)

	var acc testutil.Accumulator
	err := acc.GatherError(httpjson[0].Gather)
	err := httpjson[0].Gather(&acc)

	assert.Error(t, err)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}

@@ -482,9 +482,9 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
	httpjson := genMockHttpJson(empty, 200)

	var acc testutil.Accumulator
	err := acc.GatherError(httpjson[0].Gather)
	err := httpjson[0].Gather(&acc)

	assert.Error(t, err)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}

@@ -495,7 +495,7 @@ func TestHttpJson200Tags(t *testing.T) {
	for _, service := range httpjson {
		if service.Name == "other_webapp" {
			var acc testutil.Accumulator
			err := acc.GatherError(service.Gather)
			err := service.Gather(&acc)
			// Set responsetime
			for _, p := range acc.Metrics {
				p.Fields["response_time"] = 1.0
@@ -533,7 +533,7 @@ func TestHttpJsonArray200Tags(t *testing.T) {
	for _, service := range httpjson {
		if service.Name == "other_webapp" {
			var acc testutil.Accumulator
			err := acc.GatherError(service.Gather)
			err := service.Gather(&acc)
			// Set responsetime
			for _, p := range acc.Metrics {
				p.Fields["response_time"] = 1.0

@@ -5,12 +5,13 @@ import (
	"errors"
	"fmt"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/registry/inputs"
)

type InfluxDB struct {
@@ -56,20 +57,34 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
		}
	}

	errorChannel := make(chan error, len(i.URLs))

	var wg sync.WaitGroup
	for _, u := range i.URLs {
		wg.Add(1)
		go func(url string) {
			defer wg.Done()
			if err := i.gatherURL(acc, url); err != nil {
				acc.AddError(fmt.Errorf("[url=%s]: %s", url, err))
				errorChannel <- fmt.Errorf("[url=%s]: %s", url, err)
			}
		}(u)
	}

	wg.Wait()
	close(errorChannel)

	return nil
	// If there weren't any errors, we can return nil now.
	if len(errorChannel) == 0 {
		return nil
	}

	// There were errors, so join them all together as one big error.
	errorStrings := make([]string, 0, len(errorChannel))
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	return errors.New(strings.Join(errorStrings, "\n"))
}

type point struct {

@@ -25,7 +25,7 @@ func TestBasic(t *testing.T) {
	}

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(plugin.Gather))
	require.NoError(t, plugin.Gather(&acc))

	require.Len(t, acc.Metrics, 3)
	fields := map[string]interface{}{
@@ -72,7 +72,7 @@ func TestInfluxDB(t *testing.T) {
	}

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(plugin.Gather))
	require.NoError(t, plugin.Gather(&acc))

	require.Len(t, acc.Metrics, 34)

@@ -132,7 +132,7 @@ func TestInfluxDB2(t *testing.T) {
	}

	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(plugin.Gather))
	require.NoError(t, plugin.Gather(&acc))

	require.Len(t, acc.Metrics, 34)

@@ -157,7 +157,7 @@ func TestErrorHandling(t *testing.T) {
	}

	var acc testutil.Accumulator
	require.Error(t, acc.GatherError(plugin.Gather))
	require.Error(t, plugin.Gather(&acc))
}

func TestErrorHandling404(t *testing.T) {
@@ -175,7 +175,7 @@ func TestErrorHandling404(t *testing.T) {
	}

	var acc testutil.Accumulator
	require.Error(t, acc.GatherError(plugin.Gather))
	require.Error(t, plugin.Gather(&acc))
}

const basicJSON = `

@@ -80,4 +80,4 @@ internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i
internal_gather,input=internal,host=tyrion metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000
internal_gather,input=http_listener,host=tyrion metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000
internal_http_listener,address=:8186,host=tyrion queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000
```
```

@@ -4,7 +4,7 @@ import (
	"runtime"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/registry/inputs"
	"github.com/influxdata/telegraf/selfstat"
)

@@ -48,7 +48,7 @@ func (s *Self) Gather(acc telegraf.Accumulator) error {
		"heap_idle_bytes":     m.HeapIdle,     // bytes in idle spans
		"heap_in_use_bytes":   m.HeapInuse,    // bytes in non-idle span
		"heap_released_bytes": m.HeapReleased, // bytes released to the OS
		"heap_objects":        m.HeapObjects,  // total number of allocated objects
		"heap_objects_bytes":  m.HeapObjects,  // total number of allocated objects
		"num_gc":              m.NumGC,
	}
	acc.AddFields("internal_memstats", fields, map[string]string{})
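For context, every value in this hunk comes from Go's `runtime.MemStats`; note that `m.HeapObjects` is an object count, not a byte count, whichever field name it is reported under. A standalone sketch of gathering the same numbers:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	// The same counters the internal plugin reports as internal_memstats.
	fields := map[string]interface{}{
		"heap_idle_bytes":     m.HeapIdle,     // bytes in idle spans
		"heap_in_use_bytes":   m.HeapInuse,    // bytes in non-idle spans
		"heap_released_bytes": m.HeapReleased, // bytes released to the OS
		"heap_objects":        m.HeapObjects,  // count of allocated objects, not bytes
		"num_gc":              m.NumGC,
	}
	fmt.Println(fields)
}
```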
Some files were not shown because too many files have changed in this diff.