Compare commits
63 Commits
release-1.
...
kube-state
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
54ac4d70c9 | ||
|
|
90a38bd125 | ||
|
|
d017718033 | ||
|
|
8ff50e4327 | ||
|
|
4ec7999186 | ||
|
|
3457c98eb1 | ||
|
|
e7ff7d506b | ||
|
|
cdc15205d8 | ||
|
|
73eaa057d1 | ||
|
|
9c85c05fcb | ||
|
|
23523ffd10 | ||
|
|
523d761f34 | ||
|
|
3f28add025 | ||
|
|
ee6e4b0afd | ||
|
|
16454e25ba | ||
|
|
2a1feb6db9 | ||
|
|
61e197d254 | ||
|
|
1bd41ef3ce | ||
|
|
d7c756e9ff | ||
|
|
39206677f8 | ||
|
|
b66eb2fec7 | ||
|
|
3ad10283ef | ||
|
|
84e9a5c97e | ||
|
|
c98b58dacc | ||
|
|
98d86df797 | ||
|
|
4e9e57e210 | ||
|
|
7781507c01 | ||
|
|
8482c40a91 | ||
|
|
0dda9b8319 | ||
|
|
4e69d10ff7 | ||
|
|
f689463e8e | ||
|
|
f217d12de5 | ||
|
|
886795063e | ||
|
|
30dc95fa78 | ||
|
|
40fac0a9b4 | ||
|
|
36df4c5ae5 | ||
|
|
70ffed3a4d | ||
|
|
bf59bcf721 | ||
|
|
a789f97feb | ||
|
|
d2e00a3205 | ||
|
|
daddd8bbac | ||
|
|
d16530677d | ||
|
|
1ea18ffd0a | ||
|
|
dd2223ae1c | ||
|
|
90eebd88af | ||
|
|
d2e729dfaf | ||
|
|
f64d612294 | ||
|
|
76ec90e66d | ||
|
|
1690f36b09 | ||
|
|
87f711a19a | ||
|
|
58895d6b03 | ||
|
|
cd9ad77038 | ||
|
|
8563238059 | ||
|
|
11335f5fee | ||
|
|
acba20af1a | ||
|
|
229b6bd944 | ||
|
|
7fe6e2f5ae | ||
|
|
a4214abfc4 | ||
|
|
5f0cbd1255 | ||
|
|
3ef4dff4ec | ||
|
|
dfe7b5eec2 | ||
|
|
92a8f795f5 | ||
|
|
b1d77ade55 |
@@ -2,15 +2,12 @@
|
||||
defaults:
|
||||
defaults: &defaults
|
||||
working_directory: '/go/src/github.com/influxdata/telegraf'
|
||||
go-1_8: &go-1_8
|
||||
docker:
|
||||
- image: 'circleci/golang:1.8.7'
|
||||
go-1_9: &go-1_9
|
||||
docker:
|
||||
- image: 'circleci/golang:1.9.5'
|
||||
- image: 'circleci/golang:1.9.7'
|
||||
go-1_10: &go-1_10
|
||||
docker:
|
||||
- image: 'circleci/golang:1.10.1'
|
||||
- image: 'circleci/golang:1.10.3'
|
||||
|
||||
version: 2
|
||||
jobs:
|
||||
@@ -18,17 +15,18 @@ jobs:
|
||||
<<: [ *defaults, *go-1_10 ]
|
||||
steps:
|
||||
- checkout
|
||||
- restore_cache:
|
||||
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
|
||||
- run: 'make deps'
|
||||
- save_cache:
|
||||
name: 'vendored deps'
|
||||
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
|
||||
paths:
|
||||
- './vendor'
|
||||
- persist_to_workspace:
|
||||
root: '/go/src'
|
||||
paths:
|
||||
- '*'
|
||||
test-go-1.8:
|
||||
<<: [ *defaults, *go-1_8 ]
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: '/go/src'
|
||||
- run: 'make test-ci'
|
||||
test-go-1.9:
|
||||
<<: [ *defaults, *go-1_9 ]
|
||||
steps:
|
||||
@@ -66,9 +64,6 @@ workflows:
|
||||
build_and_release:
|
||||
jobs:
|
||||
- 'deps'
|
||||
- 'test-go-1.8':
|
||||
requires:
|
||||
- 'deps'
|
||||
- 'test-go-1.9':
|
||||
requires:
|
||||
- 'deps'
|
||||
@@ -77,15 +72,11 @@ workflows:
|
||||
- 'deps'
|
||||
- 'release':
|
||||
requires:
|
||||
- 'test-go-1.8'
|
||||
- 'test-go-1.9'
|
||||
- 'test-go-1.10'
|
||||
nightly:
|
||||
jobs:
|
||||
- 'deps'
|
||||
- 'test-go-1.8':
|
||||
requires:
|
||||
- 'deps'
|
||||
- 'test-go-1.9':
|
||||
requires:
|
||||
- 'deps'
|
||||
@@ -94,7 +85,6 @@ workflows:
|
||||
- 'deps'
|
||||
- 'nightly':
|
||||
requires:
|
||||
- 'test-go-1.8'
|
||||
- 'test-go-1.9'
|
||||
- 'test-go-1.10'
|
||||
triggers:
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -2,3 +2,4 @@
|
||||
/telegraf
|
||||
/telegraf.exe
|
||||
/telegraf.gz
|
||||
/vendor
|
||||
|
||||
39
CHANGELOG.md
39
CHANGELOG.md
@@ -1,4 +1,37 @@
|
||||
## v1.7 [unreleased]
|
||||
## v1.8 [unreleased]
|
||||
|
||||
### Release Notes
|
||||
|
||||
### New Inputs
|
||||
|
||||
- [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu
|
||||
|
||||
### New Aggregators
|
||||
|
||||
- [valuecounter](./plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212
|
||||
|
||||
### Features
|
||||
|
||||
- [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input.
|
||||
- [#4160](https://github.com/influxdata/telegraf/pull/4160): Add tengine input plugin.
|
||||
- [#4262](https://github.com/influxdata/telegraf/pull/4262): Add power draw field to nvidia_smi plugin.
|
||||
- [#4271](https://github.com/influxdata/telegraf/pull/4271): Add support for solr 7 to the solr input.
|
||||
- [#4281](https://github.com/influxdata/telegraf/pull/4281): Add owner tag on partitions in burrow input.
|
||||
- [#4259](https://github.com/influxdata/telegraf/pull/4259): Add container status tag to docker input.
|
||||
- [#3523](https://github.com/influxdata/telegraf/pull/3523): Add valuecounter aggregator plugin.
|
||||
- [#4307](https://github.com/influxdata/telegraf/pull/4307): Add new measurement with results of pgrep lookup to procstat input.
|
||||
- [#4311](https://github.com/influxdata/telegraf/pull/4311): Add support for comma in logparser timestamp format.
|
||||
- [#4292](https://github.com/influxdata/telegraf/pull/4292): Add path tag to tail input plugin.
|
||||
|
||||
## v1.7.1 [unreleased]
|
||||
|
||||
### Bugfixes
|
||||
|
||||
- [#4277](https://github.com/influxdata/telegraf/pull/4277): Treat sigterm as a clean shutdown signal.
|
||||
- [#4284](https://github.com/influxdata/telegraf/pull/4284): Fix selection of tags under nested objects in the JSON parser.
|
||||
- [#4135](https://github.com/influxdata/telegraf/issues/4135): Fix postfix input handling multi-level queues.
|
||||
|
||||
## v1.7 [2018-06-12]
|
||||
|
||||
### Release Notes
|
||||
|
||||
@@ -73,8 +106,10 @@
|
||||
- [#2879](https://github.com/influxdata/telegraf/issues/2879): Fix wildcards and multi instance processes in win_perf_counters.
|
||||
- [#2468](https://github.com/influxdata/telegraf/issues/2468): Fix crash on 32-bit Windows in win_perf_counters.
|
||||
- [#4198](https://github.com/influxdata/telegraf/issues/4198): Fix win_perf_counters not collecting at every interval.
|
||||
- [#4227](https://github.com/influxdata/telegraf/issues/4227): Use same flags for all BSD family ping variants.
|
||||
- [#4266](https://github.com/influxdata/telegraf/issues/4266): Remove tags with empty values from Wavefront output.
|
||||
|
||||
## v1.6.4 [unreleased]
|
||||
## v1.6.4 [2018-06-05]
|
||||
|
||||
### Bugfixes
|
||||
|
||||
|
||||
@@ -30,9 +30,9 @@ which can be found [on our website](http://influxdb.com/community/cla.html)
|
||||
|
||||
Assuming you can already build the project, run these in the telegraf directory:
|
||||
|
||||
1. `go get github.com/sparrc/gdm`
|
||||
1. `gdm restore`
|
||||
1. `GOOS=linux gdm save`
|
||||
1. `go get -u github.com/golang/dep/cmd/dep`
|
||||
2. `dep ensure`
|
||||
3. `dep ensure -add github.com/[dependency]/[new-package]`
|
||||
|
||||
## Input Plugins
|
||||
|
||||
|
||||
101
Godeps
101
Godeps
@@ -1,101 +0,0 @@
|
||||
code.cloudfoundry.org/clock e9dc86bbf0e5bbe6bf7ff5a6f71e048959b61f71
|
||||
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
|
||||
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
|
||||
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
|
||||
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
|
||||
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
|
||||
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
|
||||
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
|
||||
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
|
||||
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
|
||||
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
|
||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
|
||||
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
|
||||
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
|
||||
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
||||
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
|
||||
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
|
||||
github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
|
||||
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
|
||||
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
||||
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
|
||||
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
|
||||
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
|
||||
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
|
||||
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
|
||||
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
|
||||
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
|
||||
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996
|
||||
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
|
||||
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
||||
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
||||
github.com/hashicorp/consul 5174058f0d2bda63fa5198ab96c33d9a909c58ed
|
||||
github.com/influxdata/go-syslog 84f3b60009444d298f97454feb1f20cf91d1fa6e
|
||||
github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
|
||||
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
|
||||
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
|
||||
github.com/fsnotify/fsnotify c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
|
||||
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
|
||||
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
||||
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
|
||||
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
|
||||
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
|
||||
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||
github.com/Microsoft/ApplicationInsights-Go 3612f58550c1de70f1a110c78c830e55f29aa65d
|
||||
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
|
||||
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
|
||||
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
|
||||
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
|
||||
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
||||
github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
|
||||
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
|
||||
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
|
||||
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
|
||||
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
|
||||
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
|
||||
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
|
||||
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
|
||||
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
|
||||
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
|
||||
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
|
||||
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
|
||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
|
||||
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
|
||||
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
||||
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
|
||||
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
||||
github.com/shirou/gopsutil c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3
|
||||
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
||||
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
|
||||
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
|
||||
github.com/soniah/gosnmp f15472a4cd6f6ea7929e4c7d9f163c49f059924f
|
||||
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
|
||||
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
|
||||
github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
|
||||
github.com/stretchr/testify 12b6f73e6084dad08a7c6e575284b177ecafbc71
|
||||
github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
|
||||
github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
|
||||
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
|
||||
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
|
||||
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
|
||||
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
|
||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
||||
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
|
||||
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
|
||||
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
||||
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
|
||||
google.golang.org/genproto 11c7f9e547da6db876260ce49ea7536985904c9b
|
||||
google.golang.org/grpc de2209a968d48e8970546c8a710189f7461370f7
|
||||
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
|
||||
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
|
||||
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
|
||||
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
|
||||
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
|
||||
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
|
||||
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
|
||||
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
|
||||
1028
Gopkg.lock
generated
Normal file
1028
Gopkg.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
255
Gopkg.toml
Normal file
255
Gopkg.toml
Normal file
@@ -0,0 +1,255 @@
|
||||
[[constraint]]
|
||||
name = "collectd.org"
|
||||
version = "0.3.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/aerospike/aerospike-client-go"
|
||||
version = "^1.33.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/amir/raidman"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/apache/thrift"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
version = "1.14.8"
|
||||
# version = "1.8.39"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/bsm/sarama-cluster"
|
||||
version = "2.1.13"
|
||||
# version = "2.1.10"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/couchbase/go-couchbase"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
version = "3.2.0"
|
||||
# version = "3.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/docker/docker"
|
||||
version = "~17.03.2-ce"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/docker/go-connections"
|
||||
version = "0.3.0"
|
||||
# version = "0.2.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/eclipse/paho.mqtt.golang"
|
||||
version = "~1.1.1"
|
||||
# version = "1.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-sql-driver/mysql"
|
||||
version = "1.4.0"
|
||||
# version = "1.3.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gobwas/glob"
|
||||
version = "0.2.3"
|
||||
# version = "0.2.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/golang/protobuf"
|
||||
version = "1.1.0"
|
||||
# version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/google/go-cmp"
|
||||
version = "0.2.0"
|
||||
# version = "0.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gorilla/mux"
|
||||
version = "1.6.2"
|
||||
# version = "1.6.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-redis/redis"
|
||||
version = "6.12.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/hashicorp/consul"
|
||||
version = "1.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/influxdata/go-syslog"
|
||||
version = "1.0.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/influxdata/tail"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/influxdata/toml"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/influxdata/wlog"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/jackc/pgx"
|
||||
version = "3.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/kardianos/service"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/kballard/go-shellquote"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
version = "1.0.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Microsoft/ApplicationInsights-Go"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/miekg/dns"
|
||||
version = "1.0.8"
|
||||
# version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/multiplay/go-ts3"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/nats-io/gnatsd"
|
||||
version = "1.1.0"
|
||||
# version = "1.0.4"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/nats-io/go-nats"
|
||||
version = "1.5.0"
|
||||
# version = "1.3.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/nsqio/go-nsq"
|
||||
version = "1.0.7"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/openzipkin/zipkin-go-opentracing"
|
||||
version = "0.3.4"
|
||||
# version = "0.3.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_model"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/common"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
version = "1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/shirou/gopsutil"
|
||||
version = "2.18.05"
|
||||
# version = "2.18.04"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Shopify/sarama"
|
||||
version = "1.17.0"
|
||||
# version = "1.15.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/soniah/gosnmp"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/StackExchange/wmi"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/streadway/amqp"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "1.2.2"
|
||||
# version = "1.2.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/tidwall/gjson"
|
||||
version = "1.1.1"
|
||||
# version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/vjeantet/grok"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/wvanbergen/kafka"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/zensqlmonitor/go-mssqldb"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/net"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/sys"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/grpc"
|
||||
version = "1.12.2"
|
||||
# version = "1.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/gorethink/gorethink.v3"
|
||||
version = "3.0.5"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/ldap.v2"
|
||||
version = "2.5.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/mgo.v2"
|
||||
branch = "v2"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/olivere/elastic.v5"
|
||||
version = "^5.0.69"
|
||||
# version = "^6.1.23"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
version = "^2.2.1"
|
||||
|
||||
[[override]]
|
||||
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/api"
|
||||
version = "kubernetes-1.11.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apimachinery"
|
||||
version = "kubernetes-1.11.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kubernetes"
|
||||
version = "v1.11.0"
|
||||
16
Makefile
16
Makefile
@@ -23,8 +23,8 @@ all:
|
||||
|
||||
deps:
|
||||
go get -u github.com/golang/lint/golint
|
||||
go get github.com/sparrc/gdm
|
||||
gdm restore --parallel=false
|
||||
go get -u github.com/golang/dep/cmd/dep
|
||||
dep ensure
|
||||
|
||||
telegraf:
|
||||
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
|
||||
@@ -34,7 +34,7 @@ go-install:
|
||||
|
||||
install: telegraf
|
||||
mkdir -p $(DESTDIR)$(PREFIX)/bin/
|
||||
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
|
||||
cp telegraf $(DESTDIR)$(PREFIX)/bin/
|
||||
|
||||
test:
|
||||
go test -short ./...
|
||||
@@ -54,11 +54,11 @@ fmtcheck:
|
||||
@echo '[INFO] done.'
|
||||
|
||||
test-windows:
|
||||
go test ./plugins/inputs/ping/...
|
||||
go test ./plugins/inputs/win_perf_counters/...
|
||||
go test ./plugins/inputs/win_services/...
|
||||
go test ./plugins/inputs/procstat/...
|
||||
go test ./plugins/inputs/ntpq/...
|
||||
go test -short ./plugins/inputs/ping/...
|
||||
go test -short ./plugins/inputs/win_perf_counters/...
|
||||
go test -short ./plugins/inputs/win_services/...
|
||||
go test -short ./plugins/inputs/procstat/...
|
||||
go test -short ./plugins/inputs/ntpq/...
|
||||
|
||||
# vet runs the Go source code static analysis tool `vet` to find
|
||||
# any common errors.
|
||||
|
||||
@@ -40,9 +40,9 @@ Ansible role: https://github.com/rossmcdonald/telegraf
|
||||
|
||||
### From Source:
|
||||
|
||||
Telegraf requires golang version 1.8+, the Makefile requires GNU make.
|
||||
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.
|
||||
|
||||
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
|
||||
Dependencies are managed with [dep](https://github.com/golang/dep),
|
||||
which is installed by the Makefile if you don't have it already.
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install)
|
||||
@@ -213,6 +213,7 @@ configuration options.
|
||||
* [sql server](./plugins/inputs/sqlserver) (microsoft)
|
||||
* [syslog](./plugins/inputs/syslog)
|
||||
* [teamspeak](./plugins/inputs/teamspeak)
|
||||
* [tengine](./plugins/inputs/tengine)
|
||||
* [tomcat](./plugins/inputs/tomcat)
|
||||
* [twemproxy](./plugins/inputs/twemproxy)
|
||||
* [unbound](./plugins/inputs/unbound)
|
||||
@@ -281,6 +282,7 @@ formats may be used with input plugins supporting the `data_format` option:
|
||||
* [basicstats](./plugins/aggregators/basicstats)
|
||||
* [minmax](./plugins/aggregators/minmax)
|
||||
* [histogram](./plugins/aggregators/histogram)
|
||||
* [valuecounter](./plugins/aggregators/valuecounter)
|
||||
|
||||
## Output Plugins
|
||||
|
||||
|
||||
@@ -362,24 +362,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
||||
metricC := make(chan telegraf.Metric, 100)
|
||||
aggC := make(chan telegraf.Metric, 100)
|
||||
|
||||
// Start all ServicePlugins
|
||||
for _, input := range a.Config.Inputs {
|
||||
input.SetDefaultTags(a.Config.Tags)
|
||||
switch p := input.Input.(type) {
|
||||
case telegraf.ServiceInput:
|
||||
acc := NewAccumulator(input, metricC)
|
||||
// Service input plugins should set their own precision of their
|
||||
// metrics.
|
||||
acc.SetPrecision(time.Nanosecond, 0)
|
||||
if err := p.Start(acc); err != nil {
|
||||
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
|
||||
input.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
defer p.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// Round collection to nearest interval by sleeping
|
||||
if a.Config.Agent.RoundInterval {
|
||||
i := int64(a.Config.Agent.Interval.Duration)
|
||||
@@ -419,6 +401,25 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
||||
}(input, interval)
|
||||
}
|
||||
|
||||
// Start all ServicePlugins inputs after all other
|
||||
// plugins are loaded so that no metrics get dropped
|
||||
for _, input := range a.Config.Inputs {
|
||||
input.SetDefaultTags(a.Config.Tags)
|
||||
switch p := input.Input.(type) {
|
||||
case telegraf.ServiceInput:
|
||||
acc := NewAccumulator(input, metricC)
|
||||
// Service input plugins should set their own precision of their
|
||||
// metrics.
|
||||
acc.SetPrecision(time.Nanosecond, 0)
|
||||
if err := p.Start(acc); err != nil {
|
||||
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
|
||||
input.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
defer p.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
a.Close()
|
||||
return nil
|
||||
|
||||
@@ -21,6 +21,7 @@ install:
|
||||
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
|
||||
- go version
|
||||
- go env
|
||||
- git config --system core.longpaths true
|
||||
|
||||
build_script:
|
||||
- cmd: C:\GnuWin32\bin\make deps
|
||||
|
||||
@@ -58,7 +58,7 @@ var fService = flag.String("service", "",
|
||||
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
|
||||
|
||||
var (
|
||||
nextVersion = "1.7.0"
|
||||
nextVersion = "1.8.0"
|
||||
version string
|
||||
commit string
|
||||
branch string
|
||||
@@ -147,11 +147,11 @@ func reloadLoop(
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
signals := make(chan os.Signal)
|
||||
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
|
||||
signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM)
|
||||
go func() {
|
||||
select {
|
||||
case sig := <-signals:
|
||||
if sig == os.Interrupt {
|
||||
if sig == os.Interrupt || sig == syscall.SIGTERM {
|
||||
close(shutdown)
|
||||
}
|
||||
if sig == syscall.SIGHUP {
|
||||
|
||||
@@ -1077,7 +1077,7 @@
|
||||
# mount_points = ["/"]
|
||||
|
||||
## Ignore mount points by filesystem type.
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
|
||||
@@ -242,7 +242,7 @@
|
||||
#
|
||||
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
||||
# ## present on /run, /var/run, /dev/shm or /dev).
|
||||
# # ignore_fs = ["tmpfs", "devtmpfs"]
|
||||
# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||
|
||||
|
||||
# # Read metrics about disk IO by device
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
@@ -193,3 +194,15 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Exit status takes the error from exec.Command
|
||||
// and returns the exit status and true
|
||||
// if error is not exit status, will return 0 and false
|
||||
func ExitStatus(err error) (int, bool) {
|
||||
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
|
||||
return status.ExitStatus(), true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
@@ -153,6 +154,7 @@ func (r *RunningAggregator) Run(
|
||||
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
|
||||
// the metric is outside the current aggregation period, so
|
||||
// skip it.
|
||||
log.Printf("D! aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name())
|
||||
continue
|
||||
}
|
||||
r.add(m)
|
||||
|
||||
@@ -17,7 +17,7 @@ type ClientConfig struct {
|
||||
// Deprecated in 1.7; use TLS variables above
|
||||
SSLCA string `toml:"ssl_ca"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
SSLKey string `toml:"ssl_ca"`
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
}
|
||||
|
||||
// ServerConfig represents the standard server TLS config.
|
||||
|
||||
@@ -4,4 +4,5 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
|
||||
_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
|
||||
)
|
||||
|
||||
73
plugins/aggregators/valuecounter/README.md
Normal file
73
plugins/aggregators/valuecounter/README.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# ValueCounter Aggregator Plugin
|
||||
|
||||
The valuecounter plugin counts the occurrence of values in fields and emits the
|
||||
counter once every 'period' seconds.
|
||||
|
||||
A use case for the valuecounter plugin is when you are processing a HTTP access
|
||||
log (with the logparser input) and want to count the HTTP status codes.
|
||||
|
||||
The fields which will be counted must be configured with the `fields`
|
||||
configuration directive. When no `fields` is provided the plugin will not count
|
||||
any fields. The results are emitted in fields in the format:
|
||||
`originalfieldname_fieldvalue = count`.
|
||||
|
||||
Valuecounter only works on fields of the type int, bool or string. Float fields
|
||||
are being dropped to prevent the creating of too many fields.
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
[[aggregators.valuecounter]]
|
||||
## General Aggregator Arguments:
|
||||
## The period on which to flush & clear the aggregator.
|
||||
period = "30s"
|
||||
## If true, the original metric will be dropped by the
|
||||
## aggregator and will not get sent to the output plugins.
|
||||
drop_original = false
|
||||
## The fields for which the values will be counted
|
||||
fields = ["status"]
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
- measurement1
|
||||
- field_value1
|
||||
- field_value2
|
||||
|
||||
### Tags:
|
||||
|
||||
No tags are applied by this aggregator.
|
||||
|
||||
### Example Output:
|
||||
|
||||
Example for parsing a HTTP access log.
|
||||
|
||||
telegraf.conf:
|
||||
```
|
||||
[[inputs.logparser]]
|
||||
files = ["/tmp/tst.log"]
|
||||
[inputs.logparser.grok]
|
||||
patterns = ['%{DATA:url:tag} %{NUMBER:response:string}']
|
||||
measurement = "access"
|
||||
|
||||
[[aggregators.valuecounter]]
|
||||
namepass = ["access"]
|
||||
fields = ["response"]
|
||||
```
|
||||
|
||||
/tmp/tst.log
|
||||
```
|
||||
/some/path 200
|
||||
/some/path 401
|
||||
/some/path 200
|
||||
```
|
||||
|
||||
```
|
||||
$ telegraf --config telegraf.conf --quiet
|
||||
|
||||
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011
|
||||
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="401" 1511948755991522282
|
||||
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991531697
|
||||
|
||||
access,path=/tmp/tst.log,host=localhost.localdomain,url=/some/path response_200=2i,response_401=1i 1511948761000000000
|
||||
```
|
||||
108
plugins/aggregators/valuecounter/valuecounter.go
Normal file
108
plugins/aggregators/valuecounter/valuecounter.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package valuecounter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/aggregators"
|
||||
)
|
||||
|
||||
// aggregate holds the counting state for one metric series: the metric
// name, its tag set, and a running count per observed "<field>_<value>"
// combination.
type aggregate struct {
	name       string
	tags       map[string]string
	fieldCount map[string]int
}
|
||||
|
||||
// ValueCounter is an aggregation plugin that counts how often each value
// of the configured fields occurs.
type ValueCounter struct {
	// cache maps a metric series HashID to its accumulated counts.
	cache map[uint64]aggregate
	// Fields lists the field names whose values are counted (set from config).
	Fields []string
}
|
||||
|
||||
// NewValueCounter create a new aggregation plugin which counts the occurances
|
||||
// of fields and emits the count.
|
||||
func NewValueCounter() telegraf.Aggregator {
|
||||
vc := &ValueCounter{}
|
||||
vc.Reset()
|
||||
return vc
|
||||
}
|
||||
|
||||
// sampleConfig is the TOML snippet printed by `telegraf config` for this
// aggregator.
var sampleConfig = `
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
  ## The fields for which the values will be counted
  fields = []
`
|
||||
|
||||
// SampleConfig generates a sample config for the ValueCounter plugin.
func (vc *ValueCounter) SampleConfig() string {
	return sampleConfig
}
|
||||
|
||||
// Description returns the description of the ValueCounter plugin
|
||||
func (vc *ValueCounter) Description() string {
|
||||
return "Count the occurance of values in fields."
|
||||
}
|
||||
|
||||
// Add is run on every metric which passes the plugin
|
||||
func (vc *ValueCounter) Add(in telegraf.Metric) {
|
||||
id := in.HashID()
|
||||
|
||||
// Check if the cache already has an entry for this metric, if not create it
|
||||
if _, ok := vc.cache[id]; !ok {
|
||||
a := aggregate{
|
||||
name: in.Name(),
|
||||
tags: in.Tags(),
|
||||
fieldCount: make(map[string]int),
|
||||
}
|
||||
vc.cache[id] = a
|
||||
}
|
||||
|
||||
// Check if this metric has fields which we need to count, if so increment
|
||||
// the count.
|
||||
for fk, fv := range in.Fields() {
|
||||
for _, cf := range vc.Fields {
|
||||
if fk == cf {
|
||||
// Do not process float types to prevent memory from blowing up
|
||||
switch fv.(type) {
|
||||
default:
|
||||
log.Printf("I! Valuecounter: Unsupported field type. " +
|
||||
"Must be an int, string or bool. Ignoring.")
|
||||
continue
|
||||
case uint64, int64, string, bool:
|
||||
}
|
||||
fn := fmt.Sprintf("%v_%v", fk, fv)
|
||||
vc.cache[id].fieldCount[fn]++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Push emits the counters
|
||||
func (vc *ValueCounter) Push(acc telegraf.Accumulator) {
|
||||
for _, agg := range vc.cache {
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
for field, count := range agg.fieldCount {
|
||||
fields[field] = count
|
||||
}
|
||||
|
||||
acc.AddFields(agg.name, fields, agg.tags)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the cache, executed after each push
|
||||
func (vc *ValueCounter) Reset() {
|
||||
vc.cache = make(map[uint64]aggregate)
|
||||
}
|
||||
|
||||
func init() {
|
||||
aggregators.Add("valuecounter", func() telegraf.Aggregator {
|
||||
return NewValueCounter()
|
||||
})
|
||||
}
|
||||
126
plugins/aggregators/valuecounter/valuecounter_test.go
Normal file
126
plugins/aggregators/valuecounter/valuecounter_test.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package valuecounter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
// Create a valuecounter with config
|
||||
func NewTestValueCounter(fields []string) telegraf.Aggregator {
|
||||
vc := &ValueCounter{
|
||||
Fields: fields,
|
||||
}
|
||||
vc.Reset()
|
||||
|
||||
return vc
|
||||
}
|
||||
|
||||
// m1 is a fixture metric with an integer field, a float field (unsupported
// by the aggregator) and a string field.
var m1, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"status":    200,
		"somefield": 20.1,
		"foobar":    "bar",
	},
	time.Now(),
)
|
||||
|
||||
// m2 is a fixture metric sharing m1's name and tags, with string and bool
// fields, so it hashes to the same series as m1.
var m2, _ = metric.New("m1",
	map[string]string{"foo": "bar"},
	map[string]interface{}{
		"status":    "OK",
		"ignoreme":  "string",
		"andme":     true,
		"boolfield": false,
	},
	time.Now(),
)
|
||||
|
||||
func BenchmarkApply(b *testing.B) {
|
||||
vc := NewTestValueCounter([]string{"status"})
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
}
|
||||
}
|
||||
|
||||
// Test basic functionality
|
||||
func TestBasic(t *testing.T) {
|
||||
vc := NewTestValueCounter([]string{"status"})
|
||||
acc := testutil.Accumulator{}
|
||||
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
vc.Add(m1)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"status_200": 2,
|
||||
"status_OK": 1,
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test with multiple fields to count
|
||||
func TestMultipleFields(t *testing.T) {
|
||||
vc := NewTestValueCounter([]string{"status", "somefield", "boolfield"})
|
||||
acc := testutil.Accumulator{}
|
||||
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
vc.Add(m2)
|
||||
vc.Add(m1)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"status_200": 2,
|
||||
"status_OK": 2,
|
||||
"boolfield_false": 2,
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
|
||||
// Test with a reset between two runs
|
||||
func TestWithReset(t *testing.T) {
|
||||
vc := NewTestValueCounter([]string{"status"})
|
||||
acc := testutil.Accumulator{}
|
||||
|
||||
vc.Add(m1)
|
||||
vc.Add(m1)
|
||||
vc.Add(m2)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields := map[string]interface{}{
|
||||
"status_200": 2,
|
||||
"status_OK": 1,
|
||||
}
|
||||
expectedTags := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
|
||||
acc.ClearMetrics()
|
||||
vc.Reset()
|
||||
|
||||
vc.Add(m2)
|
||||
vc.Add(m2)
|
||||
vc.Add(m1)
|
||||
vc.Push(&acc)
|
||||
|
||||
expectedFields = map[string]interface{}{
|
||||
"status_200": 1,
|
||||
"status_OK": 2,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||
}
|
||||
@@ -48,6 +48,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kube_state"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
|
||||
@@ -103,6 +104,7 @@ import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tengine"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
||||
|
||||
@@ -92,6 +92,7 @@ Supported Burrow version: `1.x`
|
||||
- group (string)
|
||||
- topic (string)
|
||||
- partition (int)
|
||||
- owner (string)
|
||||
|
||||
* `burrow_topic`
|
||||
- cluster (string)
|
||||
|
||||
@@ -116,6 +116,7 @@ type (
|
||||
Start apiStatusResponseLagItem `json:"start"`
|
||||
End apiStatusResponseLagItem `json:"end"`
|
||||
CurrentLag int64 `json:"current_lag"`
|
||||
Owner string `json:"owner"`
|
||||
}
|
||||
|
||||
// response: lag field item
|
||||
@@ -447,6 +448,7 @@ func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc t
|
||||
"group": group,
|
||||
"topic": partition.Topic,
|
||||
"partition": strconv.FormatInt(int64(partition.Partition), 10),
|
||||
"owner": partition.Owner,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -129,9 +129,9 @@ func TestBurrowPartition(t *testing.T) {
|
||||
},
|
||||
}
|
||||
tags := []map[string]string{
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0"},
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1"},
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2"},
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0", "owner": "kafka1"},
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1", "owner": "kafka2"},
|
||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2", "owner": "kafka3"},
|
||||
}
|
||||
|
||||
require.Empty(t, acc.Errors)
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
{
|
||||
"topic": "topicA",
|
||||
"partition": 0,
|
||||
"owner": "kafka",
|
||||
"owner": "kafka1",
|
||||
"status": "OK",
|
||||
"start": {
|
||||
"offset": 431323195,
|
||||
@@ -28,7 +28,7 @@
|
||||
{
|
||||
"topic": "topicA",
|
||||
"partition": 1,
|
||||
"owner": "kafka",
|
||||
"owner": "kafka2",
|
||||
"status": "OK",
|
||||
"start": {
|
||||
"offset": 431322962,
|
||||
@@ -46,7 +46,7 @@
|
||||
{
|
||||
"topic": "topicA",
|
||||
"partition": 2,
|
||||
"owner": "kafka",
|
||||
"owner": "kafka3",
|
||||
"status": "OK",
|
||||
"start": {
|
||||
"offset": 428636563,
|
||||
|
||||
@@ -124,6 +124,7 @@ docker API.
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- fields:
|
||||
- total_pgmafault
|
||||
@@ -167,6 +168,7 @@ docker API.
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- cpu
|
||||
- fields:
|
||||
@@ -186,6 +188,7 @@ docker API.
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- network
|
||||
- fields:
|
||||
@@ -205,6 +208,7 @@ docker API.
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- device
|
||||
- fields:
|
||||
@@ -226,11 +230,27 @@ docker API.
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- fields:
|
||||
- health_status (string)
|
||||
- failing_streak (integer)
|
||||
|
||||
- docker_container_status
|
||||
- tags:
|
||||
- engine_host
|
||||
- server_version
|
||||
- container_image
|
||||
- container_name
|
||||
- container_status
|
||||
- container_version
|
||||
- fields:
|
||||
- oomkilled (boolean)
|
||||
- pid (integer)
|
||||
- exitcode (integer)
|
||||
- started_at (integer)
|
||||
- finished_at (integer)
|
||||
|
||||
- docker_swarm
|
||||
- tags:
|
||||
- service_id
|
||||
@@ -245,12 +265,12 @@ docker API.
|
||||
```
|
||||
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
|
||||
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
|
||||
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
|
||||
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
|
||||
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
|
||||
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
|
||||
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
|
||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
|
||||
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
|
||||
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
|
||||
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
|
||||
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
|
||||
```
|
||||
|
||||
@@ -435,6 +435,23 @@ func (d *Docker) gatherContainer(
|
||||
}
|
||||
}
|
||||
}
|
||||
if info.State != nil {
|
||||
tags["container_status"] = info.State.Status
|
||||
statefields := map[string]interface{}{
|
||||
"oomkilled": info.State.OOMKilled,
|
||||
"pid": info.State.Pid,
|
||||
"exitcode": info.State.ExitCode,
|
||||
}
|
||||
container_time, err := time.Parse(time.RFC3339, info.State.StartedAt)
|
||||
if err == nil && !container_time.IsZero() {
|
||||
statefields["started_at"] = container_time.UnixNano()
|
||||
}
|
||||
container_time, err = time.Parse(time.RFC3339, info.State.FinishedAt)
|
||||
if err == nil && !container_time.IsZero() {
|
||||
statefields["finished_at"] = container_time.UnixNano()
|
||||
}
|
||||
acc.AddFields("docker_container_status", statefields, tags, time.Now())
|
||||
}
|
||||
|
||||
if info.State.Health != nil {
|
||||
healthfields := map[string]interface{}{
|
||||
|
||||
@@ -653,6 +653,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
"container_status": "running",
|
||||
},
|
||||
)
|
||||
acc.AssertContainsTaggedFields(t,
|
||||
@@ -676,6 +677,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
||||
"label1": "test_value_1",
|
||||
"label2": "test_value_2",
|
||||
"server_version": "17.09.0-ce",
|
||||
"container_status": "running",
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -484,6 +484,12 @@ var containerInspect = types.ContainerJSON{
|
||||
FailingStreak: 1,
|
||||
Status: "Unhealthy",
|
||||
},
|
||||
Status: "running",
|
||||
OOMKilled: false,
|
||||
Pid: 1234,
|
||||
ExitCode: 0,
|
||||
StartedAt: "2018-06-14T05:48:53.266176036Z",
|
||||
FinishedAt: "0001-01-01T00:00:00Z",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
77
plugins/inputs/kube_state/README.md
Normal file
77
plugins/inputs/kube_state/README.md
Normal file
@@ -0,0 +1,77 @@
|
||||
### Line Protocol
|
||||
|
||||
### PODs
|
||||
|
||||
#### kube_pod
|
||||
namespace =
|
||||
name =
|
||||
host_ip =
|
||||
pod_ip =
|
||||
node =
|
||||
created_by_kind =
|
||||
created_by_name =
|
||||
owner_kind =
|
||||
owner_name =
|
||||
owner_is_controller = "true"
|
||||
label_1 = ""
|
||||
label_2 = ""
|
||||
created = ""
|
||||
|
||||
|
||||
start_time =
|
||||
completion_time =
|
||||
owner =
|
||||
label_* =
|
||||
created =
|
||||
|
||||
status_scheduled_time
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#### kube_pod_status_scheduled_time
|
||||
|
||||
#### kube_pod_status_phase
|
||||
|
||||
#### kube_pod_status_ready
|
||||
|
||||
#### kube_pod_status_scheduled
|
||||
|
||||
#### kube_pod_container_info
|
||||
namespace=
|
||||
pod_name=
|
||||
container_name=
|
||||
|
||||
|
||||
|
||||
#### kube_pod_container_status_waiting
|
||||
|
||||
#### kube_pod_container_status_waiting_reason
|
||||
|
||||
#### kube_pod_container_status_running
|
||||
|
||||
#### kube_pod_container_status_terminated
|
||||
|
||||
#### kube_pod_container_status_terminated_reason
|
||||
|
||||
#### kube_pod_container_status_ready
|
||||
|
||||
#### kube_pod_container_status_restarts_total
|
||||
|
||||
#### kube_pod_container_resource_requests
|
||||
|
||||
#### kube_pod_container_resource_limits
|
||||
|
||||
#### kube_pod_container_resource_requests_cpu_cores
|
||||
|
||||
#### kube_pod_container_resource_requests_memory_bytes
|
||||
|
||||
#### kube_pod_container_resource_limits_cpu_cores
|
||||
|
||||
#### kube_pod_container_resource_limits_memory_bytes
|
||||
|
||||
|
||||
#### kube_pod_spec_volumes_persistentvolumeclaims_info
|
||||
|
||||
#### kube_pod_spec_volumes_persistentvolumeclaims_readonly
|
||||
144
plugins/inputs/kube_state/client.go
Normal file
144
plugins/inputs/kube_state/client.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package kube_state
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// client is a minimal Kubernetes API HTTP client: a base URL, the
// underlying HTTP client, an optional bearer token, and a semaphore that
// bounds the number of concurrent requests.
type client struct {
	baseURL string
	httpClient *http.Client
	// bearerToken, when non-empty, is sent as an Authorization header.
	bearerToken string
	// semaphore limits in-flight requests; sized to maxConns in newClient.
	semaphore chan struct{}
}
|
||||
|
||||
func newClient(baseURL string, timeout time.Duration, maxConns int, bearerToken string, tlsConfig *tls.Config) *client {
|
||||
return &client{
|
||||
baseURL: baseURL,
|
||||
httpClient: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
MaxIdleConns: maxConns,
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
Timeout: timeout,
|
||||
},
|
||||
bearerToken: bearerToken,
|
||||
semaphore: make(chan struct{}, maxConns),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) getAPIResourceList(ctx context.Context) (rList *metav1.APIResourceList, err error) {
|
||||
rList = new(metav1.APIResourceList)
|
||||
if err = c.doGet(ctx, "", rList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rList.GroupVersion == "" {
|
||||
return nil, &APIError{
|
||||
URL: c.baseURL,
|
||||
StatusCode: http.StatusOK,
|
||||
Title: "empty group version",
|
||||
}
|
||||
}
|
||||
return rList, nil
|
||||
}
|
||||
|
||||
func (c *client) getNodes(ctx context.Context) (list *v1.NodeList, err error) {
|
||||
list = new(v1.NodeList)
|
||||
if err = c.doGet(ctx, "/nodes/", list); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
func (c *client) getPods(ctx context.Context) (list *v1.PodList, err error) {
|
||||
list = new(v1.PodList)
|
||||
if err = c.doGet(ctx, "/pods/", list); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
func (c *client) getConfigMaps(ctx context.Context) (list *v1.ConfigMapList, err error) {
|
||||
list = new(v1.ConfigMapList)
|
||||
if err = c.doGet(ctx, "/configmaps/", list); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
func (c *client) doGet(ctx context.Context, url string, v interface{}) error {
|
||||
req, err := createGetRequest(c.baseURL+url, c.bearerToken)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case c.semaphore <- struct{}{}:
|
||||
break
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
<-c.semaphore
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
resp.Body.Close()
|
||||
<-c.semaphore
|
||||
}()
|
||||
|
||||
// Clear invalid token if unauthorized
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
c.bearerToken = ""
|
||||
}
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return &APIError{
|
||||
URL: url,
|
||||
StatusCode: resp.StatusCode,
|
||||
Title: resp.Status,
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusNoContent {
|
||||
return nil
|
||||
}
|
||||
|
||||
return json.NewDecoder(resp.Body).Decode(v)
|
||||
}
|
||||
|
||||
// createGetRequest builds a GET request that accepts JSON, attaching a
// bearer token when one is supplied.
func createGetRequest(url string, token string) (*http.Request, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Accept", "application/json")
	if token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	}
	return req, nil
}
|
||||
|
||||
// APIError describes a failed API call: the request URL, the HTTP status
// code, a short title and an optional longer description.
type APIError struct {
	URL         string
	StatusCode  int
	Title       string
	Description string
}

// Error renders "[URL] Title", appending the description when present.
func (e APIError) Error() string {
	if e.Description == "" {
		return fmt.Sprintf("[%s] %s", e.URL, e.Title)
	}
	return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description)
}
|
||||
42
plugins/inputs/kube_state/configmap.go
Normal file
42
plugins/inputs/kube_state/configmap.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package kube_state
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// configMapMeasurement is the measurement name for config-map points.
var configMapMeasurement = "kube_configmap"
|
||||
|
||||
func registerConfigMapCollector(ctx context.Context, acc telegraf.Accumulator, ks *KubenetesState) {
|
||||
list, err := ks.client.getConfigMaps(ctx)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
for _, s := range list.Items {
|
||||
if err = ks.gatherConfigMap(s, acc); err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ks *KubenetesState) gatherConfigMap(s v1.ConfigMap, acc telegraf.Accumulator) error {
|
||||
var creationTime time.Time
|
||||
if !s.CreationTimestamp.IsZero() {
|
||||
creationTime = s.CreationTimestamp.Time
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"gauge": 1,
|
||||
}
|
||||
tags := map[string]string{
|
||||
"namespace": s.Namespace,
|
||||
"configmap": s.Name,
|
||||
"resource_version": s.ResourceVersion,
|
||||
}
|
||||
acc.AddFields(configMapMeasurement, fields, tags, creationTime)
|
||||
return nil
|
||||
}
|
||||
194
plugins/inputs/kube_state/kubernetes_state_metrics.go
Normal file
194
plugins/inputs/kube_state/kubernetes_state_metrics.go
Normal file
@@ -0,0 +1,194 @@
|
||||
package kube_state
|
||||
|
||||
import (
	"bytes"
	"context"
	"crypto/md5"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
|
||||
|
||||
// KubenetesState represents the config object for the plugin.
|
||||
type KubenetesState struct {
|
||||
URL string
|
||||
|
||||
// Bearer Token authorization file path
|
||||
BearerToken string `toml:"bearer_token"`
|
||||
|
||||
// MaxConnections for worker pool tcp connections
|
||||
MaxConnections int `toml:"max_connections"`
|
||||
|
||||
// HTTP Timeout specified as a string - 3s, 1m, 1h
|
||||
ResponseTimeout internal.Duration `toml:"response_timeout"`
|
||||
|
||||
tls.ClientConfig
|
||||
|
||||
client *client
|
||||
rListHash string
|
||||
filter filter.Filter
|
||||
lastFilterBuilt int64
|
||||
ResourceListCheckInterval *internal.Duration `toml:"resouce_list_check_interval"`
|
||||
ResourceExclude []string `toml:"resource_exclude"`
|
||||
|
||||
DisablePodNonGenericResourceMetrics bool `json:"disable_pod_non_generic_resource_metrics"`
|
||||
DisableNodeNonGenericResourceMetrics bool `json:"disable_node_non_generic_resource_metrics"`
|
||||
}
|
||||
|
||||
// sampleConfig is the TOML snippet printed by `telegraf config`.
var sampleConfig = `
  ## URL for the kubelet
  url = "http://1.1.1.1:10255"

  ## Use bearer token for authorization
  # bearer_token = /path/to/bearer/token

  ## Set response_timeout (default 5 seconds)
  # response_timeout = "5s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## Worker pool for kube_state_metric plugin only
  # empty this field will use default value 30
  # max_connections = 30
`
|
||||
|
||||
// SampleConfig returns the example configuration for this plugin.
func (k *KubenetesState) SampleConfig() string {
	return sampleConfig
}
|
||||
|
||||
// Description returns the description of this plugin.
// NOTE(review): the text mentions the kubelet API, but this plugin queries
// the API server (see client.go) — looks copied from the "kubernetes"
// plugin; confirm and reword before release.
func (k *KubenetesState) Description() string {
	return "Read metrics from the kubernetes kubelet api"
}
|
||||
|
||||
//Gather collects kubernetes metrics from a given URL
|
||||
func (k *KubenetesState) Gather(acc telegraf.Accumulator) (err error) {
|
||||
var rList *metav1.APIResourceList
|
||||
if k.client == nil {
|
||||
if k.client, rList, err = k.initClient(); err != nil {
|
||||
return err
|
||||
}
|
||||
goto buildFilter
|
||||
}
|
||||
|
||||
if k.lastFilterBuilt > 0 && time.Now().Unix()-k.lastFilterBuilt < int64(k.ResourceListCheckInterval.Duration.Seconds()) {
|
||||
println("! skip to gather")
|
||||
goto doGather
|
||||
}
|
||||
|
||||
rList, err = k.client.getAPIResourceList(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buildFilter:
|
||||
k.lastFilterBuilt = time.Now().Unix()
|
||||
if err = k.buildFilter(rList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
doGather:
|
||||
for n, f := range availableCollectors {
|
||||
ctx := context.Background()
|
||||
if k.filter.Match(n) {
|
||||
println("!", n)
|
||||
go f(ctx, acc, k)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubenetesState) buildFilter(rList *metav1.APIResourceList) error {
|
||||
hash, err := genHash(rList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if k.rListHash == hash {
|
||||
return nil
|
||||
}
|
||||
k.rListHash = hash
|
||||
include := make([]string, len(rList.APIResources))
|
||||
for k, v := range rList.APIResources {
|
||||
include[k] = v.Name
|
||||
}
|
||||
k.filter, err = filter.NewIncludeExcludeFilter(include, k.ResourceExclude)
|
||||
return err
|
||||
}
|
||||
|
||||
func genHash(rList *metav1.APIResourceList) (string, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
for _, v := range rList.APIResources {
|
||||
if _, err := buf.WriteString(v.Name + "|"); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
sum := md5.Sum(buf.Bytes())
|
||||
return string(sum[:]), nil
|
||||
}
|
||||
|
||||
// availableCollectors maps each Kubernetes resource name to the collector
// that gathers it; Gather runs the entries matched by the resource filter.
// Commented-out entries are not implemented yet.
var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accumulator, k *KubenetesState){
	// "cronjobs": RegisterCronJobCollector,
	// "daemonsets": RegisterDaemonSetCollector,
	// "deployments": RegisterDeploymentCollector,
	// "jobs": RegisterJobCollector,
	// "limitranges": RegisterLimitRangeCollector,
	"nodes": registerNodeCollector,
	"pods":  registerPodCollector,
	// "replicasets": RegisterReplicaSetCollector,
	// "replicationcontrollers": RegisterReplicationControllerCollector,
	// "resourcequotas": RegisterResourceQuotaCollector,
	// "services": RegisterServiceCollector,
	// "statefulsets": RegisterStatefulSetCollector,
	// "persistentvolumes": RegisterPersistentVolumeCollector,
	// "persistentvolumeclaims": RegisterPersistentVolumeClaimCollector,
	// "namespaces": RegisterNamespaceCollector,
	// "horizontalpodautoscalers": RegisterHorizontalPodAutoScalerCollector,
	// "endpoints": RegisterEndpointCollector,
	// "secrets": RegisterSecretCollector,
	"configmaps": registerConfigMapCollector,
}
|
||||
|
||||
func (k *KubenetesState) initClient() (*client, *metav1.APIResourceList, error) {
|
||||
tlsCfg, err := k.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error parse kube state metrics config[%s]: %v", k.URL, err)
|
||||
}
|
||||
// default 30 concurrent TCP connections
|
||||
if k.MaxConnections == 0 {
|
||||
k.MaxConnections = 30
|
||||
}
|
||||
|
||||
// default check resourceList every hour
|
||||
if k.ResourceListCheckInterval == nil {
|
||||
k.ResourceListCheckInterval = &internal.Duration{
|
||||
Duration: time.Hour,
|
||||
}
|
||||
}
|
||||
c := newClient(k.URL, k.ResponseTimeout.Duration, k.MaxConnections, k.BearerToken, tlsCfg)
|
||||
rList, err := c.getAPIResourceList(context.Background())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error connect to kubenetes api endpoint[%s]: %v", k.URL, err)
|
||||
}
|
||||
log.Printf("I! Kubenetes API group version is %s", rList.GroupVersion)
|
||||
return c, rList, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("kubernetes_state", func() telegraf.Input {
|
||||
return &KubenetesState{}
|
||||
})
|
||||
}
|
||||
73
plugins/inputs/kube_state/node.go
Normal file
73
plugins/inputs/kube_state/node.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package kube_state
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// Measurement names emitted by the node collector.
var (
	nodeMeasurement      = "kube_node"            // per-node identity/label point
	nodeTaintMeasurement = "kube_node_spec_taint" // one point per node taint
)
|
||||
|
||||
func registerNodeCollector(ctx context.Context, acc telegraf.Accumulator, ks *KubenetesState) {
|
||||
list, err := ks.client.getNodes(ctx)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
for _, n := range list.Items {
|
||||
if err = ks.gatherNode(n, acc); err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
func (ks *KubenetesState) gatherNode(n v1.Node, acc telegraf.Accumulator) error {
|
||||
fields := map[string]interface{}{}
|
||||
tags := map[string]string{
|
||||
"node": n.Name,
|
||||
"kernel_version": n.Status.NodeInfo.KernelVersion,
|
||||
"os_image": n.Status.NodeInfo.OSImage,
|
||||
"container_runtime_version": n.Status.NodeInfo.ContainerRuntimeVersion,
|
||||
"kubelet_version": n.Status.NodeInfo.KubeletVersion,
|
||||
"kubeproxy_version": n.Status.NodeInfo.KubeProxyVersion,
|
||||
"provider_id": n.Spec.ProviderID,
|
||||
"spec_unschedulable": strconv.FormatBool(n.Spec.Unschedulable)
|
||||
}
|
||||
|
||||
if !n.CreationTimestamp.IsZero() {
|
||||
fields["created"] = n.CreationTimestamp.Unix()
|
||||
}
|
||||
|
||||
for k, v := range n.Labels {
|
||||
tags["label_"+sanitizeLabelName(k)] = v
|
||||
}
|
||||
|
||||
// Collect node taints
|
||||
for _, taint := range n.Spec.Taints {
|
||||
go gatherNodeTaint(n, taint, acc)
|
||||
}
|
||||
|
||||
acc.AddFields(nodeMeasurement, fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func gatherNodeTaint(n v1.Node, taint v1.Taint,acc telegraf.Accumulator){
|
||||
fields := map[string]interface{}{
|
||||
"gauge":1,
|
||||
}
|
||||
tags := map[string]string{
|
||||
"node": n.Name,
|
||||
"key": taint.Key,
|
||||
"value": taint.Value,
|
||||
"effect":string(taint.Effect),
|
||||
}
|
||||
|
||||
acc.AddFields(nodeTaintMeasurement, fields, tags)
|
||||
|
||||
}
|
||||
202
plugins/inputs/kube_state/pod.go
Normal file
202
plugins/inputs/kube_state/pod.go
Normal file
@@ -0,0 +1,202 @@
|
||||
package kube_state
|
||||
|
||||
import (
|
||||
"context"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/util/node"
|
||||
)
|
||||
|
||||
// Measurement names emitted by the pod collector.
var (
	podMeasurement          = "kube_pod"              // per-pod summary point
	podContainerMeasurement = "kube_pod_container"    // one point per container status
	podVolumeMeasurement    = "kube_pod_spec_volumes" // one point per PVC-backed volume
)
|
||||
|
||||
func registerPodCollector(ctx context.Context, acc telegraf.Accumulator, ks *KubenetesState) {
|
||||
list, err := ks.client.getPods(ctx)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
for _, p := range list.Items {
|
||||
if err = ks.gatherPod(p, acc); err != nil {
|
||||
acc.AddError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ks *KubenetesState) gatherPod(p v1.Pod, acc telegraf.Accumulator) error {
|
||||
nodeName := p.Spec.NodeName
|
||||
fields := make(map[string]interface{})
|
||||
tags := make(map[string]string)
|
||||
|
||||
createdBy := metav1.GetControllerOf(&p)
|
||||
createdByKind := ""
|
||||
createdByName := ""
|
||||
if createdBy != nil {
|
||||
if createdBy.Kind != "" {
|
||||
createdByKind = createdBy.Kind
|
||||
}
|
||||
if createdBy.Name != "" {
|
||||
createdByName = createdBy.Name
|
||||
}
|
||||
}
|
||||
|
||||
if p.Status.StartTime != nil {
|
||||
fields["start_time"] = p.Status.StartTime.UnixNano()
|
||||
}
|
||||
|
||||
tags["namesapce"] = p.Namespace
|
||||
tags["name"] = p.Name
|
||||
tags["host_ip"] = p.Status.HostIP
|
||||
tags["pod_ip"] = p.Status.PodIP
|
||||
tags["node"] = nodeName
|
||||
tags["created_by_kind"] = createdByKind
|
||||
tags["created_by_name"] = createdByName
|
||||
tags["status_scheduled"] = "false"
|
||||
tags["status_ready"] = "false"
|
||||
|
||||
owners := p.GetOwnerReferences()
|
||||
if len(owners) == 0 {
|
||||
tags["owner_kind"] = ""
|
||||
tags["owner_name"] = ""
|
||||
tags["owner_is_controller"] = ""
|
||||
} else {
|
||||
tags["owner_kind"] = owners[0].Kind
|
||||
tags["owner_name"] = owners[0].Name
|
||||
if owners[0].Controller != nil {
|
||||
tags["owner_is_controller"] = strconv.FormatBool(*owners[0].Controller)
|
||||
} else {
|
||||
tags["owner_is_controller"] = "false"
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range p.Labels {
|
||||
tags["label_"+sanitizeLabelName(k)] = v
|
||||
}
|
||||
|
||||
if phase := p.Status.Phase; phase != "" {
|
||||
tags["status_phase"] = string(phase)
|
||||
// This logic is directly copied from: https://github.com/kubernetes/kubernetes/blob/d39bfa0d138368bbe72b0eaf434501dcb4ec9908/pkg/printers/internalversion/printers.go#L597-L601
|
||||
// For more info, please go to: https://github.com/kubernetes/kube-state-metrics/issues/410
|
||||
if p.DeletionTimestamp != nil && p.Status.Reason == node.NodeUnreachablePodReason {
|
||||
tags["status_phase"] = string(v1.PodUnknown)
|
||||
}
|
||||
}
|
||||
|
||||
if !p.CreationTimestamp.IsZero() {
|
||||
fields["created"] = p.CreationTimestamp.Unix()
|
||||
}
|
||||
|
||||
for _, c := range p.Status.Conditions {
|
||||
switch c.Type {
|
||||
case v1.PodReady:
|
||||
tags["status_ready"] = "true"
|
||||
case v1.PodScheduled:
|
||||
tags["status_scheduled"] = "true"
|
||||
fields["status_scheduled_time"] = c.LastTransitionTime.Unix()
|
||||
}
|
||||
}
|
||||
|
||||
var lastFinishTime int64
|
||||
|
||||
for i, cs := range p.Status.ContainerStatuses {
|
||||
c := p.Spec.Containers[i]
|
||||
gatherPodContainer(nodeName, p, cs, c, &lastFinishTime, acc)
|
||||
}
|
||||
|
||||
if lastFinishTime > 0 {
|
||||
fields["completion_time"] = lastFinishTime
|
||||
}
|
||||
|
||||
for _, v := range p.Spec.Volumes {
|
||||
if v.PersistentVolumeClaim != nil {
|
||||
gatherPodVolume(v, p, acc)
|
||||
}
|
||||
}
|
||||
|
||||
acc.AddFields(podMeasurement, fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func gatherPodVolume(v v1.Volume, p v1.Pod, acc telegraf.Accumulator) {
|
||||
fields := map[string]interface{}{
|
||||
"read_only": 0.0,
|
||||
}
|
||||
tags := map[string]string{
|
||||
"namespace": p.Namespace,
|
||||
"pod": p.Name,
|
||||
"volume": v.Name,
|
||||
"persistentvolumeclaim": v.PersistentVolumeClaim.ClaimName,
|
||||
}
|
||||
if v.PersistentVolumeClaim.ReadOnly {
|
||||
fields["read_only"] = 1.0
|
||||
}
|
||||
acc.AddFields(podVolumeMeasurement, fields, tags)
|
||||
}
|
||||
|
||||
func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.Container, lastFinishTime *int64, acc telegraf.Accumulator) {
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"status_restarts_total": cs.RestartCount,
|
||||
}
|
||||
tags := map[string]string{
|
||||
"namespace": p.Namespace,
|
||||
"pod_name": p.Name,
|
||||
"node_name": nodeName,
|
||||
"container": c.Name,
|
||||
"image": cs.Image,
|
||||
"image_id": cs.ImageID,
|
||||
"container_id": cs.ContainerID,
|
||||
"status_waiting": strconv.FormatBool(cs.State.Waiting != nil),
|
||||
"status_waiting_reason": "",
|
||||
"status_running": strconv.FormatBool(cs.State.Terminated != nil),
|
||||
"status_terminated": strconv.FormatBool(cs.State.Running != nil),
|
||||
"status_terminated_reason": "",
|
||||
"container_status_ready": strconv.FormatBool(cs.Ready),
|
||||
}
|
||||
|
||||
if cs.State.Waiting != nil {
|
||||
tags["status_waiting_reason"] = cs.State.Waiting.Reason
|
||||
}
|
||||
|
||||
if cs.State.Terminated != nil {
|
||||
tags["status_terminated_reason"] = cs.State.Terminated.Reason
|
||||
if *lastFinishTime == 0 || *lastFinishTime < cs.State.Terminated.FinishedAt.Unix() {
|
||||
*lastFinishTime = cs.State.Terminated.FinishedAt.Unix()
|
||||
}
|
||||
}
|
||||
req := c.Resources.Requests
|
||||
lim := c.Resources.Limits
|
||||
|
||||
for resourceName, val := range req {
|
||||
switch resourceName {
|
||||
case v1.ResourceCPU:
|
||||
fields["resource_requests_cpu_cores"] = val.MilliValue() / 1000
|
||||
default:
|
||||
fields["resource_requests_"+sanitizeLabelName(string(resourceName))+"_bytes"] = val.Value()
|
||||
}
|
||||
}
|
||||
for resourceName, val := range lim {
|
||||
switch resourceName {
|
||||
case v1.ResourceCPU:
|
||||
fields["resource_limits_cpu_cores"] = val.MilliValue() / 1000
|
||||
default:
|
||||
fields["resource_limits_"+sanitizeLabelName(string(resourceName))+"_bytes"] = val.Value()
|
||||
}
|
||||
}
|
||||
|
||||
acc.AddFields(podContainerMeasurement, fields, tags)
|
||||
}
|
||||
|
||||
// invalidLabelCharRE matches every character that is not legal in a
// metric label name ([a-zA-Z0-9_]); compiled once at package init.
var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// sanitizeLabelName rewrites s so it is safe to use as a tag-key suffix,
// replacing each illegal character with an underscore.
func sanitizeLabelName(s string) string {
	safe := invalidLabelCharRE.ReplaceAllString(s, "_")
	return safe
}
|
||||
@@ -108,7 +108,9 @@ You must capture at least one field per line.
|
||||
- ts-"CUSTOM"
|
||||
|
||||
CUSTOM time layouts must be within quotes and be the representation of the
|
||||
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
|
||||
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
|
||||
To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"`
|
||||
To match a comma decimal point you can use a period in the pattern string.
|
||||
See https://golang.org/pkg/time/#Parse for more details.
|
||||
|
||||
Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns),
|
||||
|
||||
@@ -335,6 +335,9 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
|
||||
case DROP:
|
||||
// goodbye!
|
||||
default:
|
||||
// Replace commas with dot character
|
||||
v = strings.Replace(v, ",", ".", -1)
|
||||
|
||||
ts, err := time.ParseInLocation(t, v, p.loc)
|
||||
if err == nil {
|
||||
timestamp = ts
|
||||
|
||||
@@ -982,3 +982,21 @@ func TestSyslogTimestampParser(t *testing.T) {
|
||||
require.NotNil(t, m)
|
||||
require.Equal(t, 2018, m.Time().Year())
|
||||
}
|
||||
|
||||
func TestReplaceTimestampComma(t *testing.T) {
|
||||
|
||||
p := &Parser{
|
||||
Patterns: []string{`%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05.000"} successfulMatches=%{NUMBER:value:int}`},
|
||||
}
|
||||
|
||||
require.NoError(t, p.Compile())
|
||||
m, err := p.ParseLine("2018-02-21 13:10:34,555 successfulMatches=1")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, m)
|
||||
|
||||
require.Equal(t, 2018, m.Time().Year())
|
||||
require.Equal(t, 13, m.Time().Hour())
|
||||
require.Equal(t, 34, m.Time().Second())
|
||||
//Convert Nanosecond to milisecond for compare
|
||||
require.Equal(t, 555, m.Time().Nanosecond()/1000000)
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ func TestMysqlGetDSNTag(t *testing.T) {
|
||||
},
|
||||
{
|
||||
"tcp(localhost)/",
|
||||
"localhost",
|
||||
"localhost:3306",
|
||||
},
|
||||
{
|
||||
"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
"github.com/nats-io/nats"
|
||||
nats "github.com/nats-io/go-nats"
|
||||
)
|
||||
|
||||
type natsError struct {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/nats-io/nats"
|
||||
nats "github.com/nats-io/go-nats"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
||||
@@ -17,16 +17,17 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid
|
||||
### Metrics
|
||||
- measurement: `nvidia_smi`
|
||||
- tags
|
||||
- `name` (type of GPU e.g. `GeForce GTX 170 Ti`)
|
||||
- `name` (type of GPU e.g. `GeForce GTX 1070 Ti`)
|
||||
- `compute_mode` (The compute mode of the GPU e.g. `Default`)
|
||||
- `index` (The port index where the GPU is connected to the motherboard e.g. `1`)
|
||||
- `pstate` (Overclocking state for the GPU e.g. `P0`)
|
||||
- `uuid` (A unique identifier for the GPU e.g. `GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665`)
|
||||
- fields
|
||||
- `fan_speed` (integer, percentage)
|
||||
- `memory_free` (integer, KB)
|
||||
- `memory_used` (integer, KB)
|
||||
- `memory_total` (integer, KB)
|
||||
- `memory_free` (integer, MiB)
|
||||
- `memory_used` (integer, MiB)
|
||||
- `memory_total` (integer, MiB)
|
||||
- `power_draw` (float, W)
|
||||
- `temperature_gpu` (integer, degrees C)
|
||||
- `utilization_gpu` (integer, percentage)
|
||||
- `utilization_memory` (integer, percentage)
|
||||
|
||||
@@ -16,20 +16,21 @@ import (
|
||||
|
||||
var (
|
||||
measurement = "nvidia_smi"
|
||||
metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index"
|
||||
metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw"
|
||||
metricNames = [][]string{
|
||||
[]string{"fan_speed", "field"},
|
||||
[]string{"memory_total", "field"},
|
||||
[]string{"memory_used", "field"},
|
||||
[]string{"memory_free", "field"},
|
||||
[]string{"fan_speed", "integer"},
|
||||
[]string{"memory_total", "integer"},
|
||||
[]string{"memory_used", "integer"},
|
||||
[]string{"memory_free", "integer"},
|
||||
[]string{"pstate", "tag"},
|
||||
[]string{"temperature_gpu", "field"},
|
||||
[]string{"temperature_gpu", "integer"},
|
||||
[]string{"name", "tag"},
|
||||
[]string{"uuid", "tag"},
|
||||
[]string{"compute_mode", "tag"},
|
||||
[]string{"utilization_gpu", "field"},
|
||||
[]string{"utilization_memory", "field"},
|
||||
[]string{"utilization_gpu", "integer"},
|
||||
[]string{"utilization_memory", "integer"},
|
||||
[]string{"index", "tag"},
|
||||
[]string{"power_draw", "float"},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -127,7 +128,7 @@ func parseLine(line string) (map[string]string, map[string]interface{}, error) {
|
||||
for i, m := range metricNames {
|
||||
col := strings.TrimSpace(met[i])
|
||||
|
||||
// First handle the tags
|
||||
// Handle the tags
|
||||
if m[1] == "tag" {
|
||||
tags[m[0]] = col
|
||||
continue
|
||||
@@ -137,12 +138,23 @@ func parseLine(line string) (map[string]string, map[string]interface{}, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Then parse the integers out of the fields
|
||||
out, err := strconv.ParseInt(col, 10, 64)
|
||||
if err != nil {
|
||||
return tags, fields, err
|
||||
// Parse the integers
|
||||
if m[1] == "integer" {
|
||||
out, err := strconv.ParseInt(col, 10, 64)
|
||||
if err != nil {
|
||||
return tags, fields, err
|
||||
}
|
||||
fields[m[0]] = out
|
||||
}
|
||||
|
||||
// Parse the floats
|
||||
if m[1] == "float" {
|
||||
out, err := strconv.ParseFloat(col, 64)
|
||||
if err != nil {
|
||||
return tags, fields, err
|
||||
}
|
||||
fields[m[0]] = out
|
||||
}
|
||||
fields[m[0]] = out
|
||||
}
|
||||
|
||||
// Return the tags and fields
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
)
|
||||
|
||||
func TestParseLineStandard(t *testing.T) {
|
||||
line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1\n"
|
||||
line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1, 0.0\n"
|
||||
tags, fields, err := parseLine(line)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
@@ -37,7 +37,7 @@ func TestParseLineBad(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestParseLineNotSupported(t *testing.T) {
|
||||
line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0\n"
|
||||
line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0, 0.0\n"
|
||||
_, fields, err := parseLine(line)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, nil, fields["fan_speed"])
|
||||
|
||||
@@ -14,7 +14,7 @@ To use this plugin you must enable the [monitoring](https://www.openldap.org/dev
|
||||
# ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
|
||||
# note that port will likely need to be changed to 636 for ldaps
|
||||
# valid options: "" | "starttls" | "ldaps"
|
||||
ssl = ""
|
||||
tls = ""
|
||||
|
||||
# skip peer certificate verification. Default is false.
|
||||
insecure_skip_verify = false
|
||||
|
||||
@@ -15,9 +15,11 @@ import (
|
||||
type Openldap struct {
|
||||
Host string
|
||||
Port int
|
||||
Ssl string
|
||||
SSL string `toml:"ssl"` // Deprecated in 1.7; use TLS
|
||||
TLS string `toml:"tls"`
|
||||
InsecureSkipVerify bool
|
||||
SslCa string
|
||||
SSLCA string `toml:"ssl_ca"` // Deprecated in 1.7; use TLSCA
|
||||
TLSCA string `toml:"tls_ca"`
|
||||
BindDn string
|
||||
BindPassword string
|
||||
ReverseMetricNames bool
|
||||
@@ -30,7 +32,7 @@ const sampleConfig string = `
|
||||
# ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
|
||||
# note that port will likely need to be changed to 636 for ldaps
|
||||
# valid options: "" | "starttls" | "ldaps"
|
||||
ssl = ""
|
||||
tls = ""
|
||||
|
||||
# skip peer certificate verification. Default is false.
|
||||
insecure_skip_verify = false
|
||||
@@ -70,9 +72,11 @@ func NewOpenldap() *Openldap {
|
||||
return &Openldap{
|
||||
Host: "localhost",
|
||||
Port: 389,
|
||||
Ssl: "",
|
||||
SSL: "",
|
||||
TLS: "",
|
||||
InsecureSkipVerify: false,
|
||||
SslCa: "",
|
||||
SSLCA: "",
|
||||
TLSCA: "",
|
||||
BindDn: "",
|
||||
BindPassword: "",
|
||||
ReverseMetricNames: false,
|
||||
@@ -81,12 +85,19 @@ func NewOpenldap() *Openldap {
|
||||
|
||||
// gather metrics
|
||||
func (o *Openldap) Gather(acc telegraf.Accumulator) error {
|
||||
if o.TLS == "" {
|
||||
o.TLS = o.SSL
|
||||
}
|
||||
if o.TLSCA == "" {
|
||||
o.TLSCA = o.SSLCA
|
||||
}
|
||||
|
||||
var err error
|
||||
var l *ldap.Conn
|
||||
if o.Ssl != "" {
|
||||
if o.TLS != "" {
|
||||
// build tls config
|
||||
clientTLSConfig := tls.ClientConfig{
|
||||
SSLCA: o.SslCa,
|
||||
TLSCA: o.TLSCA,
|
||||
InsecureSkipVerify: o.InsecureSkipVerify,
|
||||
}
|
||||
tlsConfig, err := clientTLSConfig.TLSConfig()
|
||||
@@ -94,13 +105,13 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error {
|
||||
acc.AddError(err)
|
||||
return nil
|
||||
}
|
||||
if o.Ssl == "ldaps" {
|
||||
if o.TLS == "ldaps" {
|
||||
l, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port), tlsConfig)
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
return nil
|
||||
}
|
||||
} else if o.Ssl == "starttls" {
|
||||
} else if o.TLS == "starttls" {
|
||||
l, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port))
|
||||
if err != nil {
|
||||
acc.AddError(err)
|
||||
@@ -108,7 +119,7 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
err = l.StartTLS(tlsConfig)
|
||||
} else {
|
||||
acc.AddError(fmt.Errorf("Invalid setting for ssl: %s", o.Ssl))
|
||||
acc.AddError(fmt.Errorf("Invalid setting for ssl: %s", o.TLS))
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
package openldap
|
||||
|
||||
import (
|
||||
"gopkg.in/ldap.v2"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/ldap.v2"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -74,7 +75,7 @@ func TestOpenldapStartTLS(t *testing.T) {
|
||||
o := &Openldap{
|
||||
Host: testutil.GetLocalHost(),
|
||||
Port: 389,
|
||||
Ssl: "starttls",
|
||||
SSL: "starttls",
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
@@ -92,7 +93,7 @@ func TestOpenldapLDAPS(t *testing.T) {
|
||||
o := &Openldap{
|
||||
Host: testutil.GetLocalHost(),
|
||||
Port: 636,
|
||||
Ssl: "ldaps",
|
||||
SSL: "ldaps",
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
@@ -110,7 +111,7 @@ func TestOpenldapInvalidSSL(t *testing.T) {
|
||||
o := &Openldap{
|
||||
Host: testutil.GetLocalHost(),
|
||||
Port: 636,
|
||||
Ssl: "invalid",
|
||||
SSL: "invalid",
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
@@ -129,7 +130,7 @@ func TestOpenldapBind(t *testing.T) {
|
||||
o := &Openldap{
|
||||
Host: testutil.GetLocalHost(),
|
||||
Port: 389,
|
||||
Ssl: "",
|
||||
SSL: "",
|
||||
InsecureSkipVerify: true,
|
||||
BindDn: "cn=manager,cn=config",
|
||||
BindPassword: "secret",
|
||||
@@ -157,7 +158,7 @@ func TestOpenldapReverseMetrics(t *testing.T) {
|
||||
o := &Openldap{
|
||||
Host: testutil.GetLocalHost(),
|
||||
Port: 389,
|
||||
Ssl: "",
|
||||
SSL: "",
|
||||
InsecureSkipVerify: true,
|
||||
BindDn: "cn=manager,cn=config",
|
||||
BindPassword: "secret",
|
||||
|
||||
@@ -175,7 +175,7 @@ func (p *Ping) args(url string) []string {
|
||||
}
|
||||
if p.Timeout > 0 {
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
case "darwin", "freebsd", "netbsd", "openbsd":
|
||||
args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64))
|
||||
case "linux":
|
||||
args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64))
|
||||
@@ -186,7 +186,7 @@ func (p *Ping) args(url string) []string {
|
||||
}
|
||||
if p.Deadline > 0 {
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
case "darwin", "freebsd", "netbsd", "openbsd":
|
||||
args = append(args, "-t", strconv.Itoa(p.Deadline))
|
||||
case "linux":
|
||||
args = append(args, "-w", strconv.Itoa(p.Deadline))
|
||||
@@ -197,10 +197,10 @@ func (p *Ping) args(url string) []string {
|
||||
}
|
||||
if p.Interface != "" {
|
||||
switch runtime.GOOS {
|
||||
case "darwin", "freebsd", "netbsd", "openbsd":
|
||||
args = append(args, "-S", p.Interface)
|
||||
case "linux":
|
||||
args = append(args, "-I", p.Interface)
|
||||
case "freebsd", "darwin":
|
||||
args = append(args, "-S", p.Interface)
|
||||
default:
|
||||
// Not sure the best option here, just assume GNU ping?
|
||||
args = append(args, "-I", p.Interface)
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -28,36 +28,37 @@ func getQueueDirectory() (string, error) {
|
||||
return strings.TrimSpace(string(qd)), nil
|
||||
}
|
||||
|
||||
func qScan(path string) (int64, int64, int64, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
finfos, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) {
|
||||
var length, size int64
|
||||
var oldest time.Time
|
||||
for _, finfo := range finfos {
|
||||
err := filepath.Walk(path, func(_ string, finfo os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("error scanning %s: %s", path, err))
|
||||
return nil
|
||||
}
|
||||
if finfo.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
length++
|
||||
size += finfo.Size()
|
||||
|
||||
ctime := statCTime(finfo.Sys())
|
||||
if ctime.IsZero() {
|
||||
continue
|
||||
return nil
|
||||
}
|
||||
if oldest.IsZero() || ctime.Before(oldest) {
|
||||
oldest = ctime
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
var age int64
|
||||
if !oldest.IsZero() {
|
||||
age = int64(time.Now().Sub(oldest) / time.Second)
|
||||
} else if len(finfos) != 0 {
|
||||
} else if length != 0 {
|
||||
// system doesn't support ctime
|
||||
age = -1
|
||||
}
|
||||
@@ -77,8 +78,8 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
}
|
||||
|
||||
for _, q := range []string{"active", "hold", "incoming", "maildrop"} {
|
||||
length, size, age, err := qScan(path.Join(p.QueueDirectory, q))
|
||||
for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} {
|
||||
length, size, age, err := qScan(filepath.Join(p.QueueDirectory, q), acc)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("error scanning queue %s: %s", q, err))
|
||||
continue
|
||||
@@ -90,30 +91,6 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error {
|
||||
acc.AddFields("postfix_queue", fields, map[string]string{"queue": q})
|
||||
}
|
||||
|
||||
var dLength, dSize int64
|
||||
dAge := int64(-1)
|
||||
for _, q := range []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"} {
|
||||
length, size, age, err := qScan(path.Join(p.QueueDirectory, "deferred", q))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// the directories are created on first use
|
||||
continue
|
||||
}
|
||||
acc.AddError(fmt.Errorf("error scanning queue deferred/%s: %s", q, err))
|
||||
return nil
|
||||
}
|
||||
dLength += length
|
||||
dSize += size
|
||||
if age > dAge {
|
||||
dAge = age
|
||||
}
|
||||
}
|
||||
fields := map[string]interface{}{"length": dLength, "size": dSize}
|
||||
if dAge != -1 {
|
||||
fields["age"] = dAge
|
||||
}
|
||||
acc.AddFields("postfix_queue", fields, map[string]string{"queue": "deferred"})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ package postfix
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
@@ -16,19 +16,16 @@ func TestGather(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(td)
|
||||
|
||||
for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} {
|
||||
require.NoError(t, os.Mkdir(path.Join(td, q), 0755))
|
||||
}
|
||||
for _, q := range []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "F"} { // "E" deliberately left off
|
||||
require.NoError(t, os.Mkdir(path.Join(td, "deferred", q), 0755))
|
||||
for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred/0/0", "deferred/F/F"} {
|
||||
require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755))
|
||||
}
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(path.Join(td, "active", "01"), []byte("abc"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(path.Join(td, "active", "02"), []byte("defg"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(path.Join(td, "hold", "01"), []byte("abc"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(path.Join(td, "incoming", "01"), []byte("abcd"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(path.Join(td, "deferred", "0", "01"), []byte("abc"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(path.Join(td, "deferred", "F", "F1"), []byte("abc"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644))
|
||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644))
|
||||
|
||||
p := Postfix{
|
||||
QueueDirectory: td,
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
)
|
||||
|
||||
// Implemention of PIDGatherer that execs pgrep to find processes
|
||||
@@ -62,6 +64,12 @@ func find(path string, args []string) ([]PID, error) {
|
||||
|
||||
func run(path string, args []string) (string, error) {
|
||||
out, err := exec.Command(path, args...).Output()
|
||||
|
||||
//if exit code 1, ie no processes found, do not return error
|
||||
if i, _ := internal.ExitStatus(err); i == 1 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error running %s: %s", path, err)
|
||||
}
|
||||
|
||||
@@ -97,7 +97,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
|
||||
p.createProcess = defaultProcess
|
||||
}
|
||||
|
||||
procs, err := p.updateProcesses(p.procs)
|
||||
procs, err := p.updateProcesses(acc, p.procs)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
|
||||
p.Exe, p.PidFile, p.Pattern, p.User, err.Error()))
|
||||
@@ -230,8 +230,8 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) {
|
||||
}
|
||||
|
||||
// Update monitored Processes
|
||||
func (p *Procstat) updateProcesses(prevInfo map[PID]Process) (map[PID]Process, error) {
|
||||
pids, tags, err := p.findPids()
|
||||
func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Process) (map[PID]Process, error) {
|
||||
pids, tags, err := p.findPids(acc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -281,9 +281,9 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) {
|
||||
}
|
||||
|
||||
// Get matching PIDs and their initial tags
|
||||
func (p *Procstat) findPids() ([]PID, map[string]string, error) {
|
||||
func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) {
|
||||
var pids []PID
|
||||
var tags map[string]string
|
||||
tags := make(map[string]string)
|
||||
var err error
|
||||
|
||||
f, err := p.getPIDFinder()
|
||||
@@ -313,7 +313,18 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) {
|
||||
err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, or cgroup must be specified")
|
||||
}
|
||||
|
||||
return pids, tags, err
|
||||
rTags := make(map[string]string)
|
||||
for k, v := range tags {
|
||||
rTags[k] = v
|
||||
}
|
||||
|
||||
//adds a metric with info on the pgrep query
|
||||
fields := make(map[string]interface{})
|
||||
tags["pid_finder"] = p.PidFinder
|
||||
fields["pid_count"] = len(pids)
|
||||
acc.AddFields("procstat_lookup", fields, tags)
|
||||
|
||||
return pids, rTags, err
|
||||
}
|
||||
|
||||
// execCommand is so tests can mock out exec.Command usage.
|
||||
|
||||
@@ -343,7 +343,8 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
|
||||
createPIDFinder: pidFinder([]PID{}, nil),
|
||||
SystemdUnit: "TestGather_systemdUnitPIDs",
|
||||
}
|
||||
pids, tags, err := p.findPids()
|
||||
var acc testutil.Accumulator
|
||||
pids, tags, err := p.findPids(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []PID{11408}, pids)
|
||||
assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])
|
||||
@@ -364,8 +365,20 @@ func TestGather_cgroupPIDs(t *testing.T) {
|
||||
createPIDFinder: pidFinder([]PID{}, nil),
|
||||
CGroup: td,
|
||||
}
|
||||
pids, tags, err := p.findPids()
|
||||
var acc testutil.Accumulator
|
||||
pids, tags, err := p.findPids(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []PID{1234, 5678}, pids)
|
||||
assert.Equal(t, td, tags["cgroup"])
|
||||
}
|
||||
|
||||
func TestProcstatLookupMetric(t *testing.T) {
|
||||
p := Procstat{
|
||||
createPIDFinder: pidFinder([]PID{543}, nil),
|
||||
Exe: "-Gsys",
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
err := acc.GatherError(p.Gather)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(p.procs)+1, len(acc.Metrics))
|
||||
}
|
||||
|
||||
@@ -14,6 +14,13 @@
|
||||
## If no servers are specified, then localhost is used as the host.
|
||||
## If no port is specified, 6379 is used
|
||||
servers = ["tcp://localhost:6379"]
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
@@ -13,11 +13,13 @@ import (
|
||||
|
||||
"github.com/go-redis/redis"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Redis struct {
|
||||
Servers []string
|
||||
tls.ClientConfig
|
||||
|
||||
clients []Client
|
||||
initialized bool
|
||||
@@ -56,6 +58,13 @@ var sampleConfig = `
|
||||
## If no servers are specified, then localhost is used as the host.
|
||||
## If no port is specified, 6379 is used
|
||||
servers = ["tcp://localhost:6379"]
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = true
|
||||
`
|
||||
|
||||
func (r *Redis) SampleConfig() string {
|
||||
@@ -109,12 +118,18 @@ func (r *Redis) init(acc telegraf.Accumulator) error {
|
||||
address = u.Host
|
||||
}
|
||||
|
||||
tlsConfig, err := r.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := redis.NewClient(
|
||||
&redis.Options{
|
||||
Addr: address,
|
||||
Password: password,
|
||||
Network: u.Scheme,
|
||||
PoolSize: 1,
|
||||
Addr: address,
|
||||
Password: password,
|
||||
Network: u.Scheme,
|
||||
PoolSize: 1,
|
||||
TLSConfig: tlsConfig,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ The [solr](http://lucene.apache.org/solr/) plugin collects stats via the
|
||||
|
||||
More about [performance statistics](https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference)
|
||||
|
||||
Tested from 3.5 to 6.*
|
||||
Tested from 3.5 to 7.*
|
||||
|
||||
### Configuration:
|
||||
|
||||
|
||||
@@ -113,20 +113,7 @@ type Hitratio interface{}
|
||||
// Cache is an exported type that
|
||||
// contains cache metrics
|
||||
type Cache struct {
|
||||
Stats struct {
|
||||
CumulativeEvictions int64 `json:"cumulative_evictions"`
|
||||
CumulativeHitratio Hitratio `json:"cumulative_hitratio"`
|
||||
CumulativeHits int64 `json:"cumulative_hits"`
|
||||
CumulativeInserts int64 `json:"cumulative_inserts"`
|
||||
CumulativeLookups int64 `json:"cumulative_lookups"`
|
||||
Evictions int64 `json:"evictions"`
|
||||
Hitratio Hitratio `json:"hitratio"`
|
||||
Hits int64 `json:"hits"`
|
||||
Inserts int64 `json:"inserts"`
|
||||
Lookups int64 `json:"lookups"`
|
||||
Size int64 `json:"size"`
|
||||
WarmupTime int64 `json:"warmupTime"`
|
||||
} `json:"stats"`
|
||||
Stats map[string]interface{} `json:"stats"`
|
||||
}
|
||||
|
||||
// NewSolr return a new instance of Solr
|
||||
@@ -424,21 +411,30 @@ func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBe
|
||||
return err
|
||||
}
|
||||
for name, metrics := range cacheMetrics {
|
||||
cumulativeHits := getFloat(metrics.Stats.CumulativeHitratio)
|
||||
hitratio := getFloat(metrics.Stats.Hitratio)
|
||||
coreFields := map[string]interface{}{
|
||||
"cumulative_evictions": metrics.Stats.CumulativeEvictions,
|
||||
"cumulative_hitratio": cumulativeHits,
|
||||
"cumulative_hits": metrics.Stats.CumulativeHits,
|
||||
"cumulative_inserts": metrics.Stats.CumulativeInserts,
|
||||
"cumulative_lookups": metrics.Stats.CumulativeLookups,
|
||||
"evictions": metrics.Stats.Evictions,
|
||||
"hitratio": hitratio,
|
||||
"hits": metrics.Stats.Hits,
|
||||
"inserts": metrics.Stats.Inserts,
|
||||
"lookups": metrics.Stats.Lookups,
|
||||
"size": metrics.Stats.Size,
|
||||
"warmup_time": metrics.Stats.WarmupTime,
|
||||
coreFields := make(map[string]interface{})
|
||||
for key, value := range metrics.Stats {
|
||||
splitKey := strings.Split(key, ".")
|
||||
newKey := splitKey[len(splitKey)-1]
|
||||
switch newKey {
|
||||
case "cumulative_evictions",
|
||||
"cumulative_hits",
|
||||
"cumulative_inserts",
|
||||
"cumulative_lookups",
|
||||
"eviction",
|
||||
"hits",
|
||||
"inserts",
|
||||
"lookups",
|
||||
"size",
|
||||
"evictions":
|
||||
coreFields[newKey] = getInt(value)
|
||||
case "hitratio",
|
||||
"cumulative_hitratio":
|
||||
coreFields[newKey] = getFloat(value)
|
||||
case "warmupTime":
|
||||
coreFields["warmup_time"] = getInt(value)
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
acc.AddFields(
|
||||
"solr_cache",
|
||||
|
||||
@@ -43,6 +43,17 @@ func TestGatherStats(t *testing.T) {
|
||||
map[string]string{"core": "main", "handler": "filterCache"})
|
||||
}
|
||||
|
||||
func TestSolr7MbeansStats(t *testing.T) {
|
||||
ts := createMockSolr7Server()
|
||||
solr := NewSolr()
|
||||
solr.Servers = []string{ts.URL}
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, solr.Gather(&acc))
|
||||
acc.AssertContainsTaggedFields(t, "solr_cache",
|
||||
solr7CacheExpected,
|
||||
map[string]string{"core": "main", "handler": "documentCache"})
|
||||
}
|
||||
|
||||
func TestSolr3GatherStats(t *testing.T) {
|
||||
ts := createMockSolr3Server()
|
||||
solr := NewSolr()
|
||||
@@ -150,3 +161,18 @@ func createMockSolr3Server() *httptest.Server {
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
func createMockSolr7Server() *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, statusResponse)
|
||||
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, mBeansSolr7Response)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
fmt.Fprintln(w, "nope")
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
60
plugins/inputs/solr/testdata7_test.go
Normal file
60
plugins/inputs/solr/testdata7_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package solr
|
||||
|
||||
const mBeansSolr7Response = `
|
||||
{
|
||||
"responseHeader":{
|
||||
"status":0,
|
||||
"QTime":2
|
||||
},
|
||||
"solr-mbeans":[
|
||||
"CORE",
|
||||
{
|
||||
|
||||
},
|
||||
"QUERYHANDLER",
|
||||
{
|
||||
|
||||
},
|
||||
"UPDATEHANDLER",
|
||||
{
|
||||
|
||||
},
|
||||
"CACHE",
|
||||
{
|
||||
"documentCache":{
|
||||
"class":"org.apache.solr.search.LRUCache",
|
||||
"description":"LRU Cache(maxSize=16384, initialSize=4096)",
|
||||
"stats":{
|
||||
"CACHE.searcher.documentCache.evictions": 141485,
|
||||
"CACHE.searcher.documentCache.cumulative_lookups": 265132,
|
||||
"CACHE.searcher.documentCache.hitratio": 0.44,
|
||||
"CACHE.searcher.documentCache.size": 8192,
|
||||
"CACHE.searcher.documentCache.cumulative_hitratio": 0.42,
|
||||
"CACHE.searcher.documentCache.lookups": 1234,
|
||||
"CACHE.searcher.documentCache.warmupTime": 1,
|
||||
"CACHE.searcher.documentCache.inserts": 987,
|
||||
"CACHE.searcher.documentCache.hits": 1111,
|
||||
"CACHE.searcher.documentCache.cumulative_hits": 115364,
|
||||
"CACHE.searcher.documentCache.cumulative_inserts": 149768,
|
||||
"CACHE.searcher.documentCache.cumulative_evictions": 141486
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
var solr7CacheExpected = map[string]interface{}{
|
||||
"evictions": int64(141485),
|
||||
"cumulative_evictions": int64(141486),
|
||||
"cumulative_hitratio": float64(0.42),
|
||||
"cumulative_hits": int64(115364),
|
||||
"cumulative_inserts": int64(149768),
|
||||
"cumulative_lookups": int64(265132),
|
||||
"hitratio": float64(0.44),
|
||||
"hits": int64(1111),
|
||||
"inserts": int64(987),
|
||||
"lookups": int64(1234),
|
||||
"size": int64(8192),
|
||||
"warmup_time": int64(1),
|
||||
}
|
||||
@@ -16,7 +16,7 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details.
|
||||
# mount_points = ["/"]
|
||||
|
||||
## Ignore mount points by filesystem type.
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||
```
|
||||
|
||||
#### Docker container
|
||||
|
||||
@@ -28,7 +28,7 @@ var diskSampleConfig = `
|
||||
# mount_points = ["/"]
|
||||
|
||||
## Ignore mount points by filesystem type.
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||
`
|
||||
|
||||
func (_ *DiskStats) SampleConfig() string {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# tail Input Plugin
|
||||
# Tail Input Plugin
|
||||
|
||||
The tail plugin "tails" a logfile and parses each log message.
|
||||
|
||||
@@ -49,3 +49,7 @@ The plugin expects messages in one of the
|
||||
data_format = "influx"
|
||||
```
|
||||
|
||||
### Metrics:
|
||||
|
||||
Metrics are produced according to the `data_format` option. Additionally a
|
||||
tag labeled `path` is added to the metric containing the filename being tailed.
|
||||
|
||||
@@ -146,7 +146,11 @@ func (t *Tail) receiver(tailer *tail.Tail) {
|
||||
|
||||
m, err = t.parser.ParseLine(text)
|
||||
if err == nil {
|
||||
t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
|
||||
if m != nil {
|
||||
tags := m.Tags()
|
||||
tags["path"] = tailer.Filename
|
||||
t.acc.AddFields(m.Name(), m.Fields(), tags, m.Time())
|
||||
}
|
||||
} else {
|
||||
t.acc.AddError(fmt.Errorf("E! Malformed log line in %s: [%s], Error: %s\n",
|
||||
tailer.Filename, line.Text, err))
|
||||
|
||||
71
plugins/inputs/tengine/README.md
Normal file
71
plugins/inputs/tengine/README.md
Normal file
@@ -0,0 +1,71 @@
|
||||
# Tengine Input Plugin
|
||||
|
||||
The tengine plugin gathers metrics from the
|
||||
[Tengine Web Server](http://tengine.taobao.org/) via the
|
||||
[reqstat](http://tengine.taobao.org/document/http_reqstat.html) module.
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
# Read Tengine's basic status information (ngx_http_reqstat_module)
|
||||
[[inputs.tengine]]
|
||||
## An array of Tengine reqstat module URI to gather stats.
|
||||
urls = ["http://127.0.0.1/us"]
|
||||
|
||||
## HTTP response timeout (default: 5s)
|
||||
# response_timeout = "5s"
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.pem"
|
||||
# tls_key = "/etc/telegraf/key.pem"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
```
|
||||
|
||||
### Metrics:
|
||||
|
||||
- Measurement
|
||||
- tags:
|
||||
- port
|
||||
- server
|
||||
- server_name
|
||||
- fields:
|
||||
- bytes_in (integer, total number of bytes received from client)
|
||||
- bytes_out (integer, total number of bytes sent to client)
|
||||
- conn_total (integer, total number of accepted connections)
|
||||
- req_total (integer, total number of processed requests)
|
||||
- http_2xx (integer, total number of 2xx requests)
|
||||
- http_3xx (integer, total number of 3xx requests)
|
||||
- http_4xx (integer, total number of 4xx requests)
|
||||
- http_5xx (integer, total number of 5xx requests)
|
||||
- http_other_status (integer, total number of other requests)
|
||||
- rt (integer, accumulation or rt)
|
||||
- ups_req (integer, total number of requests calling for upstream)
|
||||
- ups_rt (integer, accumulation or upstream rt)
|
||||
- ups_tries (integer, total number of times calling for upstream)
|
||||
- http_200 (integer, total number of 200 requests)
|
||||
- http_206 (integer, total number of 206 requests)
|
||||
- http_302 (integer, total number of 302 requests)
|
||||
- http_304 (integer, total number of 304 requests)
|
||||
- http_403 (integer, total number of 403 requests)
|
||||
- http_404 (integer, total number of 404 requests)
|
||||
- http_416 (integer, total number of 416 requests)
|
||||
- http_499 (integer, total number of 499 requests)
|
||||
- http_500 (integer, total number of 500 requests)
|
||||
- http_502 (integer, total number of 502 requests)
|
||||
- http_503 (integer, total number of 503 requests)
|
||||
- http_504 (integer, total number of 504 requests)
|
||||
- http_508 (integer, total number of 508 requests)
|
||||
- http_other_detail_status (integer, total number of requests of other status codes*http_ups_4xx total number of requests of upstream 4xx)
|
||||
- http_ups_5xx (integer, total number of requests of upstream 5xx)
|
||||
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=localhost bytes_in=9129i,bytes_out=56334i,conn_total=14i,http_200=90i,http_206=0i,http_2xx=90i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=90i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=28.79.190.35.bc.googleusercontent.com bytes_in=1500i,bytes_out=3009i,conn_total=4i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=1i,http_416=0i,http_499=0i,http_4xx=3i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=4i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=www.google.com bytes_in=372i,bytes_out=786i,conn_total=1i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=1i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=35.190.79.28 bytes_in=4433i,bytes_out=10259i,conn_total=5i,http_200=3i,http_206=0i,http_2xx=3i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=11i,http_416=0i,http_499=0i,http_4xx=11i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=14i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=tenka-prod-api.txwy.tw bytes_in=3014397400i,bytes_out=14279992835i,conn_total=36844i,http_200=3177339i,http_206=0i,http_2xx=3177339i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=123i,http_416=0i,http_499=0i,http_4xx=123i,http_500=17214i,http_502=4453i,http_503=80i,http_504=0i,http_508=0i,http_5xx=21747i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=123i,http_ups_5xx=21747i,req_total=3199209i,rt=245874536i,ups_req=2685076i,ups_rt=245858217i,ups_tries=2685076i 1526546308000000000
|
||||
```
|
||||
338
plugins/inputs/tengine/tengine.go
Normal file
338
plugins/inputs/tengine/tengine.go
Normal file
@@ -0,0 +1,338 @@
|
||||
package tengine
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"io"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Tengine struct {
|
||||
Urls []string
|
||||
ResponseTimeout internal.Duration
|
||||
tls.ClientConfig
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# An array of Tengine reqstat module URI to gather stats.
|
||||
urls = ["http://127.0.0.1/us"]
|
||||
|
||||
# HTTP response timeout (default: 5s)
|
||||
# response_timeout = "5s"
|
||||
|
||||
## Optional TLS Config
|
||||
# tls_ca = "/etc/telegraf/ca.pem"
|
||||
# tls_cert = "/etc/telegraf/cert.cer"
|
||||
# tls_key = "/etc/telegraf/key.key"
|
||||
## Use TLS but skip chain & host verification
|
||||
# insecure_skip_verify = false
|
||||
`
|
||||
|
||||
func (n *Tengine) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (n *Tengine) Description() string {
|
||||
return "Read Tengine's basic status information (ngx_http_reqstat_module)"
|
||||
}
|
||||
|
||||
func (n *Tengine) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Create an HTTP client that is re-used for each
|
||||
// collection interval
|
||||
if n.client == nil {
|
||||
client, err := n.createHttpClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.client = client
|
||||
}
|
||||
|
||||
for _, u := range n.Urls {
|
||||
addr, err := url.Parse(u)
|
||||
if err != nil {
|
||||
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(addr *url.URL) {
|
||||
defer wg.Done()
|
||||
acc.AddError(n.gatherUrl(addr, acc))
|
||||
}(addr)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Tengine) createHttpClient() (*http.Client, error) {
|
||||
tlsCfg, err := n.ClientConfig.TLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n.ResponseTimeout.Duration < time.Second {
|
||||
n.ResponseTimeout.Duration = time.Second * 5
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsCfg,
|
||||
},
|
||||
Timeout: n.ResponseTimeout.Duration,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
type TengineSatus struct {
|
||||
host string
|
||||
bytes_in uint64
|
||||
bytes_out uint64
|
||||
conn_total uint64
|
||||
req_total uint64
|
||||
http_2xx uint64
|
||||
http_3xx uint64
|
||||
http_4xx uint64
|
||||
http_5xx uint64
|
||||
http_other_status uint64
|
||||
rt uint64
|
||||
ups_req uint64
|
||||
ups_rt uint64
|
||||
ups_tries uint64
|
||||
http_200 uint64
|
||||
http_206 uint64
|
||||
http_302 uint64
|
||||
http_304 uint64
|
||||
http_403 uint64
|
||||
http_404 uint64
|
||||
http_416 uint64
|
||||
http_499 uint64
|
||||
http_500 uint64
|
||||
http_502 uint64
|
||||
http_503 uint64
|
||||
http_504 uint64
|
||||
http_508 uint64
|
||||
http_other_detail_status uint64
|
||||
http_ups_4xx uint64
|
||||
http_ups_5xx uint64
|
||||
}
|
||||
|
||||
func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
var tenginestatus TengineSatus
|
||||
resp, err := n.client.Get(addr.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
|
||||
}
|
||||
r := bufio.NewReader(resp.Body)
|
||||
|
||||
for {
|
||||
line, err := r.ReadString('\n')
|
||||
|
||||
if err != nil || io.EOF == err {
|
||||
break
|
||||
}
|
||||
line_split := strings.Split(strings.TrimSpace(line), ",")
|
||||
if len(line_split) != 30 {
|
||||
continue
|
||||
}
|
||||
tenginestatus.host = line_split[0]
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.bytes_in, err = strconv.ParseUint(line_split[1], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.bytes_out, err = strconv.ParseUint(line_split[2], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.conn_total, err = strconv.ParseUint(line_split[3], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.req_total, err = strconv.ParseUint(line_split[4], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_2xx, err = strconv.ParseUint(line_split[5], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_3xx, err = strconv.ParseUint(line_split[6], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_4xx, err = strconv.ParseUint(line_split[7], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_5xx, err = strconv.ParseUint(line_split[8], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_other_status, err = strconv.ParseUint(line_split[9], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.rt, err = strconv.ParseUint(line_split[10], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.ups_req, err = strconv.ParseUint(line_split[11], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.ups_rt, err = strconv.ParseUint(line_split[12], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.ups_tries, err = strconv.ParseUint(line_split[13], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_200, err = strconv.ParseUint(line_split[14], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_206, err = strconv.ParseUint(line_split[15], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_302, err = strconv.ParseUint(line_split[16], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_304, err = strconv.ParseUint(line_split[17], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_403, err = strconv.ParseUint(line_split[18], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_404, err = strconv.ParseUint(line_split[19], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_416, err = strconv.ParseUint(line_split[20], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_499, err = strconv.ParseUint(line_split[21], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_500, err = strconv.ParseUint(line_split[22], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_502, err = strconv.ParseUint(line_split[23], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_503, err = strconv.ParseUint(line_split[24], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_504, err = strconv.ParseUint(line_split[25], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_508, err = strconv.ParseUint(line_split[26], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_other_detail_status, err = strconv.ParseUint(line_split[27], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_ups_4xx, err = strconv.ParseUint(line_split[28], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenginestatus.http_ups_5xx, err = strconv.ParseUint(line_split[29], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tags := getTags(addr, tenginestatus.host)
|
||||
fields := map[string]interface{}{
|
||||
"bytes_in": tenginestatus.bytes_in,
|
||||
"bytes_out": tenginestatus.bytes_out,
|
||||
"conn_total": tenginestatus.conn_total,
|
||||
"req_total": tenginestatus.req_total,
|
||||
"http_2xx": tenginestatus.http_2xx,
|
||||
"http_3xx": tenginestatus.http_3xx,
|
||||
"http_4xx": tenginestatus.http_4xx,
|
||||
"http_5xx": tenginestatus.http_5xx,
|
||||
"http_other_status": tenginestatus.http_other_status,
|
||||
"rt": tenginestatus.rt,
|
||||
"ups_req": tenginestatus.ups_req,
|
||||
"ups_rt": tenginestatus.ups_rt,
|
||||
"ups_tries": tenginestatus.ups_tries,
|
||||
"http_200": tenginestatus.http_200,
|
||||
"http_206": tenginestatus.http_206,
|
||||
"http_302": tenginestatus.http_302,
|
||||
"http_304": tenginestatus.http_304,
|
||||
"http_403": tenginestatus.http_403,
|
||||
"http_404": tenginestatus.http_404,
|
||||
"http_416": tenginestatus.http_416,
|
||||
"http_499": tenginestatus.http_499,
|
||||
"http_500": tenginestatus.http_500,
|
||||
"http_502": tenginestatus.http_502,
|
||||
"http_503": tenginestatus.http_503,
|
||||
"http_504": tenginestatus.http_504,
|
||||
"http_508": tenginestatus.http_508,
|
||||
"http_other_detail_status": tenginestatus.http_other_detail_status,
|
||||
"http_ups_4xx": tenginestatus.http_ups_4xx,
|
||||
"http_ups_5xx": tenginestatus.http_ups_5xx,
|
||||
}
|
||||
acc.AddFields("tengine", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get tag(s) for the tengine plugin
|
||||
func getTags(addr *url.URL, server_name string) map[string]string {
|
||||
h := addr.Host
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
host = addr.Host
|
||||
if addr.Scheme == "http" {
|
||||
port = "80"
|
||||
} else if addr.Scheme == "https" {
|
||||
port = "443"
|
||||
} else {
|
||||
port = ""
|
||||
}
|
||||
}
|
||||
return map[string]string{"server": host, "port": port, "server_name": server_name}
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("tengine", func() telegraf.Input {
|
||||
return &Tengine{}
|
||||
})
|
||||
}
|
||||
97
plugins/inputs/tengine/tengine_test.go
Normal file
97
plugins/inputs/tengine/tengine_test.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package tengine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const tengineSampleResponse = `127.0.0.1,784,1511,2,2,1,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0`
|
||||
|
||||
// Verify that tengine tags are properly parsed based on the server
|
||||
func TestTengineTags(t *testing.T) {
|
||||
urls := []string{"http://localhost/us", "http://localhost:80/us"}
|
||||
var addr *url.URL
|
||||
for _, url1 := range urls {
|
||||
addr, _ = url.Parse(url1)
|
||||
tagMap := getTags(addr, "127.0.0.1")
|
||||
assert.Contains(t, tagMap["server"], "localhost")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTengineGeneratesMetrics(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var rsp string
|
||||
rsp = tengineSampleResponse
|
||||
fmt.Fprintln(w, rsp)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
n := &Tengine{
|
||||
Urls: []string{fmt.Sprintf("%s/us", ts.URL)},
|
||||
}
|
||||
|
||||
var acc_tengine testutil.Accumulator
|
||||
|
||||
err_tengine := acc_tengine.GatherError(n.Gather)
|
||||
|
||||
require.NoError(t, err_tengine)
|
||||
|
||||
fields_tengine := map[string]interface{}{
|
||||
"bytes_in": uint64(784),
|
||||
"bytes_out": uint64(1511),
|
||||
"conn_total": uint64(2),
|
||||
"req_total": uint64(2),
|
||||
"http_2xx": uint64(1),
|
||||
"http_3xx": uint64(0),
|
||||
"http_4xx": uint64(1),
|
||||
"http_5xx": uint64(0),
|
||||
"http_other_status": uint64(0),
|
||||
"rt": uint64(0),
|
||||
"ups_req": uint64(0),
|
||||
"ups_rt": uint64(0),
|
||||
"ups_tries": uint64(0),
|
||||
"http_200": uint64(1),
|
||||
"http_206": uint64(0),
|
||||
"http_302": uint64(0),
|
||||
"http_304": uint64(0),
|
||||
"http_403": uint64(0),
|
||||
"http_404": uint64(1),
|
||||
"http_416": uint64(0),
|
||||
"http_499": uint64(0),
|
||||
"http_500": uint64(0),
|
||||
"http_502": uint64(0),
|
||||
"http_503": uint64(0),
|
||||
"http_504": uint64(0),
|
||||
"http_508": uint64(0),
|
||||
"http_other_detail_status": uint64(0),
|
||||
"http_ups_4xx": uint64(0),
|
||||
"http_ups_5xx": uint64(0),
|
||||
}
|
||||
|
||||
addr, err := url.Parse(ts.URL)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(addr.Host)
|
||||
if err != nil {
|
||||
host = addr.Host
|
||||
if addr.Scheme == "http" {
|
||||
port = "80"
|
||||
} else if addr.Scheme == "https" {
|
||||
port = "443"
|
||||
} else {
|
||||
port = ""
|
||||
}
|
||||
}
|
||||
tags := map[string]string{"server": host, "port": port, "server_name": "127.0.0.1"}
|
||||
acc_tengine.AssertContainsTaggedFields(t, "tengine", fields_tengine, tags)
|
||||
}
|
||||
@@ -1,16 +1,13 @@
|
||||
# win_perf_counters readme
|
||||
|
||||
Input plugin to read Performance Counters on Windows operating systems.
|
||||
This document presents the input plugin to read Performance Counters on Windows operating systems.
|
||||
|
||||
Configuration is parsed and then tested for validity such as
|
||||
The configuration is parsed and then tested for validity, such as
|
||||
whether the Object, Instance and Counter exist on Telegraf startup.
|
||||
|
||||
Counter paths are refreshed periodically, see [CountersRefreshInterval](#countersrefreshinterval)
|
||||
Counter paths are refreshed periodically, see the [CountersRefreshInterval](#countersrefreshinterval)
|
||||
configuration parameter for more info.
|
||||
|
||||
Wildcards can be used in instance and counter names. Partial wildcards are supported only
|
||||
in instance names on Windows Vista and newer.
|
||||
|
||||
In case of query for all instances `["*"]`, the plugin does not return the instance `_Total`
|
||||
by default. See [IncludeTotal](#includetotal) for more info.
|
||||
|
||||
@@ -19,7 +16,7 @@ by default. See [IncludeTotal](#includetotal) for more info.
|
||||
The examples contained in this file have been found on the internet
|
||||
as counters used when performance monitoring
|
||||
Active Directory and IIS in particular.
|
||||
There are a lot other good objects to monitor, if you know what to look for.
|
||||
There are a lot of other good objects to monitor, if you know what to look for.
|
||||
This file is likely to be updated in the future with more examples for
|
||||
useful configurations for separate scenarios.
|
||||
|
||||
@@ -34,23 +31,41 @@ Bool, if set to `true` will print out all matching performance objects.
|
||||
Example:
|
||||
`PrintValid=true`
|
||||
|
||||
#### UseWildcardsExpansion
|
||||
|
||||
If `UseWildcardsExpansion` is set to true, wildcards can be used in the
|
||||
instance name and the counter name. When using localized Windows, counters
|
||||
will be also be localized. Instance indexes will also be returned in the
|
||||
instance name.
|
||||
|
||||
Partial wildcards (e.g. `chrome*`) are supported only in the instance name on Windows Vista and newer.
|
||||
|
||||
If disabled, wildcards (not partial) in instance names can still be used, but
|
||||
instance indexes will not be returned in the instance names.
|
||||
|
||||
Example:
|
||||
`UseWildcardsExpansion=true`
|
||||
|
||||
#### CountersRefreshInterval
|
||||
|
||||
Configured counters are matched against available counters at the interval
|
||||
specified by the `CountersRefreshInterval` parameter. Default value is `1m` (1 minute).
|
||||
specified by the `CountersRefreshInterval` parameter. The default value is `1m` (1 minute).
|
||||
|
||||
If wildcards are used in instance or counter names, they are expanded at this point.
|
||||
If wildcards are used in instance or counter names, they are expanded at this point, if the `UseWildcardsExpansion` param is set to `true`.
|
||||
|
||||
Setting `CountersRefreshInterval` too low (order of seconds) can cause Telegraf to create
|
||||
Setting the `CountersRefreshInterval` too low (order of seconds) can cause Telegraf to create
|
||||
a high CPU load.
|
||||
|
||||
Set to `0s` to disable periodic refreshing.
|
||||
Set it to `0s` to disable periodic refreshing.
|
||||
|
||||
Example:
|
||||
`CountersRefreshInterval=1m`
|
||||
|
||||
#### PreVistaSupport
|
||||
|
||||
_Deprecated. Necessary features on Windows Vista and newer are checked dynamically_
|
||||
|
||||
Bool, if set to `true` will use the localized PerfCounter interface that has been present since before Vista for backwards compatability.
|
||||
Bool, if set to `true`, the plugin will use the localized PerfCounter interface that has been present since before Vista for backwards compatability.
|
||||
|
||||
It is recommended NOT to use this on OSes starting with Vista and newer because it requires more configuration to use this than the newer interface present since Vista.
|
||||
|
||||
@@ -62,12 +77,12 @@ Example for Windows Server 2003, this would be set to true:
|
||||
See Entry below.
|
||||
|
||||
### Entry
|
||||
A new configuration entry consists of the TOML header to start with,
|
||||
A new configuration entry consists of the TOML header starting with,
|
||||
`[[inputs.win_perf_counters.object]]`.
|
||||
This must follow before other plugin configurations,
|
||||
beneath the main win_perf_counters entry, `[[inputs.win_perf_counters]]`.
|
||||
|
||||
Following this are 3 required key/value pairs and the three optional parameters and their usage.
|
||||
Following this are 3 required key/value pairs and three optional parameters and their usage.
|
||||
|
||||
#### ObjectName
|
||||
**Required**
|
||||
@@ -79,16 +94,18 @@ Example: `ObjectName = "LogicalDisk"`
|
||||
#### Instances
|
||||
**Required**
|
||||
|
||||
Instances key (this is an array) is the instances of a counter you would like returned,
|
||||
The instances key (this is an array) declares the instances of a counter you would like returned,
|
||||
it can be one or more values.
|
||||
|
||||
Example, `Instances = ["C:","D:","E:"]` will return only for the instances
|
||||
Example: `Instances = ["C:","D:","E:"]`
|
||||
|
||||
This will return only for the instances
|
||||
C:, D: and E: where relevant. To get all instances of a Counter, use `["*"]` only.
|
||||
By default any results containing `_Total` are stripped,
|
||||
unless this is specified as the wanted instance.
|
||||
Alternatively see the option `IncludeTotal` below.
|
||||
|
||||
It is also possible to set partial wildcards, eg. `["chrome*"]`
|
||||
It is also possible to set partial wildcards, eg. `["chrome*"]`, if the `UseWildcardsExpansion` param is set to `true`
|
||||
|
||||
Some Objects do not have instances to select from at all.
|
||||
Here only one option is valid if you want data back,
|
||||
@@ -97,41 +114,43 @@ and that is to specify `Instances = ["------"]`.
|
||||
#### Counters
|
||||
**Required**
|
||||
|
||||
Counters key (this is an array) is the counters of the ObjectName
|
||||
The Counters key (this is an array) declares the counters of the ObjectName
|
||||
you would like returned, it can also be one or more values.
|
||||
|
||||
Example: `Counters = ["% Idle Time", "% Disk Read Time", "% Disk Write Time"]`
|
||||
This must be specified for every counter you want the results of,
|
||||
or use `["*"]` for all the counters for object.
|
||||
|
||||
This must be specified for every counter you want the results of, or use
|
||||
`["*"]` for all the counters of the object, if the `UseWildcardsExpansion` param
|
||||
is set to `true`.
|
||||
|
||||
#### Measurement
|
||||
*Optional*
|
||||
|
||||
This key is optional, if it is not set it will be `win_perf_counters`.
|
||||
In InfluxDB this is the key by which the returned data is stored underneath,
|
||||
so for ordering your data in a good manner,
|
||||
This key is optional. If it is not set it will be `win_perf_counters`.
|
||||
In InfluxDB this is the key underneath which the returned data is stored.
|
||||
So for ordering your data in a good manner,
|
||||
this is a good key to set with a value when you want your IIS and Disk results stored
|
||||
separately from Processor results.
|
||||
|
||||
Example: `Measurement = "win_disk"
|
||||
Example: `Measurement = "win_disk"``
|
||||
|
||||
#### IncludeTotal
|
||||
*Optional*
|
||||
|
||||
This key is optional, it is a simple bool.
|
||||
This key is optional. It is a simple bool.
|
||||
If it is not set to true or included it is treated as false.
|
||||
This key only has an effect if the Instances key is set to `["*"]`
|
||||
and you would also like all instances containing `_Total` returned,
|
||||
This key only has effect if the Instances key is set to `["*"]`
|
||||
and you would also like all instances containing `_Total` to be returned,
|
||||
like `_Total`, `0,_Total` and so on where applicable
|
||||
(Processor Information is one example).
|
||||
|
||||
#### WarnOnMissing
|
||||
*Optional*
|
||||
|
||||
This key is optional, it is a simple bool.
|
||||
This key is optional. It is a simple bool.
|
||||
If it is not set to true or included it is treated as false.
|
||||
This only has an effect on the first execution of the plugin,
|
||||
it will print out any ObjectName/Instance/Counter combinations
|
||||
This only has effect on the first execution of the plugin.
|
||||
It will print out any ObjectName/Instance/Counter combinations
|
||||
asked for that do not match. Useful when debugging new configurations.
|
||||
|
||||
#### FailOnMissing
|
||||
|
||||
@@ -352,7 +352,7 @@ func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32,
|
||||
// time.Sleep(2000 * time.Millisecond)
|
||||
// }
|
||||
// }
|
||||
func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
|
||||
func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *byte) uint32 {
|
||||
ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
|
||||
uintptr(hCounter),
|
||||
uintptr(PDH_FMT_DOUBLE|PDH_FMT_NOCAP100),
|
||||
|
||||
@@ -9,6 +9,12 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//PerformanceQuery is abstraction for PDH_FMT_COUNTERVALUE_ITEM_DOUBLE
|
||||
type CounterValue struct {
|
||||
InstanceName string
|
||||
Value float64
|
||||
}
|
||||
|
||||
//PerformanceQuery provides wrappers around Windows performance counters API for easy usage in GO
|
||||
type PerformanceQuery interface {
|
||||
Open() error
|
||||
@@ -18,6 +24,7 @@ type PerformanceQuery interface {
|
||||
GetCounterPath(counterHandle PDH_HCOUNTER) (string, error)
|
||||
ExpandWildCardPath(counterPath string) ([]string, error)
|
||||
GetFormattedCounterValueDouble(hCounter PDH_HCOUNTER) (float64, error)
|
||||
GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error)
|
||||
CollectData() error
|
||||
AddEnglishCounterSupported() bool
|
||||
}
|
||||
@@ -151,6 +158,28 @@ func (m *PerformanceQueryImpl) GetFormattedCounterValueDouble(hCounter PDH_HCOUN
|
||||
}
|
||||
}
|
||||
|
||||
func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) {
|
||||
var buffSize uint32
|
||||
var itemCount uint32
|
||||
ret := PdhGetFormattedCounterArrayDouble(hCounter, &buffSize, &itemCount, nil)
|
||||
if ret == PDH_MORE_DATA {
|
||||
buff := make([]byte, buffSize)
|
||||
ret = PdhGetFormattedCounterArrayDouble(hCounter, &buffSize, &itemCount, &buff[0])
|
||||
if ret == ERROR_SUCCESS {
|
||||
items := (*[1 << 20]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE)(unsafe.Pointer(&buff[0]))[:itemCount]
|
||||
values := make([]CounterValue, 0, itemCount)
|
||||
for _, item := range items {
|
||||
if item.FmtValue.CStatus == PDH_CSTATUS_VALID_DATA || item.FmtValue.CStatus == PDH_CSTATUS_NEW_DATA {
|
||||
val := CounterValue{UTF16PtrToString(item.SzName), item.FmtValue.DoubleValue}
|
||||
values = append(values, val)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
}
|
||||
return nil, NewPdhError(ret)
|
||||
}
|
||||
|
||||
func (m *PerformanceQueryImpl) CollectData() error {
|
||||
if m.query == 0 {
|
||||
return errors.New("uninitialised query")
|
||||
@@ -181,7 +210,7 @@ func UTF16ToStringArray(buf []uint16) []string {
|
||||
stringLine := UTF16PtrToString(&buf[0])
|
||||
for stringLine != "" {
|
||||
strings = append(strings, stringLine)
|
||||
nextLineStart += len(stringLine) + 1
|
||||
nextLineStart += len([]rune(stringLine)) + 1
|
||||
remainingBuf := buf[nextLineStart:]
|
||||
stringLine = UTF16PtrToString(&remainingBuf[0])
|
||||
}
|
||||
|
||||
@@ -5,13 +5,14 @@ package win_perf_counters
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
var sampleConfig = `
|
||||
@@ -22,6 +23,10 @@ var sampleConfig = `
|
||||
## agent, it will not be gathered.
|
||||
## Settings:
|
||||
# PrintValid = false # Print All matching performance counters
|
||||
# If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded
|
||||
# and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names.
|
||||
# If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
|
||||
#UseWildcardsExpansion = false
|
||||
# Period after which counters will be reread from configuration and wildcards in counter paths expanded
|
||||
CountersRefreshInterval="1m"
|
||||
|
||||
@@ -75,6 +80,7 @@ type Win_PerfCounters struct {
|
||||
PreVistaSupport bool
|
||||
Object []perfobject
|
||||
CountersRefreshInterval internal.Duration
|
||||
UseWildcardsExpansion bool
|
||||
|
||||
lastRefreshed time.Time
|
||||
counters []*counter
|
||||
@@ -137,45 +143,59 @@ func (m *Win_PerfCounters) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (m *Win_PerfCounters) AddItem(counterPath string, instance string, measurement string, includeTotal bool) error {
|
||||
//objectName string, counter string, instance string, measurement string, include_total bool
|
||||
func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instance string, counterName string, measurement string, includeTotal bool) error {
|
||||
var err error
|
||||
var counterHandle PDH_HCOUNTER
|
||||
if !m.query.AddEnglishCounterSupported() {
|
||||
_, err := m.query.AddCounterToQuery(counterPath)
|
||||
counterHandle, err = m.query.AddCounterToQuery(counterPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
counterHandle, err := m.query.AddEnglishCounterToQuery(counterPath)
|
||||
counterHandle, err = m.query.AddEnglishCounterToQuery(counterPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if m.UseWildcardsExpansion {
|
||||
origInstance := instance
|
||||
counterPath, err = m.query.GetCounterPath(counterHandle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
counters, err := m.query.ExpandWildCardPath(counterPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, counterPath := range counters {
|
||||
var err error
|
||||
counterHandle, err := m.query.AddCounterToQuery(counterPath)
|
||||
|
||||
parsedObjectName, parsedInstance, parsedCounter, err := extractObjectInstanceCounterFromQuery(counterPath)
|
||||
counters, err := m.query.ExpandWildCardPath(counterPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if parsedInstance == "_Total" && instance == "*" && !includeTotal {
|
||||
continue
|
||||
}
|
||||
for _, counterPath := range counters {
|
||||
var err error
|
||||
counterHandle, err := m.query.AddCounterToQuery(counterPath)
|
||||
|
||||
newItem := &counter{counterPath, parsedObjectName, parsedCounter, parsedInstance, measurement,
|
||||
objectName, instance, counterName, err = extractObjectInstanceCounterFromQuery(counterPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if instance == "_Total" && origInstance == "*" && !includeTotal {
|
||||
continue
|
||||
}
|
||||
|
||||
newItem := &counter{counterPath, objectName, counterName, instance, measurement,
|
||||
includeTotal, counterHandle}
|
||||
m.counters = append(m.counters, newItem)
|
||||
|
||||
if m.PrintValid {
|
||||
log.Printf("Valid: %s\n", counterPath)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
newItem := &counter{counterPath, objectName, counterName, instance, measurement,
|
||||
includeTotal, counterHandle}
|
||||
m.counters = append(m.counters, newItem)
|
||||
|
||||
if m.PrintValid {
|
||||
log.Printf("Valid: %s\n", counterPath)
|
||||
}
|
||||
@@ -199,7 +219,7 @@ func (m *Win_PerfCounters) ParseConfig() error {
|
||||
counterPath = "\\" + objectname + "(" + instance + ")\\" + counter
|
||||
}
|
||||
|
||||
err := m.AddItem(counterPath, instance, PerfObject.Measurement, PerfObject.IncludeTotal)
|
||||
err := m.AddItem(counterPath, objectname, instance, counter, PerfObject.Measurement, PerfObject.IncludeTotal)
|
||||
|
||||
if err != nil {
|
||||
if PerfObject.FailOnMissing || PerfObject.WarnOnMissing {
|
||||
@@ -225,7 +245,9 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
|
||||
var err error
|
||||
|
||||
if m.lastRefreshed.IsZero() || (m.CountersRefreshInterval.Duration.Nanoseconds() > 0 && m.lastRefreshed.Add(m.CountersRefreshInterval.Duration).Before(time.Now())) {
|
||||
m.counters = m.counters[:0]
|
||||
if m.counters != nil {
|
||||
m.counters = m.counters[:0]
|
||||
}
|
||||
|
||||
err = m.query.Open()
|
||||
if err != nil {
|
||||
@@ -261,22 +283,61 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
|
||||
// For iterate over the known metrics and get the samples.
|
||||
for _, metric := range m.counters {
|
||||
// collect
|
||||
value, err := m.query.GetFormattedCounterValueDouble(metric.counterHandle)
|
||||
if err == nil {
|
||||
measurement := sanitizedChars.Replace(metric.measurement)
|
||||
if measurement == "" {
|
||||
measurement = "win_perf_counters"
|
||||
}
|
||||
if m.UseWildcardsExpansion {
|
||||
value, err := m.query.GetFormattedCounterValueDouble(metric.counterHandle)
|
||||
if err == nil {
|
||||
measurement := sanitizedChars.Replace(metric.measurement)
|
||||
if measurement == "" {
|
||||
measurement = "win_perf_counters"
|
||||
}
|
||||
|
||||
var instance = InstanceGrouping{measurement, metric.instance, metric.objectName}
|
||||
if collectFields[instance] == nil {
|
||||
collectFields[instance] = make(map[string]interface{})
|
||||
var instance = InstanceGrouping{measurement, metric.instance, metric.objectName}
|
||||
if collectFields[instance] == nil {
|
||||
collectFields[instance] = make(map[string]interface{})
|
||||
}
|
||||
collectFields[instance][sanitizedChars.Replace(metric.counter)] = float32(value)
|
||||
} else {
|
||||
//ignore invalid data from as some counters from process instances returns this sometimes
|
||||
if phderr, ok := err.(*PdhError); ok && phderr.ErrorCode != PDH_INVALID_DATA && phderr.ErrorCode != PDH_CALC_NEGATIVE_VALUE {
|
||||
return fmt.Errorf("error while getting value for counter %s: %v", metric.counterPath, err)
|
||||
}
|
||||
}
|
||||
collectFields[instance][sanitizedChars.Replace(metric.counter)] = float32(value)
|
||||
} else {
|
||||
//ignore invalid data from as some counters from process instances returns this sometimes
|
||||
if phderr, ok := err.(*PdhError); ok && phderr.ErrorCode != PDH_INVALID_DATA && phderr.ErrorCode != PDH_CALC_NEGATIVE_VALUE {
|
||||
return fmt.Errorf("error while getting value for counter %s: %v", metric.counterPath, err)
|
||||
counterValues, err := m.query.GetFormattedCounterArrayDouble(metric.counterHandle)
|
||||
if err == nil {
|
||||
for _, cValue := range counterValues {
|
||||
var add bool
|
||||
if metric.includeTotal {
|
||||
// If IncludeTotal is set, include all.
|
||||
add = true
|
||||
} else if metric.instance == "*" && !strings.Contains(cValue.InstanceName, "_Total") {
|
||||
// Catch if set to * and that it is not a '*_Total*' instance.
|
||||
add = true
|
||||
} else if metric.instance == cValue.InstanceName {
|
||||
// Catch if we set it to total or some form of it
|
||||
add = true
|
||||
} else if strings.Contains(metric.instance, "#") && strings.HasPrefix(metric.instance, cValue.InstanceName) {
|
||||
// If you are using a multiple instance identifier such as "w3wp#1"
|
||||
// phd.dll returns only the first 2 characters of the identifier.
|
||||
add = true
|
||||
cValue.InstanceName = metric.instance
|
||||
} else if metric.instance == "------" {
|
||||
add = true
|
||||
}
|
||||
|
||||
if add {
|
||||
measurement := sanitizedChars.Replace(metric.measurement)
|
||||
if measurement == "" {
|
||||
measurement = "win_perf_counters"
|
||||
}
|
||||
var instance = InstanceGrouping{measurement, cValue.InstanceName, metric.objectName}
|
||||
|
||||
if collectFields[instance] == nil {
|
||||
collectFields[instance] = make(map[string]interface{})
|
||||
}
|
||||
collectFields[instance][sanitizedChars.Replace(metric.counter)] = float32(cValue.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,6 +81,29 @@ func TestWinPerformanceQueryImpl(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, paths)
|
||||
assert.True(t, len(paths) > 1)
|
||||
|
||||
err = query.Open()
|
||||
require.NoError(t, err)
|
||||
|
||||
counterPath = "\\Process(*)\\% Processor Time"
|
||||
hCounter, err = query.AddEnglishCounterToQuery(counterPath)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, 0, hCounter)
|
||||
|
||||
err = query.CollectData()
|
||||
require.NoError(t, err)
|
||||
time.Sleep(time.Second)
|
||||
|
||||
err = query.CollectData()
|
||||
require.NoError(t, err)
|
||||
|
||||
arr, err := query.GetFormattedCounterArrayDouble(hCounter)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, len(arr) > 0, "Too")
|
||||
|
||||
err = query.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestWinPerfcountersConfigGet1(t *testing.T) {
|
||||
@@ -573,7 +596,7 @@ func TestWinPerfcountersCollect2(t *testing.T) {
|
||||
|
||||
perfobjects[0] = PerfObject
|
||||
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}, UseWildcardsExpansion: true}
|
||||
var acc testutil.Accumulator
|
||||
err := m.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -25,6 +25,14 @@ type FakePerformanceQuery struct {
|
||||
openCalled bool
|
||||
}
|
||||
|
||||
func (m *testCounter) ToCounterValue() *CounterValue {
|
||||
_, inst, _, _ := extractObjectInstanceCounterFromQuery(m.path)
|
||||
if inst == "" {
|
||||
inst = "--"
|
||||
}
|
||||
return &CounterValue{inst, m.value}
|
||||
}
|
||||
|
||||
func (m *FakePerformanceQuery) Open() error {
|
||||
if m.openCalled {
|
||||
err := m.Close()
|
||||
@@ -102,6 +110,48 @@ func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_
|
||||
}
|
||||
return 0, fmt.Errorf("GetFormattedCounterValueDouble: invalid handle: %d", counterHandle)
|
||||
}
|
||||
func (m *FakePerformanceQuery) findCounterByPath(counterPath string) *testCounter {
|
||||
for _, c := range m.counters {
|
||||
if c.path == counterPath {
|
||||
return &c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FakePerformanceQuery) findCounterByHandle(counterHandle PDH_HCOUNTER) *testCounter {
|
||||
for _, c := range m.counters {
|
||||
if c.handle == counterHandle {
|
||||
return &c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) {
|
||||
if !m.openCalled {
|
||||
return nil, errors.New("GetFormattedCounterArrayDouble: uninitialised query")
|
||||
}
|
||||
for _, c := range m.counters {
|
||||
if c.handle == hCounter {
|
||||
if e, ok := m.expandPaths[c.path]; ok {
|
||||
counters := make([]CounterValue, 0, len(e))
|
||||
for _, p := range e {
|
||||
counter := m.findCounterByPath(p)
|
||||
if counter != nil && counter.value > 0 {
|
||||
counters = append(counters, *counter.ToCounterValue())
|
||||
} else {
|
||||
return nil, fmt.Errorf("GetFormattedCounterArrayDouble: invalid counter : %s", p)
|
||||
}
|
||||
}
|
||||
return counters, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("GetFormattedCounterArrayDouble: invalid counter : %d", hCounter)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("GetFormattedCounterArrayDouble: invalid counter : %d, no paths found", hCounter)
|
||||
}
|
||||
|
||||
func (m *FakePerformanceQuery) CollectData() error {
|
||||
if !m.openCalled {
|
||||
@@ -152,7 +202,7 @@ func TestAddItemSimple(t *testing.T) {
|
||||
}}
|
||||
err = m.query.Open()
|
||||
require.NoError(t, err)
|
||||
err = m.AddItem(cps1[0], "I", "test", false)
|
||||
err = m.AddItem(cps1[0], "O", "I", "c", "test", false)
|
||||
require.NoError(t, err)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
@@ -161,7 +211,7 @@ func TestAddItemSimple(t *testing.T) {
|
||||
func TestAddItemInvalidCountPath(t *testing.T) {
|
||||
var err error
|
||||
cps1 := []string{"\\O\\C"}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: nil, query: &FakePerformanceQuery{
|
||||
m := Win_PerfCounters{PrintValid: false, Object: nil, UseWildcardsExpansion: true, query: &FakePerformanceQuery{
|
||||
counters: createCounterMap(cps1, []float64{1.1}),
|
||||
expandPaths: map[string][]string{
|
||||
cps1[0]: {"\\O/C"},
|
||||
@@ -170,7 +220,7 @@ func TestAddItemInvalidCountPath(t *testing.T) {
|
||||
}}
|
||||
err = m.query.Open()
|
||||
require.NoError(t, err)
|
||||
err = m.AddItem("\\O\\C", "*", "test", false)
|
||||
err = m.AddItem("\\O\\C", "O", "------", "C", "test", false)
|
||||
require.Error(t, err)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
@@ -197,13 +247,24 @@ func TestParseConfigBasic(t *testing.T) {
|
||||
assert.Len(t, m.counters, 4)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
m.UseWildcardsExpansion = true
|
||||
m.counters = nil
|
||||
|
||||
err = m.query.Open()
|
||||
require.NoError(t, err)
|
||||
err = m.ParseConfig()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, m.counters, 4)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestParseConfigNoInstance(t *testing.T) {
|
||||
var err error
|
||||
perfObjects := createPerfObject("m", "O", []string{"------"}, []string{"C1", "C2"}, false, false)
|
||||
cps1 := []string{"\\O\\C1", "\\O\\C2"}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: &FakePerformanceQuery{
|
||||
counters: createCounterMap(cps1, []float64{1.1, 1.2}),
|
||||
expandPaths: map[string][]string{
|
||||
cps1[0]: {cps1[0]},
|
||||
@@ -218,6 +279,17 @@ func TestParseConfigNoInstance(t *testing.T) {
|
||||
assert.Len(t, m.counters, 2)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
m.UseWildcardsExpansion = true
|
||||
m.counters = nil
|
||||
|
||||
err = m.query.Open()
|
||||
require.NoError(t, err)
|
||||
err = m.ParseConfig()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, m.counters, 2)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestParseConfigInvalidCounterError(t *testing.T) {
|
||||
@@ -239,6 +311,16 @@ func TestParseConfigInvalidCounterError(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
m.UseWildcardsExpansion = true
|
||||
m.counters = nil
|
||||
|
||||
err = m.query.Open()
|
||||
require.NoError(t, err)
|
||||
err = m.ParseConfig()
|
||||
require.Error(t, err)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestParseConfigInvalidCounterNoError(t *testing.T) {
|
||||
@@ -260,13 +342,24 @@ func TestParseConfigInvalidCounterNoError(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
m.UseWildcardsExpansion = true
|
||||
m.counters = nil
|
||||
|
||||
err = m.query.Open()
|
||||
require.NoError(t, err)
|
||||
err = m.ParseConfig()
|
||||
require.NoError(t, err)
|
||||
err = m.query.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestParseConfigTotal(t *testing.T) {
|
||||
func TestParseConfigTotalExpansion(t *testing.T) {
|
||||
var err error
|
||||
perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, true, true)
|
||||
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}),
|
||||
expandPaths: map[string][]string{
|
||||
"\\O(*)\\*": cps1,
|
||||
@@ -283,7 +376,7 @@ func TestParseConfigTotal(t *testing.T) {
|
||||
|
||||
perfObjects[0].IncludeTotal = false
|
||||
|
||||
m = Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
m = Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}),
|
||||
expandPaths: map[string][]string{
|
||||
"\\O(*)\\*": cps1,
|
||||
@@ -303,7 +396,7 @@ func TestParseConfigExpand(t *testing.T) {
|
||||
var err error
|
||||
perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, false, false)
|
||||
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}),
|
||||
expandPaths: map[string][]string{
|
||||
"\\O(*)\\*": cps1,
|
||||
@@ -346,6 +439,17 @@ func TestSimpleGather(t *testing.T) {
|
||||
"objectname": "O",
|
||||
}
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
m.UseWildcardsExpansion = true
|
||||
m.counters = nil
|
||||
m.lastRefreshed = time.Time{}
|
||||
|
||||
var acc2 testutil.Accumulator
|
||||
|
||||
err = m.Gather(&acc2)
|
||||
require.NoError(t, err)
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
}
|
||||
|
||||
func TestGatherInvalidDataIgnore(t *testing.T) {
|
||||
@@ -377,15 +481,25 @@ func TestGatherInvalidDataIgnore(t *testing.T) {
|
||||
"objectname": "O",
|
||||
}
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
m.UseWildcardsExpansion = true
|
||||
m.counters = nil
|
||||
m.lastRefreshed = time.Time{}
|
||||
|
||||
var acc2 testutil.Accumulator
|
||||
err = m.Gather(&acc2)
|
||||
require.NoError(t, err)
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
}
|
||||
|
||||
func TestGatherRefreshing(t *testing.T) {
|
||||
//tests with expansion
|
||||
func TestGatherRefreshingWithExpansion(t *testing.T) {
|
||||
var err error
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping long taking test in short mode")
|
||||
}
|
||||
measurement := "test"
|
||||
perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"*"}, false, false)
|
||||
perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"*"}, true, false)
|
||||
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"}
|
||||
fpm := &FakePerformanceQuery{
|
||||
counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}),
|
||||
@@ -394,7 +508,7 @@ func TestGatherRefreshing(t *testing.T) {
|
||||
},
|
||||
addEnglishSupported: true,
|
||||
}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: true, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}}
|
||||
var acc1 testutil.Accumulator
|
||||
err = m.Gather(&acc1)
|
||||
assert.Len(t, m.counters, 4)
|
||||
@@ -463,3 +577,211 @@ func TestGatherRefreshing(t *testing.T) {
|
||||
acc3.AssertContainsTaggedFields(t, measurement, fields3, tags3)
|
||||
|
||||
}
|
||||
|
||||
func TestGatherRefreshingWithoutExpansion(t *testing.T) {
|
||||
var err error
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping long taking test in short mode")
|
||||
}
|
||||
measurement := "test"
|
||||
perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, false)
|
||||
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"}
|
||||
fpm := &FakePerformanceQuery{
|
||||
counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}),
|
||||
expandPaths: map[string][]string{
|
||||
"\\O(*)\\C1": {cps1[0], cps1[2]},
|
||||
"\\O(*)\\C2": {cps1[1], cps1[3]},
|
||||
},
|
||||
addEnglishSupported: true,
|
||||
}
|
||||
m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}}
|
||||
var acc1 testutil.Accumulator
|
||||
err = m.Gather(&acc1)
|
||||
assert.Len(t, m.counters, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, acc1.Metrics, 2)
|
||||
|
||||
fields1 := map[string]interface{}{
|
||||
"C1": float32(1.1),
|
||||
"C2": float32(1.2),
|
||||
}
|
||||
tags1 := map[string]string{
|
||||
"instance": "I1",
|
||||
"objectname": "O",
|
||||
}
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
fields2 := map[string]interface{}{
|
||||
"C1": float32(1.3),
|
||||
"C2": float32(1.4),
|
||||
}
|
||||
tags2 := map[string]string{
|
||||
"instance": "I2",
|
||||
"objectname": "O",
|
||||
}
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2)
|
||||
//test finding new instance
|
||||
cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"}
|
||||
fpm = &FakePerformanceQuery{
|
||||
counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps2...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}),
|
||||
expandPaths: map[string][]string{
|
||||
"\\O(*)\\C1": {cps2[0], cps2[2], cps2[4]},
|
||||
"\\O(*)\\C2": {cps2[1], cps2[3], cps2[5]},
|
||||
},
|
||||
addEnglishSupported: true,
|
||||
}
|
||||
m.query = fpm
|
||||
fpm.Open()
|
||||
var acc2 testutil.Accumulator
|
||||
|
||||
fields3 := map[string]interface{}{
|
||||
"C1": float32(1.5),
|
||||
"C2": float32(1.6),
|
||||
}
|
||||
tags3 := map[string]string{
|
||||
"instance": "I3",
|
||||
"objectname": "O",
|
||||
}
|
||||
|
||||
//test before elapsing CounterRefreshRate counters are not refreshed
|
||||
err = m.Gather(&acc2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, m.counters, 2)
|
||||
assert.Len(t, acc2.Metrics, 3)
|
||||
|
||||
acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2)
|
||||
acc2.AssertContainsTaggedFields(t, measurement, fields3, tags3)
|
||||
//test changed configuration
|
||||
perfObjects = createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false)
|
||||
cps3 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I1)\\C3", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I2)\\C3"}
|
||||
fpm = &FakePerformanceQuery{
|
||||
counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2", "\\O(*)\\C3"}, cps3...), []float64{0, 0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}),
|
||||
expandPaths: map[string][]string{
|
||||
"\\O(*)\\C1": {cps3[0], cps3[3]},
|
||||
"\\O(*)\\C2": {cps3[1], cps3[4]},
|
||||
"\\O(*)\\C3": {cps3[2], cps3[5]},
|
||||
},
|
||||
addEnglishSupported: true,
|
||||
}
|
||||
m.query = fpm
|
||||
m.Object = perfObjects
|
||||
|
||||
fpm.Open()
|
||||
|
||||
time.Sleep(m.CountersRefreshInterval.Duration)
|
||||
|
||||
var acc3 testutil.Accumulator
|
||||
err = m.Gather(&acc3)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, acc3.Metrics, 2)
|
||||
fields4 := map[string]interface{}{
|
||||
"C1": float32(1.1),
|
||||
"C2": float32(1.2),
|
||||
"C3": float32(1.3),
|
||||
}
|
||||
tags4 := map[string]string{
|
||||
"instance": "I1",
|
||||
"objectname": "O",
|
||||
}
|
||||
fields5 := map[string]interface{}{
|
||||
"C1": float32(1.4),
|
||||
"C2": float32(1.5),
|
||||
"C3": float32(1.6),
|
||||
}
|
||||
tags5 := map[string]string{
|
||||
"instance": "I2",
|
||||
"objectname": "O",
|
||||
}
|
||||
|
||||
acc3.AssertContainsTaggedFields(t, measurement, fields4, tags4)
|
||||
acc3.AssertContainsTaggedFields(t, measurement, fields5, tags5)
|
||||
|
||||
}
|
||||
|
||||
func TestGatherTotalNoExpansion(t *testing.T) {
|
||||
var err error
|
||||
measurement := "m"
|
||||
perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true)
|
||||
cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"}
|
||||
m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: false, Object: perfObjects, query: &FakePerformanceQuery{
|
||||
counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}),
|
||||
expandPaths: map[string][]string{
|
||||
"\\O(*)\\C1": {cps1[0], cps1[2]},
|
||||
"\\O(*)\\C2": {cps1[1], cps1[3]},
|
||||
},
|
||||
addEnglishSupported: true,
|
||||
}}
|
||||
var acc1 testutil.Accumulator
|
||||
err = m.Gather(&acc1)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, m.counters, 2)
|
||||
assert.Len(t, acc1.Metrics, 2)
|
||||
fields1 := map[string]interface{}{
|
||||
"C1": float32(1.1),
|
||||
"C2": float32(1.2),
|
||||
}
|
||||
tags1 := map[string]string{
|
||||
"instance": "I1",
|
||||
"objectname": "O",
|
||||
}
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
fields2 := map[string]interface{}{
|
||||
"C1": float32(1.3),
|
||||
"C2": float32(1.4),
|
||||
}
|
||||
tags2 := map[string]string{
|
||||
"instance": "_Total",
|
||||
"objectname": "O",
|
||||
}
|
||||
acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2)
|
||||
|
||||
perfObjects[0].IncludeTotal = false
|
||||
|
||||
m.counters = nil
|
||||
m.lastRefreshed = time.Time{}
|
||||
|
||||
var acc2 testutil.Accumulator
|
||||
err = m.Gather(&acc2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, m.counters, 2)
|
||||
assert.Len(t, acc2.Metrics, 1)
|
||||
|
||||
acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||
|
||||
acc2.AssertDoesNotContainsTaggedFields(t, measurement, fields2, tags2)
|
||||
}
|
||||
|
||||
// list of nul terminated strings from WinAPI
|
||||
var unicodeStringListWithEnglishChars = []uint16{0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x28, 0x30, 0x20, 0x43, 0x3a, 0x29, 0x5c, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x44, 0x69, 0x73, 0x6b, 0x20, 0x51, 0x75, 0x65, 0x75, 0x65, 0x20, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x28, 0x5f, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x29, 0x5c, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x44, 0x69, 0x73, 0x6b, 0x20, 0x51, 0x75, 0x65, 0x75, 0x65, 0x20, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0}
|
||||
var unicodeStringListWithCzechChars = []uint16{0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x46, 0x79, 0x7a, 0x69, 0x63, 0x6b, 0xfd, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x28, 0x30, 0x20, 0x43, 0x3a, 0x29, 0x5c, 0x41, 0x6b, 0x74, 0x75, 0xe1, 0x6c, 0x6e, 0xed, 0x20, 0x64, 0xe9, 0x6c, 0x6b, 0x61, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x79, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x75, 0x0, 0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x46, 0x79, 0x7a, 0x69, 0x63, 0x6b, 0xfd, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x28, 0x5f, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x29, 0x5c, 0x41, 0x6b, 0x74, 0x75, 0xe1, 0x6c, 0x6e, 0xed, 0x20, 0x64, 0xe9, 0x6c, 0x6b, 0x61, 0x20, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x79, 0x20, 0x64, 0x69, 0x73, 0x6b, 0x75, 0x0, 0x0}
|
||||
var unicodeStringListSingleItem = []uint16{0x5c, 0x5c, 0x54, 0x34, 0x38, 0x30, 0x5c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x28, 0x30, 0x20, 0x43, 0x3a, 0x29, 0x5c, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x44, 0x69, 0x73, 0x6b, 0x20, 0x51, 0x75, 0x65, 0x75, 0x65, 0x20, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0, 0x0}
|
||||
var unicodeStringListNoItem = []uint16{0x0}
|
||||
|
||||
var stringArrayWithEnglishChars = []string{
|
||||
"\\\\T480\\PhysicalDisk(0 C:)\\Current Disk Queue Length",
|
||||
"\\\\T480\\PhysicalDisk(_Total)\\Current Disk Queue Length",
|
||||
}
|
||||
var stringArrayWithCzechChars = []string{
|
||||
"\\\\T480\\Fyzick\u00fd disk(0 C:)\\Aktu\u00e1ln\u00ed d\u00e9lka fronty disku",
|
||||
"\\\\T480\\Fyzick\u00fd disk(_Total)\\Aktu\u00e1ln\u00ed d\u00e9lka fronty disku",
|
||||
}
|
||||
|
||||
var stringArraySingleItem = []string{
|
||||
"\\\\T480\\PhysicalDisk(0 C:)\\Current Disk Queue Length",
|
||||
}
|
||||
|
||||
func TestUTF16ToStringArray(t *testing.T) {
|
||||
singleItem := UTF16ToStringArray(unicodeStringListSingleItem)
|
||||
assert.True(t, assert.ObjectsAreEqual(singleItem, stringArraySingleItem), "Not equal single arrays")
|
||||
|
||||
noItem := UTF16ToStringArray(unicodeStringListNoItem)
|
||||
assert.Nil(t, noItem)
|
||||
|
||||
engStrings := UTF16ToStringArray(unicodeStringListWithEnglishChars)
|
||||
assert.True(t, assert.ObjectsAreEqual(engStrings, stringArrayWithEnglishChars), "Not equal eng arrays")
|
||||
|
||||
czechStrings := UTF16ToStringArray(unicodeStringListWithCzechChars)
|
||||
assert.True(t, assert.ObjectsAreEqual(czechStrings, stringArrayWithCzechChars), "Not equal czech arrays")
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ import (
|
||||
"log"
|
||||
|
||||
"github.com/apache/thrift/lib/go/thrift"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
//now is a mockable time for now
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
// JSON decodes spans from bodies `POST`ed to the spans endpoint
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||
|
||||
"github.com/apache/thrift/lib/go/thrift"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
// UnmarshalThrift converts raw bytes in thrift format to a slice of spans
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
func Test_endpointHost(t *testing.T) {
|
||||
|
||||
@@ -59,7 +59,7 @@ func TestPartitionKey(t *testing.T) {
|
||||
partitionKey := k.getPartitionKey(testPoint)
|
||||
u, err := uuid.FromString(partitionKey)
|
||||
assert.Nil(err, "Issue parsing UUID")
|
||||
assert.Equal(uint(4), u.Version(), "PartitionKey should be UUIDv4")
|
||||
assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4")
|
||||
|
||||
k = KinesisOutput{
|
||||
PartitionKey: "-",
|
||||
@@ -72,6 +72,5 @@ func TestPartitionKey(t *testing.T) {
|
||||
partitionKey = k.getPartitionKey(testPoint)
|
||||
u, err = uuid.FromString(partitionKey)
|
||||
assert.Nil(err, "Issue parsing UUID")
|
||||
assert.Equal(uint(4), u.Version(), "PartitionKey should be UUIDv4")
|
||||
|
||||
assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4")
|
||||
}
|
||||
|
||||
@@ -184,7 +184,7 @@ func (m *MQTT) publish(topic string, body []byte) error {
|
||||
|
||||
func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
|
||||
opts := paho.NewClientOptions()
|
||||
opts.KeepAlive = 0 * time.Second
|
||||
opts.KeepAlive = 0
|
||||
|
||||
if m.Timeout.Duration < time.Second {
|
||||
m.Timeout.Duration = 5 * time.Second
|
||||
|
||||
@@ -3,7 +3,7 @@ package nats
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
nats_client "github.com/nats-io/nats"
|
||||
nats_client "github.com/nats-io/go-nats"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/tls"
|
||||
|
||||
@@ -180,6 +180,14 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint {
|
||||
}
|
||||
|
||||
func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string) {
|
||||
|
||||
// Remove all empty tags.
|
||||
for k, v := range mTags {
|
||||
if v == "" {
|
||||
delete(mTags, k)
|
||||
}
|
||||
}
|
||||
|
||||
var source string
|
||||
sourceTagFound := false
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -44,25 +45,15 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i
|
||||
tags[k] = v
|
||||
}
|
||||
|
||||
for _, tag := range p.TagKeys {
|
||||
switch v := jsonOut[tag].(type) {
|
||||
case string:
|
||||
tags[tag] = v
|
||||
case bool:
|
||||
tags[tag] = strconv.FormatBool(v)
|
||||
case float64:
|
||||
tags[tag] = strconv.FormatFloat(v, 'f', -1, 64)
|
||||
}
|
||||
delete(jsonOut, tag)
|
||||
}
|
||||
|
||||
f := JSONFlattener{}
|
||||
err := f.FlattenJSON("", jsonOut)
|
||||
err := f.FullFlattenJSON("", jsonOut, true, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metric, err := metric.New(p.MetricName, tags, f.Fields, time.Now().UTC())
|
||||
tags, nFields := p.switchFieldToTag(tags, f.Fields)
|
||||
|
||||
metric, err := metric.New(p.MetricName, tags, nFields, time.Now().UTC())
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -70,6 +61,43 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i
|
||||
return append(metrics, metric), nil
|
||||
}
|
||||
|
||||
//will take in field map with strings and bools,
|
||||
//search for TagKeys that match fieldnames and add them to tags
|
||||
//will delete any strings/bools that shouldn't be fields
|
||||
//assumes that any non-numeric values in TagKeys should be displayed as tags
|
||||
func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) {
|
||||
for _, name := range p.TagKeys {
|
||||
//switch any fields in tagkeys into tags
|
||||
if fields[name] == nil {
|
||||
continue
|
||||
}
|
||||
switch value := fields[name].(type) {
|
||||
case string:
|
||||
tags[name] = value
|
||||
delete(fields, name)
|
||||
case bool:
|
||||
tags[name] = strconv.FormatBool(value)
|
||||
delete(fields, name)
|
||||
case float64:
|
||||
tags[name] = strconv.FormatFloat(value, 'f', -1, 64)
|
||||
delete(fields, name)
|
||||
default:
|
||||
log.Printf("E! [parsers.json] Unrecognized type %T", value)
|
||||
}
|
||||
}
|
||||
|
||||
//remove any additional string/bool values from fields
|
||||
for k := range fields {
|
||||
switch fields[k].(type) {
|
||||
case string:
|
||||
delete(fields, k)
|
||||
case bool:
|
||||
delete(fields, k)
|
||||
}
|
||||
}
|
||||
return tags, fields
|
||||
}
|
||||
|
||||
func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
||||
buf = bytes.TrimSpace(buf)
|
||||
buf = bytes.TrimPrefix(buf, utf8BOM)
|
||||
@@ -119,6 +147,7 @@ func (f *JSONFlattener) FlattenJSON(
|
||||
if f.Fields == nil {
|
||||
f.Fields = make(map[string]interface{})
|
||||
}
|
||||
|
||||
return f.FullFlattenJSON(fieldname, v, false, false)
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -440,3 +441,29 @@ func TestHttpJsonBOM(t *testing.T) {
|
||||
_, err := parser.Parse(jsonBOM)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
//for testing issue #4260
|
||||
func TestJSONParseNestedArray(t *testing.T) {
|
||||
testString := `{
|
||||
"total_devices": 5,
|
||||
"total_threads": 10,
|
||||
"shares": {
|
||||
"total": 5,
|
||||
"accepted": 5,
|
||||
"rejected": 0,
|
||||
"avg_find_time": 4,
|
||||
"tester": "work",
|
||||
"tester2": "don't want this",
|
||||
"tester3": 7.93
|
||||
}
|
||||
}`
|
||||
|
||||
parser := JSONParser{
|
||||
MetricName: "json_test",
|
||||
TagKeys: []string{"total_devices", "total_threads", "shares_tester", "shares_tester3"},
|
||||
}
|
||||
|
||||
metrics, err := parser.Parse([]byte(testString))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(parser.TagKeys), len(metrics[0].Tags()))
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# Converter Processor
|
||||
|
||||
The converter processor is used to change the type of tag or field values. In
|
||||
addition to changing field types it can convert fields to tags and vis versa.
|
||||
addition to changing field types it can convert between fields and tags.
|
||||
|
||||
Values that cannot be converted are dropped.
|
||||
|
||||
**Note:** When converting tags to fields, take care not to ensure the series is still
|
||||
**Note:** When converting tags to fields, take care to ensure the series is still
|
||||
uniquely identifiable. Fields with the same series key (measurement + tags)
|
||||
will overwrite one another.
|
||||
|
||||
|
||||
@@ -95,7 +95,7 @@ supported_packages = {
|
||||
"freebsd": [ "tar" ]
|
||||
}
|
||||
|
||||
next_version = '1.7.0'
|
||||
next_version = '1.8.0'
|
||||
|
||||
################
|
||||
#### Telegraf Functions
|
||||
@@ -155,12 +155,12 @@ def go_get(branch, update=False, no_uncommitted=False):
|
||||
if local_changes() and no_uncommitted:
|
||||
logging.error("There are uncommitted changes in the current directory.")
|
||||
return False
|
||||
if not check_path_for("gdm"):
|
||||
logging.info("Downloading `gdm`...")
|
||||
get_command = "go get github.com/sparrc/gdm"
|
||||
if not check_path_for("dep"):
|
||||
logging.info("Downloading `dep`...")
|
||||
get_command = "go get -u github.com/golang/dep/cmd/dep"
|
||||
run(get_command)
|
||||
logging.info("Retrieving dependencies with `gdm`...")
|
||||
run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH",
|
||||
logging.info("Retrieving dependencies with `dep`...")
|
||||
run("{}/bin/dep ensure -v".format(os.environ.get("GOPATH",
|
||||
os.path.expanduser("~/go"))))
|
||||
return True
|
||||
|
||||
|
||||
Reference in New Issue
Block a user