Compare commits


61 Commits

Author SHA1 Message Date
Max U
91964888c9 comments reflect deprecated NewJsonParser 2018-06-28 16:05:58 -07:00
Max U
e25fbed0de add old exported func NewJSONParser 2018-06-28 16:00:32 -07:00
Max U
674b9578d7 readability tweak 2018-06-28 15:56:36 -07:00
Max U
885193bc8f change test files to reflect unexported newJsonParser func 2018-06-28 15:39:39 -07:00
Max U
4db667573a newJsonParser function is now unexported, must use NewParser(config) 2018-06-28 15:35:20 -07:00
Max U
7f2b2a08ae test case edit 2018-06-28 11:45:26 -07:00
Max U
c50bfc2c71 edit unittests for JSON parser 2018-06-27 16:09:14 -07:00
Max U
420dafd591 add "field_tags" to json parser config 2018-06-27 16:09:05 -07:00
Daniel Nelson
23523ffd10 Document path tag in tail input 2018-06-21 18:02:34 -07:00
Daniel Nelson
523d761f34 Update changelog 2018-06-21 17:59:31 -07:00
JongHyok Lee
3f28add025 Added path tag to tail input plugin (#4292) 2018-06-21 17:55:54 -07:00
Daniel Nelson
ee6e4b0afd Run windows tests with -short 2018-06-21 17:46:58 -07:00
Patrick Hemmer
16454e25ba Fix postfix input handling of multi-level queues (#4333) 2018-06-21 16:01:38 -07:00
Daniel Nelson
2a1feb6db9 Update changelog 2018-06-21 14:20:35 -07:00
Ayrdrie
61e197d254 Add support for comma in logparser timestamp format (#4311) 2018-06-21 14:19:15 -07:00
Greg
1bd41ef3ce Update vendoring to dep from gdm (#4314) 2018-06-19 11:55:38 -07:00
Daniel Nelson
d7c756e9ff Update changelog 2018-06-19 11:48:08 -07:00
maxunt
39206677f8 Add new measurement with results of pgrep lookup to procstat input (#4307) 2018-06-19 11:47:13 -07:00
Daniel Nelson
b66eb2fec7 Update changelog 2018-06-18 18:09:31 -07:00
Piotr Popieluch
3ad10283ef Add valuecounter aggregator plugin (#3523) 2018-06-18 18:06:11 -07:00
Daniel Nelson
84e9a5c97e Update changelog 2018-06-18 15:40:00 -07:00
Daniel Nelson
c98b58dacc Update docker input documentation for container status 2018-06-18 15:38:21 -07:00
prashanthjbabu
98d86df797 Add container status tag to docker input (#4259) 2018-06-18 15:33:14 -07:00
Daniel Nelson
4e9e57e210 Drop CI support for Go 1.8 (#4309)
Go 1.8 is no longer a supported version and the circleci/golang images
have been removed.
2018-06-17 18:50:14 -07:00
Daniel Nelson
7781507c01 Update changelog 2018-06-14 13:18:31 -07:00
maxunt
8482c40a91 Fix selection of tags under nested objects in the JSON parser (#4284) 2018-06-14 13:17:32 -07:00
Daniel Nelson
0dda9b8319 Update changelog 2018-06-13 13:50:43 -07:00
Arkady Emelyanov
4e69d10ff7 Add owner tag on partitions in burrow input (#4281) 2018-06-13 13:05:27 -07:00
Daniel Nelson
f689463e8e Use linux/unix name only in make install
closes: #4278
2018-06-12 18:37:50 -07:00
Vlasta Hajek
f217d12de5 Fix grammar issues in win_perf_counters readme 2018-06-12 16:54:48 -07:00
Daniel Nelson
886795063e Fix typo 2018-06-12 16:50:14 -07:00
Daniel Nelson
30dc95fa78 Update changelog 2018-06-12 16:46:27 -07:00
Daniel Nelson
40fac0a9b4 Treat sigterm as a clean shutdown signal (#4277) 2018-06-12 16:44:04 -07:00
Daniel Nelson
36df4c5ae5 Fix grammar in converter processor documentation 2018-06-12 16:12:08 -07:00
marcv81
70ffed3a4d Fixed typos in nvidia_smi plugin doc (#4261) 2018-06-12 14:28:56 -07:00
Daniel Nelson
bf59bcf721 Update changelog 2018-06-12 13:57:00 -07:00
Sambhav Kothari
a789f97feb Add support for solr 7 to the solr input (#4271) 2018-06-12 13:56:13 -07:00
Daniel Nelson
d2e00a3205 Set 1.7.0 release date 2018-06-12 11:41:58 -07:00
Daniel Nelson
daddd8bbac Use nats-io/go-nats instead of nats-io/nats in tests 2018-06-11 16:13:59 -07:00
Daniel Nelson
d16530677d Update changelog 2018-06-11 16:07:23 -07:00
marcv81
1ea18ffd0a Add power draw field to nvidia_smi plugin (#4262) 2018-06-11 16:06:26 -07:00
Daniel Nelson
dd2223ae1c Use nats-io/go-nats instead of nats-io/nats 2018-06-11 15:24:45 -07:00
Daniel Nelson
90eebd88af Update changelog 2018-06-11 14:55:12 -07:00
Pierre Tessier
d2e729dfaf Remove tags with empty values from Wavefront output (#4266) 2018-06-11 14:54:08 -07:00
Daniel Nelson
f64d612294 Reword converter description 2018-06-11 14:43:28 -07:00
Daniel Nelson
76ec90e66d Update win_perf_counters README 2018-06-11 11:41:46 -07:00
Vlasta Hajek
1690f36b09 Add option to enable wildcard expansion (#4265)
This is needed because wildcard expansion causes counters to be localized.
2018-06-11 11:10:53 -07:00
Vlasta Hajek
87f711a19a Fix panic with unicode counter names in win_perf_counters (#4255) 2018-06-08 12:41:21 -07:00
Daniel Nelson
58895d6b03 Update go-syslog version
Fix go-syslog overquota errors since latest version no longer uses LFS.
2018-06-08 12:22:49 -07:00
Daniel Nelson
cd9ad77038 Update changelog 2018-06-07 12:38:17 -07:00
Daniel Nelson
8563238059 Update tengine docs 2018-06-07 12:35:46 -07:00
Daniel Nelson
11335f5fee Restore tengine input plugin (#4160)
This reverts commit 8826cdc423.
2018-06-07 12:35:02 -07:00
Daniel Nelson
acba20af1a Fix TLS and SSL config option parsing (#4247) 2018-06-06 18:29:59 -07:00
Daniel Nelson
229b6bd944 Update changelog 2018-06-06 14:30:37 -07:00
Daniel Nelson
7fe6e2f5ae Use same flags for all bsd family ping variants (#4241) 2018-06-06 14:28:12 -07:00
Pierre Fersing
a4214abfc4 Ignore more boring filesystems from disk plugin (#4244) 2018-06-06 13:44:26 -07:00
Daniel Nelson
5f0cbd1255 Update changelog 2018-06-05 17:14:29 -07:00
Leszek Charkiewicz
3ef4dff4ec Add SSL/TLS support to Redis input (#4236) 2018-06-05 17:12:30 -07:00
Piotr Popieluch
dfe7b5eec2 Don't skip metrics during startup in aggregate phase (#4230) 2018-06-05 16:30:53 -07:00
Daniel Nelson
92a8f795f5 Set 1.6.4 release date 2018-06-05 12:11:15 -07:00
Daniel Nelson
b1d77ade55 Update master version to 1.8 2018-06-05 11:46:55 -07:00
92 changed files with 2799 additions and 689 deletions


@@ -15,7 +15,14 @@ jobs:
<<: [ *defaults, *go-1_10 ]
steps:
- checkout
- restore_cache:
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
- run: 'make deps'
- save_cache:
name: 'vendored deps'
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
paths:
- './vendor'
- persist_to_workspace:
root: '/go/src'
paths:

.gitignore: 1 line changed

@@ -2,3 +2,4 @@
/telegraf
/telegraf.exe
/telegraf.gz
/vendor


@@ -1,22 +1,35 @@
## v1.7.2 [unreleased]
## v1.8 [unreleased]
### Bugfixes
### Release Notes
- [#4381](https://github.com/influxdata/telegraf/issues/4381): Use localhost as default server tag in zookeeper input.
- [#4374](https://github.com/influxdata/telegraf/issues/4374): Don't set values when pattern doesn't match in regex processor.
### New Inputs
## v1.7.1 [2018-07-03]
- [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu
### New Aggregators
- [valuecounter](./plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212
### Features
- [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input.
- [#4160](https://github.com/influxdata/telegraf/pull/4160): Add tengine input plugin.
- [#4262](https://github.com/influxdata/telegraf/pull/4262): Add power draw field to nvidia_smi plugin.
- [#4271](https://github.com/influxdata/telegraf/pull/4271): Add support for solr 7 to the solr input.
- [#4281](https://github.com/influxdata/telegraf/pull/4281): Add owner tag on partitions in burrow input.
- [#4259](https://github.com/influxdata/telegraf/pull/4259): Add container status tag to docker input.
- [#3523](https://github.com/influxdata/telegraf/pull/3523): Add valuecounter aggregator plugin.
- [#4307](https://github.com/influxdata/telegraf/pull/4307): Add new measurement with results of pgrep lookup to procstat input.
- [#4311](https://github.com/influxdata/telegraf/pull/4311): Add support for comma in logparser timestamp format.
- [#4292](https://github.com/influxdata/telegraf/pull/4292): Add path tag to tail input plugin.
## v1.7.1 [unreleased]
### Bugfixes
- [#4277](https://github.com/influxdata/telegraf/pull/4277): Treat sigterm as a clean shutdown signal.
- [#4284](https://github.com/influxdata/telegraf/pull/4284): Fix selection of tags under nested objects in the JSON parser.
- [#4135](https://github.com/influxdata/telegraf/issues/4135): Fix postfix input handling multi-level queues.
- [#4334](https://github.com/influxdata/telegraf/pull/4334): Fix syslog timestamp parsing with single digit day of month.
- [#2910](https://github.com/influxdata/telegraf/issues/2910): Handle mysql input variations in the user_statistics collecting.
- [#4293](https://github.com/influxdata/telegraf/issues/4293): Fix minmax and basicstats aggregators to use uint64.
- [#4290](https://github.com/influxdata/telegraf/issues/4290): Document swap input plugin.
- [#4316](https://github.com/influxdata/telegraf/issues/4316): Fix incorrect precision being applied to metric in http_listener.
## v1.7 [2018-06-12]


@@ -30,9 +30,9 @@ which can be found [on our website](http://influxdb.com/community/cla.html)
Assuming you can already build the project, run these in the telegraf directory:
1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `GOOS=linux gdm save`
1. `go get -u github.com/golang/dep/cmd/dep`
2. `dep ensure`
3. `dep ensure -add github.com/[dependency]/[new-package]`
## Input Plugins

Godeps: 100 lines (file deleted)

@@ -1,100 +0,0 @@
code.cloudfoundry.org/clock e9dc86bbf0e5bbe6bf7ff5a6f71e048959b61f71
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 5174058f0d2bda63fa5198ab96c33d9a909c58ed
github.com/influxdata/go-syslog eecd51df3ad85464a2bab9b7d3a45bc1e299059e
github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
github.com/influxdata/toml 2a2e3012f7cfbef64091cc79776311e65dfa211b
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/fsnotify/fsnotify c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/ApplicationInsights-Go 3612f58550c1de70f1a110c78c830e55f29aa65d
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp f15472a4cd6f6ea7929e4c7d9f163c49f059924f
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
github.com/stretchr/testify 12b6f73e6084dad08a7c6e575284b177ecafbc71
github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
google.golang.org/genproto 11c7f9e547da6db876260ce49ea7536985904c9b
google.golang.org/grpc de2209a968d48e8970546c8a710189f7461370f7
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6

Gopkg.lock (generated, new file): 973 lines

@@ -0,0 +1,973 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "code.cloudfoundry.org/clock"
packages = ["."]
revision = "02e53af36e6c978af692887ed449b74026d76fec"
[[projects]]
name = "collectd.org"
packages = [
"api",
"cdtime",
"network"
]
revision = "2ce144541b8903101fb8f1483cc0497a68798122"
version = "v0.3.0"
[[projects]]
name = "github.com/Microsoft/ApplicationInsights-Go"
packages = [
"appinsights",
"appinsights/contracts"
]
revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5"
version = "v0.4.2"
[[projects]]
name = "github.com/Microsoft/go-winio"
packages = ["."]
revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
version = "v0.4.7"
[[projects]]
name = "github.com/Shopify/sarama"
packages = ["."]
revision = "35324cf48e33d8260e1c7c18854465a904ade249"
version = "v1.17.0"
[[projects]]
name = "github.com/StackExchange/wmi"
packages = ["."]
revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
version = "1.0.0"
[[projects]]
name = "github.com/aerospike/aerospike-client-go"
packages = [
".",
"internal/lua",
"internal/lua/resources",
"logger",
"pkg/bcrypt",
"pkg/ripemd160",
"types",
"types/atomic",
"types/particle_type",
"types/rand",
"utils/buffer"
]
revision = "c10b5393e43bd60125aca6289c7b24879edb1787"
version = "v1.33.0"
[[projects]]
branch = "master"
name = "github.com/alecthomas/template"
packages = [
".",
"parse"
]
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
[[projects]]
branch = "master"
name = "github.com/alecthomas/units"
packages = ["."]
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
[[projects]]
branch = "master"
name = "github.com/amir/raidman"
packages = [
".",
"proto"
]
revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b"
[[projects]]
branch = "master"
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
revision = "f5f430df56871bc937950274b2c86681d3db6e59"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/json/jsonutil",
"private/protocol/jsonrpc",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/xml/xmlutil",
"service/cloudwatch",
"service/kinesis",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"
[[projects]]
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
name = "github.com/bsm/sarama-cluster"
packages = ["."]
revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3"
version = "v2.1.13"
[[projects]]
name = "github.com/cenkalti/backoff"
packages = ["."]
revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e"
version = "v2.0.0"
[[projects]]
branch = "master"
name = "github.com/couchbase/go-couchbase"
packages = ["."]
revision = "16db1f1fe037412f12738fa4d8448c549c4edd77"
[[projects]]
branch = "master"
name = "github.com/couchbase/gomemcached"
packages = [
".",
"client"
]
revision = "0da75df145308b9a4e6704d762ca9d9b77752efc"
[[projects]]
branch = "master"
name = "github.com/couchbase/goutils"
packages = [
"logging",
"scramsha"
]
revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/docker/distribution"
packages = [
"digest",
"reference"
]
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
version = "v2.6.2"
[[projects]]
name = "github.com/docker/docker"
packages = [
"api/types",
"api/types/blkiodev",
"api/types/container",
"api/types/events",
"api/types/filters",
"api/types/mount",
"api/types/network",
"api/types/reference",
"api/types/registry",
"api/types/strslice",
"api/types/swarm",
"api/types/time",
"api/types/versions",
"api/types/volume",
"client",
"pkg/tlsconfig"
]
revision = "eef6495eddab52828327aade186443681ed71a4e"
version = "v17.03.2-ce-rc1"
[[projects]]
name = "github.com/docker/go-connections"
packages = [
"nat",
"sockets",
"tlsconfig"
]
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
version = "v0.3.0"
[[projects]]
name = "github.com/docker/go-units"
packages = ["."]
revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
version = "v0.3.3"
[[projects]]
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/eapache/go-xerial-snappy"
packages = ["."]
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
[[projects]]
name = "github.com/eapache/queue"
packages = ["."]
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
name = "github.com/eclipse/paho.mqtt.golang"
packages = [
".",
"packets"
]
revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560"
version = "v1.1.1"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
name = "github.com/go-logfmt/logfmt"
packages = ["."]
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
name = "github.com/go-ole/go-ole"
packages = [
".",
"oleutil"
]
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
version = "v1.2.1"
[[projects]]
name = "github.com/go-redis/redis"
packages = [
".",
"internal",
"internal/consistenthash",
"internal/hashtag",
"internal/pool",
"internal/proto",
"internal/singleflight",
"internal/util"
]
revision = "83fb42932f6145ce52df09860384a4653d2d332a"
version = "v6.12.0"
[[projects]]
name = "github.com/go-sql-driver/mysql"
packages = ["."]
revision = "d523deb1b23d913de5bdada721a6071e71283618"
version = "v1.4.0"
[[projects]]
name = "github.com/gobwas/glob"
packages = [
".",
"compiler",
"match",
"syntax",
"syntax/ast",
"syntax/lexer",
"util/runes",
"util/strings"
]
revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
version = "v0.2.3"
[[projects]]
name = "github.com/gogo/protobuf"
packages = ["proto"]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/golang/snappy"
packages = ["."]
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/internal/diff",
"cmp/internal/function",
"cmp/internal/value"
]
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
version = "v0.2.0"
[[projects]]
name = "github.com/gorilla/context"
packages = ["."]
revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
version = "v1.1.1"
[[projects]]
name = "github.com/gorilla/mux"
packages = ["."]
revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
version = "v1.6.2"
[[projects]]
branch = "master"
name = "github.com/hailocab/go-hostpool"
packages = ["."]
revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478"
[[projects]]
name = "github.com/hashicorp/consul"
packages = ["api"]
revision = "5174058f0d2bda63fa5198ab96c33d9a909c58ed"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-rootcerts"
packages = ["."]
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
[[projects]]
name = "github.com/hashicorp/serf"
packages = ["coordinate"]
revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
version = "v0.8.1"
[[projects]]
name = "github.com/influxdata/go-syslog"
packages = [
"rfc5424",
"rfc5425"
]
revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/influxdata/tail"
packages = [
".",
"ratelimiter",
"util",
"watch",
"winfile"
]
revision = "c43482518d410361b6c383d7aebce33d0471d7bc"
[[projects]]
branch = "master"
name = "github.com/influxdata/toml"
packages = [
".",
"ast"
]
revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b"
[[projects]]
branch = "master"
name = "github.com/influxdata/wlog"
packages = ["."]
revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec"
[[projects]]
name = "github.com/jackc/pgx"
packages = [
".",
"chunkreader",
"internal/sanitize",
"pgio",
"pgproto3",
"pgtype",
"stdlib"
]
revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27"
version = "v3.1.0"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
branch = "master"
name = "github.com/kardianos/service"
packages = ["."]
revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197"
[[projects]]
branch = "master"
name = "github.com/kballard/go-shellquote"
packages = ["."]
revision = "95032a82bc518f77982ea72343cc1ade730072f0"
[[projects]]
branch = "master"
name = "github.com/kr/logfmt"
packages = ["."]
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
branch = "master"
name = "github.com/mailru/easyjson"
packages = [
".",
"buffer",
"jlexer",
"jwriter"
]
revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
name = "github.com/miekg/dns"
packages = ["."]
revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
version = "v1.0.8"
[[projects]]
branch = "master"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
[[projects]]
branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
[[projects]]
name = "github.com/multiplay/go-ts3"
packages = ["."]
revision = "d0d44555495c8776880a17e439399e715a4ef319"
version = "v1.0.0"
[[projects]]
name = "github.com/naoina/go-stringutil"
packages = ["."]
revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
version = "v0.1.0"
[[projects]]
name = "github.com/nats-io/gnatsd"
packages = [
"conf",
"logger",
"server",
"server/pse",
"util"
]
revision = "add6d7930ae6d4bff8823b28999ea87bf1bfd23d"
version = "v1.1.0"
[[projects]]
name = "github.com/nats-io/go-nats"
packages = [
".",
"encoders/builtin",
"util"
]
revision = "062418ea1c2181f52dc0f954f6204370519a868b"
version = "v1.5.0"
[[projects]]
name = "github.com/nats-io/nuid"
packages = ["."]
revision = "289cccf02c178dc782430d534e3c1f5b72af807f"
version = "v1.0.0"
[[projects]]
name = "github.com/nsqio/go-nsq"
packages = ["."]
revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f"
version = "v1.0.7"
[[projects]]
branch = "master"
name = "github.com/opentracing-contrib/go-observer"
packages = ["."]
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
[[projects]]
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"log"
]
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
[[projects]]
name = "github.com/openzipkin/zipkin-go-opentracing"
packages = [
".",
"flag",
"thrift/gen-go/scribe",
"thrift/gen-go/zipkincore",
"types",
"wire"
]
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
version = "v0.3.4"
[[projects]]
name = "github.com/pierrec/lz4"
packages = [
".",
"internal/xxh32"
]
revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
version = "v2.0.2"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp"
]
revision = "c5b7fccd204277076155f10851dad72b76a49317"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"log",
"model"
]
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs"
]
revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7"
[[projects]]
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
branch = "master"
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
name = "github.com/shirou/gopsutil"
packages = [
"cpu",
"disk",
"host",
"internal/common",
"load",
"mem",
"net",
"process"
]
revision = "eeb1d38d69593f121e060d24d17f7b1f0936b203"
version = "v2.18.05"
[[projects]]
branch = "master"
name = "github.com/shirou/w32"
packages = ["."]
revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
version = "v1.0.5"
[[projects]]
branch = "master"
name = "github.com/soniah/gosnmp"
packages = ["."]
revision = "bcf840db66be7d64bf96c3c0e075c92e3d98f793"
[[projects]]
branch = "master"
name = "github.com/streadway/amqp"
packages = ["."]
revision = "e5adc2ada8b8efff032bf61173a233d143e9318e"
[[projects]]
name = "github.com/stretchr/objx"
packages = ["."]
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
version = "v0.1.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
name = "github.com/tidwall/gjson"
packages = ["."]
revision = "afaeb9562041a8018c74e006551143666aed08bf"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/tidwall/match"
packages = ["."]
revision = "1731857f09b1f38450e2c12409748407822dc6be"
[[projects]]
name = "github.com/vjeantet/grok"
packages = ["."]
revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/wvanbergen/kafka"
packages = ["consumergroup"]
revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f"
[[projects]]
branch = "master"
name = "github.com/wvanbergen/kazoo-go"
packages = ["."]
revision = "f72d8611297a7cf105da904c04198ad701a60101"
[[projects]]
branch = "master"
name = "github.com/yuin/gopher-lua"
packages = [
".",
"ast",
"parse",
"pm"
]
revision = "ca850f594eaafa5468da2bd53b865e4ee55be18b"
[[projects]]
branch = "master"
name = "github.com/zensqlmonitor/go-mssqldb"
packages = ["."]
revision = "e8fbf836e44e86764eba398361d1825651709547"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"ed25519",
"ed25519/internal/edwards25519",
"md4",
"pbkdf2",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"bpf",
"context",
"context/ctxhttp",
"html",
"html/atom",
"html/charset",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/iana",
"internal/socket",
"internal/socks",
"internal/timeseries",
"ipv4",
"ipv6",
"proxy",
"trace",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
"windows/registry",
"windows/svc",
"windows/svc/debug",
"windows/svc/eventlog",
"windows/svc/mgr"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"encoding",
"encoding/charmap",
"encoding/htmlindex",
"encoding/internal",
"encoding/internal/identifier",
"encoding/japanese",
"encoding/korean",
"encoding/simplifiedchinese",
"encoding/traditionalchinese",
"encoding/unicode",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"internal/utf8internal",
"language",
"runes",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
name = "google.golang.org/appengine"
packages = ["cloudsql"]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "32ee49c4dd805befd833990acba36cb75042378c"
[[projects]]
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"channelz",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b"
version = "v1.12.2"
[[projects]]
name = "gopkg.in/alecthomas/kingpin.v2"
packages = ["."]
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
version = "v2.2.6"
[[projects]]
name = "gopkg.in/asn1-ber.v1"
packages = ["."]
revision = "379148ca0225df7a432012b8df0355c2a2063ac0"
version = "v1.2"
[[projects]]
name = "gopkg.in/fatih/pool.v2"
packages = ["."]
revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f"
version = "v2.0.0"
[[projects]]
name = "gopkg.in/fsnotify.v1"
packages = ["."]
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
version = "v1.4.7"
[[projects]]
name = "gopkg.in/gorethink/gorethink.v3"
packages = [
".",
"encoding",
"ql2",
"types"
]
revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b"
version = "v3.0.5"
[[projects]]
name = "gopkg.in/ldap.v2"
packages = ["."]
revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9"
version = "v2.5.1"
[[projects]]
branch = "v2"
name = "gopkg.in/mgo.v2"
packages = [
".",
"bson",
"internal/json",
"internal/sasl",
"internal/scram"
]
revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655"
[[projects]]
name = "gopkg.in/olivere/elastic.v5"
packages = [
".",
"config",
"uritemplates"
]
revision = "b708306d715bea9b983685e94ab4602cdc9f988b"
version = "v5.0.69"
[[projects]]
branch = "v1"
name = "gopkg.in/tomb.v1"
packages = ["."]
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file): 243 lines

@@ -0,0 +1,243 @@
[[constraint]]
name = "collectd.org"
version = "0.3.0"
[[constraint]]
name = "github.com/aerospike/aerospike-client-go"
version = "^1.33.0"
[[constraint]]
name = "github.com/amir/raidman"
branch = "master"
[[constraint]]
name = "github.com/apache/thrift"
branch = "master"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.14.8"
# version = "1.8.39"
[[constraint]]
name = "github.com/bsm/sarama-cluster"
version = "2.1.13"
# version = "2.1.10"
[[constraint]]
name = "github.com/couchbase/go-couchbase"
branch = "master"
[[constraint]]
name = "github.com/dgrijalva/jwt-go"
version = "3.2.0"
# version = "3.1.0"
[[constraint]]
name = "github.com/docker/docker"
version = "~17.03.2-ce"
[[constraint]]
name = "github.com/docker/go-connections"
version = "0.3.0"
# version = "0.2.1"
[[constraint]]
name = "github.com/eclipse/paho.mqtt.golang"
version = "~1.1.1"
# version = "1.1.0"
[[constraint]]
name = "github.com/go-sql-driver/mysql"
version = "1.4.0"
# version = "1.3.0"
[[constraint]]
name = "github.com/gobwas/glob"
version = "0.2.3"
# version = "0.2.2"
[[constraint]]
name = "github.com/golang/protobuf"
version = "1.1.0"
# version = "1.0.0"
[[constraint]]
name = "github.com/google/go-cmp"
version = "0.2.0"
# version = "0.1.0"
[[constraint]]
name = "github.com/gorilla/mux"
version = "1.6.2"
# version = "1.6.1"
[[constraint]]
name = "github.com/go-redis/redis"
version = "6.12.0"
[[constraint]]
name = "github.com/hashicorp/consul"
version = "1.1.0"
[[constraint]]
name = "github.com/influxdata/go-syslog"
version = "1.0.1"
[[constraint]]
name = "github.com/influxdata/tail"
branch = "master"
[[constraint]]
name = "github.com/influxdata/toml"
branch = "master"
[[constraint]]
name = "github.com/influxdata/wlog"
branch = "master"
[[constraint]]
name = "github.com/jackc/pgx"
version = "3.1.0"
[[constraint]]
name = "github.com/kardianos/service"
branch = "master"
[[constraint]]
name = "github.com/kballard/go-shellquote"
branch = "master"
[[constraint]]
name = "github.com/matttproud/golang_protobuf_extensions"
version = "1.0.1"
[[constraint]]
name = "github.com/Microsoft/ApplicationInsights-Go"
branch = "master"
[[constraint]]
name = "github.com/miekg/dns"
version = "1.0.8"
# version = "1.0.0"
[[constraint]]
name = "github.com/multiplay/go-ts3"
version = "1.0.0"
[[constraint]]
name = "github.com/nats-io/gnatsd"
version = "1.1.0"
# version = "1.0.4"
[[constraint]]
name = "github.com/nats-io/go-nats"
version = "1.5.0"
# version = "1.3.0"
[[constraint]]
name = "github.com/nsqio/go-nsq"
version = "1.0.7"
[[constraint]]
name = "github.com/openzipkin/zipkin-go-opentracing"
version = "0.3.4"
# version = "0.3.0"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "0.8.0"
[[constraint]]
name = "github.com/prometheus/client_model"
branch = "master"
[[constraint]]
name = "github.com/prometheus/common"
branch = "master"
[[constraint]]
name = "github.com/satori/go.uuid"
version = "1.2.0"
[[constraint]]
name = "github.com/shirou/gopsutil"
version = "2.18.05"
# version = "2.18.04"
[[constraint]]
name = "github.com/Shopify/sarama"
version = "1.17.0"
# version = "1.15.0"
[[constraint]]
name = "github.com/soniah/gosnmp"
branch = "master"
[[constraint]]
name = "github.com/StackExchange/wmi"
version = "1.0.0"
[[constraint]]
name = "github.com/streadway/amqp"
branch = "master"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.2"
# version = "1.2.1"
[[constraint]]
name = "github.com/tidwall/gjson"
version = "1.1.1"
# version = "1.0.0"
[[constraint]]
name = "github.com/vjeantet/grok"
version = "1.0.0"
[[constraint]]
name = "github.com/wvanbergen/kafka"
branch = "master"
[[constraint]]
name = "github.com/zensqlmonitor/go-mssqldb"
branch = "master"
[[constraint]]
name = "golang.org/x/net"
branch = "master"
[[constraint]]
name = "golang.org/x/sys"
branch = "master"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.12.2"
# version = "1.8.0"
[[constraint]]
name = "gopkg.in/gorethink/gorethink.v3"
version = "3.0.5"
[[constraint]]
name = "gopkg.in/ldap.v2"
version = "2.5.1"
[[constraint]]
name = "gopkg.in/mgo.v2"
branch = "v2"
[[constraint]]
name = "gopkg.in/olivere/elastic.v5"
version = "^5.0.69"
# version = "^6.1.23"
[[constraint]]
name = "gopkg.in/yaml.v2"
version = "^2.2.1"
[[override]]
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
name = "gopkg.in/fsnotify.v1"


@@ -23,8 +23,8 @@ all:
deps:
go get -u github.com/golang/lint/golint
go get github.com/sparrc/gdm
gdm restore --parallel=false
go get -u github.com/golang/dep/cmd/dep
dep ensure
telegraf:
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
@@ -34,7 +34,7 @@ go-install:
install: telegraf
mkdir -p $(DESTDIR)$(PREFIX)/bin/
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
cp telegraf $(DESTDIR)$(PREFIX)/bin/
test:
go test -short ./...
@@ -54,11 +54,11 @@ fmtcheck:
@echo '[INFO] done.'
test-windows:
go test ./plugins/inputs/ping/...
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
go test ./plugins/inputs/procstat/...
go test ./plugins/inputs/ntpq/...
go test -short ./plugins/inputs/ping/...
go test -short ./plugins/inputs/win_perf_counters/...
go test -short ./plugins/inputs/win_services/...
go test -short ./plugins/inputs/procstat/...
go test -short ./plugins/inputs/ntpq/...
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.


@@ -42,7 +42,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
Dependencies are managed with [dep](https://github.com/golang/dep),
which is installed by the Makefile if you don't have it already.
1. [Install Go](https://golang.org/doc/install)
@@ -213,6 +213,7 @@ configuration options.
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [syslog](./plugins/inputs/syslog)
* [teamspeak](./plugins/inputs/teamspeak)
* [tengine](./plugins/inputs/tengine)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
@@ -281,6 +282,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
* [valuecounter](./plugins/aggregators/valuecounter)
## Output Plugins


@@ -362,24 +362,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
metricC := make(chan telegraf.Metric, 100)
aggC := make(chan telegraf.Metric, 100)
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
input.SetDefaultTags(a.Config.Tags)
switch p := input.Input.(type) {
case telegraf.ServiceInput:
acc := NewAccumulator(input, metricC)
// Service input plugins should set their own precision of their
// metrics.
acc.SetPrecision(time.Nanosecond, 0)
if err := p.Start(acc); err != nil {
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
input.Name(), err.Error())
return err
}
defer p.Stop()
}
}
// Round collection to nearest interval by sleeping
if a.Config.Agent.RoundInterval {
i := int64(a.Config.Agent.Interval.Duration)
@@ -419,6 +401,25 @@ func (a *Agent) Run(shutdown chan struct{}) error {
}(input, interval)
}
// Start all ServicePlugins inputs after all other
// plugins are loaded so that no metrics get dropped
for _, input := range a.Config.Inputs {
input.SetDefaultTags(a.Config.Tags)
switch p := input.Input.(type) {
case telegraf.ServiceInput:
acc := NewAccumulator(input, metricC)
// Service input plugins should set their own precision of their
// metrics.
acc.SetPrecision(time.Nanosecond, 0)
if err := p.Start(acc); err != nil {
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
input.Name(), err.Error())
return err
}
defer p.Stop()
}
}
wg.Wait()
a.Close()
return nil


@@ -21,6 +21,7 @@ install:
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
- go env
- git config --system core.longpaths true
build_script:
- cmd: C:\GnuWin32\bin\make deps


@@ -58,7 +58,7 @@ var fService = flag.String("service", "",
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
var (
nextVersion = "1.7.0"
nextVersion = "1.8.0"
version string
commit string
branch string


@@ -104,9 +104,10 @@ but can be overridden using the `name_override` config option.
#### JSON Configuration:
The JSON data format supports specifying "tag keys". If specified, keys
will be searched for in the root-level of the JSON blob. If the key(s) exist,
they will be applied as tags to the Telegraf metrics.
The JSON data format supports specifying "tag keys" and "field keys". If specified, keys
will be searched for in the root-level and any nested lists of the JSON blob. If the key(s) exist,
they will be applied as tags or fields to the Telegraf metrics. If "field_keys" is not specified,
all int and float values will be set as fields by default.
For example, if you had this configuration:
@@ -173,6 +174,7 @@ For example, if the following configuration:
"my_tag_1",
"my_tag_2"
]
field_keys = ["b_c"]
```
with this JSON output from a command:
@@ -198,11 +200,11 @@ with this JSON output from a command:
]
```
Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"
Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2" and fielded with "b_c"
```
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
exec_mycollector,my_tag_1=foo,my_tag_2=baz b_c=6
exec_mycollector,my_tag_1=bar,my_tag_2=baz b_c=8
```
# Value:


@@ -1077,7 +1077,7 @@
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
# Read metrics about disk IO by device


@@ -242,7 +242,7 @@
#
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs"]
# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
# # Read metrics about disk IO by device


@@ -1261,6 +1261,18 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
}
}
if node, ok := tbl.Fields["field_keys"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.FieldKeys = append(c.FieldKeys, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["data_type"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
@@ -1344,6 +1356,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
delete(tbl.Fields, "separator")
delete(tbl.Fields, "templates")
delete(tbl.Fields, "tag_keys")
delete(tbl.Fields, "field_keys")
delete(tbl.Fields, "data_type")
delete(tbl.Fields, "collectd_auth_file")
delete(tbl.Fields, "collectd_security_level")


@@ -143,7 +143,10 @@ func TestConfig_LoadDirectory(t *testing.T) {
"Testdata did not produce correct memcached metadata.")
ex := inputs.Inputs["exec"]().(*exec.Exec)
p, err := parsers.NewJSONParser("exec", nil, nil)
p, err := parsers.NewParser(&parsers.Config{
MetricName: "exec",
DataFormat: "json",
})
assert.NoError(t, err)
ex.SetParser(p)
ex.Command = "/usr/bin/myothercollector --foo=bar"
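The hunk above shows the migration described in the commit log ("newJsonParser function is now unexported, must use NewParser(config)"): callers now describe the parser with a parsers.Config instead of calling the old NewJSONParser constructor, and the new "field_keys" option flows through the same Config. The sketch below is illustrative only; the import path, the TagKeys/FieldKeys field names, and the sample JSON are assumptions based on the surrounding hunks rather than verified against the full tree.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/plugins/parsers" // assumed import path
)

func main() {
	// Previously: parsers.NewJSONParser("exec", tagKeys, defaultTags).
	// Now the parser is described by a Config and built with NewParser.
	p, err := parsers.NewParser(&parsers.Config{
		MetricName: "exec",
		DataFormat: "json",
		TagKeys:    []string{"my_tag_1", "my_tag_2"}, // keys promoted to tags
		FieldKeys:  []string{"b_c"},                  // the new "field_keys" option
	})
	if err != nil {
		log.Fatal(err)
	}

	// Nested keys are flattened with "_", so "b": {"c": 6} becomes field b_c.
	metrics, err := p.Parse([]byte(`[{"a": 5, "b": {"c": 6}, "my_tag_1": "foo", "my_tag_2": "baz"}]`))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
```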


@@ -11,6 +11,7 @@ import (
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"unicode"
)
@@ -193,3 +194,15 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
return
}
}
// Exit status takes the error from exec.Command
// and returns the exit status and true
// if error is not exit status, will return 0 and false
func ExitStatus(err error) (int, bool) {
if exiterr, ok := err.(*exec.ExitError); ok {
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
return status.ExitStatus(), true
}
}
return 0, false
}
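For context, a hedged usage sketch of the ExitStatus helper added above: it distinguishes a command that ran and exited non-zero from one that failed to start at all. The pgrep invocation and the github.com/influxdata/telegraf/internal import path are illustrative assumptions tied to the procstat pgrep change in this comparison, not code taken from it.

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/influxdata/telegraf/internal" // assumed package path for ExitStatus
)

func main() {
	// pgrep exits with status 1 when nothing matches, which a collector
	// may want to treat as "no processes found" rather than a hard error.
	out, err := exec.Command("pgrep", "-f", "telegraf").Output()
	if err != nil {
		if code, ok := internal.ExitStatus(err); ok {
			// The command ran and returned a non-zero exit status.
			fmt.Printf("pgrep exited with status %d\n", code)
		} else {
			// The error was not an *exec.ExitError (e.g. binary not found).
			fmt.Printf("pgrep could not be run: %v\n", err)
		}
		return
	}
	fmt.Printf("matching pids:\n%s", out)
}
```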


@@ -1,6 +1,7 @@
package models
import (
"log"
"time"
"github.com/influxdata/telegraf"
@@ -153,6 +154,7 @@ func (r *RunningAggregator) Run(
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
// the metric is outside the current aggregation period, so
// skip it.
log.Printf("D! aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name())
continue
}
r.add(m)


@@ -4,4 +4,5 @@ import (
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
)


@@ -246,8 +246,6 @@ func convert(in interface{}) (float64, bool) {
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
default:
return 0, false
}


@@ -28,7 +28,6 @@ var m2, _ = metric.New("m1",
"c": float64(4),
"d": float64(6),
"e": float64(200),
"f": uint64(200),
"ignoreme": "string",
"andme": true,
},
@@ -82,10 +81,6 @@ func TestBasicStatsWithPeriod(t *testing.T) {
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
"f_count": float64(1), //f
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -149,10 +144,6 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
"f_count": float64(1), //f
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
@@ -178,7 +169,6 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {
"c_count": float64(2),
"d_count": float64(2),
"e_count": float64(1),
"f_count": float64(1),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -204,7 +194,6 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {
"c_min": float64(2),
"d_min": float64(2),
"e_min": float64(200),
"f_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -230,7 +219,6 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {
"c_max": float64(4),
"d_max": float64(6),
"e_max": float64(200),
"f_max": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -256,7 +244,6 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
"c_mean": float64(3),
"d_mean": float64(4),
"e_mean": float64(200),
"f_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -282,7 +269,6 @@ func TestBasicStatsWithOnlySum(t *testing.T) {
"c_sum": float64(6),
"d_sum": float64(8),
"e_sum": float64(200),
"f_sum": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -413,8 +399,6 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
"d_min": float64(2),
"e_max": float64(200), //e
"e_min": float64(200),
"f_max": float64(200), //f
"f_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -466,11 +450,6 @@ func TestBasicStatsWithAllStats(t *testing.T) {
"e_min": float64(200),
"e_mean": float64(200),
"e_sum": float64(200),
"f_count": float64(1), //f
"f_max": float64(200),
"f_min": float64(200),
"f_mean": float64(200),
"f_sum": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",


@@ -107,8 +107,6 @@ func convert(in interface{}) (float64, bool) {
return v, true
case int64:
return float64(v), true
case uint64:
return float64(v), true
default:
return 0, false
}


@@ -38,7 +38,6 @@ var m2, _ = metric.New("m1",
"i": float64(1),
"j": float64(1),
"k": float64(200),
"l": uint64(200),
"ignoreme": "string",
"andme": true,
},
@@ -86,8 +85,6 @@ func TestMinMaxWithPeriod(t *testing.T) {
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
"l_max": float64(200),
"l_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
@@ -157,8 +154,6 @@ func TestMinMaxDifferentPeriods(t *testing.T) {
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
"l_max": float64(200),
"l_min": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",


@@ -0,0 +1,73 @@
# ValueCounter Aggregator Plugin
The valuecounter plugin counts the occurrence of values in fields and emits the
counter once every 'period' seconds.
A use case for the valuecounter plugin is when you are processing a HTTP access
log (with the logparser input) and want to count the HTTP status codes.
The fields which will be counted must be configured with the `fields`
configuration directive. When no `fields` is provided the plugin will not count
any fields. The results are emitted in fields in the format:
`originalfieldname_fieldvalue = count`.
Valuecounter only works on fields of the type int, bool or string. Float fields
are being dropped to prevent the creating of too many fields.
### Configuration:
```toml
[[aggregators.valuecounter]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The fields for which the values will be counted
fields = ["status"]
```
### Measurements & Fields:
- measurement1
- field_value1
- field_value2
### Tags:
No tags are applied by this aggregator.
### Example Output:
Example for parsing a HTTP access log.
telegraf.conf:
```
[[inputs.logparser]]
files = ["/tmp/tst.log"]
[inputs.logparser.grok]
patterns = ['%{DATA:url:tag} %{NUMBER:response:string}']
measurement = "access"
[[aggregators.valuecounter]]
namepass = ["access"]
fields = ["response"]
```
/tmp/tst.log
```
/some/path 200
/some/path 401
/some/path 200
```
```
$ telegraf --config telegraf.conf --quiet
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="401" 1511948755991522282
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991531697
access,path=/tmp/tst.log,host=localhost.localdomain,url=/some/path response_200=2i,response_401=1i 1511948761000000000
```


@@ -0,0 +1,108 @@
package valuecounter
import (
"fmt"
"log"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type aggregate struct {
name string
tags map[string]string
fieldCount map[string]int
}
// ValueCounter an aggregation plugin
type ValueCounter struct {
cache map[uint64]aggregate
Fields []string
}
// NewValueCounter create a new aggregation plugin which counts the occurances
// of fields and emits the count.
func NewValueCounter() telegraf.Aggregator {
vc := &ValueCounter{}
vc.Reset()
return vc
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The fields for which the values will be counted
fields = []
`
// SampleConfig generates a sample config for the ValueCounter plugin
func (vc *ValueCounter) SampleConfig() string {
return sampleConfig
}
// Description returns the description of the ValueCounter plugin
func (vc *ValueCounter) Description() string {
return "Count the occurance of values in fields."
}
// Add is run on every metric which passes the plugin
func (vc *ValueCounter) Add(in telegraf.Metric) {
id := in.HashID()
// Check if the cache already has an entry for this metric, if not create it
if _, ok := vc.cache[id]; !ok {
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fieldCount: make(map[string]int),
}
vc.cache[id] = a
}
// Check if this metric has fields which we need to count, if so increment
// the count.
for fk, fv := range in.Fields() {
for _, cf := range vc.Fields {
if fk == cf {
// Do not process float types to prevent memory from blowing up
switch fv.(type) {
default:
log.Printf("I! Valuecounter: Unsupported field type. " +
"Must be an int, string or bool. Ignoring.")
continue
case uint64, int64, string, bool:
}
fn := fmt.Sprintf("%v_%v", fk, fv)
vc.cache[id].fieldCount[fn]++
}
}
}
}
// Push emits the counters
func (vc *ValueCounter) Push(acc telegraf.Accumulator) {
for _, agg := range vc.cache {
fields := map[string]interface{}{}
for field, count := range agg.fieldCount {
fields[field] = count
}
acc.AddFields(agg.name, fields, agg.tags)
}
}
// Reset the cache, executed after each push
func (vc *ValueCounter) Reset() {
vc.cache = make(map[uint64]aggregate)
}
func init() {
aggregators.Add("valuecounter", func() telegraf.Aggregator {
return NewValueCounter()
})
}


@@ -0,0 +1,126 @@
package valuecounter
import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
// Create a valuecounter with config
func NewTestValueCounter(fields []string) telegraf.Aggregator {
vc := &ValueCounter{
Fields: fields,
}
vc.Reset()
return vc
}
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"status": 200,
"somefield": 20.1,
"foobar": "bar",
},
time.Now(),
)
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"status": "OK",
"ignoreme": "string",
"andme": true,
"boolfield": false,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
vc := NewTestValueCounter([]string{"status"})
for n := 0; n < b.N; n++ {
vc.Add(m1)
vc.Add(m2)
}
}
// Test basic functionality
func TestBasic(t *testing.T) {
vc := NewTestValueCounter([]string{"status"})
acc := testutil.Accumulator{}
vc.Add(m1)
vc.Add(m2)
vc.Add(m1)
vc.Push(&acc)
expectedFields := map[string]interface{}{
"status_200": 2,
"status_OK": 1,
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test with multiple fields to count
func TestMultipleFields(t *testing.T) {
vc := NewTestValueCounter([]string{"status", "somefield", "boolfield"})
acc := testutil.Accumulator{}
vc.Add(m1)
vc.Add(m2)
vc.Add(m2)
vc.Add(m1)
vc.Push(&acc)
expectedFields := map[string]interface{}{
"status_200": 2,
"status_OK": 2,
"boolfield_false": 2,
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test with a reset between two runs
func TestWithReset(t *testing.T) {
vc := NewTestValueCounter([]string{"status"})
acc := testutil.Accumulator{}
vc.Add(m1)
vc.Add(m1)
vc.Add(m2)
vc.Push(&acc)
expectedFields := map[string]interface{}{
"status_200": 2,
"status_OK": 1,
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
vc.Reset()
vc.Add(m2)
vc.Add(m2)
vc.Add(m1)
vc.Push(&acc)
expectedFields = map[string]interface{}{
"status_200": 1,
"status_OK": 2,
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

View File

@@ -103,6 +103,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
_ "github.com/influxdata/telegraf/plugins/inputs/tengine"
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"

View File

@@ -92,6 +92,7 @@ Supported Burrow version: `1.x`
- group (string)
- topic (string)
- partition (int)
- owner (string)
* `burrow_topic`
- cluster (string)

View File

@@ -116,6 +116,7 @@ type (
Start apiStatusResponseLagItem `json:"start"`
End apiStatusResponseLagItem `json:"end"`
CurrentLag int64 `json:"current_lag"`
Owner string `json:"owner"`
}
// response: lag field item
@@ -447,6 +448,7 @@ func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc t
"group": group,
"topic": partition.Topic,
"partition": strconv.FormatInt(int64(partition.Partition), 10),
"owner": partition.Owner,
},
)
}

View File

@@ -129,9 +129,9 @@ func TestBurrowPartition(t *testing.T) {
},
}
tags := []map[string]string{
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0"},
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1"},
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2"},
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0", "owner": "kafka1"},
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1", "owner": "kafka2"},
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2", "owner": "kafka3"},
}
require.Empty(t, acc.Errors)

View File

@@ -10,7 +10,7 @@
{
"topic": "topicA",
"partition": 0,
"owner": "kafka",
"owner": "kafka1",
"status": "OK",
"start": {
"offset": 431323195,
@@ -28,7 +28,7 @@
{
"topic": "topicA",
"partition": 1,
"owner": "kafka",
"owner": "kafka2",
"status": "OK",
"start": {
"offset": 431322962,
@@ -46,7 +46,7 @@
{
"topic": "topicA",
"partition": 2,
"owner": "kafka",
"owner": "kafka3",
"status": "OK",
"start": {
"offset": 428636563,

View File

@@ -124,6 +124,7 @@ docker API.
- server_version
- container_image
- container_name
- container_status
- container_version
- fields:
- total_pgmafault
@@ -167,6 +168,7 @@ docker API.
- server_version
- container_image
- container_name
- container_status
- container_version
- cpu
- fields:
@@ -186,6 +188,7 @@ docker API.
- server_version
- container_image
- container_name
- container_status
- container_version
- network
- fields:
@@ -205,6 +208,7 @@ docker API.
- server_version
- container_image
- container_name
- container_status
- container_version
- device
- fields:
@@ -226,11 +230,27 @@ docker API.
- server_version
- container_image
- container_name
- container_status
- container_version
- fields:
- health_status (string)
- failing_streak (integer)
- docker_container_status
- tags:
- engine_host
- server_version
- container_image
- container_name
- container_status
- container_version
- fields:
- oomkilled (boolean)
- pid (integer)
- exitcode (integer)
- started_at (integer)
- finished_at (integer)
- docker_swarm
- tags:
- service_id
@@ -245,12 +265,12 @@ docker API.
```
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
```

View File

@@ -435,6 +435,23 @@ func (d *Docker) gatherContainer(
}
}
}
if info.State != nil {
tags["container_status"] = info.State.Status
statefields := map[string]interface{}{
"oomkilled": info.State.OOMKilled,
"pid": info.State.Pid,
"exitcode": info.State.ExitCode,
}
container_time, err := time.Parse(time.RFC3339, info.State.StartedAt)
if err == nil && !container_time.IsZero() {
statefields["started_at"] = container_time.UnixNano()
}
container_time, err = time.Parse(time.RFC3339, info.State.FinishedAt)
if err == nil && !container_time.IsZero() {
statefields["finished_at"] = container_time.UnixNano()
}
acc.AddFields("docker_container_status", statefields, tags, time.Now())
}
if info.State.Health != nil {
healthfields := map[string]interface{}{

View File

@@ -653,6 +653,7 @@ func TestDockerGatherInfo(t *testing.T) {
"label1": "test_value_1",
"label2": "test_value_2",
"server_version": "17.09.0-ce",
"container_status": "running",
},
)
acc.AssertContainsTaggedFields(t,
@@ -676,6 +677,7 @@ func TestDockerGatherInfo(t *testing.T) {
"label1": "test_value_1",
"label2": "test_value_2",
"server_version": "17.09.0-ce",
"container_status": "running",
},
)
}

View File

@@ -484,6 +484,12 @@ var containerInspect = types.ContainerJSON{
FailingStreak: 1,
Status: "Unhealthy",
},
Status: "running",
OOMKilled: false,
Pid: 1234,
ExitCode: 0,
StartedAt: "2018-06-14T05:48:53.266176036Z",
FinishedAt: "0001-01-01T00:00:00Z",
},
},
}
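
As a note on the state timestamps in this test data and in the gatherContainer hunk above (not part of the diff): Docker reports `0001-01-01T00:00:00Z` for a container that has never finished, and that value parses to Go's zero time, which is why `started_at`/`finished_at` are only added for non-zero times. A minimal standalone sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	for _, raw := range []string{
		"2018-06-14T05:48:53.266176036Z", // a real start time
		"0001-01-01T00:00:00Z",           // Docker's value for "never finished"
	} {
		t, err := time.Parse(time.RFC3339, raw)
		if err == nil && !t.IsZero() {
			fmt.Println("emit field:", t.UnixNano())
		} else {
			fmt.Println("skip zero or unparsable timestamp:", raw)
		}
	}
}
```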

View File

@@ -93,7 +93,10 @@ func (r runnerMock) Run(e *Exec, command string, acc telegraf.Accumulator) ([]by
}
func TestExec(t *testing.T) {
parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
parser, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "exec",
})
e := &Exec{
runner: newRunnerMock([]byte(validJson), nil),
Commands: []string{"testcommand arg1"},
@@ -119,7 +122,10 @@ func TestExec(t *testing.T) {
}
func TestExecMalformed(t *testing.T) {
parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
parser, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "exec",
})
e := &Exec{
runner: newRunnerMock([]byte(malformedJson), nil),
Commands: []string{"badcommand arg1"},
@@ -132,7 +138,10 @@ func TestExecMalformed(t *testing.T) {
}
func TestCommandError(t *testing.T) {
parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
parser, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "exec",
})
e := &Exec{
runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")),
Commands: []string{"badcommand"},

View File

@@ -26,7 +26,11 @@ func TestHTTPwithJSONFormat(t *testing.T) {
URLs: []string{url},
}
metricName := "metricName"
p, _ := parsers.NewJSONParser(metricName, nil, nil)
p, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "metricName",
})
plugin.SetParser(p)
var acc testutil.Accumulator
@@ -63,8 +67,11 @@ func TestHTTPHeaders(t *testing.T) {
URLs: []string{url},
Headers: map[string]string{header: headerValue},
}
metricName := "metricName"
p, _ := parsers.NewJSONParser(metricName, nil, nil)
p, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "metricName",
})
plugin.SetParser(p)
var acc testutil.Accumulator
@@ -83,7 +90,10 @@ func TestInvalidStatusCode(t *testing.T) {
}
metricName := "metricName"
p, _ := parsers.NewJSONParser(metricName, nil, nil)
p, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: metricName,
})
plugin.SetParser(p)
var acc testutil.Accumulator
@@ -105,8 +115,10 @@ func TestMethod(t *testing.T) {
Method: "POST",
}
metricName := "metricName"
p, _ := parsers.NewJSONParser(metricName, nil, nil)
p, _ := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "metricName",
})
plugin.SetParser(p)
var acc testutil.Accumulator

View File

@@ -343,9 +343,6 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
}
func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
h.mu.Lock()
defer h.mu.Unlock()
h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
h.handler.SetTimeFunc(func() time.Time { return t })
metrics, err := h.parser.Parse(b)

View File

@@ -181,7 +181,12 @@ func (h *HttpJson) gatherServer(
"server": serverURL,
}
parser, err := parsers.NewJSONParser(msrmnt_name, h.TagKeys, tags)
parser, err := parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: msrmnt_name,
TagKeys: h.TagKeys,
DefaultTags: tags,
})
if err != nil {
return err
}
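
The same NewJSONParser to NewParser replacement recurs in the test files below, so here is a minimal standalone sketch (not part of the diff) of the generic constructor in use. The metric name, tag keys, default tags, and JSON payload are placeholders, and the sketch assumes the telegraf `plugins/parsers` package is importable.

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers"
)

func main() {
	// Build a JSON parser through the generic constructor instead of the
	// deprecated per-format helper.
	parser, err := parsers.NewParser(&parsers.Config{
		DataFormat:  "json",
		MetricName:  "example",
		TagKeys:     []string{"status"},
		DefaultTags: map[string]string{"source": "sketch"},
	})
	if err != nil {
		panic(err)
	}

	metrics, err := parser.Parse([]byte(`{"status": "ok", "value": 42}`))
	if err != nil {
		panic(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
```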

View File

@@ -125,7 +125,10 @@ func TestRunParserAndGatherJSON(t *testing.T) {
k.acc = &acc
defer close(k.done)
k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil)
k.parser, _ = parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "kafka_json_test",
})
go k.receiver()
in <- saramaMsg(testMsgJSON)
acc.Wait(1)

View File

@@ -125,7 +125,10 @@ func TestRunParserAndGatherJSON(t *testing.T) {
k.acc = &acc
defer close(k.done)
k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil)
k.parser, _ = parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "kafka_json_test",
})
go k.receiver()
in <- saramaMsg(testMsgJSON)
acc.Wait(1)

View File

@@ -108,7 +108,9 @@ You must capture at least one field per line.
- ts-"CUSTOM"
CUSTOM time layouts must be within quotes and be the representation of the
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"`
To match a comma decimal point you can use a period in the pattern string.
See https://golang.org/pkg/time/#Parse for more details.
Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns),
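
As a quick illustration of the comma handling described in this hunk (not part of the README itself), a minimal standalone Go sketch, assuming the value is simply normalized from a comma to a period before being handed to `time.Parse` against the reference-time layout, as the grok parser change below does:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	layout := "2006-01-02 15:04:05.000" // custom layout written with a period
	raw := "2018-01-02 15:04:05,000"    // log timestamp with a comma decimal point

	// Replace the comma with a period, then parse against the layout.
	ts, err := time.Parse(layout, strings.Replace(raw, ",", ".", -1))
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.UTC())
}
```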

View File

@@ -293,7 +293,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
timestamp = time.Unix(0, iv)
}
case SYSLOG_TIMESTAMP:
ts, err := time.ParseInLocation(time.Stamp, v, p.loc)
ts, err := time.ParseInLocation("Jan 02 15:04:05", v, p.loc)
if err == nil {
if ts.Year() == 0 {
ts = ts.AddDate(timestamp.Year(), 0, 0)
@@ -335,6 +335,9 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case DROP:
// goodbye!
default:
// Replace commas with dot character
v = strings.Replace(v, ",", ".", -1)
ts, err := time.ParseInLocation(t, v, p.loc)
if err == nil {
timestamp = ts

View File

@@ -971,39 +971,32 @@ func TestNewlineInPatterns(t *testing.T) {
require.NotNil(t, m)
}
func TestSyslogTimestamp(t *testing.T) {
tests := []struct {
name string
line string
expected time.Time
}{
{
name: "two digit day of month",
line: "Sep 25 09:01:55 value=42",
expected: time.Date(2018, time.September, 25, 9, 1, 55, 0, time.UTC),
},
{
name: "one digit day of month single space",
line: "Sep 2 09:01:55 value=42",
expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC),
},
{
name: "one digit day of month double space",
line: "Sep 2 09:01:55 value=42",
expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &Parser{
Patterns: []string{`%{SYSLOGTIMESTAMP:timestamp:ts-syslog} value=%{NUMBER:value:int}`},
timeFunc: func() time.Time { return time.Date(2017, time.April, 1, 0, 0, 0, 0, time.UTC) },
}
require.NoError(t, p.Compile())
m, err := p.ParseLine(tt.line)
require.NoError(t, err)
require.NotNil(t, m)
require.Equal(t, tt.expected, m.Time())
})
func TestSyslogTimestampParser(t *testing.T) {
p := &Parser{
Patterns: []string{`%{SYSLOGTIMESTAMP:timestamp:ts-syslog} value=%{NUMBER:value:int}`},
timeFunc: func() time.Time { return time.Date(2018, time.April, 1, 0, 0, 0, 0, time.UTC) },
}
require.NoError(t, p.Compile())
m, err := p.ParseLine("Sep 25 09:01:55 value=42")
require.NoError(t, err)
require.NotNil(t, m)
require.Equal(t, 2018, m.Time().Year())
}
func TestReplaceTimestampComma(t *testing.T) {
p := &Parser{
Patterns: []string{`%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05.000"} successfulMatches=%{NUMBER:value:int}`},
}
require.NoError(t, p.Compile())
m, err := p.ParseLine("2018-02-21 13:10:34,555 successfulMatches=1")
require.NoError(t, err)
require.NotNil(t, m)
require.Equal(t, 2018, m.Time().Year())
require.Equal(t, 13, m.Time().Hour())
require.Equal(t, 34, m.Time().Second())
// Convert nanoseconds to milliseconds for the comparison
require.Equal(t, 555, m.Time().Nanosecond()/1000000)
}

View File

@@ -172,7 +172,10 @@ func TestRunParserAndGatherJSON(t *testing.T) {
n.acc = &acc
defer close(n.done)
n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil)
n.parser, _ = parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "nats_json_test",
})
go n.receiver()
in <- mqttMsg(testMsgJSON)

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"database/sql"
"fmt"
"log"
"strconv"
"strings"
"sync"
@@ -79,7 +80,7 @@ var sampleConfig = `
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
gather_process_list = true
#
## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
gather_user_statistics = true
#
## gather auto_increment columns and max values from information schema
@@ -281,8 +282,9 @@ const (
GROUP BY command,state
ORDER BY null`
infoSchemaUserStatisticsQuery = `
SELECT *
FROM information_schema.user_statistics`
SELECT *,count(*)
FROM information_schema.user_statistics
GROUP BY user`
infoSchemaAutoIncQuery = `
SELECT table_schema, table_name, column_name, auto_increment,
CAST(pow(2, case data_type
@@ -759,6 +761,103 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
if len(fields) > 0 {
acc.AddFields("mysql", fields, tags)
}
// gather connection metrics from processlist for each user
if m.GatherProcessList {
conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
if err != nil {
log.Printf("E! MySQL Error gathering process list: %s", err)
} else {
for conn_rows.Next() {
var user string
var connections int64
err = conn_rows.Scan(&user, &connections)
if err != nil {
return err
}
tags := map[string]string{"server": servtag, "user": user}
fields := make(map[string]interface{})
if err != nil {
return err
}
fields["connections"] = connections
acc.AddFields("mysql_users", fields, tags)
}
}
}
// gather connection metrics from user_statistics for each user
if m.GatherUserStatistics {
conn_rows, err := db.Query("select user, total_connections, concurrent_connections, connected_time, busy_time, cpu_time, bytes_received, bytes_sent, binlog_bytes_written, rows_fetched, rows_updated, table_rows_read, select_commands, update_commands, other_commands, commit_transactions, rollback_transactions, denied_connections, lost_connections, access_denied, empty_queries, total_ssl_connections FROM INFORMATION_SCHEMA.USER_STATISTICS GROUP BY user")
if err != nil {
log.Printf("E! MySQL Error gathering user stats: %s", err)
} else {
for conn_rows.Next() {
var user string
var total_connections int64
var concurrent_connections int64
var connected_time int64
var busy_time int64
var cpu_time int64
var bytes_received int64
var bytes_sent int64
var binlog_bytes_written int64
var rows_fetched int64
var rows_updated int64
var table_rows_read int64
var select_commands int64
var update_commands int64
var other_commands int64
var commit_transactions int64
var rollback_transactions int64
var denied_connections int64
var lost_connections int64
var access_denied int64
var empty_queries int64
var total_ssl_connections int64
err = conn_rows.Scan(&user, &total_connections, &concurrent_connections,
&connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written,
&rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands,
&commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied,
&empty_queries, &total_ssl_connections,
)
if err != nil {
return err
}
tags := map[string]string{"server": servtag, "user": user}
fields := map[string]interface{}{
"total_connections": total_connections,
"concurrent_connections": concurrent_connections,
"connected_time": connected_time,
"busy_time": busy_time,
"cpu_time": cpu_time,
"bytes_received": bytes_received,
"bytes_sent": bytes_sent,
"binlog_bytes_written": binlog_bytes_written,
"rows_fetched": rows_fetched,
"rows_updated": rows_updated,
"table_rows_read": table_rows_read,
"select_commands": select_commands,
"update_commands": update_commands,
"other_commands": other_commands,
"commit_transactions": commit_transactions,
"rollback_transactions": rollback_transactions,
"denied_connections": denied_connections,
"lost_connections": lost_connections,
"access_denied": access_denied,
"empty_queries": empty_queries,
"total_ssl_connections": total_ssl_connections,
}
acc.AddFields("mysql_user_stats", fields, tags)
}
}
}
return nil
}
@@ -809,29 +908,6 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf.
} else {
acc.AddFields("mysql_process_list", fields, tags)
}
// get count of connections from each user
conn_rows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
if err != nil {
return err
}
for conn_rows.Next() {
var user string
var connections int64
err = conn_rows.Scan(&user, &connections)
if err != nil {
return err
}
tags := map[string]string{"server": servtag, "user": user}
fields := make(map[string]interface{})
fields["connections"] = connections
acc.AddFields("mysql_users", fields, tags)
}
return nil
}
@@ -841,190 +917,77 @@ func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegr
// run query
rows, err := db.Query(infoSchemaUserStatisticsQuery)
if err != nil {
// disable collecting if table is not found (mysql specific error)
// (suppresses repeat errors)
if strings.Contains(err.Error(), "nknown table 'user_statistics'") {
m.GatherUserStatistics = false
}
return err
}
defer rows.Close()
cols, err := columnsToLower(rows.Columns())
if err != nil {
return err
}
read, err := getColSlice(len(cols))
if err != nil {
return err
}
var (
user string
total_connections int64
concurrent_connections int64
connected_time int64
busy_time int64
cpu_time int64
bytes_received int64
bytes_sent int64
binlog_bytes_written int64
rows_fetched int64
rows_updated int64
table_rows_read int64
select_commands int64
update_commands int64
other_commands int64
commit_transactions int64
rollback_transactions int64
denied_connections int64
lost_connections int64
access_denied int64
empty_queries int64
total_ssl_connections int64
count uint32
)
servtag := getDSNTag(serv)
for rows.Next() {
err = rows.Scan(read...)
err = rows.Scan(&user, &total_connections, &concurrent_connections,
&connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written,
&rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands,
&commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied,
&empty_queries, &total_ssl_connections, &count,
)
if err != nil {
return err
}
tags := map[string]string{"server": servtag, "user": *read[0].(*string)}
fields := map[string]interface{}{}
tags := map[string]string{"server": servtag, "user": user}
fields := map[string]interface{}{
for i := range cols {
if i == 0 {
continue // skip "user"
}
switch v := read[i].(type) {
case *int64:
fields[cols[i]] = *v
case *float64:
fields[cols[i]] = *v
case *string:
fields[cols[i]] = *v
default:
return fmt.Errorf("Unknown column type - %T", v)
}
"total_connections": total_connections,
"concurrent_connections": concurrent_connections,
"connected_time": connected_time,
"busy_time": busy_time,
"cpu_time": cpu_time,
"bytes_received": bytes_received,
"bytes_sent": bytes_sent,
"binlog_bytes_written": binlog_bytes_written,
"rows_fetched": rows_fetched,
"rows_updated": rows_updated,
"table_rows_read": table_rows_read,
"select_commands": select_commands,
"update_commands": update_commands,
"other_commands": other_commands,
"commit_transactions": commit_transactions,
"rollback_transactions": rollback_transactions,
"denied_connections": denied_connections,
"lost_connections": lost_connections,
"access_denied": access_denied,
"empty_queries": empty_queries,
"total_ssl_connections": total_ssl_connections,
}
acc.AddFields("mysql_user_stats", fields, tags)
}
return nil
}
// columnsToLower converts selected column names to lowercase.
func columnsToLower(s []string, e error) ([]string, error) {
if e != nil {
return nil, e
}
d := make([]string, len(s))
for i := range s {
d[i] = strings.ToLower(s[i])
}
return d, nil
}
// getColSlice returns an interface slice that can be used in the row.Scan().
func getColSlice(l int) ([]interface{}, error) {
// list of all possible column names
var (
user string
total_connections int64
concurrent_connections int64
connected_time int64
busy_time int64
cpu_time int64
bytes_received int64
bytes_sent int64
binlog_bytes_written int64
rows_read int64
rows_sent int64
rows_deleted int64
rows_inserted int64
rows_updated int64
select_commands int64
update_commands int64
other_commands int64
commit_transactions int64
rollback_transactions int64
denied_connections int64
lost_connections int64
access_denied int64
empty_queries int64
total_ssl_connections int64
max_statement_time_exceeded int64
// maria specific
fbusy_time float64
fcpu_time float64
// percona specific
rows_fetched int64
table_rows_read int64
)
switch l {
case 23: // maria5
return []interface{}{
&user,
&total_connections,
&concurrent_connections,
&connected_time,
&fbusy_time,
&fcpu_time,
&bytes_received,
&bytes_sent,
&binlog_bytes_written,
&rows_read,
&rows_sent,
&rows_deleted,
&rows_inserted,
&rows_updated,
&select_commands,
&update_commands,
&other_commands,
&commit_transactions,
&rollback_transactions,
&denied_connections,
&lost_connections,
&access_denied,
&empty_queries,
}, nil
case 25: // maria10
return []interface{}{
&user,
&total_connections,
&concurrent_connections,
&connected_time,
&fbusy_time,
&fcpu_time,
&bytes_received,
&bytes_sent,
&binlog_bytes_written,
&rows_read,
&rows_sent,
&rows_deleted,
&rows_inserted,
&rows_updated,
&select_commands,
&update_commands,
&other_commands,
&commit_transactions,
&rollback_transactions,
&denied_connections,
&lost_connections,
&access_denied,
&empty_queries,
&total_ssl_connections,
&max_statement_time_exceeded,
}, nil
case 22: // percona
return []interface{}{
&user,
&total_connections,
&concurrent_connections,
&connected_time,
&busy_time,
&cpu_time,
&bytes_received,
&bytes_sent,
&binlog_bytes_written,
&rows_fetched,
&rows_updated,
&table_rows_read,
&select_commands,
&update_commands,
&other_commands,
&commit_transactions,
&rollback_transactions,
&denied_connections,
&lost_connections,
&access_denied,
&empty_queries,
&total_ssl_connections,
}, nil
}
return nil, fmt.Errorf("Not Supported - %d columns", l)
}
// gatherPerfTableIOWaits can be used to get total count and time
// of I/O wait event for each table and process
func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Accumulator) error {

View File

@@ -49,7 +49,7 @@ func TestMysqlGetDSNTag(t *testing.T) {
},
{
"tcp(localhost)/",
"localhost",
"localhost:3306",
},
{
"root:passwd@tcp(192.168.1.1:3306)/?tls=false",

View File

@@ -108,7 +108,10 @@ func TestRunParserAndGatherJSON(t *testing.T) {
n.acc = &acc
defer close(n.done)
n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil)
n.parser, _ = parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "nats_json_test",
})
n.wg.Add(1)
go n.receiver()
in <- natsMsg(testMsgJSON)

View File

@@ -17,16 +17,17 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid
### Metrics
- measurement: `nvidia_smi`
- tags
- `name` (type of GPU e.g. `GeForce GTX 170 Ti`)
- `name` (type of GPU e.g. `GeForce GTX 1070 Ti`)
- `compute_mode` (The compute mode of the GPU e.g. `Default`)
- `index` (The port index where the GPU is connected to the motherboard e.g. `1`)
- `pstate` (Overclocking state for the GPU e.g. `P0`)
- `uuid` (A unique identifier for the GPU e.g. `GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665`)
- fields
- `fan_speed` (integer, percentage)
- `memory_free` (integer, KB)
- `memory_used` (integer, KB)
- `memory_total` (integer, KB)
- `memory_free` (integer, MiB)
- `memory_used` (integer, MiB)
- `memory_total` (integer, MiB)
- `power_draw` (float, W)
- `temperature_gpu` (integer, degrees C)
- `utilization_gpu` (integer, percentage)
- `utilization_memory` (integer, percentage)

View File

@@ -16,20 +16,21 @@ import (
var (
measurement = "nvidia_smi"
metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index"
metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw"
metricNames = [][]string{
[]string{"fan_speed", "field"},
[]string{"memory_total", "field"},
[]string{"memory_used", "field"},
[]string{"memory_free", "field"},
[]string{"fan_speed", "integer"},
[]string{"memory_total", "integer"},
[]string{"memory_used", "integer"},
[]string{"memory_free", "integer"},
[]string{"pstate", "tag"},
[]string{"temperature_gpu", "field"},
[]string{"temperature_gpu", "integer"},
[]string{"name", "tag"},
[]string{"uuid", "tag"},
[]string{"compute_mode", "tag"},
[]string{"utilization_gpu", "field"},
[]string{"utilization_memory", "field"},
[]string{"utilization_gpu", "integer"},
[]string{"utilization_memory", "integer"},
[]string{"index", "tag"},
[]string{"power_draw", "float"},
}
)
@@ -127,7 +128,7 @@ func parseLine(line string) (map[string]string, map[string]interface{}, error) {
for i, m := range metricNames {
col := strings.TrimSpace(met[i])
// First handle the tags
// Handle the tags
if m[1] == "tag" {
tags[m[0]] = col
continue
@@ -137,12 +138,23 @@ func parseLine(line string) (map[string]string, map[string]interface{}, error) {
continue
}
// Then parse the integers out of the fields
out, err := strconv.ParseInt(col, 10, 64)
if err != nil {
return tags, fields, err
// Parse the integers
if m[1] == "integer" {
out, err := strconv.ParseInt(col, 10, 64)
if err != nil {
return tags, fields, err
}
fields[m[0]] = out
}
// Parse the floats
if m[1] == "float" {
out, err := strconv.ParseFloat(col, 64)
if err != nil {
return tags, fields, err
}
fields[m[0]] = out
}
fields[m[0]] = out
}
// Return the tags and fields

View File

@@ -7,7 +7,7 @@ import (
)
func TestParseLineStandard(t *testing.T) {
line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1\n"
line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1, 0.0\n"
tags, fields, err := parseLine(line)
if err != nil {
t.Fail()
@@ -37,7 +37,7 @@ func TestParseLineBad(t *testing.T) {
}
func TestParseLineNotSupported(t *testing.T) {
line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0\n"
line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0, 0.0\n"
_, fields, err := parseLine(line)
require.NoError(t, err)
require.Equal(t, nil, fields["fan_speed"])

View File

@@ -6,6 +6,8 @@ import (
"os/exec"
"strconv"
"strings"
"github.com/influxdata/telegraf/internal"
)
// Implementation of PIDGatherer that execs pgrep to find processes
@@ -62,6 +64,12 @@ func find(path string, args []string) ([]PID, error) {
func run(path string, args []string) (string, error) {
out, err := exec.Command(path, args...).Output()
// if the exit code is 1, i.e. no processes found, do not return an error
if i, _ := internal.ExitStatus(err); i == 1 {
return "", nil
}
if err != nil {
return "", fmt.Errorf("Error running %s: %s", path, err)
}
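
For context (not part of the diff), a minimal standalone sketch of the same idea using only the standard library instead of telegraf's `internal.ExitStatus` helper: `pgrep` exits with status 1 when nothing matches, which is treated as an empty result rather than an error. The process name is a placeholder.

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	out, err := exec.Command("pgrep", "some-process-name").Output()
	if ee, ok := err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
		// Exit status 1 means no processes matched; not an error.
		err = nil
	}
	if err != nil {
		fmt.Println("error running pgrep:", err)
		return
	}
	fmt.Printf("matching pids: %q\n", out)
}
```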

View File

@@ -97,7 +97,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
p.createProcess = defaultProcess
}
procs, err := p.updateProcesses(p.procs)
procs, err := p.updateProcesses(acc, p.procs)
if err != nil {
acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
p.Exe, p.PidFile, p.Pattern, p.User, err.Error()))
@@ -230,8 +230,8 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) {
}
// Update monitored Processes
func (p *Procstat) updateProcesses(prevInfo map[PID]Process) (map[PID]Process, error) {
pids, tags, err := p.findPids()
func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Process) (map[PID]Process, error) {
pids, tags, err := p.findPids(acc)
if err != nil {
return nil, err
}
@@ -281,9 +281,9 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) {
}
// Get matching PIDs and their initial tags
func (p *Procstat) findPids() ([]PID, map[string]string, error) {
func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) {
var pids []PID
var tags map[string]string
tags := make(map[string]string)
var err error
f, err := p.getPIDFinder()
@@ -313,7 +313,18 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) {
err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, or cgroup must be specified")
}
return pids, tags, err
rTags := make(map[string]string)
for k, v := range tags {
rTags[k] = v
}
// Add a metric with information about the pgrep query
fields := make(map[string]interface{})
tags["pid_finder"] = p.PidFinder
fields["pid_count"] = len(pids)
acc.AddFields("procstat_lookup", fields, tags)
return pids, rTags, err
}
// execCommand is so tests can mock out exec.Command usage.

View File

@@ -343,7 +343,8 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
createPIDFinder: pidFinder([]PID{}, nil),
SystemdUnit: "TestGather_systemdUnitPIDs",
}
pids, tags, err := p.findPids()
var acc testutil.Accumulator
pids, tags, err := p.findPids(&acc)
require.NoError(t, err)
assert.Equal(t, []PID{11408}, pids)
assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])
@@ -364,8 +365,20 @@ func TestGather_cgroupPIDs(t *testing.T) {
createPIDFinder: pidFinder([]PID{}, nil),
CGroup: td,
}
pids, tags, err := p.findPids()
var acc testutil.Accumulator
pids, tags, err := p.findPids(&acc)
require.NoError(t, err)
assert.Equal(t, []PID{1234, 5678}, pids)
assert.Equal(t, td, tags["cgroup"])
}
func TestProcstatLookupMetric(t *testing.T) {
p := Procstat{
createPIDFinder: pidFinder([]PID{543}, nil),
Exe: "-Gsys",
}
var acc testutil.Accumulator
err := acc.GatherError(p.Gather)
require.NoError(t, err)
require.Equal(t, len(p.procs)+1, len(acc.Metrics))
}

View File

@@ -14,6 +14,13 @@
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 6379 is used
servers = ["tcp://localhost:6379"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
```
### Measurements & Fields:

View File

@@ -13,11 +13,13 @@ import (
"github.com/go-redis/redis"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Redis struct {
Servers []string
tls.ClientConfig
clients []Client
initialized bool
@@ -56,6 +58,13 @@ var sampleConfig = `
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 6379 is used
servers = ["tcp://localhost:6379"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true
`
func (r *Redis) SampleConfig() string {
@@ -109,12 +118,18 @@ func (r *Redis) init(acc telegraf.Accumulator) error {
address = u.Host
}
tlsConfig, err := r.ClientConfig.TLSConfig()
if err != nil {
return err
}
client := redis.NewClient(
&redis.Options{
Addr: address,
Password: password,
Network: u.Scheme,
PoolSize: 1,
Addr: address,
Password: password,
Network: u.Scheme,
PoolSize: 1,
TLSConfig: tlsConfig,
},
)

View File

@@ -5,7 +5,7 @@ The [solr](http://lucene.apache.org/solr/) plugin collects stats via the
More about [performance statistics](https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference)
Tested from 3.5 to 6.*
Tested from 3.5 to 7.*
### Configuration:

View File

@@ -113,20 +113,7 @@ type Hitratio interface{}
// Cache is an exported type that
// contains cache metrics
type Cache struct {
Stats struct {
CumulativeEvictions int64 `json:"cumulative_evictions"`
CumulativeHitratio Hitratio `json:"cumulative_hitratio"`
CumulativeHits int64 `json:"cumulative_hits"`
CumulativeInserts int64 `json:"cumulative_inserts"`
CumulativeLookups int64 `json:"cumulative_lookups"`
Evictions int64 `json:"evictions"`
Hitratio Hitratio `json:"hitratio"`
Hits int64 `json:"hits"`
Inserts int64 `json:"inserts"`
Lookups int64 `json:"lookups"`
Size int64 `json:"size"`
WarmupTime int64 `json:"warmupTime"`
} `json:"stats"`
Stats map[string]interface{} `json:"stats"`
}
// NewSolr return a new instance of Solr
@@ -424,21 +411,30 @@ func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBe
return err
}
for name, metrics := range cacheMetrics {
cumulativeHits := getFloat(metrics.Stats.CumulativeHitratio)
hitratio := getFloat(metrics.Stats.Hitratio)
coreFields := map[string]interface{}{
"cumulative_evictions": metrics.Stats.CumulativeEvictions,
"cumulative_hitratio": cumulativeHits,
"cumulative_hits": metrics.Stats.CumulativeHits,
"cumulative_inserts": metrics.Stats.CumulativeInserts,
"cumulative_lookups": metrics.Stats.CumulativeLookups,
"evictions": metrics.Stats.Evictions,
"hitratio": hitratio,
"hits": metrics.Stats.Hits,
"inserts": metrics.Stats.Inserts,
"lookups": metrics.Stats.Lookups,
"size": metrics.Stats.Size,
"warmup_time": metrics.Stats.WarmupTime,
coreFields := make(map[string]interface{})
for key, value := range metrics.Stats {
splitKey := strings.Split(key, ".")
newKey := splitKey[len(splitKey)-1]
switch newKey {
case "cumulative_evictions",
"cumulative_hits",
"cumulative_inserts",
"cumulative_lookups",
"eviction",
"hits",
"inserts",
"lookups",
"size",
"evictions":
coreFields[newKey] = getInt(value)
case "hitratio",
"cumulative_hitratio":
coreFields[newKey] = getFloat(value)
case "warmupTime":
coreFields["warmup_time"] = getInt(value)
default:
continue
}
}
acc.AddFields(
"solr_cache",

View File

@@ -43,6 +43,17 @@ func TestGatherStats(t *testing.T) {
map[string]string{"core": "main", "handler": "filterCache"})
}
func TestSolr7MbeansStats(t *testing.T) {
ts := createMockSolr7Server()
solr := NewSolr()
solr.Servers = []string{ts.URL}
var acc testutil.Accumulator
require.NoError(t, solr.Gather(&acc))
acc.AssertContainsTaggedFields(t, "solr_cache",
solr7CacheExpected,
map[string]string{"core": "main", "handler": "documentCache"})
}
func TestSolr3GatherStats(t *testing.T) {
ts := createMockSolr3Server()
solr := NewSolr()
@@ -150,3 +161,18 @@ func createMockSolr3Server() *httptest.Server {
}
}))
}
func createMockSolr7Server() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, statusResponse)
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, mBeansSolr7Response)
} else {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintln(w, "nope")
}
}))
}

View File

@@ -0,0 +1,60 @@
package solr
const mBeansSolr7Response = `
{
"responseHeader":{
"status":0,
"QTime":2
},
"solr-mbeans":[
"CORE",
{
},
"QUERYHANDLER",
{
},
"UPDATEHANDLER",
{
},
"CACHE",
{
"documentCache":{
"class":"org.apache.solr.search.LRUCache",
"description":"LRU Cache(maxSize=16384, initialSize=4096)",
"stats":{
"CACHE.searcher.documentCache.evictions": 141485,
"CACHE.searcher.documentCache.cumulative_lookups": 265132,
"CACHE.searcher.documentCache.hitratio": 0.44,
"CACHE.searcher.documentCache.size": 8192,
"CACHE.searcher.documentCache.cumulative_hitratio": 0.42,
"CACHE.searcher.documentCache.lookups": 1234,
"CACHE.searcher.documentCache.warmupTime": 1,
"CACHE.searcher.documentCache.inserts": 987,
"CACHE.searcher.documentCache.hits": 1111,
"CACHE.searcher.documentCache.cumulative_hits": 115364,
"CACHE.searcher.documentCache.cumulative_inserts": 149768,
"CACHE.searcher.documentCache.cumulative_evictions": 141486
}
}
}
]
}
`
var solr7CacheExpected = map[string]interface{}{
"evictions": int64(141485),
"cumulative_evictions": int64(141486),
"cumulative_hitratio": float64(0.42),
"cumulative_hits": int64(115364),
"cumulative_inserts": int64(149768),
"cumulative_lookups": int64(265132),
"hitratio": float64(0.44),
"hits": int64(1111),
"inserts": int64(987),
"lookups": int64(1234),
"size": int64(8192),
"warmup_time": int64(1),
}
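
A small standalone sketch (not part of the diff) of the key normalization that the addCacheMetricsToAcc change above relies on: Solr 7 reports cache statistics under dotted keys like the ones in the mock response, so only the last segment is kept as the field name. The values here are placeholders.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	stats := map[string]interface{}{
		"CACHE.searcher.documentCache.hits":                1111.0,
		"CACHE.searcher.documentCache.cumulative_hitratio": 0.42,
	}

	fields := make(map[string]interface{})
	for key, value := range stats {
		parts := strings.Split(key, ".")
		fields[parts[len(parts)-1]] = value // keep only the final key segment
	}
	fmt.Println(fields) // map[cumulative_hitratio:0.42 hits:1111]
}
```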

View File

@@ -16,7 +16,7 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details.
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
```
#### Docker container

View File

@@ -1,30 +0,0 @@
# Swap Input Plugin
The swap plugin collects system swap metrics.
For more information on what swap memory is, read [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space).
### Configuration:
```toml
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
```
### Metrics:
- swap
- fields:
- free (int)
- total (int)
- used (int)
- used_percent (float)
- in (int)
- out (int)
### Example Output:
```
swap total=20855394304i,used_percent=45.43883523785713,used=9476448256i,free=1715331072i 1511894782000000000
```

View File

@@ -28,7 +28,7 @@ var diskSampleConfig = `
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
`
func (_ *DiskStats) SampleConfig() string {

View File

@@ -42,9 +42,45 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error {
return nil
}
type SwapStats struct {
ps PS
}
func (_ *SwapStats) Description() string {
return "Read metrics about swap memory usage"
}
func (_ *SwapStats) SampleConfig() string { return "" }
func (s *SwapStats) Gather(acc telegraf.Accumulator) error {
swap, err := s.ps.SwapStat()
if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err)
}
fieldsG := map[string]interface{}{
"total": swap.Total,
"used": swap.Used,
"free": swap.Free,
"used_percent": swap.UsedPercent,
}
fieldsC := map[string]interface{}{
"in": swap.Sin,
"out": swap.Sout,
}
acc.AddGauge("swap", fieldsG, nil)
acc.AddCounter("swap", fieldsC, nil)
return nil
}
func init() {
ps := newSystemPS()
inputs.Add("mem", func() telegraf.Input {
return &MemStats{ps: ps}
})
inputs.Add("swap", func() telegraf.Input {
return &SwapStats{ps: ps}
})
}

View File

@@ -30,6 +30,17 @@ func TestMemStats(t *testing.T) {
mps.On("VMStat").Return(vms, nil)
sms := &mem.SwapMemoryStat{
Total: 8123,
Used: 1232,
Free: 6412,
UsedPercent: 12.2,
Sin: 7,
Sout: 830,
}
mps.On("SwapStat").Return(sms, nil)
err = (&MemStats{&mps}).Gather(&acc)
require.NoError(t, err)
@@ -50,4 +61,15 @@ func TestMemStats(t *testing.T) {
acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string))
acc.Metrics = nil
err = (&SwapStats{&mps}).Gather(&acc)
require.NoError(t, err)
swapfields := map[string]interface{}{
"total": uint64(8123),
"used": uint64(1232),
"used_percent": float64(12.2),
"free": uint64(6412),
}
acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string))
}

View File

@@ -1,47 +0,0 @@
package system
import (
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type SwapStats struct {
ps PS
}
func (_ *SwapStats) Description() string {
return "Read metrics about swap memory usage"
}
func (_ *SwapStats) SampleConfig() string { return "" }
func (s *SwapStats) Gather(acc telegraf.Accumulator) error {
swap, err := s.ps.SwapStat()
if err != nil {
return fmt.Errorf("error getting swap memory info: %s", err)
}
fieldsG := map[string]interface{}{
"total": swap.Total,
"used": swap.Used,
"free": swap.Free,
"used_percent": swap.UsedPercent,
}
fieldsC := map[string]interface{}{
"in": swap.Sin,
"out": swap.Sout,
}
acc.AddGauge("swap", fieldsG, nil)
acc.AddCounter("swap", fieldsC, nil)
return nil
}
func init() {
ps := newSystemPS()
inputs.Add("swap", func() telegraf.Input {
return &SwapStats{ps: ps}
})
}

View File

@@ -1,38 +0,0 @@
package system
import (
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/mem"
"github.com/stretchr/testify/require"
)
func TestSwapStats(t *testing.T) {
var mps MockPS
var err error
defer mps.AssertExpectations(t)
var acc testutil.Accumulator
sms := &mem.SwapMemoryStat{
Total: 8123,
Used: 1232,
Free: 6412,
UsedPercent: 12.2,
Sin: 7,
Sout: 830,
}
mps.On("SwapStat").Return(sms, nil)
err = (&SwapStats{&mps}).Gather(&acc)
require.NoError(t, err)
swapfields := map[string]interface{}{
"total": uint64(8123),
"used": uint64(1232),
"used_percent": float64(12.2),
"free": uint64(6412),
}
acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string))
}

View File

@@ -1,4 +1,4 @@
# tail Input Plugin
# Tail Input Plugin
The tail plugin "tails" a logfile and parses each log message.
@@ -49,3 +49,7 @@ The plugin expects messages in one of the
data_format = "influx"
```
### Metrics:
Metrics are produced according to the `data_format` option. Additionally, a
`path` tag containing the name of the file being tailed is added to each metric.

View File

@@ -146,7 +146,11 @@ func (t *Tail) receiver(tailer *tail.Tail) {
m, err = t.parser.ParseLine(text)
if err == nil {
t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
if m != nil {
tags := m.Tags()
tags["path"] = tailer.Filename
t.acc.AddFields(m.Name(), m.Fields(), tags, m.Time())
}
} else {
t.acc.AddError(fmt.Errorf("E! Malformed log line in %s: [%s], Error: %s\n",
tailer.Filename, line.Text, err))

View File

@@ -300,7 +300,10 @@ func TestRunParserJSONMsg(t *testing.T) {
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil)
listener.parser, _ = parsers.NewParser(&parsers.Config{
DataFormat: "json",
MetricName: "udp_json_test",
})
listener.wg.Add(1)
go listener.tcpParser()

View File

@@ -0,0 +1,71 @@
# Tengine Input Plugin
The tengine plugin gathers metrics from the
[Tengine Web Server](http://tengine.taobao.org/) via the
[reqstat](http://tengine.taobao.org/document/http_reqstat.html) module.
### Configuration:
```toml
# Read Tengine's basic status information (ngx_http_reqstat_module)
[[inputs.tengine]]
## An array of Tengine reqstat module URI to gather stats.
urls = ["http://127.0.0.1/us"]
## HTTP response timeout (default: 5s)
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
### Metrics:
- Measurement
- tags:
- port
- server
- server_name
- fields:
- bytes_in (integer, total number of bytes received from client)
- bytes_out (integer, total number of bytes sent to client)
- conn_total (integer, total number of accepted connections)
- req_total (integer, total number of processed requests)
- http_2xx (integer, total number of 2xx requests)
- http_3xx (integer, total number of 3xx requests)
- http_4xx (integer, total number of 4xx requests)
- http_5xx (integer, total number of 5xx requests)
- http_other_status (integer, total number of other requests)
- rt (integer, accumulation of rt)
- ups_req (integer, total number of requests calling for upstream)
- ups_rt (integer, accumulation of upstream rt)
- ups_tries (integer, total number of times calling for upstream)
- http_200 (integer, total number of 200 requests)
- http_206 (integer, total number of 206 requests)
- http_302 (integer, total number of 302 requests)
- http_304 (integer, total number of 304 requests)
- http_403 (integer, total number of 403 requests)
- http_404 (integer, total number of 404 requests)
- http_416 (integer, total number of 416 requests)
- http_499 (integer, total number of 499 requests)
- http_500 (integer, total number of 500 requests)
- http_502 (integer, total number of 502 requests)
- http_503 (integer, total number of 503 requests)
- http_504 (integer, total number of 504 requests)
- http_508 (integer, total number of 508 requests)
- http_other_detail_status (integer, total number of requests of other status codes)
- http_ups_4xx (integer, total number of requests of upstream 4xx)
- http_ups_5xx (integer, total number of requests of upstream 5xx)
### Example Output:
```
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=localhost bytes_in=9129i,bytes_out=56334i,conn_total=14i,http_200=90i,http_206=0i,http_2xx=90i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=90i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=28.79.190.35.bc.googleusercontent.com bytes_in=1500i,bytes_out=3009i,conn_total=4i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=1i,http_416=0i,http_499=0i,http_4xx=3i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=4i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=www.google.com bytes_in=372i,bytes_out=786i,conn_total=1i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=1i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=35.190.79.28 bytes_in=4433i,bytes_out=10259i,conn_total=5i,http_200=3i,http_206=0i,http_2xx=3i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=11i,http_416=0i,http_499=0i,http_4xx=11i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=14i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=tenka-prod-api.txwy.tw bytes_in=3014397400i,bytes_out=14279992835i,conn_total=36844i,http_200=3177339i,http_206=0i,http_2xx=3177339i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=123i,http_416=0i,http_499=0i,http_4xx=123i,http_500=17214i,http_502=4453i,http_503=80i,http_504=0i,http_508=0i,http_5xx=21747i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=123i,http_ups_5xx=21747i,req_total=3199209i,rt=245874536i,ups_req=2685076i,ups_rt=245858217i,ups_tries=2685076i 1526546308000000000
```

View File

@@ -0,0 +1,338 @@
package tengine

import (
    "bufio"
    "fmt"
    "net"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "sync"
    "time"
    "io"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/tls"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type Tengine struct {
    Urls []string
    ResponseTimeout internal.Duration
    tls.ClientConfig

    client *http.Client
}

var sampleConfig = `
  # An array of Tengine reqstat module URI to gather stats.
  urls = ["http://127.0.0.1/us"]

  # HTTP response timeout (default: 5s)
  # response_timeout = "5s"

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.cer"
  # tls_key = "/etc/telegraf/key.key"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`

func (n *Tengine) SampleConfig() string {
    return sampleConfig
}

func (n *Tengine) Description() string {
    return "Read Tengine's basic status information (ngx_http_reqstat_module)"
}

func (n *Tengine) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup

    // Create an HTTP client that is re-used for each
    // collection interval
    if n.client == nil {
        client, err := n.createHttpClient()
        if err != nil {
            return err
        }
        n.client = client
    }

    for _, u := range n.Urls {
        addr, err := url.Parse(u)
        if err != nil {
            acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
            continue
        }

        wg.Add(1)
        go func(addr *url.URL) {
            defer wg.Done()
            acc.AddError(n.gatherUrl(addr, acc))
        }(addr)
    }

    wg.Wait()
    return nil
}

func (n *Tengine) createHttpClient() (*http.Client, error) {
    tlsCfg, err := n.ClientConfig.TLSConfig()
    if err != nil {
        return nil, err
    }

    if n.ResponseTimeout.Duration < time.Second {
        n.ResponseTimeout.Duration = time.Second * 5
    }

    client := &http.Client{
        Transport: &http.Transport{
            TLSClientConfig: tlsCfg,
        },
        Timeout: n.ResponseTimeout.Duration,
    }

    return client, nil
}

type TengineSatus struct {
    host string
    bytes_in uint64
    bytes_out uint64
    conn_total uint64
    req_total uint64
    http_2xx uint64
    http_3xx uint64
    http_4xx uint64
    http_5xx uint64
    http_other_status uint64
    rt uint64
    ups_req uint64
    ups_rt uint64
    ups_tries uint64
    http_200 uint64
    http_206 uint64
    http_302 uint64
    http_304 uint64
    http_403 uint64
    http_404 uint64
    http_416 uint64
    http_499 uint64
    http_500 uint64
    http_502 uint64
    http_503 uint64
    http_504 uint64
    http_508 uint64
    http_other_detail_status uint64
    http_ups_4xx uint64
    http_ups_5xx uint64
}

func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
    var tenginestatus TengineSatus
    resp, err := n.client.Get(addr.String())
    if err != nil {
        return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
    }
    r := bufio.NewReader(resp.Body)

    for {
        line, err := r.ReadString('\n')
        if err != nil || io.EOF == err {
            break
        }
        line_split := strings.Split(strings.TrimSpace(line), ",")
        if len(line_split) != 30 {
            continue
        }
        tenginestatus.host = line_split[0]
        if err != nil {
            return err
        }
        tenginestatus.bytes_in, err = strconv.ParseUint(line_split[1], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.bytes_out, err = strconv.ParseUint(line_split[2], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.conn_total, err = strconv.ParseUint(line_split[3], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.req_total, err = strconv.ParseUint(line_split[4], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_2xx, err = strconv.ParseUint(line_split[5], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_3xx, err = strconv.ParseUint(line_split[6], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_4xx, err = strconv.ParseUint(line_split[7], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_5xx, err = strconv.ParseUint(line_split[8], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_other_status, err = strconv.ParseUint(line_split[9], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.rt, err = strconv.ParseUint(line_split[10], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.ups_req, err = strconv.ParseUint(line_split[11], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.ups_rt, err = strconv.ParseUint(line_split[12], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.ups_tries, err = strconv.ParseUint(line_split[13], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_200, err = strconv.ParseUint(line_split[14], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_206, err = strconv.ParseUint(line_split[15], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_302, err = strconv.ParseUint(line_split[16], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_304, err = strconv.ParseUint(line_split[17], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_403, err = strconv.ParseUint(line_split[18], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_404, err = strconv.ParseUint(line_split[19], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_416, err = strconv.ParseUint(line_split[20], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_499, err = strconv.ParseUint(line_split[21], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_500, err = strconv.ParseUint(line_split[22], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_502, err = strconv.ParseUint(line_split[23], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_503, err = strconv.ParseUint(line_split[24], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_504, err = strconv.ParseUint(line_split[25], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_508, err = strconv.ParseUint(line_split[26], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_other_detail_status, err = strconv.ParseUint(line_split[27], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_ups_4xx, err = strconv.ParseUint(line_split[28], 10, 64)
        if err != nil {
            return err
        }
        tenginestatus.http_ups_5xx, err = strconv.ParseUint(line_split[29], 10, 64)
        if err != nil {
            return err
        }
        tags := getTags(addr, tenginestatus.host)
        fields := map[string]interface{}{
            "bytes_in": tenginestatus.bytes_in,
            "bytes_out": tenginestatus.bytes_out,
            "conn_total": tenginestatus.conn_total,
            "req_total": tenginestatus.req_total,
            "http_2xx": tenginestatus.http_2xx,
            "http_3xx": tenginestatus.http_3xx,
            "http_4xx": tenginestatus.http_4xx,
            "http_5xx": tenginestatus.http_5xx,
            "http_other_status": tenginestatus.http_other_status,
            "rt": tenginestatus.rt,
            "ups_req": tenginestatus.ups_req,
            "ups_rt": tenginestatus.ups_rt,
            "ups_tries": tenginestatus.ups_tries,
            "http_200": tenginestatus.http_200,
            "http_206": tenginestatus.http_206,
            "http_302": tenginestatus.http_302,
            "http_304": tenginestatus.http_304,
            "http_403": tenginestatus.http_403,
            "http_404": tenginestatus.http_404,
            "http_416": tenginestatus.http_416,
            "http_499": tenginestatus.http_499,
            "http_500": tenginestatus.http_500,
            "http_502": tenginestatus.http_502,
            "http_503": tenginestatus.http_503,
            "http_504": tenginestatus.http_504,
            "http_508": tenginestatus.http_508,
            "http_other_detail_status": tenginestatus.http_other_detail_status,
            "http_ups_4xx": tenginestatus.http_ups_4xx,
            "http_ups_5xx": tenginestatus.http_ups_5xx,
        }
        acc.AddFields("tengine", fields, tags)
    }

    return nil
}

// Get tag(s) for the tengine plugin
func getTags(addr *url.URL, server_name string) map[string]string {
    h := addr.Host
    host, port, err := net.SplitHostPort(h)
    if err != nil {
        host = addr.Host
        if addr.Scheme == "http" {
            port = "80"
        } else if addr.Scheme == "https" {
            port = "443"
        } else {
            port = ""
        }
    }
    return map[string]string{"server": host, "port": port, "server_name": server_name}
}

func init() {
    inputs.Add("tengine", func() telegraf.Input {
        return &Tengine{}
    })
}

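The reqstat endpoint emits one comma-separated line per `server_name`: the host comes first, followed by 29 numeric counters, which is why `gatherUrl` skips any line that does not split into exactly 30 columns. The standalone sketch below (a hypothetical `main` package, not part of the plugin) illustrates how the first few columns map onto field values.

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // One line of ngx_http_reqstat_module output: host, then 29 counters.
    line := "127.0.0.1,784,1511,2,2,1,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0"

    cols := strings.Split(strings.TrimSpace(line), ",")
    if len(cols) != 30 {
        return // gatherUrl skips malformed lines in the same way
    }

    bytesIn, _ := strconv.ParseUint(cols[1], 10, 64)
    bytesOut, _ := strconv.ParseUint(cols[2], 10, 64)
    connTotal, _ := strconv.ParseUint(cols[3], 10, 64)
    fmt.Printf("server_name=%s bytes_in=%d bytes_out=%d conn_total=%d\n",
        cols[0], bytesIn, bytesOut, connTotal)
}
```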

@@ -0,0 +1,97 @@
package tengine

import (
    "fmt"
    "net"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const tengineSampleResponse = `127.0.0.1,784,1511,2,2,1,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0`

// Verify that tengine tags are properly parsed based on the server
func TestTengineTags(t *testing.T) {
    urls := []string{"http://localhost/us", "http://localhost:80/us"}
    var addr *url.URL
    for _, url1 := range urls {
        addr, _ = url.Parse(url1)
        tagMap := getTags(addr, "127.0.0.1")
        assert.Contains(t, tagMap["server"], "localhost")
    }
}

func TestTengineGeneratesMetrics(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        var rsp string
        rsp = tengineSampleResponse
        fmt.Fprintln(w, rsp)
    }))
    defer ts.Close()

    n := &Tengine{
        Urls: []string{fmt.Sprintf("%s/us", ts.URL)},
    }

    var acc_tengine testutil.Accumulator
    err_tengine := acc_tengine.GatherError(n.Gather)
    require.NoError(t, err_tengine)

    fields_tengine := map[string]interface{}{
        "bytes_in": uint64(784),
        "bytes_out": uint64(1511),
        "conn_total": uint64(2),
        "req_total": uint64(2),
        "http_2xx": uint64(1),
        "http_3xx": uint64(0),
        "http_4xx": uint64(1),
        "http_5xx": uint64(0),
        "http_other_status": uint64(0),
        "rt": uint64(0),
        "ups_req": uint64(0),
        "ups_rt": uint64(0),
        "ups_tries": uint64(0),
        "http_200": uint64(1),
        "http_206": uint64(0),
        "http_302": uint64(0),
        "http_304": uint64(0),
        "http_403": uint64(0),
        "http_404": uint64(1),
        "http_416": uint64(0),
        "http_499": uint64(0),
        "http_500": uint64(0),
        "http_502": uint64(0),
        "http_503": uint64(0),
        "http_504": uint64(0),
        "http_508": uint64(0),
        "http_other_detail_status": uint64(0),
        "http_ups_4xx": uint64(0),
        "http_ups_5xx": uint64(0),
    }

    addr, err := url.Parse(ts.URL)
    if err != nil {
        panic(err)
    }

    host, port, err := net.SplitHostPort(addr.Host)
    if err != nil {
        host = addr.Host
        if addr.Scheme == "http" {
            port = "80"
        } else if addr.Scheme == "https" {
            port = "443"
        } else {
            port = ""
        }
    }

    tags := map[string]string{"server": host, "port": port, "server_name": "127.0.0.1"}
    acc_tengine.AssertContainsTaggedFields(t, "tengine", fields_tengine, tags)
}


@@ -193,7 +193,10 @@ func TestRunParserJSONMsg(t *testing.T) {
    listener.acc = &acc
    defer close(listener.done)

    listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil)
    listener.parser, _ = parsers.NewParser(&parsers.Config{
        DataFormat: "json",
        MetricName: "udp_json_test",
    })
    listener.wg.Add(1)
    go listener.udpParser()


@@ -32,7 +32,7 @@ import (
    "log"
    "github.com/apache/thrift/lib/go/thrift"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
var (


@@ -4,7 +4,7 @@ import (
    "time"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
//now is a mockable time for now


@@ -7,7 +7,7 @@ import (
    "time"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
// JSON decodes spans from bodies `POST`ed to the spans endpoint


@@ -10,7 +10,7 @@ import (
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
    "github.com/apache/thrift/lib/go/thrift"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
// UnmarshalThrift converts raw bytes in thrift format to a slice of spans


@@ -6,7 +6,7 @@ import (
    "github.com/google/go-cmp/cmp"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
func Test_endpointHost(t *testing.T) {


@@ -1,17 +0,0 @@
version: '3'

services:
  zoo:
    image: zookeeper

  telegraf:
    image: glinton/scratch
    volumes:
      - ./telegraf.conf:/telegraf.conf
      - ../../../../telegraf:/telegraf
    depends_on:
      - zoo
    entrypoint:
      - /telegraf
      - --config
      - /telegraf.conf
    network_mode: service:zoo


@@ -1,9 +0,0 @@
[agent]
interval="1s"
flush_interval="1s"
[[inputs.zookeeper]]
servers = [":2181"]
[[outputs.file]]
files = ["stdout"]


@@ -158,14 +158,8 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr
            }
        }
    }
    srv := "localhost"
    if service[0] != "" {
        srv = service[0]
    }
    tags := map[string]string{
        "server": srv,
        "server": service[0],
        "port": service[1],
        "state": zookeeper_state,
    }

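The removed fallback means the `server` tag is now taken verbatim from the configured address, even when the host portion is empty (for example `":2181"`, as in the removed zookeeper test config above). A small sketch of that effect, assuming the address is split on `:` into `service[0]`/`service[1]` as the surrounding code suggests:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    // Without the "localhost" fallback, whatever precedes the colon becomes
    // the server tag - possibly an empty string.
    for _, addr := range []string{"10.0.0.5:2181", ":2181"} {
        service := strings.Split(addr, ":")
        fmt.Printf("server=%q port=%q\n", service[0], service[1])
    }
}
```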

@@ -59,7 +59,7 @@ func TestPartitionKey(t *testing.T) {
    partitionKey := k.getPartitionKey(testPoint)
    u, err := uuid.FromString(partitionKey)
    assert.Nil(err, "Issue parsing UUID")
    assert.Equal(uint(4), u.Version(), "PartitionKey should be UUIDv4")
    assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4")
    k = KinesisOutput{
        PartitionKey: "-",
@@ -72,6 +72,5 @@ func TestPartitionKey(t *testing.T) {
    partitionKey = k.getPartitionKey(testPoint)
    u, err = uuid.FromString(partitionKey)
    assert.Nil(err, "Issue parsing UUID")
    assert.Equal(uint(4), u.Version(), "PartitionKey should be UUIDv4")
    assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4")
}

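The assertions change from `uint(4)` to `byte(4)` because the vendored satori/go.uuid was updated and its `Version()` accessor now returns a `byte`. A minimal sketch of the same check, assuming that newer API:

```go
package main

import (
    "fmt"

    uuid "github.com/satori/go.uuid"
)

func main() {
    // f47ac10b-58cc-4372-... carries a '4' in the version nibble, so Version()
    // should report 4; in the updated library that value is a byte.
    u, err := uuid.FromString("f47ac10b-58cc-4372-a567-0e02b2c3d479")
    if err != nil {
        panic(err)
    }
    fmt.Println(u.Version() == byte(4)) // true
}
```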

@@ -184,7 +184,7 @@ func (m *MQTT) publish(topic string, body []byte) error {
func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
    opts := paho.NewClientOptions()
    opts.KeepAlive = 0 * time.Second
    opts.KeepAlive = 0
    if m.Timeout.Duration < time.Second {
        m.Timeout.Duration = 5 * time.Second

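`0 * time.Second` becomes a bare `0` because the updated paho client appears to express `ClientOptions.KeepAlive` as an integer number of seconds rather than a `time.Duration`. A hedged sketch; `SetKeepAlive` takes a `time.Duration` and is the version-agnostic way to set the value:

```go
package main

import (
    "time"

    paho "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    opts := paho.NewClientOptions()

    // Assigning the field directly now means "seconds"; 0 disables keep-alive.
    opts.KeepAlive = 0

    // The setter converts from a time.Duration, so it reads the same either way.
    opts.SetKeepAlive(30 * time.Second)
}
```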

@@ -20,6 +20,7 @@ var (
type JSONParser struct {
    MetricName string
    TagKeys []string
    FieldKeys []string
    DefaultTags map[string]string
}
@@ -86,6 +87,17 @@ func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string]
        }
    }
    //if field_keys is specified, only those values should be reported as fields
    if len(p.FieldKeys) > 0 {
        nFields := make(map[string]interface{})
        for _, name := range p.FieldKeys {
            if fields[name] != nil {
                nFields[name] = fields[name]
            }
        }
        return tags, nFields
    }
    //remove any additional string/bool values from fields
    for k := range fields {
        switch fields[k].(type) {

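When `FieldKeys` is set, the parser returns early with only the whitelisted (flattened) keys as fields, so string values survive if they are listed; otherwise the existing string/bool stripping still applies. A small sketch of that behaviour, modelled on the updated unit test below; the JSON payload here is illustrative only:

```go
package main

import (
    "fmt"

    "github.com/influxdata/telegraf/plugins/parsers/json"
)

func main() {
    payload := []byte(`{"shares": {"total": 5, "accepted": 5, "tester": "work"}}`)

    // Nested keys flatten to shares_total, shares_accepted, shares_tester;
    // FieldKeys then keeps only the listed names, including the string value.
    parser := json.JSONParser{
        MetricName: "json_test",
        FieldKeys:  []string{"shares_total", "shares_tester"},
    }

    metrics, err := parser.Parse(payload)
    if err != nil {
        panic(err)
    }
    fmt.Println(metrics[0].Fields()) // only shares_total and shares_tester remain
}
```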

@@ -1,6 +1,7 @@
package json

import (
    "log"
    "testing"

    "github.com/stretchr/testify/assert"
@@ -448,22 +449,28 @@ func TestJSONParseNestedArray(t *testing.T) {
    "total_devices": 5,
    "total_threads": 10,
    "shares": {
        "total": 5,
        "accepted": 5,
        "rejected": 0,
        "avg_find_time": 4,
        "tester": "work",
        "tester2": "don't want this",
        "tester3": 7.93
        "total": 5,
        "accepted": 5,
        "rejected": 0,
        "avg_find_time": 4,
        "tester": "work",
        "tester2": "don't want this",
        "tester3": {
            "hello":"sup",
            "fun":"money",
            "break":9
        }
    }
}`

    parser := JSONParser{
        MetricName: "json_test",
        TagKeys: []string{"total_devices", "total_threads", "shares_tester", "shares_tester3"},
        TagKeys: []string{"total_devices", "total_threads", "shares_tester3_fun"},
        FieldKeys: []string{"shares_tester", "shares_tester3_break"},
    }

    metrics, err := parser.Parse([]byte(testString))
    log.Printf("m[0] name: %v, tags: %v, fields: %v", metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields())
    require.NoError(t, err)
    require.Equal(t, len(parser.TagKeys), len(metrics[0].Tags()))
}


@@ -56,6 +56,8 @@ type Config struct {
    // TagKeys only apply to JSON data
    TagKeys []string

    // FieldKeys only apply to JSON
    FieldKeys []string

    // MetricName applies to JSON & value. This will be the name of the measurement.
    MetricName string
@@ -95,8 +97,8 @@ func NewParser(config *Config) (Parser, error) {
    var parser Parser
    switch config.DataFormat {
    case "json":
        parser, err = NewJSONParser(config.MetricName,
            config.TagKeys, config.DefaultTags)
        parser, err = newJSONParser(config.MetricName,
            config.TagKeys, config.FieldKeys, config.DefaultTags)
    case "value":
        parser, err = NewValueParser(config.MetricName,
            config.DataType, config.DefaultTags)
@@ -126,6 +128,22 @@ func NewParser(config *Config) (Parser, error) {
    return parser, err
}

func newJSONParser(
    metricName string,
    tagKeys []string,
    fieldKeys []string,
    defaultTags map[string]string,
) (Parser, error) {
    parser := &json.JSONParser{
        MetricName: metricName,
        TagKeys: tagKeys,
        FieldKeys: fieldKeys,
        DefaultTags: defaultTags,
    }
    return parser, nil
}

//Deprecated: Use NewParser to get a JSONParser object
func NewJSONParser(
    metricName string,
    tagKeys []string,

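For callers the takeaway is to build JSON parsers through `NewParser` and a `Config` (as the updated UDP listener test earlier in this diff does); the exported `NewJSONParser` remains only for backwards compatibility and has no `FieldKeys` parameter. A minimal sketch of the preferred construction:

```go
package main

import (
    "fmt"

    "github.com/influxdata/telegraf/plugins/parsers"
)

func main() {
    // New options such as FieldKeys are picked up automatically via Config.
    parser, err := parsers.NewParser(&parsers.Config{
        DataFormat: "json",
        MetricName: "udp_json_test",
        TagKeys:    []string{"total_devices"},
        FieldKeys:  []string{"shares_total"},
    })
    if err != nil {
        panic(err)
    }

    metrics, err := parser.Parse([]byte(`{"total_devices": 5, "shares": {"total": 3}}`))
    if err != nil {
        panic(err)
    }
    fmt.Println(metrics[0].Tags(), metrics[0].Fields())
}
```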

@@ -67,9 +67,7 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric {
    for _, metric := range in {
        for _, converter := range r.Tags {
            if value, ok := metric.GetTag(converter.Key); ok {
                if key, newValue := r.convert(converter, value); newValue != "" {
                    metric.AddTag(key, newValue)
                }
                metric.AddTag(r.convert(converter, value))
            }
        }
@@ -77,9 +75,7 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric {
            if value, ok := metric.GetField(converter.Key); ok {
                switch value := value.(type) {
                case string:
                    if key, newValue := r.convert(converter, value); newValue != "" {
                        metric.AddField(key, newValue)
                    }
                    metric.AddField(r.convert(converter, value))
                }
            }
        }


@@ -222,7 +222,7 @@ func TestNoMatches(t *testing.T) {
            },
        },
        {
            message: "Should not emit new tag/field when result_key given but regex doesn't match",
            message: "Should emit empty string when result_key given but regex doesn't match",
            converter: converter{
                Key: "request",
                Pattern: "not_match",
@@ -230,7 +230,8 @@ func TestNoMatches(t *testing.T) {
                ResultKey: "new_field",
            },
            expectedFields: map[string]interface{}{
                "request": "/users/42/",
                "request":   "/users/42/",
                "new_field": "",
            },
        },
    }

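The behavioural change this test captures: a converter with a `result_key` now always emits that key, producing an empty string when the pattern does not match instead of being skipped. The standalone sketch below mirrors that contract with plain `regexp`; it is an illustration, not the processor's actual `convert` helper.

```go
package main

import (
    "fmt"
    "regexp"
)

// applyConverter mimics the new Apply behaviour: the result key is always
// returned, and a non-matching pattern yields an empty value.
func applyConverter(pattern, replacement, resultKey, value string) (string, string) {
    re := regexp.MustCompile(pattern)
    if !re.MatchString(value) {
        return resultKey, ""
    }
    return resultKey, re.ReplaceAllString(value, replacement)
}

func main() {
    key, newValue := applyConverter("not_match", "matched", "new_field", "/users/42/")
    fmt.Printf("%s=%q\n", key, newValue) // new_field=""
}
```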

@@ -95,7 +95,7 @@ supported_packages = {
    "freebsd": [ "tar" ]
}

next_version = '1.7.0'
next_version = '1.8.0'

################
#### Telegraf Functions
@@ -155,12 +155,12 @@ def go_get(branch, update=False, no_uncommitted=False):
    if local_changes() and no_uncommitted:
        logging.error("There are uncommitted changes in the current directory.")
        return False
    if not check_path_for("gdm"):
        logging.info("Downloading `gdm`...")
        get_command = "go get github.com/sparrc/gdm"
    if not check_path_for("dep"):
        logging.info("Downloading `dep`...")
        get_command = "go get -u github.com/golang/dep/cmd/dep"
        run(get_command)
    logging.info("Retrieving dependencies with `gdm`...")
    run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH",
    logging.info("Retrieving dependencies with `dep`...")
    run("{}/bin/dep ensure -v".format(os.environ.get("GOPATH",
        os.path.expanduser("~/go"))))
    return True