Compare commits
54 Commits
release-1.
...
procstat-r
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
640ae884ea | ||
|
|
542c030dc8 | ||
|
|
504d978446 | ||
|
|
ec7f13111f | ||
|
|
4e24a1bbe3 | ||
|
|
9c4b52256d | ||
|
|
08a11d7bfd | ||
|
|
e12eced211 | ||
|
|
1bd41ef3ce | ||
|
|
d7c756e9ff | ||
|
|
39206677f8 | ||
|
|
b66eb2fec7 | ||
|
|
3ad10283ef | ||
|
|
84e9a5c97e | ||
|
|
c98b58dacc | ||
|
|
98d86df797 | ||
|
|
4e9e57e210 | ||
|
|
7781507c01 | ||
|
|
8482c40a91 | ||
|
|
0dda9b8319 | ||
|
|
4e69d10ff7 | ||
|
|
f689463e8e | ||
|
|
f217d12de5 | ||
|
|
886795063e | ||
|
|
30dc95fa78 | ||
|
|
40fac0a9b4 | ||
|
|
36df4c5ae5 | ||
|
|
70ffed3a4d | ||
|
|
bf59bcf721 | ||
|
|
a789f97feb | ||
|
|
d2e00a3205 | ||
|
|
daddd8bbac | ||
|
|
d16530677d | ||
|
|
1ea18ffd0a | ||
|
|
dd2223ae1c | ||
|
|
90eebd88af | ||
|
|
d2e729dfaf | ||
|
|
f64d612294 | ||
|
|
76ec90e66d | ||
|
|
1690f36b09 | ||
|
|
87f711a19a | ||
|
|
58895d6b03 | ||
|
|
cd9ad77038 | ||
|
|
8563238059 | ||
|
|
11335f5fee | ||
|
|
acba20af1a | ||
|
|
229b6bd944 | ||
|
|
7fe6e2f5ae | ||
|
|
a4214abfc4 | ||
|
|
5f0cbd1255 | ||
|
|
3ef4dff4ec | ||
|
|
dfe7b5eec2 | ||
|
|
92a8f795f5 | ||
|
|
b1d77ade55 |
@@ -15,7 +15,14 @@ jobs:
|
|||||||
<<: [ *defaults, *go-1_10 ]
|
<<: [ *defaults, *go-1_10 ]
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
|
- restore_cache:
|
||||||
|
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
|
||||||
- run: 'make deps'
|
- run: 'make deps'
|
||||||
|
- save_cache:
|
||||||
|
name: 'vendored deps'
|
||||||
|
key: vendor-{{ .Branch }}-{{ checksum "Gopkg.lock" }}
|
||||||
|
paths:
|
||||||
|
- './vendor'
|
||||||
- persist_to_workspace:
|
- persist_to_workspace:
|
||||||
root: '/go/src'
|
root: '/go/src'
|
||||||
paths:
|
paths:
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -2,3 +2,4 @@
|
|||||||
/telegraf
|
/telegraf
|
||||||
/telegraf.exe
|
/telegraf.exe
|
||||||
/telegraf.gz
|
/telegraf.gz
|
||||||
|
/vendor
|
||||||
|
|||||||
32
CHANGELOG.md
32
CHANGELOG.md
@@ -1,22 +1,32 @@
|
|||||||
## v1.7.2 [unreleased]
|
## v1.8 [unreleased]
|
||||||
|
|
||||||
### Bugfixes
|
### Release Notes
|
||||||
|
|
||||||
- [#4381](https://github.com/influxdata/telegraf/issues/4381): Use localhost as default server tag in zookeeper input.
|
### New Inputs
|
||||||
- [#4374](https://github.com/influxdata/telegraf/issues/4374): Don't set values when pattern doesn't match in regex processor.
|
|
||||||
|
|
||||||
## v1.7.1 [2018-07-03]
|
- [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu
|
||||||
|
|
||||||
|
### New Aggregators
|
||||||
|
|
||||||
|
- [valuecounter](./plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input.
|
||||||
|
- [#4160](https://github.com/influxdata/telegraf/pull/4160): Add tengine input plugin.
|
||||||
|
- [#4262](https://github.com/influxdata/telegraf/pull/4262): Add power draw field to nvidia_smi plugin.
|
||||||
|
- [#4271](https://github.com/influxdata/telegraf/pull/4271): Add support for solr 7 to the solr input.
|
||||||
|
- [#4281](https://github.com/influxdata/telegraf/pull/4281): Add owner tag on partitions in burrow input.
|
||||||
|
- [#4259](https://github.com/influxdata/telegraf/pull/4259): Add container status tag to docker input.
|
||||||
|
- [#3523](https://github.com/influxdata/telegraf/pull/3523): Add valuecounter aggregator plugin.
|
||||||
|
- [#4307](https://github.com/influxdata/telegraf/pull/4307): Add new measurement with results of pgrep lookup to procstat input.
|
||||||
|
|
||||||
|
## v1.7.1 [unreleased]
|
||||||
|
|
||||||
### Bugfixes
|
### Bugfixes
|
||||||
|
|
||||||
- [#4277](https://github.com/influxdata/telegraf/pull/4277): Treat sigterm as a clean shutdown signal.
|
- [#4277](https://github.com/influxdata/telegraf/pull/4277): Treat sigterm as a clean shutdown signal.
|
||||||
- [#4284](https://github.com/influxdata/telegraf/pull/4284): Fix selection of tags under nested objects in the JSON parser.
|
- [#4284](https://github.com/influxdata/telegraf/pull/4284): Fix selection of tags under nested objects in the JSON parser.
|
||||||
- [#4135](https://github.com/influxdata/telegraf/issues/4135): Fix postfix input handling multi-level queues.
|
|
||||||
- [#4334](https://github.com/influxdata/telegraf/pull/4334): Fix syslog timestamp parsing with single digit day of month.
|
|
||||||
- [#2910](https://github.com/influxdata/telegraf/issues/2910): Handle mysql input variations in the user_statistics collecting.
|
|
||||||
- [#4293](https://github.com/influxdata/telegraf/issues/4293): Fix minmax and basicstats aggregators to use uint64.
|
|
||||||
- [#4290](https://github.com/influxdata/telegraf/issues/4290): Document swap input plugin.
|
|
||||||
- [#4316](https://github.com/influxdata/telegraf/issues/4316): Fix incorrect precision being applied to metric in http_listener.
|
|
||||||
|
|
||||||
## v1.7 [2018-06-12]
|
## v1.7 [2018-06-12]
|
||||||
|
|
||||||
|
|||||||
@@ -30,9 +30,9 @@ which can be found [on our website](http://influxdb.com/community/cla.html)
|
|||||||
|
|
||||||
Assuming you can already build the project, run these in the telegraf directory:
|
Assuming you can already build the project, run these in the telegraf directory:
|
||||||
|
|
||||||
1. `go get github.com/sparrc/gdm`
|
1. `go get -u github.com/golang/dep/cmd/dep`
|
||||||
1. `gdm restore`
|
2. `dep ensure`
|
||||||
1. `GOOS=linux gdm save`
|
3. `dep ensure -add github.com/[dependency]/[new-package]`
|
||||||
|
|
||||||
## Input Plugins
|
## Input Plugins
|
||||||
|
|
||||||
|
|||||||
100
Godeps
100
Godeps
@@ -1,100 +0,0 @@
|
|||||||
code.cloudfoundry.org/clock e9dc86bbf0e5bbe6bf7ff5a6f71e048959b61f71
|
|
||||||
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
|
|
||||||
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
|
|
||||||
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
|
|
||||||
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
|
|
||||||
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
|
|
||||||
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
|
||||||
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
|
|
||||||
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
|
|
||||||
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
|
|
||||||
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
|
|
||||||
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
|
|
||||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
|
||||||
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
|
|
||||||
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
|
|
||||||
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
|
|
||||||
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
|
||||||
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
|
|
||||||
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
|
|
||||||
github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
|
|
||||||
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
|
|
||||||
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
|
||||||
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
|
|
||||||
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
|
|
||||||
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
|
|
||||||
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
|
|
||||||
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
|
|
||||||
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
|
|
||||||
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
|
|
||||||
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996
|
|
||||||
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
|
|
||||||
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
|
||||||
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
|
||||||
github.com/hashicorp/consul 5174058f0d2bda63fa5198ab96c33d9a909c58ed
|
|
||||||
github.com/influxdata/go-syslog eecd51df3ad85464a2bab9b7d3a45bc1e299059e
|
|
||||||
github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
|
|
||||||
github.com/influxdata/toml 2a2e3012f7cfbef64091cc79776311e65dfa211b
|
|
||||||
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
|
|
||||||
github.com/fsnotify/fsnotify c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
|
|
||||||
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
|
|
||||||
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
|
||||||
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
|
|
||||||
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
|
|
||||||
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
|
|
||||||
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
|
|
||||||
github.com/Microsoft/ApplicationInsights-Go 3612f58550c1de70f1a110c78c830e55f29aa65d
|
|
||||||
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
|
|
||||||
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
|
|
||||||
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
|
|
||||||
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
|
|
||||||
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
|
||||||
github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
|
|
||||||
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
|
||||||
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
|
|
||||||
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
|
|
||||||
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
|
|
||||||
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
|
|
||||||
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
|
|
||||||
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
|
|
||||||
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
|
|
||||||
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
|
|
||||||
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
|
|
||||||
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
|
|
||||||
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
|
|
||||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
|
||||||
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
|
|
||||||
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
|
|
||||||
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
|
||||||
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
|
|
||||||
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
|
||||||
github.com/shirou/gopsutil c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3
|
|
||||||
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
|
||||||
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
|
|
||||||
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
|
|
||||||
github.com/soniah/gosnmp f15472a4cd6f6ea7929e4c7d9f163c49f059924f
|
|
||||||
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
|
|
||||||
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
|
|
||||||
github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
|
|
||||||
github.com/stretchr/testify 12b6f73e6084dad08a7c6e575284b177ecafbc71
|
|
||||||
github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
|
|
||||||
github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
|
|
||||||
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
|
|
||||||
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
|
|
||||||
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
|
|
||||||
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
|
|
||||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
|
||||||
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
|
|
||||||
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
|
|
||||||
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
|
||||||
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
|
|
||||||
google.golang.org/genproto 11c7f9e547da6db876260ce49ea7536985904c9b
|
|
||||||
google.golang.org/grpc de2209a968d48e8970546c8a710189f7461370f7
|
|
||||||
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
|
|
||||||
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
|
|
||||||
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
|
|
||||||
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
|
|
||||||
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
|
|
||||||
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
|
|
||||||
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
|
|
||||||
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
|
|
||||||
973
Gopkg.lock
generated
Normal file
973
Gopkg.lock
generated
Normal file
@@ -0,0 +1,973 @@
|
|||||||
|
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||||
|
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "code.cloudfoundry.org/clock"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "02e53af36e6c978af692887ed449b74026d76fec"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "collectd.org"
|
||||||
|
packages = [
|
||||||
|
"api",
|
||||||
|
"cdtime",
|
||||||
|
"network"
|
||||||
|
]
|
||||||
|
revision = "2ce144541b8903101fb8f1483cc0497a68798122"
|
||||||
|
version = "v0.3.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/Microsoft/ApplicationInsights-Go"
|
||||||
|
packages = [
|
||||||
|
"appinsights",
|
||||||
|
"appinsights/contracts"
|
||||||
|
]
|
||||||
|
revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5"
|
||||||
|
version = "v0.4.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/Microsoft/go-winio"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
|
||||||
|
version = "v0.4.7"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/Shopify/sarama"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "35324cf48e33d8260e1c7c18854465a904ade249"
|
||||||
|
version = "v1.17.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/StackExchange/wmi"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
|
||||||
|
version = "1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/aerospike/aerospike-client-go"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"internal/lua",
|
||||||
|
"internal/lua/resources",
|
||||||
|
"logger",
|
||||||
|
"pkg/bcrypt",
|
||||||
|
"pkg/ripemd160",
|
||||||
|
"types",
|
||||||
|
"types/atomic",
|
||||||
|
"types/particle_type",
|
||||||
|
"types/rand",
|
||||||
|
"utils/buffer"
|
||||||
|
]
|
||||||
|
revision = "c10b5393e43bd60125aca6289c7b24879edb1787"
|
||||||
|
version = "v1.33.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/alecthomas/template"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"parse"
|
||||||
|
]
|
||||||
|
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/alecthomas/units"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/amir/raidman"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"proto"
|
||||||
|
]
|
||||||
|
revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/apache/thrift"
|
||||||
|
packages = ["lib/go/thrift"]
|
||||||
|
revision = "f5f430df56871bc937950274b2c86681d3db6e59"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/aws/aws-sdk-go"
|
||||||
|
packages = [
|
||||||
|
"aws",
|
||||||
|
"aws/awserr",
|
||||||
|
"aws/awsutil",
|
||||||
|
"aws/client",
|
||||||
|
"aws/client/metadata",
|
||||||
|
"aws/corehandlers",
|
||||||
|
"aws/credentials",
|
||||||
|
"aws/credentials/ec2rolecreds",
|
||||||
|
"aws/credentials/endpointcreds",
|
||||||
|
"aws/credentials/stscreds",
|
||||||
|
"aws/csm",
|
||||||
|
"aws/defaults",
|
||||||
|
"aws/ec2metadata",
|
||||||
|
"aws/endpoints",
|
||||||
|
"aws/request",
|
||||||
|
"aws/session",
|
||||||
|
"aws/signer/v4",
|
||||||
|
"internal/sdkio",
|
||||||
|
"internal/sdkrand",
|
||||||
|
"internal/shareddefaults",
|
||||||
|
"private/protocol",
|
||||||
|
"private/protocol/json/jsonutil",
|
||||||
|
"private/protocol/jsonrpc",
|
||||||
|
"private/protocol/query",
|
||||||
|
"private/protocol/query/queryutil",
|
||||||
|
"private/protocol/rest",
|
||||||
|
"private/protocol/xml/xmlutil",
|
||||||
|
"service/cloudwatch",
|
||||||
|
"service/kinesis",
|
||||||
|
"service/sts"
|
||||||
|
]
|
||||||
|
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
|
||||||
|
version = "v1.14.8"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/beorn7/perks"
|
||||||
|
packages = ["quantile"]
|
||||||
|
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/bsm/sarama-cluster"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3"
|
||||||
|
version = "v2.1.13"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/cenkalti/backoff"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e"
|
||||||
|
version = "v2.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/couchbase/go-couchbase"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "16db1f1fe037412f12738fa4d8448c549c4edd77"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/couchbase/gomemcached"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"client"
|
||||||
|
]
|
||||||
|
revision = "0da75df145308b9a4e6704d762ca9d9b77752efc"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/couchbase/goutils"
|
||||||
|
packages = [
|
||||||
|
"logging",
|
||||||
|
"scramsha"
|
||||||
|
]
|
||||||
|
revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/davecgh/go-spew"
|
||||||
|
packages = ["spew"]
|
||||||
|
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/dgrijalva/jwt-go"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||||
|
version = "v3.2.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/docker/distribution"
|
||||||
|
packages = [
|
||||||
|
"digest",
|
||||||
|
"reference"
|
||||||
|
]
|
||||||
|
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
|
||||||
|
version = "v2.6.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/docker/docker"
|
||||||
|
packages = [
|
||||||
|
"api/types",
|
||||||
|
"api/types/blkiodev",
|
||||||
|
"api/types/container",
|
||||||
|
"api/types/events",
|
||||||
|
"api/types/filters",
|
||||||
|
"api/types/mount",
|
||||||
|
"api/types/network",
|
||||||
|
"api/types/reference",
|
||||||
|
"api/types/registry",
|
||||||
|
"api/types/strslice",
|
||||||
|
"api/types/swarm",
|
||||||
|
"api/types/time",
|
||||||
|
"api/types/versions",
|
||||||
|
"api/types/volume",
|
||||||
|
"client",
|
||||||
|
"pkg/tlsconfig"
|
||||||
|
]
|
||||||
|
revision = "eef6495eddab52828327aade186443681ed71a4e"
|
||||||
|
version = "v17.03.2-ce-rc1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/docker/go-connections"
|
||||||
|
packages = [
|
||||||
|
"nat",
|
||||||
|
"sockets",
|
||||||
|
"tlsconfig"
|
||||||
|
]
|
||||||
|
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
|
||||||
|
version = "v0.3.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/docker/go-units"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
|
||||||
|
version = "v0.3.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/eapache/go-resiliency"
|
||||||
|
packages = ["breaker"]
|
||||||
|
revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/eapache/go-xerial-snappy"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/eapache/queue"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/eclipse/paho.mqtt.golang"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"packets"
|
||||||
|
]
|
||||||
|
revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560"
|
||||||
|
version = "v1.1.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/go-ini/ini"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
|
||||||
|
version = "v1.37.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/go-logfmt/logfmt"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
|
||||||
|
version = "v0.3.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/go-ole/go-ole"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"oleutil"
|
||||||
|
]
|
||||||
|
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
|
||||||
|
version = "v1.2.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/go-redis/redis"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"internal",
|
||||||
|
"internal/consistenthash",
|
||||||
|
"internal/hashtag",
|
||||||
|
"internal/pool",
|
||||||
|
"internal/proto",
|
||||||
|
"internal/singleflight",
|
||||||
|
"internal/util"
|
||||||
|
]
|
||||||
|
revision = "83fb42932f6145ce52df09860384a4653d2d332a"
|
||||||
|
version = "v6.12.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/go-sql-driver/mysql"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "d523deb1b23d913de5bdada721a6071e71283618"
|
||||||
|
version = "v1.4.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/gobwas/glob"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"compiler",
|
||||||
|
"match",
|
||||||
|
"syntax",
|
||||||
|
"syntax/ast",
|
||||||
|
"syntax/lexer",
|
||||||
|
"util/runes",
|
||||||
|
"util/strings"
|
||||||
|
]
|
||||||
|
revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
|
||||||
|
version = "v0.2.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/gogo/protobuf"
|
||||||
|
packages = ["proto"]
|
||||||
|
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/golang/protobuf"
|
||||||
|
packages = [
|
||||||
|
"proto",
|
||||||
|
"ptypes",
|
||||||
|
"ptypes/any",
|
||||||
|
"ptypes/duration",
|
||||||
|
"ptypes/timestamp"
|
||||||
|
]
|
||||||
|
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/golang/snappy"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/google/go-cmp"
|
||||||
|
packages = [
|
||||||
|
"cmp",
|
||||||
|
"cmp/internal/diff",
|
||||||
|
"cmp/internal/function",
|
||||||
|
"cmp/internal/value"
|
||||||
|
]
|
||||||
|
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
|
||||||
|
version = "v0.2.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/gorilla/context"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
|
||||||
|
version = "v1.1.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/gorilla/mux"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
|
||||||
|
version = "v1.6.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/hailocab/go-hostpool"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/hashicorp/consul"
|
||||||
|
packages = ["api"]
|
||||||
|
revision = "5174058f0d2bda63fa5198ab96c33d9a909c58ed"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/hashicorp/go-cleanhttp"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/hashicorp/go-rootcerts"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/hashicorp/serf"
|
||||||
|
packages = ["coordinate"]
|
||||||
|
revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
|
||||||
|
version = "v0.8.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/influxdata/go-syslog"
|
||||||
|
packages = [
|
||||||
|
"rfc5424",
|
||||||
|
"rfc5425"
|
||||||
|
]
|
||||||
|
revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e"
|
||||||
|
version = "v1.0.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/influxdata/tail"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"ratelimiter",
|
||||||
|
"util",
|
||||||
|
"watch",
|
||||||
|
"winfile"
|
||||||
|
]
|
||||||
|
revision = "c43482518d410361b6c383d7aebce33d0471d7bc"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/influxdata/toml"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"ast"
|
||||||
|
]
|
||||||
|
revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/influxdata/wlog"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/jackc/pgx"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"chunkreader",
|
||||||
|
"internal/sanitize",
|
||||||
|
"pgio",
|
||||||
|
"pgproto3",
|
||||||
|
"pgtype",
|
||||||
|
"stdlib"
|
||||||
|
]
|
||||||
|
revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27"
|
||||||
|
version = "v3.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/jmespath/go-jmespath"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "0b12d6b5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/kardianos/osext"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/kardianos/service"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/kballard/go-shellquote"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "95032a82bc518f77982ea72343cc1ade730072f0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/kr/logfmt"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/mailru/easyjson"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"buffer",
|
||||||
|
"jlexer",
|
||||||
|
"jwriter"
|
||||||
|
]
|
||||||
|
revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||||
|
packages = ["pbutil"]
|
||||||
|
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||||
|
version = "v1.0.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/miekg/dns"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
|
||||||
|
version = "v1.0.8"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/mitchellh/go-homedir"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/mitchellh/mapstructure"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/multiplay/go-ts3"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "d0d44555495c8776880a17e439399e715a4ef319"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/naoina/go-stringutil"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
|
||||||
|
version = "v0.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/nats-io/gnatsd"
|
||||||
|
packages = [
|
||||||
|
"conf",
|
||||||
|
"logger",
|
||||||
|
"server",
|
||||||
|
"server/pse",
|
||||||
|
"util"
|
||||||
|
]
|
||||||
|
revision = "add6d7930ae6d4bff8823b28999ea87bf1bfd23d"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/nats-io/go-nats"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"encoders/builtin",
|
||||||
|
"util"
|
||||||
|
]
|
||||||
|
revision = "062418ea1c2181f52dc0f954f6204370519a868b"
|
||||||
|
version = "v1.5.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/nats-io/nuid"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "289cccf02c178dc782430d534e3c1f5b72af807f"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/nsqio/go-nsq"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f"
|
||||||
|
version = "v1.0.7"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/opentracing-contrib/go-observer"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/opentracing/opentracing-go"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"ext",
|
||||||
|
"log"
|
||||||
|
]
|
||||||
|
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||||
|
version = "v1.0.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/openzipkin/zipkin-go-opentracing"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"flag",
|
||||||
|
"thrift/gen-go/scribe",
|
||||||
|
"thrift/gen-go/zipkincore",
|
||||||
|
"types",
|
||||||
|
"wire"
|
||||||
|
]
|
||||||
|
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
|
||||||
|
version = "v0.3.4"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/pierrec/lz4"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"internal/xxh32"
|
||||||
|
]
|
||||||
|
revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
|
||||||
|
version = "v2.0.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/pkg/errors"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||||
|
version = "v0.8.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/pmezard/go-difflib"
|
||||||
|
packages = ["difflib"]
|
||||||
|
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/prometheus/client_golang"
|
||||||
|
packages = [
|
||||||
|
"prometheus",
|
||||||
|
"prometheus/promhttp"
|
||||||
|
]
|
||||||
|
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||||
|
version = "v0.8.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/prometheus/client_model"
|
||||||
|
packages = ["go"]
|
||||||
|
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/prometheus/common"
|
||||||
|
packages = [
|
||||||
|
"expfmt",
|
||||||
|
"internal/bitbucket.org/ww/goautoneg",
|
||||||
|
"log",
|
||||||
|
"model"
|
||||||
|
]
|
||||||
|
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/prometheus/procfs"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"internal/util",
|
||||||
|
"nfs",
|
||||||
|
"xfs"
|
||||||
|
]
|
||||||
|
revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/rcrowley/go-metrics"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/samuel/go-zookeeper"
|
||||||
|
packages = ["zk"]
|
||||||
|
revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/satori/go.uuid"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||||
|
version = "v1.2.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/shirou/gopsutil"
|
||||||
|
packages = [
|
||||||
|
"cpu",
|
||||||
|
"disk",
|
||||||
|
"host",
|
||||||
|
"internal/common",
|
||||||
|
"load",
|
||||||
|
"mem",
|
||||||
|
"net",
|
||||||
|
"process"
|
||||||
|
]
|
||||||
|
revision = "eeb1d38d69593f121e060d24d17f7b1f0936b203"
|
||||||
|
version = "v2.18.05"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/shirou/w32"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/sirupsen/logrus"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
|
||||||
|
version = "v1.0.5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/soniah/gosnmp"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "bcf840db66be7d64bf96c3c0e075c92e3d98f793"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/streadway/amqp"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "e5adc2ada8b8efff032bf61173a233d143e9318e"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/stretchr/objx"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
|
||||||
|
version = "v0.1.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/stretchr/testify"
|
||||||
|
packages = [
|
||||||
|
"assert",
|
||||||
|
"mock",
|
||||||
|
"require"
|
||||||
|
]
|
||||||
|
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||||
|
version = "v1.2.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/tidwall/gjson"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "afaeb9562041a8018c74e006551143666aed08bf"
|
||||||
|
version = "v1.1.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/tidwall/match"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "1731857f09b1f38450e2c12409748407822dc6be"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/vjeantet/grok"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/wvanbergen/kafka"
|
||||||
|
packages = ["consumergroup"]
|
||||||
|
revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/wvanbergen/kazoo-go"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "f72d8611297a7cf105da904c04198ad701a60101"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/yuin/gopher-lua"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"ast",
|
||||||
|
"parse",
|
||||||
|
"pm"
|
||||||
|
]
|
||||||
|
revision = "ca850f594eaafa5468da2bd53b865e4ee55be18b"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/zensqlmonitor/go-mssqldb"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "e8fbf836e44e86764eba398361d1825651709547"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/crypto"
|
||||||
|
packages = [
|
||||||
|
"bcrypt",
|
||||||
|
"blowfish",
|
||||||
|
"ed25519",
|
||||||
|
"ed25519/internal/edwards25519",
|
||||||
|
"md4",
|
||||||
|
"pbkdf2",
|
||||||
|
"ssh/terminal"
|
||||||
|
]
|
||||||
|
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
packages = [
|
||||||
|
"bpf",
|
||||||
|
"context",
|
||||||
|
"context/ctxhttp",
|
||||||
|
"html",
|
||||||
|
"html/atom",
|
||||||
|
"html/charset",
|
||||||
|
"http/httpguts",
|
||||||
|
"http2",
|
||||||
|
"http2/hpack",
|
||||||
|
"idna",
|
||||||
|
"internal/iana",
|
||||||
|
"internal/socket",
|
||||||
|
"internal/socks",
|
||||||
|
"internal/timeseries",
|
||||||
|
"ipv4",
|
||||||
|
"ipv6",
|
||||||
|
"proxy",
|
||||||
|
"trace",
|
||||||
|
"websocket"
|
||||||
|
]
|
||||||
|
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/sys"
|
||||||
|
packages = [
|
||||||
|
"unix",
|
||||||
|
"windows",
|
||||||
|
"windows/registry",
|
||||||
|
"windows/svc",
|
||||||
|
"windows/svc/debug",
|
||||||
|
"windows/svc/eventlog",
|
||||||
|
"windows/svc/mgr"
|
||||||
|
]
|
||||||
|
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "golang.org/x/text"
|
||||||
|
packages = [
|
||||||
|
"collate",
|
||||||
|
"collate/build",
|
||||||
|
"encoding",
|
||||||
|
"encoding/charmap",
|
||||||
|
"encoding/htmlindex",
|
||||||
|
"encoding/internal",
|
||||||
|
"encoding/internal/identifier",
|
||||||
|
"encoding/japanese",
|
||||||
|
"encoding/korean",
|
||||||
|
"encoding/simplifiedchinese",
|
||||||
|
"encoding/traditionalchinese",
|
||||||
|
"encoding/unicode",
|
||||||
|
"internal/colltab",
|
||||||
|
"internal/gen",
|
||||||
|
"internal/tag",
|
||||||
|
"internal/triegen",
|
||||||
|
"internal/ucd",
|
||||||
|
"internal/utf8internal",
|
||||||
|
"language",
|
||||||
|
"runes",
|
||||||
|
"secure/bidirule",
|
||||||
|
"transform",
|
||||||
|
"unicode/bidi",
|
||||||
|
"unicode/cldr",
|
||||||
|
"unicode/norm",
|
||||||
|
"unicode/rangetable"
|
||||||
|
]
|
||||||
|
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||||
|
version = "v0.3.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "google.golang.org/appengine"
|
||||||
|
packages = ["cloudsql"]
|
||||||
|
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "google.golang.org/genproto"
|
||||||
|
packages = ["googleapis/rpc/status"]
|
||||||
|
revision = "32ee49c4dd805befd833990acba36cb75042378c"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "google.golang.org/grpc"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"balancer",
|
||||||
|
"balancer/base",
|
||||||
|
"balancer/roundrobin",
|
||||||
|
"channelz",
|
||||||
|
"codes",
|
||||||
|
"connectivity",
|
||||||
|
"credentials",
|
||||||
|
"encoding",
|
||||||
|
"encoding/proto",
|
||||||
|
"grpclb/grpc_lb_v1/messages",
|
||||||
|
"grpclog",
|
||||||
|
"internal",
|
||||||
|
"keepalive",
|
||||||
|
"metadata",
|
||||||
|
"naming",
|
||||||
|
"peer",
|
||||||
|
"resolver",
|
||||||
|
"resolver/dns",
|
||||||
|
"resolver/passthrough",
|
||||||
|
"stats",
|
||||||
|
"status",
|
||||||
|
"tap",
|
||||||
|
"transport"
|
||||||
|
]
|
||||||
|
revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b"
|
||||||
|
version = "v1.12.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/alecthomas/kingpin.v2"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
|
||||||
|
version = "v2.2.6"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/asn1-ber.v1"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "379148ca0225df7a432012b8df0355c2a2063ac0"
|
||||||
|
version = "v1.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/fatih/pool.v2"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f"
|
||||||
|
version = "v2.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/fsnotify.v1"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||||
|
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
|
||||||
|
version = "v1.4.7"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/gorethink/gorethink.v3"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"encoding",
|
||||||
|
"ql2",
|
||||||
|
"types"
|
||||||
|
]
|
||||||
|
revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b"
|
||||||
|
version = "v3.0.5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/ldap.v2"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9"
|
||||||
|
version = "v2.5.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "v2"
|
||||||
|
name = "gopkg.in/mgo.v2"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"bson",
|
||||||
|
"internal/json",
|
||||||
|
"internal/sasl",
|
||||||
|
"internal/scram"
|
||||||
|
]
|
||||||
|
revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/olivere/elastic.v5"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"config",
|
||||||
|
"uritemplates"
|
||||||
|
]
|
||||||
|
revision = "b708306d715bea9b983685e94ab4602cdc9f988b"
|
||||||
|
version = "v5.0.69"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "v1"
|
||||||
|
name = "gopkg.in/tomb.v1"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "gopkg.in/yaml.v2"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||||
|
version = "v2.2.1"
|
||||||
|
|
||||||
|
[solve-meta]
|
||||||
|
analyzer-name = "dep"
|
||||||
|
analyzer-version = 1
|
||||||
|
inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370"
|
||||||
|
solver-name = "gps-cdcl"
|
||||||
|
solver-version = 1
|
||||||
243
Gopkg.toml
Normal file
243
Gopkg.toml
Normal file
@@ -0,0 +1,243 @@
|
|||||||
|
[[constraint]]
|
||||||
|
name = "collectd.org"
|
||||||
|
version = "0.3.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/aerospike/aerospike-client-go"
|
||||||
|
version = "^1.33.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/amir/raidman"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/apache/thrift"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/aws/aws-sdk-go"
|
||||||
|
version = "1.14.8"
|
||||||
|
# version = "1.8.39"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/bsm/sarama-cluster"
|
||||||
|
version = "2.1.13"
|
||||||
|
# version = "2.1.10"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/couchbase/go-couchbase"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/dgrijalva/jwt-go"
|
||||||
|
version = "3.2.0"
|
||||||
|
# version = "3.1.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/docker/docker"
|
||||||
|
version = "~17.03.2-ce"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/docker/go-connections"
|
||||||
|
version = "0.3.0"
|
||||||
|
# version = "0.2.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/eclipse/paho.mqtt.golang"
|
||||||
|
version = "~1.1.1"
|
||||||
|
# version = "1.1.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/go-sql-driver/mysql"
|
||||||
|
version = "1.4.0"
|
||||||
|
# version = "1.3.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/gobwas/glob"
|
||||||
|
version = "0.2.3"
|
||||||
|
# version = "0.2.2"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/golang/protobuf"
|
||||||
|
version = "1.1.0"
|
||||||
|
# version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/google/go-cmp"
|
||||||
|
version = "0.2.0"
|
||||||
|
# version = "0.1.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/gorilla/mux"
|
||||||
|
version = "1.6.2"
|
||||||
|
# version = "1.6.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/go-redis/redis"
|
||||||
|
version = "6.12.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/hashicorp/consul"
|
||||||
|
version = "1.1.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/influxdata/go-syslog"
|
||||||
|
version = "1.0.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/influxdata/tail"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/influxdata/toml"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/influxdata/wlog"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/jackc/pgx"
|
||||||
|
version = "3.1.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/kardianos/service"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/kballard/go-shellquote"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||||
|
version = "1.0.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/Microsoft/ApplicationInsights-Go"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/miekg/dns"
|
||||||
|
version = "1.0.8"
|
||||||
|
# version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/multiplay/go-ts3"
|
||||||
|
version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/nats-io/gnatsd"
|
||||||
|
version = "1.1.0"
|
||||||
|
# version = "1.0.4"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/nats-io/go-nats"
|
||||||
|
version = "1.5.0"
|
||||||
|
# version = "1.3.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/nsqio/go-nsq"
|
||||||
|
version = "1.0.7"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/openzipkin/zipkin-go-opentracing"
|
||||||
|
version = "0.3.4"
|
||||||
|
# version = "0.3.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/prometheus/client_golang"
|
||||||
|
version = "0.8.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/prometheus/client_model"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/prometheus/common"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/satori/go.uuid"
|
||||||
|
version = "1.2.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/shirou/gopsutil"
|
||||||
|
version = "2.18.05"
|
||||||
|
# version = "2.18.04"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/Shopify/sarama"
|
||||||
|
version = "1.17.0"
|
||||||
|
# version = "1.15.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/soniah/gosnmp"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/StackExchange/wmi"
|
||||||
|
version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/streadway/amqp"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/stretchr/testify"
|
||||||
|
version = "1.2.2"
|
||||||
|
# version = "1.2.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/tidwall/gjson"
|
||||||
|
version = "1.1.1"
|
||||||
|
# version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/vjeantet/grok"
|
||||||
|
version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/wvanbergen/kafka"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/zensqlmonitor/go-mssqldb"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "golang.org/x/sys"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "google.golang.org/grpc"
|
||||||
|
version = "1.12.2"
|
||||||
|
# version = "1.8.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "gopkg.in/gorethink/gorethink.v3"
|
||||||
|
version = "3.0.5"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "gopkg.in/ldap.v2"
|
||||||
|
version = "2.5.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "gopkg.in/mgo.v2"
|
||||||
|
branch = "v2"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "gopkg.in/olivere/elastic.v5"
|
||||||
|
version = "^5.0.69"
|
||||||
|
# version = "^6.1.23"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "gopkg.in/yaml.v2"
|
||||||
|
version = "^2.2.1"
|
||||||
|
|
||||||
|
[[override]]
|
||||||
|
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
|
||||||
|
name = "gopkg.in/fsnotify.v1"
|
||||||
6
Makefile
6
Makefile
@@ -23,8 +23,8 @@ all:
|
|||||||
|
|
||||||
deps:
|
deps:
|
||||||
go get -u github.com/golang/lint/golint
|
go get -u github.com/golang/lint/golint
|
||||||
go get github.com/sparrc/gdm
|
go get -u github.com/golang/dep/cmd/dep
|
||||||
gdm restore --parallel=false
|
dep ensure
|
||||||
|
|
||||||
telegraf:
|
telegraf:
|
||||||
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
|
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
|
||||||
@@ -34,7 +34,7 @@ go-install:
|
|||||||
|
|
||||||
install: telegraf
|
install: telegraf
|
||||||
mkdir -p $(DESTDIR)$(PREFIX)/bin/
|
mkdir -p $(DESTDIR)$(PREFIX)/bin/
|
||||||
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
|
cp telegraf $(DESTDIR)$(PREFIX)/bin/
|
||||||
|
|
||||||
test:
|
test:
|
||||||
go test -short ./...
|
go test -short ./...
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ Ansible role: https://github.com/rossmcdonald/telegraf
|
|||||||
|
|
||||||
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.
|
Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make.
|
||||||
|
|
||||||
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
|
Dependencies are managed with [dep](https://github.com/golang/dep),
|
||||||
which is installed by the Makefile if you don't have it already.
|
which is installed by the Makefile if you don't have it already.
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install)
|
1. [Install Go](https://golang.org/doc/install)
|
||||||
@@ -213,6 +213,7 @@ configuration options.
|
|||||||
* [sql server](./plugins/inputs/sqlserver) (microsoft)
|
* [sql server](./plugins/inputs/sqlserver) (microsoft)
|
||||||
* [syslog](./plugins/inputs/syslog)
|
* [syslog](./plugins/inputs/syslog)
|
||||||
* [teamspeak](./plugins/inputs/teamspeak)
|
* [teamspeak](./plugins/inputs/teamspeak)
|
||||||
|
* [tengine](./plugins/inputs/tengine)
|
||||||
* [tomcat](./plugins/inputs/tomcat)
|
* [tomcat](./plugins/inputs/tomcat)
|
||||||
* [twemproxy](./plugins/inputs/twemproxy)
|
* [twemproxy](./plugins/inputs/twemproxy)
|
||||||
* [unbound](./plugins/inputs/unbound)
|
* [unbound](./plugins/inputs/unbound)
|
||||||
@@ -281,6 +282,7 @@ formats may be used with input plugins supporting the `data_format` option:
|
|||||||
* [basicstats](./plugins/aggregators/basicstats)
|
* [basicstats](./plugins/aggregators/basicstats)
|
||||||
* [minmax](./plugins/aggregators/minmax)
|
* [minmax](./plugins/aggregators/minmax)
|
||||||
* [histogram](./plugins/aggregators/histogram)
|
* [histogram](./plugins/aggregators/histogram)
|
||||||
|
* [valuecounter](./plugins/aggregators/valuecounter)
|
||||||
|
|
||||||
## Output Plugins
|
## Output Plugins
|
||||||
|
|
||||||
|
|||||||
@@ -362,24 +362,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
|||||||
metricC := make(chan telegraf.Metric, 100)
|
metricC := make(chan telegraf.Metric, 100)
|
||||||
aggC := make(chan telegraf.Metric, 100)
|
aggC := make(chan telegraf.Metric, 100)
|
||||||
|
|
||||||
// Start all ServicePlugins
|
|
||||||
for _, input := range a.Config.Inputs {
|
|
||||||
input.SetDefaultTags(a.Config.Tags)
|
|
||||||
switch p := input.Input.(type) {
|
|
||||||
case telegraf.ServiceInput:
|
|
||||||
acc := NewAccumulator(input, metricC)
|
|
||||||
// Service input plugins should set their own precision of their
|
|
||||||
// metrics.
|
|
||||||
acc.SetPrecision(time.Nanosecond, 0)
|
|
||||||
if err := p.Start(acc); err != nil {
|
|
||||||
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
|
|
||||||
input.Name(), err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer p.Stop()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Round collection to nearest interval by sleeping
|
// Round collection to nearest interval by sleeping
|
||||||
if a.Config.Agent.RoundInterval {
|
if a.Config.Agent.RoundInterval {
|
||||||
i := int64(a.Config.Agent.Interval.Duration)
|
i := int64(a.Config.Agent.Interval.Duration)
|
||||||
@@ -419,6 +401,25 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
|||||||
}(input, interval)
|
}(input, interval)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Start all ServicePlugins inputs after all other
|
||||||
|
// plugins are loaded so that no metrics get dropped
|
||||||
|
for _, input := range a.Config.Inputs {
|
||||||
|
input.SetDefaultTags(a.Config.Tags)
|
||||||
|
switch p := input.Input.(type) {
|
||||||
|
case telegraf.ServiceInput:
|
||||||
|
acc := NewAccumulator(input, metricC)
|
||||||
|
// Service input plugins should set their own precision of their
|
||||||
|
// metrics.
|
||||||
|
acc.SetPrecision(time.Nanosecond, 0)
|
||||||
|
if err := p.Start(acc); err != nil {
|
||||||
|
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
|
||||||
|
input.Name(), err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer p.Stop()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
a.Close()
|
a.Close()
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ install:
|
|||||||
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
|
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
|
||||||
- go version
|
- go version
|
||||||
- go env
|
- go env
|
||||||
|
- git config --system core.longpaths true
|
||||||
|
|
||||||
build_script:
|
build_script:
|
||||||
- cmd: C:\GnuWin32\bin\make deps
|
- cmd: C:\GnuWin32\bin\make deps
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ var fService = flag.String("service", "",
|
|||||||
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
|
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
|
||||||
|
|
||||||
var (
|
var (
|
||||||
nextVersion = "1.7.0"
|
nextVersion = "1.8.0"
|
||||||
version string
|
version string
|
||||||
commit string
|
commit string
|
||||||
branch string
|
branch string
|
||||||
|
|||||||
@@ -1077,7 +1077,7 @@
|
|||||||
# mount_points = ["/"]
|
# mount_points = ["/"]
|
||||||
|
|
||||||
## Ignore mount points by filesystem type.
|
## Ignore mount points by filesystem type.
|
||||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about disk IO by device
|
# Read metrics about disk IO by device
|
||||||
|
|||||||
@@ -242,7 +242,7 @@
|
|||||||
#
|
#
|
||||||
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
||||||
# ## present on /run, /var/run, /dev/shm or /dev).
|
# ## present on /run, /var/run, /dev/shm or /dev).
|
||||||
# # ignore_fs = ["tmpfs", "devtmpfs"]
|
# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about disk IO by device
|
# # Read metrics about disk IO by device
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
"unicode"
|
||||||
)
|
)
|
||||||
@@ -193,3 +194,15 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Exit status takes the error from exec.Command
|
||||||
|
// and returns the exit status and true
|
||||||
|
// if error is not exit status, will return 0 and false
|
||||||
|
func ExitStatus(err error) (int, bool) {
|
||||||
|
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||||
|
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
|
||||||
|
return status.ExitStatus(), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package models
|
package models
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"log"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
@@ -153,6 +154,7 @@ func (r *RunningAggregator) Run(
|
|||||||
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
|
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
|
||||||
// the metric is outside the current aggregation period, so
|
// the metric is outside the current aggregation period, so
|
||||||
// skip it.
|
// skip it.
|
||||||
|
log.Printf("D! aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name())
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
r.add(m)
|
r.add(m)
|
||||||
|
|||||||
@@ -4,4 +4,5 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
|
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
|
||||||
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
|
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
|
||||||
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
|
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -246,8 +246,6 @@ func convert(in interface{}) (float64, bool) {
|
|||||||
return v, true
|
return v, true
|
||||||
case int64:
|
case int64:
|
||||||
return float64(v), true
|
return float64(v), true
|
||||||
case uint64:
|
|
||||||
return float64(v), true
|
|
||||||
default:
|
default:
|
||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -28,7 +28,6 @@ var m2, _ = metric.New("m1",
|
|||||||
"c": float64(4),
|
"c": float64(4),
|
||||||
"d": float64(6),
|
"d": float64(6),
|
||||||
"e": float64(200),
|
"e": float64(200),
|
||||||
"f": uint64(200),
|
|
||||||
"ignoreme": "string",
|
"ignoreme": "string",
|
||||||
"andme": true,
|
"andme": true,
|
||||||
},
|
},
|
||||||
@@ -82,10 +81,6 @@ func TestBasicStatsWithPeriod(t *testing.T) {
|
|||||||
"e_max": float64(200),
|
"e_max": float64(200),
|
||||||
"e_min": float64(200),
|
"e_min": float64(200),
|
||||||
"e_mean": float64(200),
|
"e_mean": float64(200),
|
||||||
"f_count": float64(1), //f
|
|
||||||
"f_max": float64(200),
|
|
||||||
"f_min": float64(200),
|
|
||||||
"f_mean": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -149,10 +144,6 @@ func TestBasicStatsDifferentPeriods(t *testing.T) {
|
|||||||
"e_max": float64(200),
|
"e_max": float64(200),
|
||||||
"e_min": float64(200),
|
"e_min": float64(200),
|
||||||
"e_mean": float64(200),
|
"e_mean": float64(200),
|
||||||
"f_count": float64(1), //f
|
|
||||||
"f_max": float64(200),
|
|
||||||
"f_min": float64(200),
|
|
||||||
"f_mean": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags = map[string]string{
|
expectedTags = map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -178,7 +169,6 @@ func TestBasicStatsWithOnlyCount(t *testing.T) {
|
|||||||
"c_count": float64(2),
|
"c_count": float64(2),
|
||||||
"d_count": float64(2),
|
"d_count": float64(2),
|
||||||
"e_count": float64(1),
|
"e_count": float64(1),
|
||||||
"f_count": float64(1),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -204,7 +194,6 @@ func TestBasicStatsWithOnlyMin(t *testing.T) {
|
|||||||
"c_min": float64(2),
|
"c_min": float64(2),
|
||||||
"d_min": float64(2),
|
"d_min": float64(2),
|
||||||
"e_min": float64(200),
|
"e_min": float64(200),
|
||||||
"f_min": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -230,7 +219,6 @@ func TestBasicStatsWithOnlyMax(t *testing.T) {
|
|||||||
"c_max": float64(4),
|
"c_max": float64(4),
|
||||||
"d_max": float64(6),
|
"d_max": float64(6),
|
||||||
"e_max": float64(200),
|
"e_max": float64(200),
|
||||||
"f_max": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -256,7 +244,6 @@ func TestBasicStatsWithOnlyMean(t *testing.T) {
|
|||||||
"c_mean": float64(3),
|
"c_mean": float64(3),
|
||||||
"d_mean": float64(4),
|
"d_mean": float64(4),
|
||||||
"e_mean": float64(200),
|
"e_mean": float64(200),
|
||||||
"f_mean": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -282,7 +269,6 @@ func TestBasicStatsWithOnlySum(t *testing.T) {
|
|||||||
"c_sum": float64(6),
|
"c_sum": float64(6),
|
||||||
"d_sum": float64(8),
|
"d_sum": float64(8),
|
||||||
"e_sum": float64(200),
|
"e_sum": float64(200),
|
||||||
"f_sum": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -413,8 +399,6 @@ func TestBasicStatsWithMinAndMax(t *testing.T) {
|
|||||||
"d_min": float64(2),
|
"d_min": float64(2),
|
||||||
"e_max": float64(200), //e
|
"e_max": float64(200), //e
|
||||||
"e_min": float64(200),
|
"e_min": float64(200),
|
||||||
"f_max": float64(200), //f
|
|
||||||
"f_min": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -466,11 +450,6 @@ func TestBasicStatsWithAllStats(t *testing.T) {
|
|||||||
"e_min": float64(200),
|
"e_min": float64(200),
|
||||||
"e_mean": float64(200),
|
"e_mean": float64(200),
|
||||||
"e_sum": float64(200),
|
"e_sum": float64(200),
|
||||||
"f_count": float64(1), //f
|
|
||||||
"f_max": float64(200),
|
|
||||||
"f_min": float64(200),
|
|
||||||
"f_mean": float64(200),
|
|
||||||
"f_sum": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
|
|||||||
@@ -107,8 +107,6 @@ func convert(in interface{}) (float64, bool) {
|
|||||||
return v, true
|
return v, true
|
||||||
case int64:
|
case int64:
|
||||||
return float64(v), true
|
return float64(v), true
|
||||||
case uint64:
|
|
||||||
return float64(v), true
|
|
||||||
default:
|
default:
|
||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -38,7 +38,6 @@ var m2, _ = metric.New("m1",
|
|||||||
"i": float64(1),
|
"i": float64(1),
|
||||||
"j": float64(1),
|
"j": float64(1),
|
||||||
"k": float64(200),
|
"k": float64(200),
|
||||||
"l": uint64(200),
|
|
||||||
"ignoreme": "string",
|
"ignoreme": "string",
|
||||||
"andme": true,
|
"andme": true,
|
||||||
},
|
},
|
||||||
@@ -86,8 +85,6 @@ func TestMinMaxWithPeriod(t *testing.T) {
|
|||||||
"j_min": float64(1),
|
"j_min": float64(1),
|
||||||
"k_max": float64(200),
|
"k_max": float64(200),
|
||||||
"k_min": float64(200),
|
"k_min": float64(200),
|
||||||
"l_max": float64(200),
|
|
||||||
"l_min": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags := map[string]string{
|
expectedTags := map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@@ -157,8 +154,6 @@ func TestMinMaxDifferentPeriods(t *testing.T) {
|
|||||||
"j_min": float64(1),
|
"j_min": float64(1),
|
||||||
"k_max": float64(200),
|
"k_max": float64(200),
|
||||||
"k_min": float64(200),
|
"k_min": float64(200),
|
||||||
"l_max": float64(200),
|
|
||||||
"l_min": float64(200),
|
|
||||||
}
|
}
|
||||||
expectedTags = map[string]string{
|
expectedTags = map[string]string{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
|
|||||||
73
plugins/aggregators/valuecounter/README.md
Normal file
73
plugins/aggregators/valuecounter/README.md
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
# ValueCounter Aggregator Plugin
|
||||||
|
|
||||||
|
The valuecounter plugin counts the occurrence of values in fields and emits the
|
||||||
|
counter once every 'period' seconds.
|
||||||
|
|
||||||
|
A use case for the valuecounter plugin is when you are processing a HTTP access
|
||||||
|
log (with the logparser input) and want to count the HTTP status codes.
|
||||||
|
|
||||||
|
The fields which will be counted must be configured with the `fields`
|
||||||
|
configuration directive. When no `fields` is provided the plugin will not count
|
||||||
|
any fields. The results are emitted in fields in the format:
|
||||||
|
`originalfieldname_fieldvalue = count`.
|
||||||
|
|
||||||
|
Valuecounter only works on fields of the type int, bool or string. Float fields
|
||||||
|
are being dropped to prevent the creating of too many fields.
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[aggregators.valuecounter]]
|
||||||
|
## General Aggregator Arguments:
|
||||||
|
## The period on which to flush & clear the aggregator.
|
||||||
|
period = "30s"
|
||||||
|
## If true, the original metric will be dropped by the
|
||||||
|
## aggregator and will not get sent to the output plugins.
|
||||||
|
drop_original = false
|
||||||
|
## The fields for which the values will be counted
|
||||||
|
fields = ["status"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Measurements & Fields:
|
||||||
|
|
||||||
|
- measurement1
|
||||||
|
- field_value1
|
||||||
|
- field_value2
|
||||||
|
|
||||||
|
### Tags:
|
||||||
|
|
||||||
|
No tags are applied by this aggregator.
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
|
||||||
|
Example for parsing a HTTP access log.
|
||||||
|
|
||||||
|
telegraf.conf:
|
||||||
|
```
|
||||||
|
[[inputs.logparser]]
|
||||||
|
files = ["/tmp/tst.log"]
|
||||||
|
[inputs.logparser.grok]
|
||||||
|
patterns = ['%{DATA:url:tag} %{NUMBER:response:string}']
|
||||||
|
measurement = "access"
|
||||||
|
|
||||||
|
[[aggregators.valuecounter]]
|
||||||
|
namepass = ["access"]
|
||||||
|
fields = ["response"]
|
||||||
|
```
|
||||||
|
|
||||||
|
/tmp/tst.log
|
||||||
|
```
|
||||||
|
/some/path 200
|
||||||
|
/some/path 401
|
||||||
|
/some/path 200
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
$ telegraf --config telegraf.conf --quiet
|
||||||
|
|
||||||
|
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011
|
||||||
|
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="401" 1511948755991522282
|
||||||
|
access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991531697
|
||||||
|
|
||||||
|
access,path=/tmp/tst.log,host=localhost.localdomain,url=/some/path response_200=2i,response_401=1i 1511948761000000000
|
||||||
|
```
|
||||||
108
plugins/aggregators/valuecounter/valuecounter.go
Normal file
108
plugins/aggregators/valuecounter/valuecounter.go
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
package valuecounter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/aggregators"
|
||||||
|
)
|
||||||
|
|
||||||
|
type aggregate struct {
|
||||||
|
name string
|
||||||
|
tags map[string]string
|
||||||
|
fieldCount map[string]int
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueCounter an aggregation plugin
|
||||||
|
type ValueCounter struct {
|
||||||
|
cache map[uint64]aggregate
|
||||||
|
Fields []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewValueCounter create a new aggregation plugin which counts the occurances
|
||||||
|
// of fields and emits the count.
|
||||||
|
func NewValueCounter() telegraf.Aggregator {
|
||||||
|
vc := &ValueCounter{}
|
||||||
|
vc.Reset()
|
||||||
|
return vc
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
## General Aggregator Arguments:
|
||||||
|
## The period on which to flush & clear the aggregator.
|
||||||
|
period = "30s"
|
||||||
|
## If true, the original metric will be dropped by the
|
||||||
|
## aggregator and will not get sent to the output plugins.
|
||||||
|
drop_original = false
|
||||||
|
## The fields for which the values will be counted
|
||||||
|
fields = []
|
||||||
|
`
|
||||||
|
|
||||||
|
// SampleConfig generates a sample config for the ValueCounter plugin
|
||||||
|
func (vc *ValueCounter) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// Description returns the description of the ValueCounter plugin
|
||||||
|
func (vc *ValueCounter) Description() string {
|
||||||
|
return "Count the occurance of values in fields."
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add is run on every metric which passes the plugin
|
||||||
|
func (vc *ValueCounter) Add(in telegraf.Metric) {
|
||||||
|
id := in.HashID()
|
||||||
|
|
||||||
|
// Check if the cache already has an entry for this metric, if not create it
|
||||||
|
if _, ok := vc.cache[id]; !ok {
|
||||||
|
a := aggregate{
|
||||||
|
name: in.Name(),
|
||||||
|
tags: in.Tags(),
|
||||||
|
fieldCount: make(map[string]int),
|
||||||
|
}
|
||||||
|
vc.cache[id] = a
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this metric has fields which we need to count, if so increment
|
||||||
|
// the count.
|
||||||
|
for fk, fv := range in.Fields() {
|
||||||
|
for _, cf := range vc.Fields {
|
||||||
|
if fk == cf {
|
||||||
|
// Do not process float types to prevent memory from blowing up
|
||||||
|
switch fv.(type) {
|
||||||
|
default:
|
||||||
|
log.Printf("I! Valuecounter: Unsupported field type. " +
|
||||||
|
"Must be an int, string or bool. Ignoring.")
|
||||||
|
continue
|
||||||
|
case uint64, int64, string, bool:
|
||||||
|
}
|
||||||
|
fn := fmt.Sprintf("%v_%v", fk, fv)
|
||||||
|
vc.cache[id].fieldCount[fn]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push emits the counters
|
||||||
|
func (vc *ValueCounter) Push(acc telegraf.Accumulator) {
|
||||||
|
for _, agg := range vc.cache {
|
||||||
|
fields := map[string]interface{}{}
|
||||||
|
|
||||||
|
for field, count := range agg.fieldCount {
|
||||||
|
fields[field] = count
|
||||||
|
}
|
||||||
|
|
||||||
|
acc.AddFields(agg.name, fields, agg.tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset the cache, executed after each push
|
||||||
|
func (vc *ValueCounter) Reset() {
|
||||||
|
vc.cache = make(map[uint64]aggregate)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
aggregators.Add("valuecounter", func() telegraf.Aggregator {
|
||||||
|
return NewValueCounter()
|
||||||
|
})
|
||||||
|
}
|
||||||
126
plugins/aggregators/valuecounter/valuecounter_test.go
Normal file
126
plugins/aggregators/valuecounter/valuecounter_test.go
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
package valuecounter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/metric"
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create a valuecounter with config
|
||||||
|
func NewTestValueCounter(fields []string) telegraf.Aggregator {
|
||||||
|
vc := &ValueCounter{
|
||||||
|
Fields: fields,
|
||||||
|
}
|
||||||
|
vc.Reset()
|
||||||
|
|
||||||
|
return vc
|
||||||
|
}
|
||||||
|
|
||||||
|
var m1, _ = metric.New("m1",
|
||||||
|
map[string]string{"foo": "bar"},
|
||||||
|
map[string]interface{}{
|
||||||
|
"status": 200,
|
||||||
|
"somefield": 20.1,
|
||||||
|
"foobar": "bar",
|
||||||
|
},
|
||||||
|
time.Now(),
|
||||||
|
)
|
||||||
|
|
||||||
|
var m2, _ = metric.New("m1",
|
||||||
|
map[string]string{"foo": "bar"},
|
||||||
|
map[string]interface{}{
|
||||||
|
"status": "OK",
|
||||||
|
"ignoreme": "string",
|
||||||
|
"andme": true,
|
||||||
|
"boolfield": false,
|
||||||
|
},
|
||||||
|
time.Now(),
|
||||||
|
)
|
||||||
|
|
||||||
|
func BenchmarkApply(b *testing.B) {
|
||||||
|
vc := NewTestValueCounter([]string{"status"})
|
||||||
|
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Add(m2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test basic functionality
|
||||||
|
func TestBasic(t *testing.T) {
|
||||||
|
vc := NewTestValueCounter([]string{"status"})
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Add(m2)
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Push(&acc)
|
||||||
|
|
||||||
|
expectedFields := map[string]interface{}{
|
||||||
|
"status_200": 2,
|
||||||
|
"status_OK": 1,
|
||||||
|
}
|
||||||
|
expectedTags := map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test with multiple fields to count
|
||||||
|
func TestMultipleFields(t *testing.T) {
|
||||||
|
vc := NewTestValueCounter([]string{"status", "somefield", "boolfield"})
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Add(m2)
|
||||||
|
vc.Add(m2)
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Push(&acc)
|
||||||
|
|
||||||
|
expectedFields := map[string]interface{}{
|
||||||
|
"status_200": 2,
|
||||||
|
"status_OK": 2,
|
||||||
|
"boolfield_false": 2,
|
||||||
|
}
|
||||||
|
expectedTags := map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test with a reset between two runs
|
||||||
|
func TestWithReset(t *testing.T) {
|
||||||
|
vc := NewTestValueCounter([]string{"status"})
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Add(m2)
|
||||||
|
vc.Push(&acc)
|
||||||
|
|
||||||
|
expectedFields := map[string]interface{}{
|
||||||
|
"status_200": 2,
|
||||||
|
"status_OK": 1,
|
||||||
|
}
|
||||||
|
expectedTags := map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||||
|
|
||||||
|
acc.ClearMetrics()
|
||||||
|
vc.Reset()
|
||||||
|
|
||||||
|
vc.Add(m2)
|
||||||
|
vc.Add(m2)
|
||||||
|
vc.Add(m1)
|
||||||
|
vc.Push(&acc)
|
||||||
|
|
||||||
|
expectedFields = map[string]interface{}{
|
||||||
|
"status_200": 1,
|
||||||
|
"status_OK": 2,
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
|
||||||
|
}
|
||||||
@@ -103,6 +103,7 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
|
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
|
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/tengine"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
|
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
||||||
|
|||||||
@@ -92,6 +92,7 @@ Supported Burrow version: `1.x`
|
|||||||
- group (string)
|
- group (string)
|
||||||
- topic (string)
|
- topic (string)
|
||||||
- partition (int)
|
- partition (int)
|
||||||
|
- owner (string)
|
||||||
|
|
||||||
* `burrow_topic`
|
* `burrow_topic`
|
||||||
- cluster (string)
|
- cluster (string)
|
||||||
|
|||||||
@@ -116,6 +116,7 @@ type (
|
|||||||
Start apiStatusResponseLagItem `json:"start"`
|
Start apiStatusResponseLagItem `json:"start"`
|
||||||
End apiStatusResponseLagItem `json:"end"`
|
End apiStatusResponseLagItem `json:"end"`
|
||||||
CurrentLag int64 `json:"current_lag"`
|
CurrentLag int64 `json:"current_lag"`
|
||||||
|
Owner string `json:"owner"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// response: lag field item
|
// response: lag field item
|
||||||
@@ -447,6 +448,7 @@ func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc t
|
|||||||
"group": group,
|
"group": group,
|
||||||
"topic": partition.Topic,
|
"topic": partition.Topic,
|
||||||
"partition": strconv.FormatInt(int64(partition.Partition), 10),
|
"partition": strconv.FormatInt(int64(partition.Partition), 10),
|
||||||
|
"owner": partition.Owner,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -129,9 +129,9 @@ func TestBurrowPartition(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
tags := []map[string]string{
|
tags := []map[string]string{
|
||||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0"},
|
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "0", "owner": "kafka1"},
|
||||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1"},
|
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "1", "owner": "kafka2"},
|
||||||
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2"},
|
{"cluster": "clustername1", "group": "group1", "topic": "topicA", "partition": "2", "owner": "kafka3"},
|
||||||
}
|
}
|
||||||
|
|
||||||
require.Empty(t, acc.Errors)
|
require.Empty(t, acc.Errors)
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
{
|
{
|
||||||
"topic": "topicA",
|
"topic": "topicA",
|
||||||
"partition": 0,
|
"partition": 0,
|
||||||
"owner": "kafka",
|
"owner": "kafka1",
|
||||||
"status": "OK",
|
"status": "OK",
|
||||||
"start": {
|
"start": {
|
||||||
"offset": 431323195,
|
"offset": 431323195,
|
||||||
@@ -28,7 +28,7 @@
|
|||||||
{
|
{
|
||||||
"topic": "topicA",
|
"topic": "topicA",
|
||||||
"partition": 1,
|
"partition": 1,
|
||||||
"owner": "kafka",
|
"owner": "kafka2",
|
||||||
"status": "OK",
|
"status": "OK",
|
||||||
"start": {
|
"start": {
|
||||||
"offset": 431322962,
|
"offset": 431322962,
|
||||||
@@ -46,7 +46,7 @@
|
|||||||
{
|
{
|
||||||
"topic": "topicA",
|
"topic": "topicA",
|
||||||
"partition": 2,
|
"partition": 2,
|
||||||
"owner": "kafka",
|
"owner": "kafka3",
|
||||||
"status": "OK",
|
"status": "OK",
|
||||||
"start": {
|
"start": {
|
||||||
"offset": 428636563,
|
"offset": 428636563,
|
||||||
|
|||||||
@@ -124,6 +124,7 @@ docker API.
|
|||||||
- server_version
|
- server_version
|
||||||
- container_image
|
- container_image
|
||||||
- container_name
|
- container_name
|
||||||
|
- container_status
|
||||||
- container_version
|
- container_version
|
||||||
- fields:
|
- fields:
|
||||||
- total_pgmafault
|
- total_pgmafault
|
||||||
@@ -167,6 +168,7 @@ docker API.
|
|||||||
- server_version
|
- server_version
|
||||||
- container_image
|
- container_image
|
||||||
- container_name
|
- container_name
|
||||||
|
- container_status
|
||||||
- container_version
|
- container_version
|
||||||
- cpu
|
- cpu
|
||||||
- fields:
|
- fields:
|
||||||
@@ -186,6 +188,7 @@ docker API.
|
|||||||
- server_version
|
- server_version
|
||||||
- container_image
|
- container_image
|
||||||
- container_name
|
- container_name
|
||||||
|
- container_status
|
||||||
- container_version
|
- container_version
|
||||||
- network
|
- network
|
||||||
- fields:
|
- fields:
|
||||||
@@ -205,6 +208,7 @@ docker API.
|
|||||||
- server_version
|
- server_version
|
||||||
- container_image
|
- container_image
|
||||||
- container_name
|
- container_name
|
||||||
|
- container_status
|
||||||
- container_version
|
- container_version
|
||||||
- device
|
- device
|
||||||
- fields:
|
- fields:
|
||||||
@@ -226,11 +230,27 @@ docker API.
|
|||||||
- server_version
|
- server_version
|
||||||
- container_image
|
- container_image
|
||||||
- container_name
|
- container_name
|
||||||
|
- container_status
|
||||||
- container_version
|
- container_version
|
||||||
- fields:
|
- fields:
|
||||||
- health_status (string)
|
- health_status (string)
|
||||||
- failing_streak (integer)
|
- failing_streak (integer)
|
||||||
|
|
||||||
|
- docker_container_status
|
||||||
|
- tags:
|
||||||
|
- engine_host
|
||||||
|
- server_version
|
||||||
|
- container_image
|
||||||
|
- container_name
|
||||||
|
- container_status
|
||||||
|
- container_version
|
||||||
|
- fields:
|
||||||
|
- oomkilled (boolean)
|
||||||
|
- pid (integer)
|
||||||
|
- exitcode (integer)
|
||||||
|
- started_at (integer)
|
||||||
|
- finished_at (integer)
|
||||||
|
|
||||||
- docker_swarm
|
- docker_swarm
|
||||||
- tags:
|
- tags:
|
||||||
- service_id
|
- service_id
|
||||||
@@ -245,12 +265,12 @@ docker API.
|
|||||||
```
|
```
|
||||||
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
|
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000
|
||||||
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
|
docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000
|
||||||
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
|
docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000
|
||||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
|
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu-total,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",throttling_periods=0i,throttling_throttled_periods=0i,throttling_throttled_time=0i,usage_in_kernelmode=40000000i,usage_in_usermode=100000000i,usage_percent=0,usage_system=6394210000000i,usage_total=117319068i 1524002042000000000
|
||||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
|
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=20825265i 1524002042000000000
|
||||||
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
|
docker_container_cpu,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,cpu=cpu1,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",usage_total=96493803i 1524002042000000000
|
||||||
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
|
docker_container_net,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,network=eth0,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",rx_bytes=1576i,rx_dropped=0i,rx_errors=0i,rx_packets=20i,tx_bytes=0i,tx_dropped=0i,tx_errors=0i,tx_packets=0i 1524002042000000000
|
||||||
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
|
docker_container_blkio,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,device=254:0,engine_host=debian-stretch-docker,server_version=17.09.0-ce container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",io_service_bytes_recursive_async=27398144i,io_service_bytes_recursive_read=27398144i,io_service_bytes_recursive_sync=0i,io_service_bytes_recursive_total=27398144i,io_service_bytes_recursive_write=0i,io_serviced_recursive_async=529i,io_serviced_recursive_read=529i,io_serviced_recursive_sync=0i,io_serviced_recursive_total=529i,io_serviced_recursive_write=0i 1524002042000000000
|
||||||
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
|
docker_container_health,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce failing_streak=0i,health_status="healthy" 1524007529000000000
|
||||||
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
|
docker_swarm,service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test tasks_desired=3,tasks_running=3 1508968160000000000
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -435,6 +435,23 @@ func (d *Docker) gatherContainer(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if info.State != nil {
|
||||||
|
tags["container_status"] = info.State.Status
|
||||||
|
statefields := map[string]interface{}{
|
||||||
|
"oomkilled": info.State.OOMKilled,
|
||||||
|
"pid": info.State.Pid,
|
||||||
|
"exitcode": info.State.ExitCode,
|
||||||
|
}
|
||||||
|
container_time, err := time.Parse(time.RFC3339, info.State.StartedAt)
|
||||||
|
if err == nil && !container_time.IsZero() {
|
||||||
|
statefields["started_at"] = container_time.UnixNano()
|
||||||
|
}
|
||||||
|
container_time, err = time.Parse(time.RFC3339, info.State.FinishedAt)
|
||||||
|
if err == nil && !container_time.IsZero() {
|
||||||
|
statefields["finished_at"] = container_time.UnixNano()
|
||||||
|
}
|
||||||
|
acc.AddFields("docker_container_status", statefields, tags, time.Now())
|
||||||
|
}
|
||||||
|
|
||||||
if info.State.Health != nil {
|
if info.State.Health != nil {
|
||||||
healthfields := map[string]interface{}{
|
healthfields := map[string]interface{}{
|
||||||
|
|||||||
@@ -653,6 +653,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
|||||||
"label1": "test_value_1",
|
"label1": "test_value_1",
|
||||||
"label2": "test_value_2",
|
"label2": "test_value_2",
|
||||||
"server_version": "17.09.0-ce",
|
"server_version": "17.09.0-ce",
|
||||||
|
"container_status": "running",
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
acc.AssertContainsTaggedFields(t,
|
acc.AssertContainsTaggedFields(t,
|
||||||
@@ -676,6 +677,7 @@ func TestDockerGatherInfo(t *testing.T) {
|
|||||||
"label1": "test_value_1",
|
"label1": "test_value_1",
|
||||||
"label2": "test_value_2",
|
"label2": "test_value_2",
|
||||||
"server_version": "17.09.0-ce",
|
"server_version": "17.09.0-ce",
|
||||||
|
"container_status": "running",
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -484,6 +484,12 @@ var containerInspect = types.ContainerJSON{
|
|||||||
FailingStreak: 1,
|
FailingStreak: 1,
|
||||||
Status: "Unhealthy",
|
Status: "Unhealthy",
|
||||||
},
|
},
|
||||||
|
Status: "running",
|
||||||
|
OOMKilled: false,
|
||||||
|
Pid: 1234,
|
||||||
|
ExitCode: 0,
|
||||||
|
StartedAt: "2018-06-14T05:48:53.266176036Z",
|
||||||
|
FinishedAt: "0001-01-01T00:00:00Z",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -343,9 +343,6 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
|
func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error {
|
||||||
h.mu.Lock()
|
|
||||||
defer h.mu.Unlock()
|
|
||||||
|
|
||||||
h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
|
h.handler.SetTimePrecision(getPrecisionMultiplier(precision))
|
||||||
h.handler.SetTimeFunc(func() time.Time { return t })
|
h.handler.SetTimeFunc(func() time.Time { return t })
|
||||||
metrics, err := h.parser.Parse(b)
|
metrics, err := h.parser.Parse(b)
|
||||||
|
|||||||
@@ -293,7 +293,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
|
|||||||
timestamp = time.Unix(0, iv)
|
timestamp = time.Unix(0, iv)
|
||||||
}
|
}
|
||||||
case SYSLOG_TIMESTAMP:
|
case SYSLOG_TIMESTAMP:
|
||||||
ts, err := time.ParseInLocation(time.Stamp, v, p.loc)
|
ts, err := time.ParseInLocation("Jan 02 15:04:05", v, p.loc)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if ts.Year() == 0 {
|
if ts.Year() == 0 {
|
||||||
ts = ts.AddDate(timestamp.Year(), 0, 0)
|
ts = ts.AddDate(timestamp.Year(), 0, 0)
|
||||||
|
|||||||
@@ -971,39 +971,14 @@ func TestNewlineInPatterns(t *testing.T) {
|
|||||||
require.NotNil(t, m)
|
require.NotNil(t, m)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSyslogTimestamp(t *testing.T) {
|
func TestSyslogTimestampParser(t *testing.T) {
|
||||||
tests := []struct {
|
p := &Parser{
|
||||||
name string
|
Patterns: []string{`%{SYSLOGTIMESTAMP:timestamp:ts-syslog} value=%{NUMBER:value:int}`},
|
||||||
line string
|
timeFunc: func() time.Time { return time.Date(2018, time.April, 1, 0, 0, 0, 0, nil) },
|
||||||
expected time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "two digit day of month",
|
|
||||||
line: "Sep 25 09:01:55 value=42",
|
|
||||||
expected: time.Date(2018, time.September, 25, 9, 1, 55, 0, time.UTC),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "one digit day of month single space",
|
|
||||||
line: "Sep 2 09:01:55 value=42",
|
|
||||||
expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "one digit day of month double space",
|
|
||||||
line: "Sep 2 09:01:55 value=42",
|
|
||||||
expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
p := &Parser{
|
|
||||||
Patterns: []string{`%{SYSLOGTIMESTAMP:timestamp:ts-syslog} value=%{NUMBER:value:int}`},
|
|
||||||
timeFunc: func() time.Time { return time.Date(2017, time.April, 1, 0, 0, 0, 0, time.UTC) },
|
|
||||||
}
|
|
||||||
require.NoError(t, p.Compile())
|
|
||||||
m, err := p.ParseLine(tt.line)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, m)
|
|
||||||
require.Equal(t, tt.expected, m.Time())
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
require.NoError(t, p.Compile())
|
||||||
|
m, err := p.ParseLine("Sep 25 09:01:55 value=42")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, m)
|
||||||
|
require.Equal(t, 2018, m.Time().Year())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -79,7 +80,7 @@ var sampleConfig = `
|
|||||||
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
|
## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
|
||||||
gather_process_list = true
|
gather_process_list = true
|
||||||
#
|
#
|
||||||
## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
|
## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
|
||||||
gather_user_statistics = true
|
gather_user_statistics = true
|
||||||
#
|
#
|
||||||
## gather auto_increment columns and max values from information schema
|
## gather auto_increment columns and max values from information schema
|
||||||
@@ -281,8 +282,9 @@ const (
|
|||||||
GROUP BY command,state
|
GROUP BY command,state
|
||||||
ORDER BY null`
|
ORDER BY null`
|
||||||
infoSchemaUserStatisticsQuery = `
|
infoSchemaUserStatisticsQuery = `
|
||||||
SELECT *
|
SELECT *,count(*)
|
||||||
FROM information_schema.user_statistics`
|
FROM information_schema.user_statistics
|
||||||
|
GROUP BY user`
|
||||||
infoSchemaAutoIncQuery = `
|
infoSchemaAutoIncQuery = `
|
||||||
SELECT table_schema, table_name, column_name, auto_increment,
|
SELECT table_schema, table_name, column_name, auto_increment,
|
||||||
CAST(pow(2, case data_type
|
CAST(pow(2, case data_type
|
||||||
@@ -759,6 +761,103 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
|
|||||||
if len(fields) > 0 {
|
if len(fields) > 0 {
|
||||||
acc.AddFields("mysql", fields, tags)
|
acc.AddFields("mysql", fields, tags)
|
||||||
}
|
}
|
||||||
|
// gather connection metrics from processlist for each user
|
||||||
|
if m.GatherProcessList {
|
||||||
|
conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! MySQL Error gathering process list: %s", err)
|
||||||
|
} else {
|
||||||
|
for conn_rows.Next() {
|
||||||
|
var user string
|
||||||
|
var connections int64
|
||||||
|
|
||||||
|
err = conn_rows.Scan(&user, &connections)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := map[string]string{"server": servtag, "user": user}
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fields["connections"] = connections
|
||||||
|
acc.AddFields("mysql_users", fields, tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// gather connection metrics from user_statistics for each user
|
||||||
|
if m.GatherUserStatistics {
|
||||||
|
conn_rows, err := db.Query("select user, total_connections, concurrent_connections, connected_time, busy_time, cpu_time, bytes_received, bytes_sent, binlog_bytes_written, rows_fetched, rows_updated, table_rows_read, select_commands, update_commands, other_commands, commit_transactions, rollback_transactions, denied_connections, lost_connections, access_denied, empty_queries, total_ssl_connections FROM INFORMATION_SCHEMA.USER_STATISTICS GROUP BY user")
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! MySQL Error gathering user stats: %s", err)
|
||||||
|
} else {
|
||||||
|
for conn_rows.Next() {
|
||||||
|
var user string
|
||||||
|
var total_connections int64
|
||||||
|
var concurrent_connections int64
|
||||||
|
var connected_time int64
|
||||||
|
var busy_time int64
|
||||||
|
var cpu_time int64
|
||||||
|
var bytes_received int64
|
||||||
|
var bytes_sent int64
|
||||||
|
var binlog_bytes_written int64
|
||||||
|
var rows_fetched int64
|
||||||
|
var rows_updated int64
|
||||||
|
var table_rows_read int64
|
||||||
|
var select_commands int64
|
||||||
|
var update_commands int64
|
||||||
|
var other_commands int64
|
||||||
|
var commit_transactions int64
|
||||||
|
var rollback_transactions int64
|
||||||
|
var denied_connections int64
|
||||||
|
var lost_connections int64
|
||||||
|
var access_denied int64
|
||||||
|
var empty_queries int64
|
||||||
|
var total_ssl_connections int64
|
||||||
|
|
||||||
|
err = conn_rows.Scan(&user, &total_connections, &concurrent_connections,
|
||||||
|
&connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written,
|
||||||
|
&rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands,
|
||||||
|
&commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied,
|
||||||
|
&empty_queries, &total_ssl_connections,
|
||||||
|
)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := map[string]string{"server": servtag, "user": user}
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"total_connections": total_connections,
|
||||||
|
"concurrent_connections": concurrent_connections,
|
||||||
|
"connected_time": connected_time,
|
||||||
|
"busy_time": busy_time,
|
||||||
|
"cpu_time": cpu_time,
|
||||||
|
"bytes_received": bytes_received,
|
||||||
|
"bytes_sent": bytes_sent,
|
||||||
|
"binlog_bytes_written": binlog_bytes_written,
|
||||||
|
"rows_fetched": rows_fetched,
|
||||||
|
"rows_updated": rows_updated,
|
||||||
|
"table_rows_read": table_rows_read,
|
||||||
|
"select_commands": select_commands,
|
||||||
|
"update_commands": update_commands,
|
||||||
|
"other_commands": other_commands,
|
||||||
|
"commit_transactions": commit_transactions,
|
||||||
|
"rollback_transactions": rollback_transactions,
|
||||||
|
"denied_connections": denied_connections,
|
||||||
|
"lost_connections": lost_connections,
|
||||||
|
"access_denied": access_denied,
|
||||||
|
"empty_queries": empty_queries,
|
||||||
|
"total_ssl_connections": total_ssl_connections,
|
||||||
|
}
|
||||||
|
|
||||||
|
acc.AddFields("mysql_user_stats", fields, tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -809,29 +908,6 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf.
|
|||||||
} else {
|
} else {
|
||||||
acc.AddFields("mysql_process_list", fields, tags)
|
acc.AddFields("mysql_process_list", fields, tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
// get count of connections from each user
|
|
||||||
conn_rows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for conn_rows.Next() {
|
|
||||||
var user string
|
|
||||||
var connections int64
|
|
||||||
|
|
||||||
err = conn_rows.Scan(&user, &connections)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tags := map[string]string{"server": servtag, "user": user}
|
|
||||||
fields := make(map[string]interface{})
|
|
||||||
|
|
||||||
fields["connections"] = connections
|
|
||||||
acc.AddFields("mysql_users", fields, tags)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -841,190 +917,77 @@ func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegr
|
|||||||
// run query
|
// run query
|
||||||
rows, err := db.Query(infoSchemaUserStatisticsQuery)
|
rows, err := db.Query(infoSchemaUserStatisticsQuery)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// disable collecting if table is not found (mysql specific error)
|
|
||||||
// (suppresses repeat errors)
|
|
||||||
if strings.Contains(err.Error(), "nknown table 'user_statistics'") {
|
|
||||||
m.GatherUserStatistics = false
|
|
||||||
}
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
var (
|
||||||
cols, err := columnsToLower(rows.Columns())
|
user string
|
||||||
if err != nil {
|
total_connections int64
|
||||||
return err
|
concurrent_connections int64
|
||||||
}
|
connected_time int64
|
||||||
|
busy_time int64
|
||||||
read, err := getColSlice(len(cols))
|
cpu_time int64
|
||||||
if err != nil {
|
bytes_received int64
|
||||||
return err
|
bytes_sent int64
|
||||||
}
|
binlog_bytes_written int64
|
||||||
|
rows_fetched int64
|
||||||
|
rows_updated int64
|
||||||
|
table_rows_read int64
|
||||||
|
select_commands int64
|
||||||
|
update_commands int64
|
||||||
|
other_commands int64
|
||||||
|
commit_transactions int64
|
||||||
|
rollback_transactions int64
|
||||||
|
denied_connections int64
|
||||||
|
lost_connections int64
|
||||||
|
access_denied int64
|
||||||
|
empty_queries int64
|
||||||
|
total_ssl_connections int64
|
||||||
|
count uint32
|
||||||
|
)
|
||||||
|
|
||||||
servtag := getDSNTag(serv)
|
servtag := getDSNTag(serv)
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
err = rows.Scan(read...)
|
err = rows.Scan(&user, &total_connections, &concurrent_connections,
|
||||||
|
&connected_time, &busy_time, &cpu_time, &bytes_received, &bytes_sent, &binlog_bytes_written,
|
||||||
|
&rows_fetched, &rows_updated, &table_rows_read, &select_commands, &update_commands, &other_commands,
|
||||||
|
&commit_transactions, &rollback_transactions, &denied_connections, &lost_connections, &access_denied,
|
||||||
|
&empty_queries, &total_ssl_connections, &count,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
tags := map[string]string{"server": servtag, "user": *read[0].(*string)}
|
tags := map[string]string{"server": servtag, "user": user}
|
||||||
fields := map[string]interface{}{}
|
fields := map[string]interface{}{
|
||||||
|
|
||||||
for i := range cols {
|
"total_connections": total_connections,
|
||||||
if i == 0 {
|
"concurrent_connections": concurrent_connections,
|
||||||
continue // skip "user"
|
"connected_time": connected_time,
|
||||||
}
|
"busy_time": busy_time,
|
||||||
switch v := read[i].(type) {
|
"cpu_time": cpu_time,
|
||||||
case *int64:
|
"bytes_received": bytes_received,
|
||||||
fields[cols[i]] = *v
|
"bytes_sent": bytes_sent,
|
||||||
case *float64:
|
"binlog_bytes_written": binlog_bytes_written,
|
||||||
fields[cols[i]] = *v
|
"rows_fetched": rows_fetched,
|
||||||
case *string:
|
"rows_updated": rows_updated,
|
||||||
fields[cols[i]] = *v
|
"table_rows_read": table_rows_read,
|
||||||
default:
|
"select_commands": select_commands,
|
||||||
return fmt.Errorf("Unknown column type - %T", v)
|
"update_commands": update_commands,
|
||||||
}
|
"other_commands": other_commands,
|
||||||
|
"commit_transactions": commit_transactions,
|
||||||
|
"rollback_transactions": rollback_transactions,
|
||||||
|
"denied_connections": denied_connections,
|
||||||
|
"lost_connections": lost_connections,
|
||||||
|
"access_denied": access_denied,
|
||||||
|
"empty_queries": empty_queries,
|
||||||
|
"total_ssl_connections": total_ssl_connections,
|
||||||
}
|
}
|
||||||
acc.AddFields("mysql_user_stats", fields, tags)
|
acc.AddFields("mysql_user_stats", fields, tags)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// columnsToLower converts selected column names to lowercase.
|
|
||||||
func columnsToLower(s []string, e error) ([]string, error) {
|
|
||||||
if e != nil {
|
|
||||||
return nil, e
|
|
||||||
}
|
|
||||||
d := make([]string, len(s))
|
|
||||||
|
|
||||||
for i := range s {
|
|
||||||
d[i] = strings.ToLower(s[i])
|
|
||||||
}
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getColSlice returns an in interface slice that can be used in the row.Scan().
|
|
||||||
func getColSlice(l int) ([]interface{}, error) {
|
|
||||||
// list of all possible column names
|
|
||||||
var (
|
|
||||||
user string
|
|
||||||
total_connections int64
|
|
||||||
concurrent_connections int64
|
|
||||||
connected_time int64
|
|
||||||
busy_time int64
|
|
||||||
cpu_time int64
|
|
||||||
bytes_received int64
|
|
||||||
bytes_sent int64
|
|
||||||
binlog_bytes_written int64
|
|
||||||
rows_read int64
|
|
||||||
rows_sent int64
|
|
||||||
rows_deleted int64
|
|
||||||
rows_inserted int64
|
|
||||||
rows_updated int64
|
|
||||||
select_commands int64
|
|
||||||
update_commands int64
|
|
||||||
other_commands int64
|
|
||||||
commit_transactions int64
|
|
||||||
rollback_transactions int64
|
|
||||||
denied_connections int64
|
|
||||||
lost_connections int64
|
|
||||||
access_denied int64
|
|
||||||
empty_queries int64
|
|
||||||
total_ssl_connections int64
|
|
||||||
max_statement_time_exceeded int64
|
|
||||||
// maria specific
|
|
||||||
fbusy_time float64
|
|
||||||
fcpu_time float64
|
|
||||||
// percona specific
|
|
||||||
rows_fetched int64
|
|
||||||
table_rows_read int64
|
|
||||||
)
|
|
||||||
|
|
||||||
switch l {
|
|
||||||
case 23: // maria5
|
|
||||||
return []interface{}{
|
|
||||||
&user,
|
|
||||||
&total_connections,
|
|
||||||
&concurrent_connections,
|
|
||||||
&connected_time,
|
|
||||||
&fbusy_time,
|
|
||||||
&fcpu_time,
|
|
||||||
&bytes_received,
|
|
||||||
&bytes_sent,
|
|
||||||
&binlog_bytes_written,
|
|
||||||
&rows_read,
|
|
||||||
&rows_sent,
|
|
||||||
&rows_deleted,
|
|
||||||
&rows_inserted,
|
|
||||||
&rows_updated,
|
|
||||||
&select_commands,
|
|
||||||
&update_commands,
|
|
||||||
&other_commands,
|
|
||||||
&commit_transactions,
|
|
||||||
&rollback_transactions,
|
|
||||||
&denied_connections,
|
|
||||||
&lost_connections,
|
|
||||||
&access_denied,
|
|
||||||
&empty_queries,
|
|
||||||
}, nil
|
|
||||||
case 25: // maria10
|
|
||||||
return []interface{}{
|
|
||||||
&user,
|
|
||||||
&total_connections,
|
|
||||||
&concurrent_connections,
|
|
||||||
&connected_time,
|
|
||||||
&fbusy_time,
|
|
||||||
&fcpu_time,
|
|
||||||
&bytes_received,
|
|
||||||
&bytes_sent,
|
|
||||||
&binlog_bytes_written,
|
|
||||||
&rows_read,
|
|
||||||
&rows_sent,
|
|
||||||
&rows_deleted,
|
|
||||||
&rows_inserted,
|
|
||||||
&rows_updated,
|
|
||||||
&select_commands,
|
|
||||||
&update_commands,
|
|
||||||
&other_commands,
|
|
||||||
&commit_transactions,
|
|
||||||
&rollback_transactions,
|
|
||||||
&denied_connections,
|
|
||||||
&lost_connections,
|
|
||||||
&access_denied,
|
|
||||||
&empty_queries,
|
|
||||||
&total_ssl_connections,
|
|
||||||
&max_statement_time_exceeded,
|
|
||||||
}, nil
|
|
||||||
case 22: // percona
|
|
||||||
return []interface{}{
|
|
||||||
&user,
|
|
||||||
&total_connections,
|
|
||||||
&concurrent_connections,
|
|
||||||
&connected_time,
|
|
||||||
&busy_time,
|
|
||||||
&cpu_time,
|
|
||||||
&bytes_received,
|
|
||||||
&bytes_sent,
|
|
||||||
&binlog_bytes_written,
|
|
||||||
&rows_fetched,
|
|
||||||
&rows_updated,
|
|
||||||
&table_rows_read,
|
|
||||||
&select_commands,
|
|
||||||
&update_commands,
|
|
||||||
&other_commands,
|
|
||||||
&commit_transactions,
|
|
||||||
&rollback_transactions,
|
|
||||||
&denied_connections,
|
|
||||||
&lost_connections,
|
|
||||||
&access_denied,
|
|
||||||
&empty_queries,
|
|
||||||
&total_ssl_connections,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("Not Supported - %d columns", l)
|
|
||||||
}
|
|
||||||
|
|
||||||
// gatherPerfTableIOWaits can be used to get total count and time
|
// gatherPerfTableIOWaits can be used to get total count and time
|
||||||
// of I/O wait event for each table and process
|
// of I/O wait event for each table and process
|
||||||
func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Accumulator) error {
|
func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Accumulator) error {
|
||||||
|
|||||||
@@ -49,7 +49,7 @@ func TestMysqlGetDSNTag(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"tcp(localhost)/",
|
"tcp(localhost)/",
|
||||||
"localhost",
|
"localhost:3306",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
|
"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
|
||||||
|
|||||||
@@ -17,16 +17,17 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid
|
|||||||
### Metrics
|
### Metrics
|
||||||
- measurement: `nvidia_smi`
|
- measurement: `nvidia_smi`
|
||||||
- tags
|
- tags
|
||||||
- `name` (type of GPU e.g. `GeForce GTX 170 Ti`)
|
- `name` (type of GPU e.g. `GeForce GTX 1070 Ti`)
|
||||||
- `compute_mode` (The compute mode of the GPU e.g. `Default`)
|
- `compute_mode` (The compute mode of the GPU e.g. `Default`)
|
||||||
- `index` (The port index where the GPU is connected to the motherboard e.g. `1`)
|
- `index` (The port index where the GPU is connected to the motherboard e.g. `1`)
|
||||||
- `pstate` (Overclocking state for the GPU e.g. `P0`)
|
- `pstate` (Overclocking state for the GPU e.g. `P0`)
|
||||||
- `uuid` (A unique identifier for the GPU e.g. `GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665`)
|
- `uuid` (A unique identifier for the GPU e.g. `GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665`)
|
||||||
- fields
|
- fields
|
||||||
- `fan_speed` (integer, percentage)
|
- `fan_speed` (integer, percentage)
|
||||||
- `memory_free` (integer, KB)
|
- `memory_free` (integer, MiB)
|
||||||
- `memory_used` (integer, KB)
|
- `memory_used` (integer, MiB)
|
||||||
- `memory_total` (integer, KB)
|
- `memory_total` (integer, MiB)
|
||||||
|
- `power_draw` (float, W)
|
||||||
- `temperature_gpu` (integer, degrees C)
|
- `temperature_gpu` (integer, degrees C)
|
||||||
- `utilization_gpu` (integer, percentage)
|
- `utilization_gpu` (integer, percentage)
|
||||||
- `utilization_memory` (integer, percentage)
|
- `utilization_memory` (integer, percentage)
|
||||||
|
|||||||
@@ -16,20 +16,21 @@ import (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
measurement = "nvidia_smi"
|
measurement = "nvidia_smi"
|
||||||
metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index"
|
metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw"
|
||||||
metricNames = [][]string{
|
metricNames = [][]string{
|
||||||
[]string{"fan_speed", "field"},
|
[]string{"fan_speed", "integer"},
|
||||||
[]string{"memory_total", "field"},
|
[]string{"memory_total", "integer"},
|
||||||
[]string{"memory_used", "field"},
|
[]string{"memory_used", "integer"},
|
||||||
[]string{"memory_free", "field"},
|
[]string{"memory_free", "integer"},
|
||||||
[]string{"pstate", "tag"},
|
[]string{"pstate", "tag"},
|
||||||
[]string{"temperature_gpu", "field"},
|
[]string{"temperature_gpu", "integer"},
|
||||||
[]string{"name", "tag"},
|
[]string{"name", "tag"},
|
||||||
[]string{"uuid", "tag"},
|
[]string{"uuid", "tag"},
|
||||||
[]string{"compute_mode", "tag"},
|
[]string{"compute_mode", "tag"},
|
||||||
[]string{"utilization_gpu", "field"},
|
[]string{"utilization_gpu", "integer"},
|
||||||
[]string{"utilization_memory", "field"},
|
[]string{"utilization_memory", "integer"},
|
||||||
[]string{"index", "tag"},
|
[]string{"index", "tag"},
|
||||||
|
[]string{"power_draw", "float"},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -127,7 +128,7 @@ func parseLine(line string) (map[string]string, map[string]interface{}, error) {
|
|||||||
for i, m := range metricNames {
|
for i, m := range metricNames {
|
||||||
col := strings.TrimSpace(met[i])
|
col := strings.TrimSpace(met[i])
|
||||||
|
|
||||||
// First handle the tags
|
// Handle the tags
|
||||||
if m[1] == "tag" {
|
if m[1] == "tag" {
|
||||||
tags[m[0]] = col
|
tags[m[0]] = col
|
||||||
continue
|
continue
|
||||||
@@ -137,12 +138,23 @@ func parseLine(line string) (map[string]string, map[string]interface{}, error) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Then parse the integers out of the fields
|
// Parse the integers
|
||||||
out, err := strconv.ParseInt(col, 10, 64)
|
if m[1] == "integer" {
|
||||||
if err != nil {
|
out, err := strconv.ParseInt(col, 10, 64)
|
||||||
return tags, fields, err
|
if err != nil {
|
||||||
|
return tags, fields, err
|
||||||
|
}
|
||||||
|
fields[m[0]] = out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the floats
|
||||||
|
if m[1] == "float" {
|
||||||
|
out, err := strconv.ParseFloat(col, 64)
|
||||||
|
if err != nil {
|
||||||
|
return tags, fields, err
|
||||||
|
}
|
||||||
|
fields[m[0]] = out
|
||||||
}
|
}
|
||||||
fields[m[0]] = out
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return the tags and fields
|
// Return the tags and fields
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestParseLineStandard(t *testing.T) {
|
func TestParseLineStandard(t *testing.T) {
|
||||||
line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1\n"
|
line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1, 0.0\n"
|
||||||
tags, fields, err := parseLine(line)
|
tags, fields, err := parseLine(line)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
@@ -37,7 +37,7 @@ func TestParseLineBad(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseLineNotSupported(t *testing.T) {
|
func TestParseLineNotSupported(t *testing.T) {
|
||||||
line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0\n"
|
line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0, 0.0\n"
|
||||||
_, fields, err := parseLine(line)
|
_, fields, err := parseLine(line)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, nil, fields["fan_speed"])
|
require.Equal(t, nil, fields["fan_speed"])
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -28,37 +28,36 @@ func getQueueDirectory() (string, error) {
|
|||||||
return strings.TrimSpace(string(qd)), nil
|
return strings.TrimSpace(string(qd)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) {
|
func qScan(path string) (int64, int64, int64, error) {
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
finfos, err := f.Readdir(-1)
|
||||||
|
f.Close()
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
var length, size int64
|
var length, size int64
|
||||||
var oldest time.Time
|
var oldest time.Time
|
||||||
err := filepath.Walk(path, func(_ string, finfo os.FileInfo, err error) error {
|
for _, finfo := range finfos {
|
||||||
if err != nil {
|
|
||||||
acc.AddError(fmt.Errorf("error scanning %s: %s", path, err))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if finfo.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
length++
|
length++
|
||||||
size += finfo.Size()
|
size += finfo.Size()
|
||||||
|
|
||||||
ctime := statCTime(finfo.Sys())
|
ctime := statCTime(finfo.Sys())
|
||||||
if ctime.IsZero() {
|
if ctime.IsZero() {
|
||||||
return nil
|
continue
|
||||||
}
|
}
|
||||||
if oldest.IsZero() || ctime.Before(oldest) {
|
if oldest.IsZero() || ctime.Before(oldest) {
|
||||||
oldest = ctime
|
oldest = ctime
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, 0, err
|
|
||||||
}
|
}
|
||||||
var age int64
|
var age int64
|
||||||
if !oldest.IsZero() {
|
if !oldest.IsZero() {
|
||||||
age = int64(time.Now().Sub(oldest) / time.Second)
|
age = int64(time.Now().Sub(oldest) / time.Second)
|
||||||
} else if length != 0 {
|
} else if len(finfos) != 0 {
|
||||||
// system doesn't support ctime
|
// system doesn't support ctime
|
||||||
age = -1
|
age = -1
|
||||||
}
|
}
|
||||||
@@ -78,8 +77,8 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} {
|
for _, q := range []string{"active", "hold", "incoming", "maildrop"} {
|
||||||
length, size, age, err := qScan(filepath.Join(p.QueueDirectory, q), acc)
|
length, size, age, err := qScan(path.Join(p.QueueDirectory, q))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
acc.AddError(fmt.Errorf("error scanning queue %s: %s", q, err))
|
acc.AddError(fmt.Errorf("error scanning queue %s: %s", q, err))
|
||||||
continue
|
continue
|
||||||
@@ -91,6 +90,30 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error {
|
|||||||
acc.AddFields("postfix_queue", fields, map[string]string{"queue": q})
|
acc.AddFields("postfix_queue", fields, map[string]string{"queue": q})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var dLength, dSize int64
|
||||||
|
dAge := int64(-1)
|
||||||
|
for _, q := range []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"} {
|
||||||
|
length, size, age, err := qScan(path.Join(p.QueueDirectory, "deferred", q))
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
// the directories are created on first use
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
acc.AddError(fmt.Errorf("error scanning queue deferred/%s: %s", q, err))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
dLength += length
|
||||||
|
dSize += size
|
||||||
|
if age > dAge {
|
||||||
|
dAge = age
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fields := map[string]interface{}{"length": dLength, "size": dSize}
|
||||||
|
if dAge != -1 {
|
||||||
|
fields["age"] = dAge
|
||||||
|
}
|
||||||
|
acc.AddFields("postfix_queue", fields, map[string]string{"queue": "deferred"})
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package postfix
|
|||||||
import (
|
import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdata/telegraf/testutil"
|
||||||
@@ -16,16 +16,19 @@ func TestGather(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer os.RemoveAll(td)
|
defer os.RemoveAll(td)
|
||||||
|
|
||||||
for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred/0/0", "deferred/F/F"} {
|
for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} {
|
||||||
require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755))
|
require.NoError(t, os.Mkdir(path.Join(td, q), 0755))
|
||||||
|
}
|
||||||
|
for _, q := range []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "F"} { // "E" deliberately left off
|
||||||
|
require.NoError(t, os.Mkdir(path.Join(td, "deferred", q), 0755))
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644))
|
require.NoError(t, ioutil.WriteFile(path.Join(td, "active", "01"), []byte("abc"), 0644))
|
||||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644))
|
require.NoError(t, ioutil.WriteFile(path.Join(td, "active", "02"), []byte("defg"), 0644))
|
||||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644))
|
require.NoError(t, ioutil.WriteFile(path.Join(td, "hold", "01"), []byte("abc"), 0644))
|
||||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644))
|
require.NoError(t, ioutil.WriteFile(path.Join(td, "incoming", "01"), []byte("abcd"), 0644))
|
||||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644))
|
require.NoError(t, ioutil.WriteFile(path.Join(td, "deferred", "0", "01"), []byte("abc"), 0644))
|
||||||
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644))
|
require.NoError(t, ioutil.WriteFile(path.Join(td, "deferred", "F", "F1"), []byte("abc"), 0644))
|
||||||
|
|
||||||
p := Postfix{
|
p := Postfix{
|
||||||
QueueDirectory: td,
|
QueueDirectory: td,
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
# Procstat Input Plugin
|
# Procstat Input Plugin
|
||||||
|
|
||||||
The procstat plugin can be used to monitor the system resource usage of one or more processes.
|
The procstat plugin can be used to monitor the system resource usage of one or more processes.
|
||||||
|
The procstat_lookup metric displays the query information,
|
||||||
|
specifically the number of PIDs returned on a search
|
||||||
|
|
||||||
Processes can be selected for monitoring using one of several methods:
|
Processes can be selected for monitoring using one of several methods:
|
||||||
- pidfile
|
- pidfile
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Implemention of PIDGatherer that execs pgrep to find processes
|
// Implemention of PIDGatherer that execs pgrep to find processes
|
||||||
@@ -62,6 +64,12 @@ func find(path string, args []string) ([]PID, error) {
|
|||||||
|
|
||||||
func run(path string, args []string) (string, error) {
|
func run(path string, args []string) (string, error) {
|
||||||
out, err := exec.Command(path, args...).Output()
|
out, err := exec.Command(path, args...).Output()
|
||||||
|
|
||||||
|
//if exit code 1, ie no processes found, do not return error
|
||||||
|
if i, _ := internal.ExitStatus(err); i == 1 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("Error running %s: %s", path, err)
|
return "", fmt.Errorf("Error running %s: %s", path, err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -97,7 +97,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
|
|||||||
p.createProcess = defaultProcess
|
p.createProcess = defaultProcess
|
||||||
}
|
}
|
||||||
|
|
||||||
procs, err := p.updateProcesses(p.procs)
|
procs, err := p.updateProcesses(acc, p.procs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
|
acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
|
||||||
p.Exe, p.PidFile, p.Pattern, p.User, err.Error()))
|
p.Exe, p.PidFile, p.Pattern, p.User, err.Error()))
|
||||||
@@ -230,8 +230,8 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update monitored Processes
|
// Update monitored Processes
|
||||||
func (p *Procstat) updateProcesses(prevInfo map[PID]Process) (map[PID]Process, error) {
|
func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Process) (map[PID]Process, error) {
|
||||||
pids, tags, err := p.findPids()
|
pids, tags, err := p.findPids(acc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -281,9 +281,9 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get matching PIDs and their initial tags
|
// Get matching PIDs and their initial tags
|
||||||
func (p *Procstat) findPids() ([]PID, map[string]string, error) {
|
func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) {
|
||||||
var pids []PID
|
var pids []PID
|
||||||
var tags map[string]string
|
tags := make(map[string]string)
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
f, err := p.getPIDFinder()
|
f, err := p.getPIDFinder()
|
||||||
@@ -313,7 +313,18 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) {
|
|||||||
err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, or cgroup must be specified")
|
err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, or cgroup must be specified")
|
||||||
}
|
}
|
||||||
|
|
||||||
return pids, tags, err
|
rTags := make(map[string]string)
|
||||||
|
for k, v := range tags {
|
||||||
|
rTags[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
//adds a metric with info on the pgrep query
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags["pid_finder"] = p.PidFinder
|
||||||
|
fields["pid_count"] = len(pids)
|
||||||
|
acc.AddFields("procstat_lookup", fields, tags)
|
||||||
|
|
||||||
|
return pids, rTags, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// execCommand is so tests can mock out exec.Command usage.
|
// execCommand is so tests can mock out exec.Command usage.
|
||||||
|
|||||||
@@ -343,7 +343,8 @@ func TestGather_systemdUnitPIDs(t *testing.T) {
|
|||||||
createPIDFinder: pidFinder([]PID{}, nil),
|
createPIDFinder: pidFinder([]PID{}, nil),
|
||||||
SystemdUnit: "TestGather_systemdUnitPIDs",
|
SystemdUnit: "TestGather_systemdUnitPIDs",
|
||||||
}
|
}
|
||||||
pids, tags, err := p.findPids()
|
var acc testutil.Accumulator
|
||||||
|
pids, tags, err := p.findPids(&acc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []PID{11408}, pids)
|
assert.Equal(t, []PID{11408}, pids)
|
||||||
assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])
|
assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"])
|
||||||
@@ -364,8 +365,20 @@ func TestGather_cgroupPIDs(t *testing.T) {
|
|||||||
createPIDFinder: pidFinder([]PID{}, nil),
|
createPIDFinder: pidFinder([]PID{}, nil),
|
||||||
CGroup: td,
|
CGroup: td,
|
||||||
}
|
}
|
||||||
pids, tags, err := p.findPids()
|
var acc testutil.Accumulator
|
||||||
|
pids, tags, err := p.findPids(&acc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []PID{1234, 5678}, pids)
|
assert.Equal(t, []PID{1234, 5678}, pids)
|
||||||
assert.Equal(t, td, tags["cgroup"])
|
assert.Equal(t, td, tags["cgroup"])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestProcstatLookupMetric(t *testing.T) {
|
||||||
|
p := Procstat{
|
||||||
|
createPIDFinder: pidFinder([]PID{543}, nil),
|
||||||
|
Exe: "-Gsys",
|
||||||
|
}
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := acc.GatherError(p.Gather)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, len(p.procs)+1, len(acc.Metrics))
|
||||||
|
}
|
||||||
|
|||||||
117
plugins/inputs/reader/reader.go
Normal file
117
plugins/inputs/reader/reader.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
package reader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/globpath"
|
||||||
|
"github.com/influxdata/telegraf/plugins/parsers"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Reader struct {
|
||||||
|
Filepaths []string `toml:"files"`
|
||||||
|
FromBeginning bool
|
||||||
|
DataFormat string `toml:"data_format"`
|
||||||
|
ParserConfig parsers.Config
|
||||||
|
Parser parsers.Parser
|
||||||
|
Tags []string
|
||||||
|
|
||||||
|
Filenames []string
|
||||||
|
|
||||||
|
//for grok parser
|
||||||
|
Patterns []string
|
||||||
|
namedPatterns []string
|
||||||
|
CustomPatterns string
|
||||||
|
CustomPatternFiles []string
|
||||||
|
}
|
||||||
|
|
||||||
|
const sampleConfig = `## Files to parse.
|
||||||
|
## These accept standard unix glob matching rules, but with the addition of
|
||||||
|
## ** as a "super asterisk". ie:
|
||||||
|
## /var/log/**.log -> recursively find all .log files in /var/log
|
||||||
|
## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
||||||
|
## /var/log/apache.log -> only tail the apache log file
|
||||||
|
files = ["/var/log/apache/access.log"]
|
||||||
|
|
||||||
|
## The dataformat to be read from files
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = ""`
|
||||||
|
|
||||||
|
// SampleConfig returns the default configuration of the Input
|
||||||
|
func (r *Reader) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) Description() string {
|
||||||
|
return "reload and gather from file[s] on telegraf's interval"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) Gather(acc telegraf.Accumulator) error {
|
||||||
|
r.refreshFilePaths()
|
||||||
|
for _, k := range r.Filenames {
|
||||||
|
metrics, err := r.readMetric(k)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range metrics {
|
||||||
|
acc.AddFields(m.Name(), m.Fields(), m.Tags())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) compileParser() {
|
||||||
|
if r.DataFormat == "" {
|
||||||
|
log.Printf("E! No data_format specified")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.ParserConfig = parsers.Config{
|
||||||
|
DataFormat: r.DataFormat,
|
||||||
|
TagKeys: r.Tags,
|
||||||
|
|
||||||
|
//grok settings
|
||||||
|
Patterns: r.Patterns,
|
||||||
|
NamedPatterns: r.namedPatterns,
|
||||||
|
CustomPatterns: r.CustomPatterns,
|
||||||
|
CustomPatternFiles: r.CustomPatternFiles,
|
||||||
|
}
|
||||||
|
nParser, err := parsers.NewParser(&r.ParserConfig)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error building parser: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Parser = nParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) refreshFilePaths() {
|
||||||
|
var allFiles []string
|
||||||
|
for _, filepath := range r.Filepaths {
|
||||||
|
g, err := globpath.Compile(filepath)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
files := g.Match()
|
||||||
|
|
||||||
|
for k := range files {
|
||||||
|
allFiles = append(allFiles, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Filenames = allFiles
|
||||||
|
}
|
||||||
|
|
||||||
|
//requires that Parser has been compiled
|
||||||
|
func (r *Reader) readMetric(filename string) ([]telegraf.Metric, error) {
|
||||||
|
fileContents, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! File could not be opened: %v", filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.Parser.Parse(fileContents)
|
||||||
|
|
||||||
|
}
|
||||||
58
plugins/inputs/reader/reader_test.go
Normal file
58
plugins/inputs/reader/reader_test.go
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
package reader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRefreshFilePaths(t *testing.T) {
|
||||||
|
testDir := getPluginDir()
|
||||||
|
r := Reader{
|
||||||
|
Filepaths: []string{testDir + "/logparser/grok/testdata/**.log"},
|
||||||
|
}
|
||||||
|
|
||||||
|
r.refreshFilePaths()
|
||||||
|
//log.Printf("filenames: %v", filenames)
|
||||||
|
assert.Equal(t, len(r.Filenames), 2)
|
||||||
|
}
|
||||||
|
func TestJSONParserCompile(t *testing.T) {
|
||||||
|
testDir := getPluginDir()
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
r := Reader{
|
||||||
|
Filepaths: []string{testDir + "/reader/testfiles/json_a.log"},
|
||||||
|
DataFormat: "json",
|
||||||
|
Tags: []string{"parent_ignored_child"},
|
||||||
|
}
|
||||||
|
r.compileParser()
|
||||||
|
r.Gather(&acc)
|
||||||
|
log.Printf("acc: %v", acc.Metrics[0].Tags)
|
||||||
|
assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags)
|
||||||
|
assert.Equal(t, 5, len(acc.Metrics[0].Fields))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGrokParser(t *testing.T) {
|
||||||
|
testDir := getPluginDir()
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
r := Reader{
|
||||||
|
Filepaths: []string{testDir + "/reader/testfiles/grok_a.log"},
|
||||||
|
DataFormat: "grok",
|
||||||
|
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
|
||||||
|
}
|
||||||
|
|
||||||
|
r.compileParser()
|
||||||
|
err := r.Gather(&acc)
|
||||||
|
log.Printf("err: %v", err)
|
||||||
|
log.Printf("metric[0]_tags: %v, metric[0]_fields: %v", acc.Metrics[0].Tags, acc.Metrics[0].Fields)
|
||||||
|
log.Printf("metric[1]_tags: %v, metric[1]_fields: %v", acc.Metrics[1].Tags, acc.Metrics[1].Fields)
|
||||||
|
assert.Equal(t, 2, len(acc.Metrics))
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPluginDir() string {
|
||||||
|
_, filename, _, _ := runtime.Caller(1)
|
||||||
|
return strings.Replace(filename, "/reader/reader_test.go", "", 1)
|
||||||
|
}
|
||||||
2
plugins/inputs/reader/testfiles/grok_a.log
Normal file
2
plugins/inputs/reader/testfiles/grok_a.log
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
|
||||||
|
128.0.0.1 user-identifier tony [10/Oct/2000:13:55:36 -0800] "GET /apache_pb.gif HTTP/1.0" 300 45
|
||||||
14
plugins/inputs/reader/testfiles/json_a.log
Normal file
14
plugins/inputs/reader/testfiles/json_a.log
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"parent": {
|
||||||
|
"child": 3.0,
|
||||||
|
"ignored_child": "hi"
|
||||||
|
},
|
||||||
|
"ignored_null": null,
|
||||||
|
"integer": 4,
|
||||||
|
"list": [3, 4],
|
||||||
|
"ignored_parent": {
|
||||||
|
"another_ignored_null": null,
|
||||||
|
"ignored_string": "hello, world!"
|
||||||
|
},
|
||||||
|
"another_list": [4]
|
||||||
|
}
|
||||||
@@ -14,6 +14,13 @@
|
|||||||
## If no servers are specified, then localhost is used as the host.
|
## If no servers are specified, then localhost is used as the host.
|
||||||
## If no port is specified, 6379 is used
|
## If no port is specified, 6379 is used
|
||||||
servers = ["tcp://localhost:6379"]
|
servers = ["tcp://localhost:6379"]
|
||||||
|
|
||||||
|
## Optional TLS Config
|
||||||
|
# tls_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# tls_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# tls_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use TLS but skip chain & host verification
|
||||||
|
# insecure_skip_verify = true
|
||||||
```
|
```
|
||||||
|
|
||||||
### Measurements & Fields:
|
### Measurements & Fields:
|
||||||
|
|||||||
@@ -13,11 +13,13 @@ import (
|
|||||||
|
|
||||||
"github.com/go-redis/redis"
|
"github.com/go-redis/redis"
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/tls"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Redis struct {
|
type Redis struct {
|
||||||
Servers []string
|
Servers []string
|
||||||
|
tls.ClientConfig
|
||||||
|
|
||||||
clients []Client
|
clients []Client
|
||||||
initialized bool
|
initialized bool
|
||||||
@@ -56,6 +58,13 @@ var sampleConfig = `
|
|||||||
## If no servers are specified, then localhost is used as the host.
|
## If no servers are specified, then localhost is used as the host.
|
||||||
## If no port is specified, 6379 is used
|
## If no port is specified, 6379 is used
|
||||||
servers = ["tcp://localhost:6379"]
|
servers = ["tcp://localhost:6379"]
|
||||||
|
|
||||||
|
## Optional TLS Config
|
||||||
|
# tls_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# tls_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# tls_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use TLS but skip chain & host verification
|
||||||
|
# insecure_skip_verify = true
|
||||||
`
|
`
|
||||||
|
|
||||||
func (r *Redis) SampleConfig() string {
|
func (r *Redis) SampleConfig() string {
|
||||||
@@ -109,12 +118,18 @@ func (r *Redis) init(acc telegraf.Accumulator) error {
|
|||||||
address = u.Host
|
address = u.Host
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tlsConfig, err := r.ClientConfig.TLSConfig()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
client := redis.NewClient(
|
client := redis.NewClient(
|
||||||
&redis.Options{
|
&redis.Options{
|
||||||
Addr: address,
|
Addr: address,
|
||||||
Password: password,
|
Password: password,
|
||||||
Network: u.Scheme,
|
Network: u.Scheme,
|
||||||
PoolSize: 1,
|
PoolSize: 1,
|
||||||
|
TLSConfig: tlsConfig,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ The [solr](http://lucene.apache.org/solr/) plugin collects stats via the
|
|||||||
|
|
||||||
More about [performance statistics](https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference)
|
More about [performance statistics](https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference)
|
||||||
|
|
||||||
Tested from 3.5 to 6.*
|
Tested from 3.5 to 7.*
|
||||||
|
|
||||||
### Configuration:
|
### Configuration:
|
||||||
|
|
||||||
|
|||||||
@@ -113,20 +113,7 @@ type Hitratio interface{}
|
|||||||
// Cache is an exported type that
|
// Cache is an exported type that
|
||||||
// contains cache metrics
|
// contains cache metrics
|
||||||
type Cache struct {
|
type Cache struct {
|
||||||
Stats struct {
|
Stats map[string]interface{} `json:"stats"`
|
||||||
CumulativeEvictions int64 `json:"cumulative_evictions"`
|
|
||||||
CumulativeHitratio Hitratio `json:"cumulative_hitratio"`
|
|
||||||
CumulativeHits int64 `json:"cumulative_hits"`
|
|
||||||
CumulativeInserts int64 `json:"cumulative_inserts"`
|
|
||||||
CumulativeLookups int64 `json:"cumulative_lookups"`
|
|
||||||
Evictions int64 `json:"evictions"`
|
|
||||||
Hitratio Hitratio `json:"hitratio"`
|
|
||||||
Hits int64 `json:"hits"`
|
|
||||||
Inserts int64 `json:"inserts"`
|
|
||||||
Lookups int64 `json:"lookups"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
WarmupTime int64 `json:"warmupTime"`
|
|
||||||
} `json:"stats"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSolr return a new instance of Solr
|
// NewSolr return a new instance of Solr
|
||||||
@@ -424,21 +411,30 @@ func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBe
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for name, metrics := range cacheMetrics {
|
for name, metrics := range cacheMetrics {
|
||||||
cumulativeHits := getFloat(metrics.Stats.CumulativeHitratio)
|
coreFields := make(map[string]interface{})
|
||||||
hitratio := getFloat(metrics.Stats.Hitratio)
|
for key, value := range metrics.Stats {
|
||||||
coreFields := map[string]interface{}{
|
splitKey := strings.Split(key, ".")
|
||||||
"cumulative_evictions": metrics.Stats.CumulativeEvictions,
|
newKey := splitKey[len(splitKey)-1]
|
||||||
"cumulative_hitratio": cumulativeHits,
|
switch newKey {
|
||||||
"cumulative_hits": metrics.Stats.CumulativeHits,
|
case "cumulative_evictions",
|
||||||
"cumulative_inserts": metrics.Stats.CumulativeInserts,
|
"cumulative_hits",
|
||||||
"cumulative_lookups": metrics.Stats.CumulativeLookups,
|
"cumulative_inserts",
|
||||||
"evictions": metrics.Stats.Evictions,
|
"cumulative_lookups",
|
||||||
"hitratio": hitratio,
|
"eviction",
|
||||||
"hits": metrics.Stats.Hits,
|
"hits",
|
||||||
"inserts": metrics.Stats.Inserts,
|
"inserts",
|
||||||
"lookups": metrics.Stats.Lookups,
|
"lookups",
|
||||||
"size": metrics.Stats.Size,
|
"size",
|
||||||
"warmup_time": metrics.Stats.WarmupTime,
|
"evictions":
|
||||||
|
coreFields[newKey] = getInt(value)
|
||||||
|
case "hitratio",
|
||||||
|
"cumulative_hitratio":
|
||||||
|
coreFields[newKey] = getFloat(value)
|
||||||
|
case "warmupTime":
|
||||||
|
coreFields["warmup_time"] = getInt(value)
|
||||||
|
default:
|
||||||
|
continue
|
||||||
|
}
|
||||||
}
|
}
|
||||||
acc.AddFields(
|
acc.AddFields(
|
||||||
"solr_cache",
|
"solr_cache",
|
||||||
|
|||||||
@@ -43,6 +43,17 @@ func TestGatherStats(t *testing.T) {
|
|||||||
map[string]string{"core": "main", "handler": "filterCache"})
|
map[string]string{"core": "main", "handler": "filterCache"})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSolr7MbeansStats(t *testing.T) {
|
||||||
|
ts := createMockSolr7Server()
|
||||||
|
solr := NewSolr()
|
||||||
|
solr.Servers = []string{ts.URL}
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
require.NoError(t, solr.Gather(&acc))
|
||||||
|
acc.AssertContainsTaggedFields(t, "solr_cache",
|
||||||
|
solr7CacheExpected,
|
||||||
|
map[string]string{"core": "main", "handler": "documentCache"})
|
||||||
|
}
|
||||||
|
|
||||||
func TestSolr3GatherStats(t *testing.T) {
|
func TestSolr3GatherStats(t *testing.T) {
|
||||||
ts := createMockSolr3Server()
|
ts := createMockSolr3Server()
|
||||||
solr := NewSolr()
|
solr := NewSolr()
|
||||||
@@ -150,3 +161,18 @@ func createMockSolr3Server() *httptest.Server {
|
|||||||
}
|
}
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func createMockSolr7Server() *httptest.Server {
|
||||||
|
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprintln(w, statusResponse)
|
||||||
|
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprintln(w, mBeansSolr7Response)
|
||||||
|
} else {
|
||||||
|
w.WriteHeader(http.StatusNotFound)
|
||||||
|
fmt.Fprintln(w, "nope")
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|||||||
60
plugins/inputs/solr/testdata7_test.go
Normal file
60
plugins/inputs/solr/testdata7_test.go
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
package solr
|
||||||
|
|
||||||
|
const mBeansSolr7Response = `
|
||||||
|
{
|
||||||
|
"responseHeader":{
|
||||||
|
"status":0,
|
||||||
|
"QTime":2
|
||||||
|
},
|
||||||
|
"solr-mbeans":[
|
||||||
|
"CORE",
|
||||||
|
{
|
||||||
|
|
||||||
|
},
|
||||||
|
"QUERYHANDLER",
|
||||||
|
{
|
||||||
|
|
||||||
|
},
|
||||||
|
"UPDATEHANDLER",
|
||||||
|
{
|
||||||
|
|
||||||
|
},
|
||||||
|
"CACHE",
|
||||||
|
{
|
||||||
|
"documentCache":{
|
||||||
|
"class":"org.apache.solr.search.LRUCache",
|
||||||
|
"description":"LRU Cache(maxSize=16384, initialSize=4096)",
|
||||||
|
"stats":{
|
||||||
|
"CACHE.searcher.documentCache.evictions": 141485,
|
||||||
|
"CACHE.searcher.documentCache.cumulative_lookups": 265132,
|
||||||
|
"CACHE.searcher.documentCache.hitratio": 0.44,
|
||||||
|
"CACHE.searcher.documentCache.size": 8192,
|
||||||
|
"CACHE.searcher.documentCache.cumulative_hitratio": 0.42,
|
||||||
|
"CACHE.searcher.documentCache.lookups": 1234,
|
||||||
|
"CACHE.searcher.documentCache.warmupTime": 1,
|
||||||
|
"CACHE.searcher.documentCache.inserts": 987,
|
||||||
|
"CACHE.searcher.documentCache.hits": 1111,
|
||||||
|
"CACHE.searcher.documentCache.cumulative_hits": 115364,
|
||||||
|
"CACHE.searcher.documentCache.cumulative_inserts": 149768,
|
||||||
|
"CACHE.searcher.documentCache.cumulative_evictions": 141486
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
var solr7CacheExpected = map[string]interface{}{
|
||||||
|
"evictions": int64(141485),
|
||||||
|
"cumulative_evictions": int64(141486),
|
||||||
|
"cumulative_hitratio": float64(0.42),
|
||||||
|
"cumulative_hits": int64(115364),
|
||||||
|
"cumulative_inserts": int64(149768),
|
||||||
|
"cumulative_lookups": int64(265132),
|
||||||
|
"hitratio": float64(0.44),
|
||||||
|
"hits": int64(1111),
|
||||||
|
"inserts": int64(987),
|
||||||
|
"lookups": int64(1234),
|
||||||
|
"size": int64(8192),
|
||||||
|
"warmup_time": int64(1),
|
||||||
|
}
|
||||||
@@ -16,7 +16,7 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details.
|
|||||||
# mount_points = ["/"]
|
# mount_points = ["/"]
|
||||||
|
|
||||||
## Ignore mount points by filesystem type.
|
## Ignore mount points by filesystem type.
|
||||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Docker container
|
#### Docker container
|
||||||
|
|||||||
@@ -1,30 +0,0 @@
|
|||||||
# Swap Input Plugin
|
|
||||||
|
|
||||||
The swap plugin collects system swap metrics.
|
|
||||||
|
|
||||||
For a more information on what swap memory is, read [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space).
|
|
||||||
|
|
||||||
### Configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# Read metrics about swap memory usage
|
|
||||||
[[inputs.swap]]
|
|
||||||
# no configuration
|
|
||||||
```
|
|
||||||
|
|
||||||
### Metrics:
|
|
||||||
|
|
||||||
- swap
|
|
||||||
- fields:
|
|
||||||
- free (int)
|
|
||||||
- total (int)
|
|
||||||
- used (int)
|
|
||||||
- used_percent (float)
|
|
||||||
- in (int)
|
|
||||||
- out (int)
|
|
||||||
|
|
||||||
### Example Output:
|
|
||||||
|
|
||||||
```
|
|
||||||
swap total=20855394304i,used_percent=45.43883523785713,used=9476448256i,free=1715331072i 1511894782000000000
|
|
||||||
```
|
|
||||||
@@ -28,7 +28,7 @@ var diskSampleConfig = `
|
|||||||
# mount_points = ["/"]
|
# mount_points = ["/"]
|
||||||
|
|
||||||
## Ignore mount points by filesystem type.
|
## Ignore mount points by filesystem type.
|
||||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
|
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||||
`
|
`
|
||||||
|
|
||||||
func (_ *DiskStats) SampleConfig() string {
|
func (_ *DiskStats) SampleConfig() string {
|
||||||
|
|||||||
@@ -42,9 +42,45 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SwapStats struct {
|
||||||
|
ps PS
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *SwapStats) Description() string {
|
||||||
|
return "Read metrics about swap memory usage"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_ *SwapStats) SampleConfig() string { return "" }
|
||||||
|
|
||||||
|
func (s *SwapStats) Gather(acc telegraf.Accumulator) error {
|
||||||
|
swap, err := s.ps.SwapStat()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error getting swap memory info: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldsG := map[string]interface{}{
|
||||||
|
"total": swap.Total,
|
||||||
|
"used": swap.Used,
|
||||||
|
"free": swap.Free,
|
||||||
|
"used_percent": swap.UsedPercent,
|
||||||
|
}
|
||||||
|
fieldsC := map[string]interface{}{
|
||||||
|
"in": swap.Sin,
|
||||||
|
"out": swap.Sout,
|
||||||
|
}
|
||||||
|
acc.AddGauge("swap", fieldsG, nil)
|
||||||
|
acc.AddCounter("swap", fieldsC, nil)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
ps := newSystemPS()
|
ps := newSystemPS()
|
||||||
inputs.Add("mem", func() telegraf.Input {
|
inputs.Add("mem", func() telegraf.Input {
|
||||||
return &MemStats{ps: ps}
|
return &MemStats{ps: ps}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
inputs.Add("swap", func() telegraf.Input {
|
||||||
|
return &SwapStats{ps: ps}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -30,6 +30,17 @@ func TestMemStats(t *testing.T) {
|
|||||||
|
|
||||||
mps.On("VMStat").Return(vms, nil)
|
mps.On("VMStat").Return(vms, nil)
|
||||||
|
|
||||||
|
sms := &mem.SwapMemoryStat{
|
||||||
|
Total: 8123,
|
||||||
|
Used: 1232,
|
||||||
|
Free: 6412,
|
||||||
|
UsedPercent: 12.2,
|
||||||
|
Sin: 7,
|
||||||
|
Sout: 830,
|
||||||
|
}
|
||||||
|
|
||||||
|
mps.On("SwapStat").Return(sms, nil)
|
||||||
|
|
||||||
err = (&MemStats{&mps}).Gather(&acc)
|
err = (&MemStats{&mps}).Gather(&acc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
@@ -50,4 +61,15 @@ func TestMemStats(t *testing.T) {
|
|||||||
acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string))
|
acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string))
|
||||||
|
|
||||||
acc.Metrics = nil
|
acc.Metrics = nil
|
||||||
|
|
||||||
|
err = (&SwapStats{&mps}).Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
swapfields := map[string]interface{}{
|
||||||
|
"total": uint64(8123),
|
||||||
|
"used": uint64(1232),
|
||||||
|
"used_percent": float64(12.2),
|
||||||
|
"free": uint64(6412),
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,47 +0,0 @@
|
|||||||
package system
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type SwapStats struct {
|
|
||||||
ps PS
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_ *SwapStats) Description() string {
|
|
||||||
return "Read metrics about swap memory usage"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (_ *SwapStats) SampleConfig() string { return "" }
|
|
||||||
|
|
||||||
func (s *SwapStats) Gather(acc telegraf.Accumulator) error {
|
|
||||||
swap, err := s.ps.SwapStat()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error getting swap memory info: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldsG := map[string]interface{}{
|
|
||||||
"total": swap.Total,
|
|
||||||
"used": swap.Used,
|
|
||||||
"free": swap.Free,
|
|
||||||
"used_percent": swap.UsedPercent,
|
|
||||||
}
|
|
||||||
fieldsC := map[string]interface{}{
|
|
||||||
"in": swap.Sin,
|
|
||||||
"out": swap.Sout,
|
|
||||||
}
|
|
||||||
acc.AddGauge("swap", fieldsG, nil)
|
|
||||||
acc.AddCounter("swap", fieldsC, nil)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
ps := newSystemPS()
|
|
||||||
inputs.Add("swap", func() telegraf.Input {
|
|
||||||
return &SwapStats{ps: ps}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
package system
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
"github.com/shirou/gopsutil/mem"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSwapStats(t *testing.T) {
|
|
||||||
var mps MockPS
|
|
||||||
var err error
|
|
||||||
defer mps.AssertExpectations(t)
|
|
||||||
var acc testutil.Accumulator
|
|
||||||
|
|
||||||
sms := &mem.SwapMemoryStat{
|
|
||||||
Total: 8123,
|
|
||||||
Used: 1232,
|
|
||||||
Free: 6412,
|
|
||||||
UsedPercent: 12.2,
|
|
||||||
Sin: 7,
|
|
||||||
Sout: 830,
|
|
||||||
}
|
|
||||||
|
|
||||||
mps.On("SwapStat").Return(sms, nil)
|
|
||||||
|
|
||||||
err = (&SwapStats{&mps}).Gather(&acc)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
swapfields := map[string]interface{}{
|
|
||||||
"total": uint64(8123),
|
|
||||||
"used": uint64(1232),
|
|
||||||
"used_percent": float64(12.2),
|
|
||||||
"free": uint64(6412),
|
|
||||||
}
|
|
||||||
acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string))
|
|
||||||
}
|
|
||||||
71
plugins/inputs/tengine/README.md
Normal file
71
plugins/inputs/tengine/README.md
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Tengine Input Plugin
|
||||||
|
|
||||||
|
The tengine plugin gathers metrics from the
|
||||||
|
[Tengine Web Server](http://tengine.taobao.org/) via the
|
||||||
|
[reqstat](http://tengine.taobao.org/document/http_reqstat.html) module.
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Read Tengine's basic status information (ngx_http_reqstat_module)
|
||||||
|
[[inputs.tengine]]
|
||||||
|
## An array of Tengine reqstat module URI to gather stats.
|
||||||
|
urls = ["http://127.0.0.1/us"]
|
||||||
|
|
||||||
|
## HTTP response timeout (default: 5s)
|
||||||
|
# response_timeout = "5s"
|
||||||
|
|
||||||
|
## Optional TLS Config
|
||||||
|
# tls_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# tls_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# tls_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use TLS but skip chain & host verification
|
||||||
|
# insecure_skip_verify = false
|
||||||
|
```
|
||||||
|
|
||||||
|
### Metrics:
|
||||||
|
|
||||||
|
- Measurement
|
||||||
|
- tags:
|
||||||
|
- port
|
||||||
|
- server
|
||||||
|
- server_name
|
||||||
|
- fields:
|
||||||
|
- bytes_in (integer, total number of bytes received from client)
|
||||||
|
- bytes_out (integer, total number of bytes sent to client)
|
||||||
|
- conn_total (integer, total number of accepted connections)
|
||||||
|
- req_total (integer, total number of processed requests)
|
||||||
|
- http_2xx (integer, total number of 2xx requests)
|
||||||
|
- http_3xx (integer, total number of 3xx requests)
|
||||||
|
- http_4xx (integer, total number of 4xx requests)
|
||||||
|
- http_5xx (integer, total number of 5xx requests)
|
||||||
|
- http_other_status (integer, total number of other requests)
|
||||||
|
- rt (integer, accumulation or rt)
|
||||||
|
- ups_req (integer, total number of requests calling for upstream)
|
||||||
|
- ups_rt (integer, accumulation or upstream rt)
|
||||||
|
- ups_tries (integer, total number of times calling for upstream)
|
||||||
|
- http_200 (integer, total number of 200 requests)
|
||||||
|
- http_206 (integer, total number of 206 requests)
|
||||||
|
- http_302 (integer, total number of 302 requests)
|
||||||
|
- http_304 (integer, total number of 304 requests)
|
||||||
|
- http_403 (integer, total number of 403 requests)
|
||||||
|
- http_404 (integer, total number of 404 requests)
|
||||||
|
- http_416 (integer, total number of 416 requests)
|
||||||
|
- http_499 (integer, total number of 499 requests)
|
||||||
|
- http_500 (integer, total number of 500 requests)
|
||||||
|
- http_502 (integer, total number of 502 requests)
|
||||||
|
- http_503 (integer, total number of 503 requests)
|
||||||
|
- http_504 (integer, total number of 504 requests)
|
||||||
|
- http_508 (integer, total number of 508 requests)
|
||||||
|
- http_other_detail_status (integer, total number of requests of other status codes*http_ups_4xx total number of requests of upstream 4xx)
|
||||||
|
- http_ups_5xx (integer, total number of requests of upstream 5xx)
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
|
||||||
|
```
|
||||||
|
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=localhost bytes_in=9129i,bytes_out=56334i,conn_total=14i,http_200=90i,http_206=0i,http_2xx=90i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=90i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||||
|
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=28.79.190.35.bc.googleusercontent.com bytes_in=1500i,bytes_out=3009i,conn_total=4i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=1i,http_416=0i,http_499=0i,http_4xx=3i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=4i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||||
|
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=www.google.com bytes_in=372i,bytes_out=786i,conn_total=1i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=1i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||||
|
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=35.190.79.28 bytes_in=4433i,bytes_out=10259i,conn_total=5i,http_200=3i,http_206=0i,http_2xx=3i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=11i,http_416=0i,http_499=0i,http_4xx=11i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=14i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
|
||||||
|
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=tenka-prod-api.txwy.tw bytes_in=3014397400i,bytes_out=14279992835i,conn_total=36844i,http_200=3177339i,http_206=0i,http_2xx=3177339i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=123i,http_416=0i,http_499=0i,http_4xx=123i,http_500=17214i,http_502=4453i,http_503=80i,http_504=0i,http_508=0i,http_5xx=21747i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=123i,http_ups_5xx=21747i,req_total=3199209i,rt=245874536i,ups_req=2685076i,ups_rt=245858217i,ups_tries=2685076i 1526546308000000000
|
||||||
|
```
|
||||||
338
plugins/inputs/tengine/tengine.go
Normal file
338
plugins/inputs/tengine/tengine.go
Normal file
@@ -0,0 +1,338 @@
|
|||||||
|
package tengine
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
"github.com/influxdata/telegraf/internal/tls"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Tengine struct {
|
||||||
|
Urls []string
|
||||||
|
ResponseTimeout internal.Duration
|
||||||
|
tls.ClientConfig
|
||||||
|
|
||||||
|
client *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
# An array of Tengine reqstat module URI to gather stats.
|
||||||
|
urls = ["http://127.0.0.1/us"]
|
||||||
|
|
||||||
|
# HTTP response timeout (default: 5s)
|
||||||
|
# response_timeout = "5s"
|
||||||
|
|
||||||
|
## Optional TLS Config
|
||||||
|
# tls_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# tls_cert = "/etc/telegraf/cert.cer"
|
||||||
|
# tls_key = "/etc/telegraf/key.key"
|
||||||
|
## Use TLS but skip chain & host verification
|
||||||
|
# insecure_skip_verify = false
|
||||||
|
`
|
||||||
|
|
||||||
|
// SampleConfig returns the example configuration snippet shown by
// `telegraf config` for this input plugin.
func (n *Tengine) SampleConfig() string {
	return sampleConfig
}
|
||||||
|
|
||||||
|
// Description returns a one-line summary of the plugin for telegraf's
// plugin listing.
func (n *Tengine) Description() string {
	return "Read Tengine's basic status information (ngx_http_reqstat_module)"
}
|
||||||
|
|
||||||
|
func (n *Tengine) Gather(acc telegraf.Accumulator) error {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
// Create an HTTP client that is re-used for each
|
||||||
|
// collection interval
|
||||||
|
if n.client == nil {
|
||||||
|
client, err := n.createHttpClient()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n.client = client
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, u := range n.Urls {
|
||||||
|
addr, err := url.Parse(u)
|
||||||
|
if err != nil {
|
||||||
|
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
go func(addr *url.URL) {
|
||||||
|
defer wg.Done()
|
||||||
|
acc.AddError(n.gatherUrl(addr, acc))
|
||||||
|
}(addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Tengine) createHttpClient() (*http.Client, error) {
|
||||||
|
tlsCfg, err := n.ClientConfig.TLSConfig()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.ResponseTimeout.Duration < time.Second {
|
||||||
|
n.ResponseTimeout.Duration = time.Second * 5
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
TLSClientConfig: tlsCfg,
|
||||||
|
},
|
||||||
|
Timeout: n.ResponseTimeout.Duration,
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TengineSatus holds the counters parsed from one CSV line of the
// reqstat output (one line per vhost).
// NOTE(review): the name is missing a "t" (presumably meant
// "TengineStatus"); kept as-is because renaming is an interface change.
type TengineSatus struct {
	// host is the vhost (server_name) the counters belong to; CSV column 0.
	host string

	// Traffic and connection totals.
	bytes_in   uint64
	bytes_out  uint64
	conn_total uint64
	req_total  uint64

	// Responses grouped by status class.
	http_2xx          uint64
	http_3xx          uint64
	http_4xx          uint64
	http_5xx          uint64
	http_other_status uint64

	// Request time and upstream request counters.
	rt        uint64
	ups_req   uint64
	ups_rt    uint64
	ups_tries uint64

	// Responses counted per individual status code.
	http_200                 uint64
	http_206                 uint64
	http_302                 uint64
	http_304                 uint64
	http_403                 uint64
	http_404                 uint64
	http_416                 uint64
	http_499                 uint64
	http_500                 uint64
	http_502                 uint64
	http_503                 uint64
	http_504                 uint64
	http_508                 uint64
	http_other_detail_status uint64

	// Upstream responses grouped by status class.
	http_ups_4xx uint64
	http_ups_5xx uint64
}
|
||||||
|
|
||||||
|
func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||||
|
var tenginestatus TengineSatus
|
||||||
|
resp, err := n.client.Get(addr.String())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
|
||||||
|
}
|
||||||
|
r := bufio.NewReader(resp.Body)
|
||||||
|
|
||||||
|
for {
|
||||||
|
line, err := r.ReadString('\n')
|
||||||
|
|
||||||
|
if err != nil || io.EOF == err {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
line_split := strings.Split(strings.TrimSpace(line), ",")
|
||||||
|
if len(line_split) != 30 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
tenginestatus.host = line_split[0]
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.bytes_in, err = strconv.ParseUint(line_split[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.bytes_out, err = strconv.ParseUint(line_split[2], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.conn_total, err = strconv.ParseUint(line_split[3], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.req_total, err = strconv.ParseUint(line_split[4], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_2xx, err = strconv.ParseUint(line_split[5], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_3xx, err = strconv.ParseUint(line_split[6], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_4xx, err = strconv.ParseUint(line_split[7], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_5xx, err = strconv.ParseUint(line_split[8], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_other_status, err = strconv.ParseUint(line_split[9], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.rt, err = strconv.ParseUint(line_split[10], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.ups_req, err = strconv.ParseUint(line_split[11], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.ups_rt, err = strconv.ParseUint(line_split[12], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.ups_tries, err = strconv.ParseUint(line_split[13], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_200, err = strconv.ParseUint(line_split[14], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_206, err = strconv.ParseUint(line_split[15], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_302, err = strconv.ParseUint(line_split[16], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_304, err = strconv.ParseUint(line_split[17], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_403, err = strconv.ParseUint(line_split[18], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_404, err = strconv.ParseUint(line_split[19], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_416, err = strconv.ParseUint(line_split[20], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_499, err = strconv.ParseUint(line_split[21], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_500, err = strconv.ParseUint(line_split[22], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_502, err = strconv.ParseUint(line_split[23], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_503, err = strconv.ParseUint(line_split[24], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_504, err = strconv.ParseUint(line_split[25], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_508, err = strconv.ParseUint(line_split[26], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_other_detail_status, err = strconv.ParseUint(line_split[27], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_ups_4xx, err = strconv.ParseUint(line_split[28], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tenginestatus.http_ups_5xx, err = strconv.ParseUint(line_split[29], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tags := getTags(addr, tenginestatus.host)
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"bytes_in": tenginestatus.bytes_in,
|
||||||
|
"bytes_out": tenginestatus.bytes_out,
|
||||||
|
"conn_total": tenginestatus.conn_total,
|
||||||
|
"req_total": tenginestatus.req_total,
|
||||||
|
"http_2xx": tenginestatus.http_2xx,
|
||||||
|
"http_3xx": tenginestatus.http_3xx,
|
||||||
|
"http_4xx": tenginestatus.http_4xx,
|
||||||
|
"http_5xx": tenginestatus.http_5xx,
|
||||||
|
"http_other_status": tenginestatus.http_other_status,
|
||||||
|
"rt": tenginestatus.rt,
|
||||||
|
"ups_req": tenginestatus.ups_req,
|
||||||
|
"ups_rt": tenginestatus.ups_rt,
|
||||||
|
"ups_tries": tenginestatus.ups_tries,
|
||||||
|
"http_200": tenginestatus.http_200,
|
||||||
|
"http_206": tenginestatus.http_206,
|
||||||
|
"http_302": tenginestatus.http_302,
|
||||||
|
"http_304": tenginestatus.http_304,
|
||||||
|
"http_403": tenginestatus.http_403,
|
||||||
|
"http_404": tenginestatus.http_404,
|
||||||
|
"http_416": tenginestatus.http_416,
|
||||||
|
"http_499": tenginestatus.http_499,
|
||||||
|
"http_500": tenginestatus.http_500,
|
||||||
|
"http_502": tenginestatus.http_502,
|
||||||
|
"http_503": tenginestatus.http_503,
|
||||||
|
"http_504": tenginestatus.http_504,
|
||||||
|
"http_508": tenginestatus.http_508,
|
||||||
|
"http_other_detail_status": tenginestatus.http_other_detail_status,
|
||||||
|
"http_ups_4xx": tenginestatus.http_ups_4xx,
|
||||||
|
"http_ups_5xx": tenginestatus.http_ups_5xx,
|
||||||
|
}
|
||||||
|
acc.AddFields("tengine", fields, tags)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get tag(s) for the tengine plugin
|
||||||
|
func getTags(addr *url.URL, server_name string) map[string]string {
|
||||||
|
h := addr.Host
|
||||||
|
host, port, err := net.SplitHostPort(h)
|
||||||
|
if err != nil {
|
||||||
|
host = addr.Host
|
||||||
|
if addr.Scheme == "http" {
|
||||||
|
port = "80"
|
||||||
|
} else if addr.Scheme == "https" {
|
||||||
|
port = "443"
|
||||||
|
} else {
|
||||||
|
port = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return map[string]string{"server": host, "port": port, "server_name": server_name}
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers the plugin in telegraf's input registry under the name
// "tengine"; the factory returns a zero-value Tengine per config block.
func init() {
	inputs.Add("tengine", func() telegraf.Input {
		return &Tengine{}
	})
}
|
||||||
97
plugins/inputs/tengine/tengine_test.go
Normal file
97
plugins/inputs/tengine/tengine_test.go
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
package tengine
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
const tengineSampleResponse = `127.0.0.1,784,1511,2,2,1,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0`
|
||||||
|
|
||||||
|
// Verify that tengine tags are properly parsed based on the server
|
||||||
|
func TestTengineTags(t *testing.T) {
|
||||||
|
urls := []string{"http://localhost/us", "http://localhost:80/us"}
|
||||||
|
var addr *url.URL
|
||||||
|
for _, url1 := range urls {
|
||||||
|
addr, _ = url.Parse(url1)
|
||||||
|
tagMap := getTags(addr, "127.0.0.1")
|
||||||
|
assert.Contains(t, tagMap["server"], "localhost")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTengineGeneratesMetrics(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var rsp string
|
||||||
|
rsp = tengineSampleResponse
|
||||||
|
fmt.Fprintln(w, rsp)
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
n := &Tengine{
|
||||||
|
Urls: []string{fmt.Sprintf("%s/us", ts.URL)},
|
||||||
|
}
|
||||||
|
|
||||||
|
var acc_tengine testutil.Accumulator
|
||||||
|
|
||||||
|
err_tengine := acc_tengine.GatherError(n.Gather)
|
||||||
|
|
||||||
|
require.NoError(t, err_tengine)
|
||||||
|
|
||||||
|
fields_tengine := map[string]interface{}{
|
||||||
|
"bytes_in": uint64(784),
|
||||||
|
"bytes_out": uint64(1511),
|
||||||
|
"conn_total": uint64(2),
|
||||||
|
"req_total": uint64(2),
|
||||||
|
"http_2xx": uint64(1),
|
||||||
|
"http_3xx": uint64(0),
|
||||||
|
"http_4xx": uint64(1),
|
||||||
|
"http_5xx": uint64(0),
|
||||||
|
"http_other_status": uint64(0),
|
||||||
|
"rt": uint64(0),
|
||||||
|
"ups_req": uint64(0),
|
||||||
|
"ups_rt": uint64(0),
|
||||||
|
"ups_tries": uint64(0),
|
||||||
|
"http_200": uint64(1),
|
||||||
|
"http_206": uint64(0),
|
||||||
|
"http_302": uint64(0),
|
||||||
|
"http_304": uint64(0),
|
||||||
|
"http_403": uint64(0),
|
||||||
|
"http_404": uint64(1),
|
||||||
|
"http_416": uint64(0),
|
||||||
|
"http_499": uint64(0),
|
||||||
|
"http_500": uint64(0),
|
||||||
|
"http_502": uint64(0),
|
||||||
|
"http_503": uint64(0),
|
||||||
|
"http_504": uint64(0),
|
||||||
|
"http_508": uint64(0),
|
||||||
|
"http_other_detail_status": uint64(0),
|
||||||
|
"http_ups_4xx": uint64(0),
|
||||||
|
"http_ups_5xx": uint64(0),
|
||||||
|
}
|
||||||
|
|
||||||
|
addr, err := url.Parse(ts.URL)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
host, port, err := net.SplitHostPort(addr.Host)
|
||||||
|
if err != nil {
|
||||||
|
host = addr.Host
|
||||||
|
if addr.Scheme == "http" {
|
||||||
|
port = "80"
|
||||||
|
} else if addr.Scheme == "https" {
|
||||||
|
port = "443"
|
||||||
|
} else {
|
||||||
|
port = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tags := map[string]string{"server": host, "port": port, "server_name": "127.0.0.1"}
|
||||||
|
acc_tengine.AssertContainsTaggedFields(t, "tengine", fields_tengine, tags)
|
||||||
|
}
|
||||||
@@ -32,7 +32,7 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
|
|
||||||
"github.com/apache/thrift/lib/go/thrift"
|
"github.com/apache/thrift/lib/go/thrift"
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||||
)
|
)
|
||||||
|
|
||||||
//now is a mockable time for now
|
//now is a mockable time for now
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||||
)
|
)
|
||||||
|
|
||||||
// JSON decodes spans from bodies `POST`ed to the spans endpoint
|
// JSON decodes spans from bodies `POST`ed to the spans endpoint
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||||
|
|
||||||
"github.com/apache/thrift/lib/go/thrift"
|
"github.com/apache/thrift/lib/go/thrift"
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnmarshalThrift converts raw bytes in thrift format to a slice of spans
|
// UnmarshalThrift converts raw bytes in thrift format to a slice of spans
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
|
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_endpointHost(t *testing.T) {
|
func Test_endpointHost(t *testing.T) {
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
version: '3'
|
|
||||||
services:
|
|
||||||
zoo:
|
|
||||||
image: zookeeper
|
|
||||||
|
|
||||||
telegraf:
|
|
||||||
image: glinton/scratch
|
|
||||||
volumes:
|
|
||||||
- ./telegraf.conf:/telegraf.conf
|
|
||||||
- ../../../../telegraf:/telegraf
|
|
||||||
depends_on:
|
|
||||||
- zoo
|
|
||||||
entrypoint:
|
|
||||||
- /telegraf
|
|
||||||
- --config
|
|
||||||
- /telegraf.conf
|
|
||||||
network_mode: service:zoo
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
[agent]
|
|
||||||
interval="1s"
|
|
||||||
flush_interval="1s"
|
|
||||||
|
|
||||||
[[inputs.zookeeper]]
|
|
||||||
servers = [":2181"]
|
|
||||||
|
|
||||||
[[outputs.file]]
|
|
||||||
files = ["stdout"]
|
|
||||||
@@ -158,14 +158,8 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
srv := "localhost"
|
|
||||||
if service[0] != "" {
|
|
||||||
srv = service[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
"server": srv,
|
"server": service[0],
|
||||||
"port": service[1],
|
"port": service[1],
|
||||||
"state": zookeeper_state,
|
"state": zookeeper_state,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ func TestPartitionKey(t *testing.T) {
|
|||||||
partitionKey := k.getPartitionKey(testPoint)
|
partitionKey := k.getPartitionKey(testPoint)
|
||||||
u, err := uuid.FromString(partitionKey)
|
u, err := uuid.FromString(partitionKey)
|
||||||
assert.Nil(err, "Issue parsing UUID")
|
assert.Nil(err, "Issue parsing UUID")
|
||||||
assert.Equal(uint(4), u.Version(), "PartitionKey should be UUIDv4")
|
assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4")
|
||||||
|
|
||||||
k = KinesisOutput{
|
k = KinesisOutput{
|
||||||
PartitionKey: "-",
|
PartitionKey: "-",
|
||||||
@@ -72,6 +72,5 @@ func TestPartitionKey(t *testing.T) {
|
|||||||
partitionKey = k.getPartitionKey(testPoint)
|
partitionKey = k.getPartitionKey(testPoint)
|
||||||
u, err = uuid.FromString(partitionKey)
|
u, err = uuid.FromString(partitionKey)
|
||||||
assert.Nil(err, "Issue parsing UUID")
|
assert.Nil(err, "Issue parsing UUID")
|
||||||
assert.Equal(uint(4), u.Version(), "PartitionKey should be UUIDv4")
|
assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -184,7 +184,7 @@ func (m *MQTT) publish(topic string, body []byte) error {
|
|||||||
|
|
||||||
func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
|
func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
|
||||||
opts := paho.NewClientOptions()
|
opts := paho.NewClientOptions()
|
||||||
opts.KeepAlive = 0 * time.Second
|
opts.KeepAlive = 0
|
||||||
|
|
||||||
if m.Timeout.Duration < time.Second {
|
if m.Timeout.Duration < time.Second {
|
||||||
m.Timeout.Duration = 5 * time.Second
|
m.Timeout.Duration = 5 * time.Second
|
||||||
|
|||||||
73
plugins/parsers/grok/influx-patterns
Normal file
73
plugins/parsers/grok/influx-patterns
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
# Captures are a slightly modified version of logstash "grok" patterns, with
|
||||||
|
# the format %{<capture syntax>[:<semantic name>][:<modifier>]}
|
||||||
|
# By default all named captures are converted into string fields.
|
||||||
|
# Modifiers can be used to convert captures to other types or tags.
|
||||||
|
# Timestamp modifiers can be used to convert captures to the timestamp of the
|
||||||
|
# parsed metric.
|
||||||
|
|
||||||
|
# View logstash grok pattern docs here:
|
||||||
|
# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
|
||||||
|
# All default logstash patterns are supported, these can be viewed here:
|
||||||
|
# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns
|
||||||
|
|
||||||
|
# Available modifiers:
|
||||||
|
# string (default if nothing is specified)
|
||||||
|
# int
|
||||||
|
# float
|
||||||
|
# duration (ie, 5.23ms gets converted to int nanoseconds)
|
||||||
|
# tag (converts the field into a tag)
|
||||||
|
# drop (drops the field completely)
|
||||||
|
# Timestamp modifiers:
|
||||||
|
# ts-ansic ("Mon Jan _2 15:04:05 2006")
|
||||||
|
# ts-unix ("Mon Jan _2 15:04:05 MST 2006")
|
||||||
|
# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
|
||||||
|
# ts-rfc822 ("02 Jan 06 15:04 MST")
|
||||||
|
# ts-rfc822z ("02 Jan 06 15:04 -0700")
|
||||||
|
# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
|
||||||
|
# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
|
||||||
|
# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
|
||||||
|
# ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
|
||||||
|
# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
|
||||||
|
# ts-httpd ("02/Jan/2006:15:04:05 -0700")
|
||||||
|
# ts-epoch (seconds since unix epoch)
|
||||||
|
# ts-epochnano (nanoseconds since unix epoch)
|
||||||
|
# ts-"CUSTOM"
|
||||||
|
# CUSTOM time layouts must be within quotes and be the representation of the
|
||||||
|
# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006
|
||||||
|
# See https://golang.org/pkg/time/#Parse for more details.
|
||||||
|
|
||||||
|
# Example log file pattern, example log looks like this:
|
||||||
|
# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs
|
||||||
|
# Breakdown of the DURATION pattern below:
|
||||||
|
# NUMBER is a builtin logstash grok pattern matching float & int numbers.
|
||||||
|
# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets.
|
||||||
|
# s is also regex, this pattern must end in "s".
|
||||||
|
# so DURATION will match something like '5.324ms' or '6.1µs' or '10s'
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
RESPONSE_CODE %{NUMBER:response_code:tag}
|
||||||
|
RESPONSE_TIME %{DURATION:response_time_ns:duration}
|
||||||
|
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
|
||||||
|
|
||||||
|
# Wider-ranging username matching vs. logstash built-in %{USER}
|
||||||
|
NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
|
||||||
|
NGUSER %{NGUSERNAME}
|
||||||
|
# Wider-ranging client IP matching
|
||||||
|
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
|
||||||
|
|
||||||
|
##
|
||||||
|
## COMMON LOG PATTERNS
|
||||||
|
##
|
||||||
|
|
||||||
|
# apache & nginx logs, this is also known as the "common log format"
|
||||||
|
# see https://en.wikipedia.org/wiki/Common_Log_Format
|
||||||
|
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
|
||||||
|
|
||||||
|
# Combined log format is the same as the common log format but with the addition
|
||||||
|
# of two quoted strings at the end for "referrer" and "agent"
|
||||||
|
# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html
|
||||||
|
COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent}
|
||||||
|
|
||||||
|
# HTTPD log formats
|
||||||
|
HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg}
|
||||||
|
HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message}
|
||||||
|
HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}
|
||||||
78
plugins/parsers/grok/influx_patterns.go
Normal file
78
plugins/parsers/grok/influx_patterns.go
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
package grok
|
||||||
|
|
||||||
|
// DEFAULT_PATTERNS SHOULD BE KEPT IN-SYNC WITH patterns/influx-patterns
|
||||||
|
const DEFAULT_PATTERNS = `
|
||||||
|
# Captures are a slightly modified version of logstash "grok" patterns, with
|
||||||
|
# the format %{<capture syntax>[:<semantic name>][:<modifier>]}
|
||||||
|
# By default all named captures are converted into string fields.
|
||||||
|
# Modifiers can be used to convert captures to other types or tags.
|
||||||
|
# Timestamp modifiers can be used to convert captures to the timestamp of the
|
||||||
|
# parsed metric.
|
||||||
|
|
||||||
|
# View logstash grok pattern docs here:
|
||||||
|
# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
|
||||||
|
# All default logstash patterns are supported, these can be viewed here:
|
||||||
|
# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns
|
||||||
|
|
||||||
|
# Available modifiers:
|
||||||
|
# string (default if nothing is specified)
|
||||||
|
# int
|
||||||
|
# float
|
||||||
|
# duration (ie, 5.23ms gets converted to int nanoseconds)
|
||||||
|
# tag (converts the field into a tag)
|
||||||
|
# drop (drops the field completely)
|
||||||
|
# Timestamp modifiers:
|
||||||
|
# ts-ansic ("Mon Jan _2 15:04:05 2006")
|
||||||
|
# ts-unix ("Mon Jan _2 15:04:05 MST 2006")
|
||||||
|
# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
|
||||||
|
# ts-rfc822 ("02 Jan 06 15:04 MST")
|
||||||
|
# ts-rfc822z ("02 Jan 06 15:04 -0700")
|
||||||
|
# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
|
||||||
|
# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
|
||||||
|
# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
|
||||||
|
# ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
|
||||||
|
# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
|
||||||
|
# ts-httpd ("02/Jan/2006:15:04:05 -0700")
|
||||||
|
# ts-epoch (seconds since unix epoch)
|
||||||
|
# ts-epochnano (nanoseconds since unix epoch)
|
||||||
|
# ts-"CUSTOM"
|
||||||
|
# CUSTOM time layouts must be within quotes and be the representation of the
|
||||||
|
# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006
|
||||||
|
# See https://golang.org/pkg/time/#Parse for more details.
|
||||||
|
|
||||||
|
# Example log file pattern, example log looks like this:
|
||||||
|
# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs
|
||||||
|
# Breakdown of the DURATION pattern below:
|
||||||
|
# NUMBER is a builtin logstash grok pattern matching float & int numbers.
|
||||||
|
# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets.
|
||||||
|
# s is also regex, this pattern must end in "s".
|
||||||
|
# so DURATION will match something like '5.324ms' or '6.1µs' or '10s'
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
RESPONSE_CODE %{NUMBER:response_code:tag}
|
||||||
|
RESPONSE_TIME %{DURATION:response_time_ns:duration}
|
||||||
|
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
|
||||||
|
|
||||||
|
# Wider-ranging username matching vs. logstash built-in %{USER}
|
||||||
|
NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
|
||||||
|
NGUSER %{NGUSERNAME}
|
||||||
|
# Wider-ranging client IP matching
|
||||||
|
CLIENT (?:%{IPV6}|%{IPV4}|%{HOSTNAME}|%{HOSTPORT})
|
||||||
|
|
||||||
|
##
|
||||||
|
## COMMON LOG PATTERNS
|
||||||
|
##
|
||||||
|
|
||||||
|
# apache & nginx logs, this is also known as the "common log format"
|
||||||
|
# see https://en.wikipedia.org/wiki/Common_Log_Format
|
||||||
|
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
|
||||||
|
|
||||||
|
# Combined log format is the same as the common log format but with the addition
|
||||||
|
# of two quoted strings at the end for "referrer" and "agent"
|
||||||
|
# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html
|
||||||
|
COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent}
|
||||||
|
|
||||||
|
# HTTPD log formats
|
||||||
|
HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg}
|
||||||
|
HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message}
|
||||||
|
HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}
|
||||||
|
`
|
||||||
527
plugins/parsers/grok/parser.go
Normal file
527
plugins/parsers/grok/parser.go
Normal file
@@ -0,0 +1,527 @@
|
|||||||
|
package grok
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/vjeantet/grok"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
var timeLayouts = map[string]string{
|
||||||
|
"ts-ansic": "Mon Jan _2 15:04:05 2006",
|
||||||
|
"ts-unix": "Mon Jan _2 15:04:05 MST 2006",
|
||||||
|
"ts-ruby": "Mon Jan 02 15:04:05 -0700 2006",
|
||||||
|
"ts-rfc822": "02 Jan 06 15:04 MST",
|
||||||
|
"ts-rfc822z": "02 Jan 06 15:04 -0700", // RFC822 with numeric zone
|
||||||
|
"ts-rfc850": "Monday, 02-Jan-06 15:04:05 MST",
|
||||||
|
"ts-rfc1123": "Mon, 02 Jan 2006 15:04:05 MST",
|
||||||
|
"ts-rfc1123z": "Mon, 02 Jan 2006 15:04:05 -0700", // RFC1123 with numeric zone
|
||||||
|
"ts-rfc3339": "2006-01-02T15:04:05Z07:00",
|
||||||
|
"ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00",
|
||||||
|
"ts-httpd": "02/Jan/2006:15:04:05 -0700",
|
||||||
|
// These three are not exactly "layouts", but they are special cases that
|
||||||
|
// will get handled in the ParseLine function.
|
||||||
|
"ts-epoch": "EPOCH",
|
||||||
|
"ts-epochnano": "EPOCH_NANO",
|
||||||
|
"ts-syslog": "SYSLOG_TIMESTAMP",
|
||||||
|
"ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts.
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
INT = "int"
|
||||||
|
TAG = "tag"
|
||||||
|
FLOAT = "float"
|
||||||
|
STRING = "string"
|
||||||
|
DURATION = "duration"
|
||||||
|
DROP = "drop"
|
||||||
|
EPOCH = "EPOCH"
|
||||||
|
EPOCH_NANO = "EPOCH_NANO"
|
||||||
|
SYSLOG_TIMESTAMP = "SYSLOG_TIMESTAMP"
|
||||||
|
GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// matches named captures that contain a modifier.
	// ie,
	//   %{NUMBER:bytes:int}
	//   %{IPORHOST:clientip:tag}
	//   %{HTTPDATE:ts1:ts-http}
	//   %{HTTPDATE:ts2:ts-"02 Jan 06 15:04"}
	// Capture 1 is the field name, capture 2 is the modifier.
	modifierRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`)
	// matches a plain pattern name. ie, %{NUMBER}
	patternOnlyRe = regexp.MustCompile(`%{(\w+)}`)
)
|
||||||
|
|
||||||
|
// Parser is the primary struct to handle and grok-patterns defined in the config toml
type Parser struct {
	// Patterns is the list of user-supplied grok patterns to match lines
	// against, in order.
	Patterns []string
	// namedPatterns is a list of internally-assigned names to the patterns
	// specified by the user in Patterns.
	// They will look like:
	// GROK_INTERNAL_PATTERN_0, GROK_INTERNAL_PATTERN_1, etc.
	NamedPatterns []string
	// CustomPatterns holds extra pattern definitions, one "NAME body" per line.
	CustomPatterns string
	// CustomPatternFiles lists files containing additional pattern definitions.
	CustomPatternFiles []string
	// Measurement is the metric name to emit; defaults to "logparser_grok".
	Measurement string

	// Timezone is an optional component to help render log dates to
	// your chosen zone.
	// Default: "" which renders UTC
	// Options are as follows:
	// 1. Local             -- interpret based on machine localtime
	// 2. "America/Chicago" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	// 3. UTC               -- or blank/unspecified, will return timestamp in UTC
	Timezone string
	// loc is the resolved *time.Location for Timezone (set in Compile).
	loc *time.Location

	// typeMap is a map of patterns -> capture name -> modifier,
	//   ie, {
	//       "%{TESTLOG}":
	//           {
	//               "bytes": "int",
	//               "clientip": "tag"
	//           }
	//   }
	typeMap map[string]map[string]string
	// tsMap is a map of patterns -> capture name -> timestamp layout.
	//   ie, {
	//       "%{TESTLOG}":
	//           {
	//               "httptime": "02/Jan/2006:15:04:05 -0700"
	//           }
	//   }
	tsMap map[string]map[string]string
	// patterns is a map of all of the parsed patterns from CustomPatterns
	// and CustomPatternFiles.
	//   ie, {
	//       "DURATION":      "%{NUMBER}[nuµm]?s"
	//       "RESPONSE_CODE": "%{NUMBER:rc:tag}"
	//   }
	patterns map[string]string
	// foundTsLayouts is a slice of timestamp patterns that have been found
	// in the log lines. This slice gets updated if the user uses the generic
	// 'ts' modifier for timestamps. This slice is checked first for matches,
	// so that previously-matched layouts get priority over all other timestamp
	// layouts.
	foundTsLayouts []string

	// timeFunc supplies "now"; injectable so tests can pin the clock.
	timeFunc func() time.Time
	// g is the underlying grok engine (initialized in Compile).
	g *grok.Grok
	// tsModder de-duplicates identical timestamps across lines.
	tsModder *tsModder
}
|
||||||
|
|
||||||
|
// Compile is a bound method to Parser which will process the options for our parser
|
||||||
|
func (p *Parser) Compile() error {
|
||||||
|
p.typeMap = make(map[string]map[string]string)
|
||||||
|
p.tsMap = make(map[string]map[string]string)
|
||||||
|
p.patterns = make(map[string]string)
|
||||||
|
p.tsModder = &tsModder{}
|
||||||
|
var err error
|
||||||
|
p.g, err = grok.NewWithConfig(&grok.Config{NamedCapturesOnly: true})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Give Patterns fake names so that they can be treated as named
|
||||||
|
// "custom patterns"
|
||||||
|
p.NamedPatterns = make([]string, 0, len(p.Patterns))
|
||||||
|
for i, pattern := range p.Patterns {
|
||||||
|
pattern = strings.TrimSpace(pattern)
|
||||||
|
if pattern == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := fmt.Sprintf("GROK_INTERNAL_PATTERN_%d", i)
|
||||||
|
p.CustomPatterns += "\n" + name + " " + pattern + "\n"
|
||||||
|
p.NamedPatterns = append(p.NamedPatterns, "%{"+name+"}")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(p.NamedPatterns) == 0 {
|
||||||
|
return fmt.Errorf("pattern required")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Combine user-supplied CustomPatterns with DEFAULT_PATTERNS and parse
|
||||||
|
// them together as the same type of pattern.
|
||||||
|
p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns
|
||||||
|
if len(p.CustomPatterns) != 0 {
|
||||||
|
scanner := bufio.NewScanner(strings.NewReader(p.CustomPatterns))
|
||||||
|
p.addCustomPatterns(scanner)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse any custom pattern files supplied.
|
||||||
|
for _, filename := range p.CustomPatternFiles {
|
||||||
|
file, fileErr := os.Open(filename)
|
||||||
|
if fileErr != nil {
|
||||||
|
return fileErr
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(bufio.NewReader(file))
|
||||||
|
p.addCustomPatterns(scanner)
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.Measurement == "" {
|
||||||
|
p.Measurement = "logparser_grok"
|
||||||
|
}
|
||||||
|
|
||||||
|
p.loc, err = time.LoadLocation(p.Timezone)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("W! improper timezone supplied (%s), setting loc to UTC", p.Timezone)
|
||||||
|
p.loc, _ = time.LoadLocation("UTC")
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.timeFunc == nil {
|
||||||
|
p.timeFunc = time.Now
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.compileCustomPatterns()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseLine is the primary function to process individual lines, returning the metrics
|
||||||
|
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
|
||||||
|
var err error
|
||||||
|
// values are the parsed fields from the log line
|
||||||
|
var values map[string]string
|
||||||
|
// the matching pattern string
|
||||||
|
var patternName string
|
||||||
|
for _, pattern := range p.NamedPatterns {
|
||||||
|
if values, err = p.g.Parse(pattern, line); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(values) != 0 {
|
||||||
|
patternName = pattern
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(values) == 0 {
|
||||||
|
log.Printf("D! Grok no match found for: %q", line)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags := make(map[string]string)
|
||||||
|
timestamp := time.Now()
|
||||||
|
for k, v := range values {
|
||||||
|
if k == "" || v == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// t is the modifier of the field
|
||||||
|
var t string
|
||||||
|
// check if pattern has some modifiers
|
||||||
|
if types, ok := p.typeMap[patternName]; ok {
|
||||||
|
t = types[k]
|
||||||
|
}
|
||||||
|
// if we didn't find a modifier, check if we have a timestamp layout
|
||||||
|
if t == "" {
|
||||||
|
if ts, ok := p.tsMap[patternName]; ok {
|
||||||
|
// check if the modifier is a timestamp layout
|
||||||
|
if layout, ok := ts[k]; ok {
|
||||||
|
t = layout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if we didn't find a type OR timestamp modifier, assume string
|
||||||
|
if t == "" {
|
||||||
|
t = STRING
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case INT:
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = iv
|
||||||
|
}
|
||||||
|
case FLOAT:
|
||||||
|
fv, err := strconv.ParseFloat(v, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error parsing %s to float: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = fv
|
||||||
|
}
|
||||||
|
case DURATION:
|
||||||
|
d, err := time.ParseDuration(v)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error parsing %s to duration: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = int64(d)
|
||||||
|
}
|
||||||
|
case TAG:
|
||||||
|
tags[k] = v
|
||||||
|
case STRING:
|
||||||
|
fields[k] = strings.Trim(v, `"`)
|
||||||
|
case EPOCH:
|
||||||
|
parts := strings.SplitN(v, ".", 2)
|
||||||
|
if len(parts) == 0 {
|
||||||
|
log.Printf("E! Error parsing %s to timestamp: %s", v, err)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
sec, err := strconv.ParseInt(parts[0], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error parsing %s to timestamp: %s", v, err)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ts := time.Unix(sec, 0)
|
||||||
|
|
||||||
|
if len(parts) == 2 {
|
||||||
|
padded := fmt.Sprintf("%-9s", parts[1])
|
||||||
|
nsString := strings.Replace(padded[:9], " ", "0", -1)
|
||||||
|
nanosec, err := strconv.ParseInt(nsString, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error parsing %s to timestamp: %s", v, err)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ts = ts.Add(time.Duration(nanosec) * time.Nanosecond)
|
||||||
|
}
|
||||||
|
timestamp = ts
|
||||||
|
case EPOCH_NANO:
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("E! Error parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
timestamp = time.Unix(0, iv)
|
||||||
|
}
|
||||||
|
case SYSLOG_TIMESTAMP:
|
||||||
|
ts, err := time.ParseInLocation("Jan 02 15:04:05", v, p.loc)
|
||||||
|
if err == nil {
|
||||||
|
if ts.Year() == 0 {
|
||||||
|
ts = ts.AddDate(timestamp.Year(), 0, 0)
|
||||||
|
}
|
||||||
|
timestamp = ts
|
||||||
|
} else {
|
||||||
|
log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
|
||||||
|
}
|
||||||
|
case GENERIC_TIMESTAMP:
|
||||||
|
var foundTs bool
|
||||||
|
// first try timestamp layouts that we've already found
|
||||||
|
for _, layout := range p.foundTsLayouts {
|
||||||
|
ts, err := time.ParseInLocation(layout, v, p.loc)
|
||||||
|
if err == nil {
|
||||||
|
timestamp = ts
|
||||||
|
foundTs = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if we haven't found a timestamp layout yet, try all timestamp
|
||||||
|
// layouts.
|
||||||
|
if !foundTs {
|
||||||
|
for _, layout := range timeLayouts {
|
||||||
|
ts, err := time.ParseInLocation(layout, v, p.loc)
|
||||||
|
if err == nil {
|
||||||
|
timestamp = ts
|
||||||
|
foundTs = true
|
||||||
|
p.foundTsLayouts = append(p.foundTsLayouts, layout)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if we still haven't found a timestamp layout, log it and we will
|
||||||
|
// just use time.Now()
|
||||||
|
if !foundTs {
|
||||||
|
log.Printf("E! Error parsing timestamp [%s], could not find any "+
|
||||||
|
"suitable time layouts.", v)
|
||||||
|
}
|
||||||
|
case DROP:
|
||||||
|
// goodbye!
|
||||||
|
default:
|
||||||
|
ts, err := time.ParseInLocation(t, v, p.loc)
|
||||||
|
if err == nil {
|
||||||
|
timestamp = ts
|
||||||
|
} else {
|
||||||
|
log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return nil, fmt.Errorf("logparser_grok: must have one or more fields")
|
||||||
|
}
|
||||||
|
|
||||||
|
return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
||||||
|
lines := strings.Split(string(buf), "\n")
|
||||||
|
var metrics []telegraf.Metric
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
m, err := p.ParseLine(line)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
metrics = append(metrics, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
return metrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDefaultTags satisfies the parsers.Parser interface.
// NOTE(review): intentionally a no-op for now — default tags passed here are
// currently discarded; confirm whether the logparser plugin relies on them.
func (p *Parser) SetDefaultTags(tags map[string]string) {
	//needs implementation
}
|
||||||
|
|
||||||
|
func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) {
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
if len(line) > 0 && line[0] != '#' {
|
||||||
|
names := strings.SplitN(line, " ", 2)
|
||||||
|
p.patterns[names[0]] = names[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) compileCustomPatterns() error {
|
||||||
|
var err error
|
||||||
|
// check if the pattern contains a subpattern that is already defined
|
||||||
|
// replace it with the subpattern for modifier inheritance.
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
for name, pattern := range p.patterns {
|
||||||
|
subNames := patternOnlyRe.FindAllStringSubmatch(pattern, -1)
|
||||||
|
for _, subName := range subNames {
|
||||||
|
if subPattern, ok := p.patterns[subName[1]]; ok {
|
||||||
|
pattern = strings.Replace(pattern, subName[0], subPattern, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.patterns[name] = pattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if pattern contains modifiers. Parse them out if it does.
|
||||||
|
for name, pattern := range p.patterns {
|
||||||
|
if modifierRe.MatchString(pattern) {
|
||||||
|
// this pattern has modifiers, so parse out the modifiers
|
||||||
|
pattern, err = p.parseTypedCaptures(name, pattern)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
p.patterns[name] = pattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.g.AddPatternsFromMap(p.patterns)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTypedCaptures parses the capture modifiers, and then deletes the
|
||||||
|
// modifier from the line so that it is a valid "grok" pattern again.
|
||||||
|
// ie,
|
||||||
|
// %{NUMBER:bytes:int} => %{NUMBER:bytes} (stores %{NUMBER}->bytes->int)
|
||||||
|
// %{IPORHOST:clientip:tag} => %{IPORHOST:clientip} (stores %{IPORHOST}->clientip->tag)
|
||||||
|
func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) {
|
||||||
|
matches := modifierRe.FindAllStringSubmatch(pattern, -1)
|
||||||
|
|
||||||
|
// grab the name of the capture pattern
|
||||||
|
patternName := "%{" + name + "}"
|
||||||
|
// create type map for this pattern
|
||||||
|
p.typeMap[patternName] = make(map[string]string)
|
||||||
|
p.tsMap[patternName] = make(map[string]string)
|
||||||
|
|
||||||
|
// boolean to verify that each pattern only has a single ts- data type.
|
||||||
|
hasTimestamp := false
|
||||||
|
for _, match := range matches {
|
||||||
|
// regex capture 1 is the name of the capture
|
||||||
|
// regex capture 2 is the modifier of the capture
|
||||||
|
if strings.HasPrefix(match[2], "ts") {
|
||||||
|
if hasTimestamp {
|
||||||
|
return pattern, fmt.Errorf("logparser pattern compile error: "+
|
||||||
|
"Each pattern is allowed only one named "+
|
||||||
|
"timestamp data type. pattern: %s", pattern)
|
||||||
|
}
|
||||||
|
if layout, ok := timeLayouts[match[2]]; ok {
|
||||||
|
// built-in time format
|
||||||
|
p.tsMap[patternName][match[1]] = layout
|
||||||
|
} else {
|
||||||
|
// custom time format
|
||||||
|
p.tsMap[patternName][match[1]] = strings.TrimSuffix(strings.TrimPrefix(match[2], `ts-"`), `"`)
|
||||||
|
}
|
||||||
|
hasTimestamp = true
|
||||||
|
} else {
|
||||||
|
p.typeMap[patternName][match[1]] = match[2]
|
||||||
|
}
|
||||||
|
|
||||||
|
// the modifier is not a valid part of a "grok" pattern, so remove it
|
||||||
|
// from the pattern.
|
||||||
|
pattern = strings.Replace(pattern, ":"+match[2]+"}", "}", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pattern, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// tsModder is a struct for incrementing identical timestamps of log lines
|
||||||
|
// so that we don't push identical metrics that will get overwritten.
|
||||||
|
type tsModder struct {
|
||||||
|
dupe time.Time
|
||||||
|
last time.Time
|
||||||
|
incr time.Duration
|
||||||
|
incrn time.Duration
|
||||||
|
rollover time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// tsMod increments the given timestamp one unit more from the previous
|
||||||
|
// duplicate timestamp.
|
||||||
|
// the increment unit is determined as the next smallest time unit below the
|
||||||
|
// most significant time unit of ts.
|
||||||
|
// ie, if the input is at ms precision, it will increment it 1µs.
|
||||||
|
func (t *tsModder) tsMod(ts time.Time) time.Time {
|
||||||
|
defer func() { t.last = ts }()
|
||||||
|
// don't mod the time if we don't need to
|
||||||
|
if t.last.IsZero() || ts.IsZero() {
|
||||||
|
t.incrn = 0
|
||||||
|
t.rollover = 0
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
if !ts.Equal(t.last) && !ts.Equal(t.dupe) {
|
||||||
|
t.incr = 0
|
||||||
|
t.incrn = 0
|
||||||
|
t.rollover = 0
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.Equal(t.last) {
|
||||||
|
t.dupe = ts
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.Equal(t.dupe) && t.incr == time.Duration(0) {
|
||||||
|
tsNano := ts.UnixNano()
|
||||||
|
|
||||||
|
d := int64(10)
|
||||||
|
counter := 1
|
||||||
|
for {
|
||||||
|
a := tsNano % d
|
||||||
|
if a > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d = d * 10
|
||||||
|
counter++
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case counter <= 6:
|
||||||
|
t.incr = time.Nanosecond
|
||||||
|
case counter <= 9:
|
||||||
|
t.incr = time.Microsecond
|
||||||
|
case counter > 9:
|
||||||
|
t.incr = time.Millisecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.incrn++
|
||||||
|
if t.incrn == 999 && t.incr > time.Nanosecond {
|
||||||
|
t.rollover = t.incr * t.incrn
|
||||||
|
t.incrn = 1
|
||||||
|
t.incr = t.incr / 1000
|
||||||
|
if t.incr < time.Nanosecond {
|
||||||
|
t.incr = time.Nanosecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ts.Add(t.incr*t.incrn + t.rollover)
|
||||||
|
}
|
||||||
19
plugins/parsers/grok/parser_test.go
Normal file
19
plugins/parsers/grok/parser_test.go
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
package grok
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGrokParse(t *testing.T) {
|
||||||
|
parser := Parser{
|
||||||
|
Measurement: "t_met",
|
||||||
|
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
|
||||||
|
}
|
||||||
|
parser.Compile()
|
||||||
|
metrics, err := parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`))
|
||||||
|
log.Printf("metric_tags: %v, metric_fields: %v", metrics[0].Tags(), metrics[0].Fields())
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"github.com/influxdata/telegraf/plugins/parsers/collectd"
|
"github.com/influxdata/telegraf/plugins/parsers/collectd"
|
||||||
"github.com/influxdata/telegraf/plugins/parsers/dropwizard"
|
"github.com/influxdata/telegraf/plugins/parsers/dropwizard"
|
||||||
"github.com/influxdata/telegraf/plugins/parsers/graphite"
|
"github.com/influxdata/telegraf/plugins/parsers/graphite"
|
||||||
|
"github.com/influxdata/telegraf/plugins/parsers/grok"
|
||||||
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
"github.com/influxdata/telegraf/plugins/parsers/influx"
|
||||||
"github.com/influxdata/telegraf/plugins/parsers/json"
|
"github.com/influxdata/telegraf/plugins/parsers/json"
|
||||||
"github.com/influxdata/telegraf/plugins/parsers/nagios"
|
"github.com/influxdata/telegraf/plugins/parsers/nagios"
|
||||||
@@ -87,6 +88,12 @@ type Config struct {
|
|||||||
// an optional map containing tag names as keys and json paths to retrieve the tag values from as values
|
// an optional map containing tag names as keys and json paths to retrieve the tag values from as values
|
||||||
// used if TagsPath is empty or doesn't return any tags
|
// used if TagsPath is empty or doesn't return any tags
|
||||||
DropwizardTagPathsMap map[string]string
|
DropwizardTagPathsMap map[string]string
|
||||||
|
|
||||||
|
//grok patterns
|
||||||
|
Patterns []string
|
||||||
|
NamedPatterns []string
|
||||||
|
CustomPatterns string
|
||||||
|
CustomPatternFiles []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewParser returns a Parser interface based on the given config.
|
// NewParser returns a Parser interface based on the given config.
|
||||||
@@ -120,12 +127,36 @@ func NewParser(config *Config) (Parser, error) {
|
|||||||
config.DefaultTags,
|
config.DefaultTags,
|
||||||
config.Separator,
|
config.Separator,
|
||||||
config.Templates)
|
config.Templates)
|
||||||
|
case "grok":
|
||||||
|
parser, err = NewGrokParser(
|
||||||
|
config.MetricName,
|
||||||
|
config.Patterns,
|
||||||
|
config.NamedPatterns,
|
||||||
|
config.CustomPatterns,
|
||||||
|
config.CustomPatternFiles)
|
||||||
default:
|
default:
|
||||||
err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
|
err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
|
||||||
}
|
}
|
||||||
return parser, err
|
return parser, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewGrokParser(metricName string,
|
||||||
|
patterns []string,
|
||||||
|
nPatterns []string,
|
||||||
|
cPatterns string,
|
||||||
|
cPatternFiles []string) (Parser, error) {
|
||||||
|
parser := grok.Parser{
|
||||||
|
Measurement: metricName,
|
||||||
|
Patterns: patterns,
|
||||||
|
NamedPatterns: nPatterns,
|
||||||
|
CustomPatterns: cPatterns,
|
||||||
|
CustomPatternFiles: cPatternFiles,
|
||||||
|
}
|
||||||
|
|
||||||
|
parser.Compile()
|
||||||
|
return &parser, nil
|
||||||
|
}
|
||||||
|
|
||||||
func NewJSONParser(
|
func NewJSONParser(
|
||||||
metricName string,
|
metricName string,
|
||||||
tagKeys []string,
|
tagKeys []string,
|
||||||
|
|||||||
@@ -67,9 +67,7 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
|||||||
for _, metric := range in {
|
for _, metric := range in {
|
||||||
for _, converter := range r.Tags {
|
for _, converter := range r.Tags {
|
||||||
if value, ok := metric.GetTag(converter.Key); ok {
|
if value, ok := metric.GetTag(converter.Key); ok {
|
||||||
if key, newValue := r.convert(converter, value); newValue != "" {
|
metric.AddTag(r.convert(converter, value))
|
||||||
metric.AddTag(key, newValue)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,9 +75,7 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
|||||||
if value, ok := metric.GetField(converter.Key); ok {
|
if value, ok := metric.GetField(converter.Key); ok {
|
||||||
switch value := value.(type) {
|
switch value := value.(type) {
|
||||||
case string:
|
case string:
|
||||||
if key, newValue := r.convert(converter, value); newValue != "" {
|
metric.AddField(r.convert(converter, value))
|
||||||
metric.AddField(key, newValue)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -222,7 +222,7 @@ func TestNoMatches(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
message: "Should not emit new tag/field when result_key given but regex doesn't match",
|
message: "Should emit empty string when result_key given but regex doesn't match",
|
||||||
converter: converter{
|
converter: converter{
|
||||||
Key: "request",
|
Key: "request",
|
||||||
Pattern: "not_match",
|
Pattern: "not_match",
|
||||||
@@ -230,7 +230,8 @@ func TestNoMatches(t *testing.T) {
|
|||||||
ResultKey: "new_field",
|
ResultKey: "new_field",
|
||||||
},
|
},
|
||||||
expectedFields: map[string]interface{}{
|
expectedFields: map[string]interface{}{
|
||||||
"request": "/users/42/",
|
"request": "/users/42/",
|
||||||
|
"new_field": "",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -95,7 +95,7 @@ supported_packages = {
|
|||||||
"freebsd": [ "tar" ]
|
"freebsd": [ "tar" ]
|
||||||
}
|
}
|
||||||
|
|
||||||
next_version = '1.7.0'
|
next_version = '1.8.0'
|
||||||
|
|
||||||
################
|
################
|
||||||
#### Telegraf Functions
|
#### Telegraf Functions
|
||||||
@@ -155,12 +155,12 @@ def go_get(branch, update=False, no_uncommitted=False):
|
|||||||
if local_changes() and no_uncommitted:
|
if local_changes() and no_uncommitted:
|
||||||
logging.error("There are uncommitted changes in the current directory.")
|
logging.error("There are uncommitted changes in the current directory.")
|
||||||
return False
|
return False
|
||||||
if not check_path_for("gdm"):
|
if not check_path_for("dep"):
|
||||||
logging.info("Downloading `gdm`...")
|
logging.info("Downloading `dep`...")
|
||||||
get_command = "go get github.com/sparrc/gdm"
|
get_command = "go get -u github.com/golang/dep/cmd/dep"
|
||||||
run(get_command)
|
run(get_command)
|
||||||
logging.info("Retrieving dependencies with `gdm`...")
|
logging.info("Retrieving dependencies with `dep`...")
|
||||||
run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH",
|
run("{}/bin/dep ensure -v".format(os.environ.get("GOPATH",
|
||||||
os.path.expanduser("~/go"))))
|
os.path.expanduser("~/go"))))
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user