Compare commits
70 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | b6dc9c004b |  |
|  | 4ea0c707c1 |  |
|  | 2fbcb5c6d8 |  |
|  | a4d60d9750 |  |
|  | d3925890b1 |  |
|  | 8c6c144f28 |  |
|  | db8c24cc7b |  |
|  | ecbbb8426f |  |
|  | 0752879fc8 |  |
|  | 3f2a04b25b |  |
|  | aa15e7916e |  |
|  | 7b09623fa8 |  |
|  | 2f45b8b7f5 |  |
|  | 5ffa2a30be |  |
|  | b102ae141a |  |
|  | 845abcdd77 |  |
|  | 805db7ca50 |  |
|  | bd3d0c330f |  |
|  | 0060df9877 |  |
|  | cd66e203bd |  |
|  | 240f99478a |  |
|  | 41534c73f0 |  |
|  | 6139a69fa8 |  |
|  | 3cca312e61 |  |
|  | 7e312797ec |  |
|  | fe44fa648a |  |
|  | 3249030257 |  |
|  | 8f98c20c51 |  |
|  | 1c76d5d096 |  |
|  | 35f1e28809 |  |
|  | 20999979de |  |
|  | c6706a86f1 |  |
|  | b4b1866286 |  |
|  | 28eb9b4c29 |  |
|  | 0a9accccc1 |  |
|  | c3d220175f |  |
|  | 095c90ad22 |  |
|  | a77bfecb02 |  |
|  | 72027b5b3c |  |
|  | e5503c56ad |  |
|  | ee7b225272 |  |
|  | 03d37725a9 |  |
|  | 29d1cbb673 |  |
|  | e81278b800 |  |
|  | e5482a5725 |  |
|  | 8464be691e |  |
|  | ed9937bbd8 |  |
|  | b2a4d4a018 |  |
|  | 74aaf4f75b |  |
|  | 2945f9daa9 |  |
|  | 3b496ab3d8 |  |
|  | e1f30aeff9 |  |
|  | a92e73231d |  |
|  | 8d91115623 |  |
|  | 9af8d6912a |  |
|  | fe43fb47e1 |  |
|  | ca3a80fbe1 |  |
|  | f0747e76da |  |
|  | 7416d6ea71 |  |
|  | ea7cbc781e |  |
|  | 3568fb9f93 |  |
|  | 43b7ce4f6d |  |
|  | baa38d6266 |  |
|  | 1677960caa |  |
|  | 0fab573c98 |  |
|  | 04a8e5b888 |  |
|  | 6284e2011c |  |
|  | a97c93abe4 |  |
|  | 664816383a |  |
|  | fc4cb1654c |  |
37 CHANGELOG.md
@@ -1,10 +1,45 @@
-## v0.10.5 [unreleased]
+## v0.11.0 [2016-02-15]
+
+### Release Notes
+
+### Features
+- [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies
+- [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugins. Thanks @PierreF!
+- [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide!
+- [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration.
+- [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert!
+- [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert!
+- [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug!
+- [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener.
+- [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions.
+- [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert!
+- [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998!
+- [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert!
+- [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin.
+- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.)
+- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics.
+
+### Bugfixes
+- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":"
+- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty!
+- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert!
+- [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78!
+- [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi!
+- [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert!
+- [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package
+- [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory
+- [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable.
+- [#828](https://github.com/influxdata/telegraf/issues/828): fix net_response plugin overwriting host tag.
+- [#821](https://github.com/influxdata/telegraf/issues/821): Remove postgres password from server tag. Thanks @menardorama!
+
 ## v0.10.4.1
 
 ### Release Notes
 - Bug in the build script broke deb and rpm packages.
 
 ### Bugfixes
 - [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken
 - [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken
 
 ## v0.10.4 [2016-02-24]
@@ -80,7 +80,7 @@ func (s *Simple) SampleConfig() string {
 	return "ok = true # indicate if everything is fine"
 }
 
-func (s *Simple) Gather(acc inputs.Accumulator) error {
+func (s *Simple) Gather(acc telegraf.Accumulator) error {
 	if s.Ok {
 		acc.Add("state", "pretty good", nil)
 	} else {
65 Godeps
@@ -1,53 +1,54 @@
 git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
-github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
-github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
-github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
-github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
-github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
+github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
+github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
+github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
+github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
+github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
-github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
+github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
 github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
-github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
-github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
-github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
-github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
-github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
+github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
+github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f
+github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
+github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
+github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
-github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
-github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
+github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
+github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
-github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
-github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
-github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
-github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
-github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
+github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
+github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
+github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
+github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
+github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
-github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
 github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
-github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
-github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
+github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
+github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
+github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
-github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
+github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
+github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
+github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb
 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
-github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
+github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
 github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
-golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
-golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
-golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
-gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
+golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
+golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
+golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
+gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
-gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
-gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
+github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb
+gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
+gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
Godeps_windows

@@ -1,56 +1,53 @@
 git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
-github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
-github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
+github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
+github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
-github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
-github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
-github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
+github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
+github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
+github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
-github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
+github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
 github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
-github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
-github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
+github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
 github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
-github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
-github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
-github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
+github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
+github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
+github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
-github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
-github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
+github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
+github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
-github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
-github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
-github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
-github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
-github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
+github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
+github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
+github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
+github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
-github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
+github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb
 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
 github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
-github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
-github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
+github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
+github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
+github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
-github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
+github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
+github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
+github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb
 github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
 github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
-github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
+github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
 github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
-golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
-golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
-gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
+golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
+golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
+gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
-gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
-gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
+gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
+gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
4 Makefile
@@ -22,8 +22,8 @@ build-windows:
 	./cmd/telegraf/telegraf.go
 
 build-for-docker:
-	CGO_ENABLED=0 GOOS=linux go -o telegraf -ldflags \
-		"-X main.Version=$(VERSION)" \
+	CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
+		"-s -X main.Version=$(VERSION)" \
 	./cmd/telegraf/telegraf.go
 
 # Build with race detector
26 README.md
@@ -27,12 +27,12 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/)
 ### Linux deb and rpm Packages:
 
 Latest:
-* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_amd64.deb
-* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.x86_64.rpm
+* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_amd64.deb
+* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.x86_64.rpm
 
 Latest (arm):
-* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_arm.deb
-* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.arm.rpm
+* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_arm.deb
+* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.arm.rpm
 
 0.2.x:
 * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
@@ -56,9 +56,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`.
 ### Linux tarballs:
 
 Latest:
-* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_amd64.tar.gz
-* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_i386.tar.gz
-* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_arm.tar.gz
+* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_amd64.tar.gz
+* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_i386.tar.gz
+* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_arm.tar.gz
 
 0.2.x:
 * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
@@ -70,13 +70,13 @@ Latest:
 To install the full directory structure with config file, run:
 
 ```
-sudo tar -C / -zxvf ./telegraf-0.10.4-1_linux_amd64.tar.gz
+sudo tar -C / -zxvf ./telegraf-0.11.0-1_linux_amd64.tar.gz
 ```
 
 To extract only the binary, run:
 
 ```
-tar -zxvf telegraf-0.10.4-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
+tar -zxvf telegraf-0.11.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
 ```
 
 ### Ansible Role:
@@ -93,8 +93,8 @@ brew install telegraf
 ### Windows Binaries (EXPERIMENTAL)
 
 Latest:
-* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_amd64.zip
-* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_i386.zip
+* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_amd64.zip
+* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_i386.zip
 
 ### From Source:
@@ -214,10 +214,14 @@ Currently implemented sources:
 * disk
 * diskio
 * swap
+* processes
+* kernel (/proc/stat)
 
 Telegraf can also collect metrics via the following service plugins:
 
 * statsd
+* udp_listener
+* tcp_listener
 * mqtt_consumer
 * kafka_consumer
 * nats_consumer
agent/accumulator.go

@@ -105,7 +105,6 @@ func (ac *accumulator) AddFields(
 				continue
 			}
 		}
-		result[k] = v
 
 		// Validate uint64 and float64 fields
 		switch val := v.(type) {
@@ -116,6 +115,7 @@ func (ac *accumulator) AddFields(
 			} else {
 				result[k] = int64(9223372036854775807)
 			}
+			continue
 		case float64:
 			// NaNs are invalid values in influxdb, skip measurement
 			if math.IsNaN(val) || math.IsInf(val, 0) {
@@ -127,6 +127,8 @@ func (ac *accumulator) AddFields(
 				continue
 			}
 		}
+
+		result[k] = v
 	}
 	fields = nil
 	if len(result) == 0 {
@@ -168,5 +170,8 @@ func (ac *accumulator) setDefaultTags(tags map[string]string) {
 }
 
+func (ac *accumulator) addDefaultTag(key, value string) {
+	if ac.defaultTags == nil {
+		ac.defaultTags = make(map[string]string)
+	}
+	ac.defaultTags[key] = value
+}
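The net effect of these accumulator hunks is easier to see outside the diff. A minimal, self-contained sketch of the validation logic (our own illustration, not the accumulator itself): uint64 fields above the int64 maximum are clamped, and NaN/Inf float64 fields are dropped because InfluxDB cannot store them.

```
package main

import (
	"fmt"
	"math"
)

// validateField mirrors the hunk's behavior: uint64 values above the
// int64 maximum are clamped to math.MaxInt64, and NaN/Inf float64
// values are dropped entirely. The bool reports whether to keep the field.
func validateField(v interface{}) (interface{}, bool) {
	switch val := v.(type) {
	case uint64:
		if val < uint64(math.MaxInt64) {
			return int64(val), true
		}
		return int64(math.MaxInt64), true
	case float64:
		if math.IsNaN(val) || math.IsInf(val, 0) {
			return nil, false // skip the field
		}
	}
	return v, true
}

func main() {
	fmt.Println(validateField(uint64(9223372036854775808))) // 9223372036854775807 true
	fmt.Println(validateField(math.NaN()))                  // <nil> false
}
```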
302 agent/accumulator_test.go (new file)
@@ -0,0 +1,302 @@
package agent

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/models"

	"github.com/stretchr/testify/assert"
)

func TestAdd(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", float64(101), map[string]string{})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		actual)
}

func TestAddDefaultTags(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", float64(101), map[string]string{})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
		actual)
}

func TestAddFields(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": float64(99),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=99")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=99")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
		actual)
}

// Test that all Inf fields get dropped, and not added to metrics channel
func TestAddInfFields(t *testing.T) {
	inf := math.Inf(1)
	ninf := math.Inf(-1)

	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage":  inf,
		"nusage": ninf,
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	assert.Len(t, a.metrics, 0)

	// test that non-inf fields are kept and not dropped
	fields["notinf"] = float64(100)
	a.AddFields("acctest", fields, map[string]string{})
	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest notinf=100")
}

// Test that nan fields are dropped and not added
func TestAddNaNFields(t *testing.T) {
	nan := math.NaN()

	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": nan,
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	assert.Len(t, a.metrics, 0)

	// test that non-nan fields are kept and not dropped
	fields["notnan"] = float64(100)
	a.AddFields("acctest", fields, map[string]string{})
	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest notnan=100")
}

func TestAddUint64Fields(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": uint64(99),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=99i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=99i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
		actual)
}

func TestAddUint64Overflow(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": uint64(9223372036854775808),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=9223372036854775807i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
		actual)
}

func TestAddInts(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", int(101), map[string]string{})
	a.Add("acctest", int32(101), map[string]string{"acc": "test"})
	a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,default=tag value=101i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
		actual)
}

func TestAddFloats(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", float32(101), map[string]string{"acc": "test"})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
		actual)
}

func TestAddStrings(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", "test", map[string]string{"acc": "test"})
	a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
		actual)
}

func TestAddBools(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", true, map[string]string{"acc": "test"})
	a.Add("acctest", false, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
		actual)
}
@@ -11,8 +11,9 @@ import (
 	"github.com/influxdata/telegraf/agent"
 	"github.com/influxdata/telegraf/internal/config"
 
+	"github.com/influxdata/telegraf/plugins/inputs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/all"
+	"github.com/influxdata/telegraf/plugins/outputs"
 	_ "github.com/influxdata/telegraf/plugins/outputs/all"
 )
@@ -30,11 +31,14 @@ var fSampleConfig = flag.Bool("sample-config", false,
 var fPidfile = flag.String("pidfile", "", "file to write our pid to")
 var fInputFilters = flag.String("input-filter", "",
 	"filter the inputs to enable, separator is :")
+var fInputList = flag.Bool("input-list", false,
+	"print available output plugins.")
 var fOutputFilters = flag.String("output-filter", "",
 	"filter the outputs to enable, separator is :")
+var fOutputList = flag.Bool("output-list", false,
+	"print available output plugins.")
 var fUsage = flag.String("usage", "",
 	"print usage for a plugin, ie, 'telegraf -usage mysql'")
 
 var fInputFiltersLegacy = flag.String("filter", "",
 	"filter the inputs to enable, separator is :")
 var fOutputFiltersLegacy = flag.String("outputfilter", "",
@@ -59,7 +63,9 @@ The flags are:
 	-sample-config     print out full sample configuration to stdout
 	-config-directory  directory containing additional *.conf files
 	-input-filter      filter the input plugins to enable, separator is :
+	-input-list        print all the plugins inputs
 	-output-filter     filter the output plugins to enable, separator is :
+	-output-list       print all the available outputs
 	-usage             print usage for a plugin, ie, 'telegraf -usage mysql'
 	-debug             print metrics as they're generated to stdout
 	-quiet             run in quiet mode
@@ -115,6 +121,22 @@ func main() {
 		outputFilters = strings.Split(":"+outputFilter+":", ":")
 	}
 
+	if *fOutputList {
+		fmt.Println("Available Output Plugins:")
+		for k, _ := range outputs.Outputs {
+			fmt.Printf("  %s\n", k)
+		}
+		return
+	}
+
+	if *fInputList {
+		fmt.Println("Available Input Plugins:")
+		for k, _ := range inputs.Inputs {
+			fmt.Printf("  %s\n", k)
+		}
+		return
+	}
+
 	if *fVersion {
 		v := fmt.Sprintf("Telegraf - Version %s", Version)
 		fmt.Println(v)
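The new list flags iterate telegraf's plugin registries, which are package-level maps that each plugin populates from init() via a registration function. A reduced sketch of that registry pattern (names simplified from the real inputs package):

```
package main

import "fmt"

// Minimal registry sketch: plugins register a constructor under a name,
// and -input-list style commands just range over the map's keys.
type Input interface{ Description() string }

type Creator func() Input

var Inputs = map[string]Creator{}

func Add(name string, c Creator) {
	Inputs[name] = c
}

type cpu struct{}

func (cpu) Description() string { return "Read CPU metrics" }

func main() {
	Add("cpu", func() Input { return cpu{} })

	fmt.Println("Available Input Plugins:")
	for k := range Inputs {
		fmt.Printf("  %s\n", k)
	}
}
```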
@@ -97,7 +97,7 @@ fields which begin with `time_`.
   percpu = true
   totalcpu = false
   # filter all fields beginning with 'time_'
-  drop = ["time_*"]
+  fielddrop = ["time_*"]
```

#### Input Config: tagpass and tagdrop
@@ -106,7 +106,7 @@ fields which begin with `time_`.
 [[inputs.cpu]]
   percpu = true
   totalcpu = false
-  drop = ["cpu_time"]
+  fielddrop = ["cpu_time"]
   # Don't collect CPU data for cpu6 & cpu7
   [inputs.cpu.tagdrop]
     cpu = [ "cpu6", "cpu7" ]
@@ -199,7 +199,7 @@ to avoid measurement collisions:
   percpu = true
   totalcpu = false
   name_override = "percpu_usage"
-  drop = ["cpu_time*"]
+  fielddrop = ["cpu_time*"]
```

## `[outputs.xxx]` Configuration
@@ -24,7 +24,7 @@
   ## Telegraf will cache metric_buffer_limit metrics for each output, and will
   ## flush this buffer on a successful write.
-  metric_buffer_limit = 10000
+  metric_buffer_limit = 1000
   ## Flush the buffer whenever full, regardless of flush_interval.
   flush_buffer_when_full = true
@@ -56,15 +56,17 @@
 # Configuration for influxdb server to send metrics to
 [[outputs.influxdb]]
-  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
-  # Multiple urls can be specified but it is assumed that they are part of the same
-  # cluster, this means that only ONE of the urls will be written to each interval.
+  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  ## Multiple urls can be specified as part of the same cluster,
+  ## this means that only ONE of the urls will be written to each interval.
   # urls = ["udp://localhost:8089"] # UDP endpoint example
   urls = ["http://localhost:8086"] # required
-  # The target database for metrics (telegraf will create it if not exists)
+  ## The target database for metrics (telegraf will create it if not exists).
   database = "telegraf" # required
-  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-  # note: using second precision greatly helps InfluxDB compression
+  ## Retention policy to write to.
+  retention_policy = "default"
+  ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  ## note: using "s" precision greatly improves InfluxDB compression.
   precision = "s"
 
   ## Write timeout (for the InfluxDB client), formatted as a string.
@@ -72,11 +74,18 @@
   timeout = "5s"
   # username = "telegraf"
   # password = "metricsmetricsmetricsmetrics"
-  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  ## Set the user agent for HTTP POSTs (can be useful for log differentiation)
   # user_agent = "telegraf"
-  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
   # udp_payload = 512
 
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 
 ###############################################################################
 #                                   INPUTS                                    #
@@ -110,10 +119,18 @@
   # Uncomment the following line if you do not need disk serial numbers.
   # skip_serial_number = true
 
+# Get kernel statistics from /proc/stat
+[[inputs.kernel]]
+  # no configuration
+
 # Read metrics about memory usage
 [[inputs.mem]]
   # no configuration
 
+# Get the number of processes and group them by status
+[[inputs.processes]]
+  # no configuration
+
 # Read metrics about swap memory usage
 [[inputs.swap]]
   # no configuration
@@ -24,7 +24,7 @@
   ## Telegraf will cache metric_buffer_limit metrics for each output, and will
   ## flush this buffer on a successful write.
-  metric_buffer_limit = 10000
+  metric_buffer_limit = 1000
   ## Flush the buffer whenever full, regardless of flush_interval.
   flush_buffer_when_full = true
@@ -19,7 +19,7 @@ import (
 	"github.com/influxdata/telegraf/plugins/serializers"
 
 	"github.com/influxdata/config"
-	"github.com/naoina/toml/ast"
+	"github.com/influxdata/toml/ast"
 )
 
 // Config specifies the URL/user/password for the database that telegraf
@@ -159,7 +159,7 @@ var header = `# Telegraf Configuration
   ## Telegraf will cache metric_buffer_limit metrics for each output, and will
   ## flush this buffer on a successful write.
-  metric_buffer_limit = 10000
+  metric_buffer_limit = 1000
   ## Flush the buffer whenever full, regardless of flush_interval.
   flush_buffer_when_full = true
@@ -10,7 +10,7 @@ import (
 const (
 	// Default number of metrics kept between flushes.
-	DEFAULT_METRIC_BUFFER_LIMIT = 10000
+	DEFAULT_METRIC_BUFFER_LIMIT = 1000
 
 	// Limit how many full metric buffers are kept due to failed writes.
 	FULL_METRIC_BUFFERS_LIMIT = 100
@@ -82,9 +82,11 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
 			}
 		}
 	} else {
-		log.Printf("WARNING: overwriting cached metrics, you may want to " +
-			"increase the metric_buffer_limit setting in your [agent] " +
-			"config if you do not wish to overwrite metrics.\n")
+		if ro.overwriteI == 0 {
+			log.Printf("WARNING: overwriting cached metrics, you may want to " +
+				"increase the metric_buffer_limit setting in your [agent] " +
+				"config if you do not wish to overwrite metrics.\n")
+		}
 		if ro.overwriteI == len(ro.metrics) {
 			ro.overwriteI = 0
 		}
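What this hunk changes: the overwrite warning used to fire for every metric once the buffer was full; now it fires only when the overwrite index is at zero. A simplified, runnable sketch of the ring (field names follow the hunk; the int payload is a stand-in for telegraf.Metric, and add models only the buffer-full path):

```
package main

import "log"

// Sketch of the overwrite ring: overwriteI walks a fixed-size buffer,
// and the warning fires only when overwriteI is 0 at entry, which in
// this simplified model means once, when overwriting begins.
type ring struct {
	metrics    []int
	overwriteI int
}

func (r *ring) add(m int) {
	if r.overwriteI == 0 {
		log.Printf("WARNING: overwriting cached metrics, consider raising metric_buffer_limit")
	}
	if r.overwriteI == len(r.metrics) {
		r.overwriteI = 0
	}
	r.metrics[r.overwriteI] = m
	r.overwriteI++
}

func main() {
	r := &ring{metrics: make([]int, 3)}
	for i := 0; i < 7; i++ {
		r.add(i) // warns only on the first call, not on each of the 7
	}
}
```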
@@ -30,8 +30,6 @@ The example plugin gathers metrics about example things
 ### Example Output:
 
-Give an example `-test` output here
-
 ```
 $ ./telegraf -config telegraf.conf -input-filter example -test
 measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
@@ -29,6 +29,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
 	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
+	_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
 	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
 	_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ping"
@@ -47,8 +48,10 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
 	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/system"
+	_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/trig"
 	_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
+	_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
@@ -58,7 +58,10 @@ var tr = &http.Transport{
 	ResponseHeaderTimeout: time.Duration(3 * time.Second),
 }
 
-var client = &http.Client{Transport: tr}
+var client = &http.Client{
+	Transport: tr,
+	Timeout:   time.Duration(4 * time.Second),
+}
 
 func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
 	resp, err := client.Get(addr.String())
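Worth noting: ResponseHeaderTimeout only bounds the wait for response headers, so the Client.Timeout added here is what protects against stalled dials and slow response bodies. A minimal sketch of the pattern (the URL is a placeholder):

```
package main

import (
	"net/http"
	"time"
)

// ResponseHeaderTimeout bounds only the wait for response headers;
// Client.Timeout bounds the entire exchange (dial, headers, and body),
// which is what keeps a stalled endpoint from hanging Gather forever.
var tr = &http.Transport{
	ResponseHeaderTimeout: 3 * time.Second,
}

var client = &http.Client{
	Transport: tr,
	Timeout:   4 * time.Second,
}

func main() {
	resp, err := client.Get("http://localhost:8080/server-status?auto")
	if err == nil {
		resp.Body.Close()
	}
}
```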
@@ -10,6 +10,7 @@ import (
 	"reflect"
 	"strings"
 	"sync"
+	"time"
 )
 
 // Schema:
@@ -112,9 +113,18 @@ func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
 
 }
 
+var tr = &http.Transport{
+	ResponseHeaderTimeout: time.Duration(3 * time.Second),
+}
+
+var client = &http.Client{
+	Transport: tr,
+	Timeout:   time.Duration(4 * time.Second),
+}
+
 func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error {
 
-	response, error := http.Get(host)
+	response, error := client.Get(host)
 	if error != nil {
 		return error
 	}
@@ -9,6 +9,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
@@ -30,6 +31,8 @@ var sampleConfig = `
   servers = ["localhost"]
 `
 
+var defaultTimeout = 5 * time.Second
+
 func (r *Disque) SampleConfig() string {
 	return sampleConfig
 }
@@ -107,7 +110,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
 		addr.Host = addr.Host + ":" + defaultPort
 	}
 
-	c, err := net.Dial("tcp", addr.Host)
+	c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
 	if err != nil {
 		return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err)
 	}
@@ -132,6 +135,9 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
 		g.c = c
 	}
 
+	// Extend connection
+	g.c.SetDeadline(time.Now().Add(defaultTimeout))
+
 	g.c.Write([]byte("info\r\n"))
 
 	r := bufio.NewReader(g.c)
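For the pattern introduced here: net.DialTimeout bounds connection establishment only, while SetDeadline is an absolute timestamp that must be pushed forward before each round trip on a reused connection. A minimal sketch (host and port are placeholders):

```
package main

import (
	"bufio"
	"net"
	"time"
)

func main() {
	timeout := 5 * time.Second

	// DialTimeout bounds connection establishment only.
	c, err := net.DialTimeout("tcp", "localhost:7711", timeout)
	if err != nil {
		return
	}
	defer c.Close()

	// A deadline is an absolute point in time, so it must be extended
	// before each round trip on a long-lived connection.
	c.SetDeadline(time.Now().Add(timeout))
	c.Write([]byte("info\r\n"))
	bufio.NewReader(c).ReadString('\n')
}
```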
@@ -1,11 +1,14 @@
 package dns_query
 
 import (
-	"github.com/influxdata/telegraf/testutil"
-	"github.com/miekg/dns"
-	"github.com/stretchr/testify/assert"
 	"testing"
 	"time"
+
+	"github.com/influxdata/telegraf/testutil"
+
+	"github.com/miekg/dns"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var servers = []string{"8.8.8.8"}
@@ -21,7 +24,7 @@ func TestGathering(t *testing.T) {
 	err := dnsConfig.Gather(&acc)
 	assert.NoError(t, err)
 	metric, ok := acc.Get("dns_query")
-	assert.True(t, ok)
+	require.True(t, ok)
 	queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
 	assert.NotEqual(t, 0, queryTime)
@@ -38,7 +41,7 @@ func TestGatheringMxRecord(t *testing.T) {
 	err := dnsConfig.Gather(&acc)
 	assert.NoError(t, err)
 	metric, ok := acc.Get("dns_query")
-	assert.True(t, ok)
+	require.True(t, ok)
 	queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
 	assert.NotEqual(t, 0, queryTime)
@@ -61,7 +64,7 @@ func TestGatheringRootDomain(t *testing.T) {
 	err := dnsConfig.Gather(&acc)
 	assert.NoError(t, err)
 	metric, ok := acc.Get("dns_query")
-	assert.True(t, ok)
+	require.True(t, ok)
 	queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
 	fields["query_time_ms"] = queryTime
@@ -84,7 +87,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
 	err := dnsConfig.Gather(&acc)
 	assert.NoError(t, err)
 	metric, ok := acc.Get("dns_query")
-	assert.True(t, ok)
+	require.True(t, ok)
 	queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
 	fields["query_time_ms"] = queryTime
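The assert-to-require switch matters because testify's assert records the failure and keeps executing, so the following metric.Fields access could run against a missing metric; require fails the test and returns immediately. A tiny illustration (hypothetical test, not from the plugin):

```
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestLookup(t *testing.T) {
	var fields map[string]interface{}
	ok := false // pretend the metric was not found

	assert.True(t, ok)  // marks the test failed but keeps running
	require.True(t, ok) // marks the test failed and returns immediately

	_ = fields["query_time_ms"] // never reached once require fails
}
```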
@@ -74,6 +74,7 @@ on the availability of per-cpu stats on your system.
     - usage_in_usermode
     - usage_system
     - usage_total
+    - usage_percent
 - docker_net
     - rx_dropped
     - rx_bytes
@@ -94,18 +95,50 @@ on the availability of per-cpu stats on your system.
     - io_serviced_recursive_sync
     - io_serviced_recursive_total
     - io_serviced_recursive_write
+- docker_
+    - n_used_file_descriptors
+    - n_cpus
+    - n_containers
+    - n_images
+    - n_goroutines
+    - n_listener_events
+    - memory_total
+    - pool_blocksize
+- docker_data
+    - available
+    - total
+    - used
+- docker_metadata
+    - available
+    - total
+    - used
 
 ### Tags:
 
-- All stats have the following tags:
+- docker (memory_total)
+    - unit=bytes
+- docker (pool_blocksize)
+    - unit=bytes
+- docker_data
+    - unit=bytes
+- docker_metadata
+    - unit=bytes
+
+- docker_cpu specific:
     - cont_id (container ID)
     - cont_image (container image)
    - cont_name (container name)
-- docker_cpu specific:
     - cpu
 - docker_net specific:
+    - cont_id (container ID)
+    - cont_image (container image)
+    - cont_name (container name)
     - network
 - docker_blkio specific:
+    - cont_id (container ID)
+    - cont_image (container image)
+    - cont_name (container name)
     - device
 
### Example Output:
@@ -113,6 +146,16 @@ on the availability of per-cpu stats on your system.
 ```
 % ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
 * Plugin: docker, Collection 1
+> docker n_cpus=8i 1456926671065383978
+> docker n_used_file_descriptors=15i 1456926671065383978
+> docker n_containers=7i 1456926671065383978
+> docker n_images=152i 1456926671065383978
+> docker n_goroutines=36i 1456926671065383978
+> docker n_listener_events=0i 1456926671065383978
+> docker,unit=bytes memory_total=18935443456i 1456926671065383978
+> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
+> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
+> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
 > docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
 cont_image=spotify/kafka,cont_name=kafka \
 active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
@@ -1,8 +1,11 @@
 package system
 
 import (
+	"encoding/json"
 	"fmt"
 	"log"
+	"regexp"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -17,9 +20,29 @@ type Docker struct {
 	Endpoint       string
 	ContainerNames []string
 
-	client *docker.Client
+	client DockerClient
 }
 
+type DockerClient interface {
+	// Docker Client wrapper
+	// Useful for test
+	Info() (*docker.Env, error)
+	ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error)
+	Stats(opts docker.StatsOptions) error
+}
+
+const (
+	KB = 1000
+	MB = 1000 * KB
+	GB = 1000 * MB
+	TB = 1000 * GB
+	PB = 1000 * TB
+)
+
+var (
+	sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
+)
+
 var sampleConfig = `
 	## Docker Endpoint
 	##   To use TCP, set endpoint = "tcp://[ip]:[port]"
@@ -58,12 +81,20 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
 		d.client = c
 	}
 
+	// Get daemon info
+	err := d.gatherInfo(acc)
+	if err != nil {
+		fmt.Println(err.Error())
+	}
+
+	// List containers
 	opts := docker.ListContainersOptions{}
 	containers, err := d.client.ListContainers(opts)
 	if err != nil {
 		return err
 	}
 
+	// Get container data
 	var wg sync.WaitGroup
 	wg.Add(len(containers))
 	for _, container := range containers {
@@ -81,6 +112,76 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
+func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
+	// Init vars
+	var driverStatus [][]string
+	dataFields := make(map[string]interface{})
+	metadataFields := make(map[string]interface{})
+	now := time.Now()
+	// Get info from docker daemon
+	info, err := d.client.Info()
+	if err != nil {
+		return err
+	}
+
+	fields := map[string]interface{}{
+		"n_cpus":                  info.GetInt64("NCPU"),
+		"n_used_file_descriptors": info.GetInt64("NFd"),
+		"n_containers":            info.GetInt64("Containers"),
+		"n_images":                info.GetInt64("Images"),
+		"n_goroutines":            info.GetInt64("NGoroutines"),
+		"n_listener_events":       info.GetInt64("NEventsListener"),
+	}
+	// Add metrics
+	acc.AddFields("docker",
+		fields,
+		nil,
+		now)
+	acc.AddFields("docker",
+		map[string]interface{}{"memory_total": info.GetInt64("MemTotal")},
+		map[string]string{"unit": "bytes"},
+		now)
+	// Get storage metrics
+	driverStatusRaw := []byte(info.Get("DriverStatus"))
+	json.Unmarshal(driverStatusRaw, &driverStatus)
+	for _, rawData := range driverStatus {
+		// Try to convert string to int (bytes)
+		value, err := parseSize(rawData[1])
+		if err != nil {
+			continue
+		}
+		name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1))
+		if name == "pool_blocksize" {
+			// pool blocksize
+			acc.AddFields("docker",
+				map[string]interface{}{"pool_blocksize": value},
+				map[string]string{"unit": "bytes"},
+				now)
+		} else if strings.HasPrefix(name, "data_space_") {
+			// data space
+			field_name := strings.TrimPrefix(name, "data_space_")
+			dataFields[field_name] = value
+		} else if strings.HasPrefix(name, "metadata_space_") {
+			// metadata space
+			field_name := strings.TrimPrefix(name, "metadata_space_")
+			metadataFields[field_name] = value
+		}
+	}
+	if len(dataFields) > 0 {
+		acc.AddFields("docker_data",
+			dataFields,
+			map[string]string{"unit": "bytes"},
+			now)
+	}
+	if len(metadataFields) > 0 {
+		acc.AddFields("docker_metadata",
+			metadataFields,
+			map[string]string{"unit": "bytes"},
+			now)
+	}
+	return nil
+}
+
 func (d *Docker) gatherContainer(
 	container docker.APIContainers,
 	acc telegraf.Accumulator,
@@ -334,6 +435,27 @@ func sliceContains(in string, sl []string) bool {
 	return false
 }
 
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string) (int64, error) {
+	matches := sizeRegex.FindStringSubmatch(sizeStr)
+	if len(matches) != 4 {
+		return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+	}
+
+	size, err := strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return -1, err
+	}
+
+	uMap := map[string]int64{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+	unitPrefix := strings.ToLower(matches[3])
+	if mul, ok := uMap[unitPrefix]; ok {
+		size *= float64(mul)
+	}
+
+	return int64(size), nil
+}
+
 func init() {
 	inputs.Add("docker", func() telegraf.Input {
 		return &Docker{}
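To make parseSize concrete: it converts docker's human-readable DriverStatus sizes into bytes using decimal (SI) multipliers. The function, regex, and constants below are quoted from the hunk; the main wrapper is ours:

```
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

const (
	KB = 1000
	MB = 1000 * KB
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB
)

var sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)

// parseSize turns docker's human-readable DriverStatus sizes
// ("65.54 kB", "107.4 GB") into byte counts.
func parseSize(sizeStr string) (int64, error) {
	matches := sizeRegex.FindStringSubmatch(sizeStr)
	if len(matches) != 4 {
		return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
	}
	size, err := strconv.ParseFloat(matches[1], 64)
	if err != nil {
		return -1, err
	}
	uMap := map[string]int64{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
	if mul, ok := uMap[strings.ToLower(matches[3])]; ok {
		size *= float64(mul)
	}
	return int64(size), nil
}

func main() {
	for _, s := range []string{"65.54 kB", "107.4 GB", "2.147 GB"} {
		v, _ := parseSize(s)
		fmt.Println(s, "->", v) // 65540, 107400000000, 2147000000
	}
}
```

Note how "65.54 kB" yields 65540, which matches the `pool_blocksize=65540i` line in the example output above.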
@@ -1,12 +1,14 @@
package system

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"

	"github.com/fsouza/go-dockerclient"
	"github.com/stretchr/testify/require"
)

func TestDockerGatherContainerStats(t *testing.T) {
@@ -194,3 +196,186 @@ func testStats() *docker.Stats {

	return stats
}

type FakeDockerClient struct {
}

func (d FakeDockerClient) Info() (*docker.Env, error) {
	env := docker.Env{"Containers=108", "OomKillDisable=false", "SystemTime=2016-02-24T00:55:09.15073105-05:00", "NEventsListener=0", "ID=5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD", "Debug=false", "LoggingDriver=json-file", "KernelVersion=4.3.0-1-amd64", "IndexServerAddress=https://index.docker.io/v1/", "MemTotal=3840757760", "Images=199", "CpuCfsQuota=true", "Name=absol", "SwapLimit=false", "IPv4Forwarding=true", "ExecutionDriver=native-0.2", "InitSha1=23a51f3c916d2b5a3bbb31caf301fd2d14edd518", "ExperimentalBuild=false", "CpuCfsPeriod=true", "RegistryConfig={\"IndexConfigs\":{\"docker.io\":{\"Mirrors\":null,\"Name\":\"docker.io\",\"Official\":true,\"Secure\":true}},\"InsecureRegistryCIDRs\":[\"127.0.0.0/8\"],\"Mirrors\":null}", "OperatingSystem=Linux Mint LMDE (containerized)", "BridgeNfIptables=true", "HttpsProxy=", "Labels=null", "MemoryLimit=false", "DriverStatus=[[\"Pool Name\",\"docker-8:1-1182287-pool\"],[\"Pool Blocksize\",\"65.54 kB\"],[\"Backing Filesystem\",\"extfs\"],[\"Data file\",\"/dev/loop0\"],[\"Metadata file\",\"/dev/loop1\"],[\"Data Space Used\",\"17.3 GB\"],[\"Data Space Total\",\"107.4 GB\"],[\"Data Space Available\",\"36.53 GB\"],[\"Metadata Space Used\",\"20.97 MB\"],[\"Metadata Space Total\",\"2.147 GB\"],[\"Metadata Space Available\",\"2.127 GB\"],[\"Udev Sync Supported\",\"true\"],[\"Deferred Removal Enabled\",\"false\"],[\"Data loop file\",\"/var/lib/docker/devicemapper/devicemapper/data\"],[\"Metadata loop file\",\"/var/lib/docker/devicemapper/devicemapper/metadata\"],[\"Library Version\",\"1.02.115 (2016-01-25)\"]]", "NFd=19", "HttpProxy=", "Driver=devicemapper", "NGoroutines=39", "InitPath=/usr/lib/docker.io/dockerinit", "NCPU=4", "DockerRootDir=/var/lib/docker", "NoProxy=", "BridgeNfIp6tables=true"}
	return &env, nil
}

func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) {
	container1 := docker.APIContainers{
		ID:      "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
		Image:   "quay.io/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941930,
		Status:  "Up 4 hours",
		Ports: []docker.APIPort{
			docker.APIPort{
				PrivatePort: 7001,
				PublicPort:  0,
				Type:        "tcp",
			},
			docker.APIPort{
				PrivatePort: 4001,
				PublicPort:  0,
				Type:        "tcp",
			},
			docker.APIPort{
				PrivatePort: 2380,
				PublicPort:  0,
				Type:        "tcp",
			},
			docker.APIPort{
				PrivatePort: 2379,
				PublicPort:  2379,
				Type:        "tcp",
				IP:          "0.0.0.0",
			},
		},
		SizeRw:     0,
		SizeRootFs: 0,
		Names:      []string{"/etcd"},
	}
	container2 := docker.APIContainers{
		ID:      "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
		Image:   "quay.io/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941933,
		Status:  "Up 4 hours",
		Ports: []docker.APIPort{
			docker.APIPort{
				PrivatePort: 7002,
				PublicPort:  0,
				Type:        "tcp",
			},
			docker.APIPort{
				PrivatePort: 4002,
				PublicPort:  0,
				Type:        "tcp",
			},
			docker.APIPort{
				PrivatePort: 2381,
				PublicPort:  0,
				Type:        "tcp",
			},
			docker.APIPort{
				PrivatePort: 2382,
				PublicPort:  2382,
				Type:        "tcp",
				IP:          "0.0.0.0",
			},
		},
		SizeRw:     0,
		SizeRootFs: 0,
		Names:      []string{"/etcd2"},
	}

	containers := []docker.APIContainers{container1, container2}
	return containers, nil

	//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
}

func (d FakeDockerClient) Stats(opts docker.StatsOptions) error {
	jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
	var stat docker.Stats
	json.Unmarshal([]byte(jsonStat), &stat)
	opts.Stats <- &stat
	return nil
}

func TestDockerGatherInfo(t *testing.T) {
	var acc testutil.Accumulator
	client := FakeDockerClient{}
	d := Docker{client: client}

	err := d.Gather(&acc)

	require.NoError(t, err)

	acc.AssertContainsTaggedFields(t,
		"docker",
		map[string]interface{}{
			"n_listener_events":       int64(0),
			"n_cpus":                  int64(4),
			"n_used_file_descriptors": int64(19),
			"n_containers":            int64(108),
			"n_images":                int64(199),
			"n_goroutines":            int64(39),
		},
		map[string]string{},
	)

	acc.AssertContainsTaggedFields(t,
		"docker_data",
		map[string]interface{}{
			"used":      int64(17300000000),
			"total":     int64(107400000000),
			"available": int64(36530000000),
		},
		map[string]string{
			"unit": "bytes",
		},
	)
	acc.AssertContainsTaggedFields(t,
		"docker_cpu",
		map[string]interface{}{
			"usage_total": uint64(1231652),
		},
		map[string]string{
			"cont_id":    "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
			"cont_name":  "etcd2",
			"cont_image": "quay.io/coreos/etcd:v2.2.2",
			"cpu":        "cpu3",
		},
	)
	acc.AssertContainsTaggedFields(t,
		"docker_mem",
		map[string]interface{}{
			"total_pgpgout":             uint64(0),
			"usage_percent":             float64(0),
			"rss":                       uint64(0),
			"total_writeback":           uint64(0),
			"active_anon":               uint64(0),
			"total_pgmafault":           uint64(0),
			"total_rss":                 uint64(0),
			"total_unevictable":         uint64(0),
			"active_file":               uint64(0),
			"total_mapped_file":         uint64(0),
			"pgpgin":                    uint64(0),
			"total_active_file":         uint64(0),
			"total_active_anon":         uint64(0),
			"total_cache":               uint64(0),
			"inactive_anon":             uint64(0),
			"pgmajfault":                uint64(0),
			"total_inactive_anon":       uint64(0),
			"total_rss_huge":            uint64(0),
			"rss_huge":                  uint64(0),
			"hierarchical_memory_limit": uint64(0),
			"pgpgout":                   uint64(0),
			"unevictable":               uint64(0),
			"total_inactive_file":       uint64(0),
			"writeback":                 uint64(0),
			"total_pgfault":             uint64(0),
			"total_pgpgin":              uint64(0),
			"cache":                     uint64(0),
			"mapped_file":               uint64(0),
			"inactive_file":             uint64(0),
			"max_usage":                 uint64(0),
			"fail_count":                uint64(0),
			"pgfault":                   uint64(0),
			"usage":                     uint64(0),
			"limit":                     uint64(18935443456),
		},
		map[string]string{
			"cont_id":    "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
			"cont_name":  "etcd2",
			"cont_image": "quay.io/coreos/etcd:v2.2.2",
		},
	)

	//fmt.Print(info)
}
@@ -34,6 +34,8 @@ var sampleConfig = `
  domains = []
`

var defaultTimeout = time.Second * time.Duration(5)

func (d *Dovecot) SampleConfig() string { return sampleConfig }

const defaultPort = "24242"
@@ -74,12 +76,15 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[s
		return fmt.Errorf("Error: %s on url %s\n", err, addr)
	}

	c, err := net.Dial("tcp", addr)
	c, err := net.DialTimeout("tcp", addr, defaultTimeout)
	if err != nil {
		return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err)
	}
	defer c.Close()

	// Extend connection
	c.SetDeadline(time.Now().Add(defaultTimeout))

	c.Write([]byte("EXPORT\tdomain\n\n"))
	var buf bytes.Buffer
	io.Copy(&buf, c)
@@ -81,7 +81,12 @@ type Elasticsearch struct {

// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
	return &Elasticsearch{client: http.DefaultClient}
	tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
	client := &http.Client{
		Transport: tr,
		Timeout:   time.Duration(4 * time.Second),
	}
	return &Elasticsearch{client: client}
}

// SampleConfig returns sample configuration for this plugin.

@@ -34,6 +34,9 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
	return res, nil
}

func (t *transportMock) CancelRequest(_ *http.Request) {
}

func TestElasticsearch(t *testing.T) {
	es := NewElasticsearch()
	es.Servers = []string{"http://example.com:9200"}
@@ -73,14 +73,17 @@ func (gh *GithubWebhooks) Stop() {

// Handles the / route
func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	eventType := r.Header["X-Github-Event"][0]
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	e, err := NewEvent(data, eventType)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	gh.Lock()
	gh.events = append(gh.events, e)
@@ -129,8 +129,11 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {

func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
	if g.client == nil {

		client := &http.Client{}
		tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
		client := &http.Client{
			Transport: tr,
			Timeout:   time.Duration(4 * time.Second),
		}
		g.client = client
	}

@@ -11,6 +11,7 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -23,6 +24,15 @@ type HttpJson struct {
	Parameters map[string]string
	Headers    map[string]string

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	client HTTPClient
}

@@ -36,6 +46,9 @@ type HTTPClient interface {
	// http.Response: HTTP response object
	// error        : Any error that may have occurred
	MakeRequest(req *http.Request) (*http.Response, error)

	SetHTTPClient(client *http.Client)
	HTTPClient() *http.Client
}

type RealHTTPClient struct {
@@ -46,6 +59,14 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
	return c.client.Do(req)
}

func (c RealHTTPClient) SetHTTPClient(client *http.Client) {
	c.client = client
}

func (c RealHTTPClient) HTTPClient() *http.Client {
	return c.client
}

var sampleConfig = `
  ## NOTE This plugin only reads numerical measurements, strings and booleans
  ## will be ignored.
@@ -77,6 +98,13 @@ var sampleConfig = `
  # [inputs.httpjson.headers]
  #   X-Auth-Token = "my-xauth-token"
  #   apiVersion = "v1"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

func (h *HttpJson) SampleConfig() string {
@@ -91,6 +119,23 @@ func (h *HttpJson) Description() string {
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup

	if h.client.HTTPClient() == nil {
		tlsCfg, err := internal.GetTLSConfig(
			h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
		if err != nil {
			return err
		}
		tr := &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
			TLSClientConfig:       tlsCfg,
		}
		client := &http.Client{
			Transport: tr,
			Timeout:   time.Duration(4 * time.Second),
		}
		h.client.SetHTTPClient(client)
	}

	errorChannel := make(chan error, len(h.Servers))

	for _, server := range h.Servers {
@@ -244,6 +289,6 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {

func init() {
	inputs.Add("httpjson", func() telegraf.Input {
		return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
		return &HttpJson{client: RealHTTPClient{}}
	})
}
@@ -147,6 +147,13 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
	return &resp, nil
}

func (c mockHTTPClient) SetHTTPClient(_ *http.Client) {
}

func (c mockHTTPClient) HTTPClient() *http.Client {
	return nil
}

// Generates a pointer to an HttpJson object that uses a mock HTTP client.
// Parameters:
//     response : Body of the response that the mock HTTP client should return
@@ -7,6 +7,7 @@ import (
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
@@ -70,6 +71,15 @@ type point struct {
	Values map[string]interface{} `json:"values"`
}

var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{
	Transport: tr,
	Timeout:   time.Duration(4 * time.Second),
}

// Gathers data from a particular URL
// Parameters:
//     acc : The telegraf Accumulator to use
@@ -81,7 +91,7 @@ func (i *InfluxDB) gatherURL(
	acc telegraf.Accumulator,
	url string,
) error {
	resp, err := http.Get(url)
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
@@ -7,6 +7,7 @@ import (
	"io/ioutil"
	"net/http"
	"net/url"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
@@ -160,6 +161,11 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {

func init() {
	inputs.Add("jolokia", func() telegraf.Input {
		return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}}
		tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
		client := &http.Client{
			Transport: tr,
			Timeout:   time.Duration(4 * time.Second),
		}
		return &Jolokia{jClient: &JolokiaClientImpl{client: client}}
	})
}
@@ -14,10 +14,11 @@ import (
)

type Kafka struct {
	ConsumerGroup  string
	Topics         []string
	ZookeeperPeers []string
	Consumer       *consumergroup.ConsumerGroup
	ConsumerGroup   string
	Topics          []string
	ZookeeperPeers  []string
	ZookeeperChroot string
	Consumer        *consumergroup.ConsumerGroup

	// Legacy metric buffer support
	MetricBuffer int
@@ -48,6 +49,8 @@ var sampleConfig = `
  topics = ["telegraf"]
  ## an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  ## Zookeeper Chroot
  zookeeper_chroot = "/"
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Offset (must be either "oldest" or "newest")
@@ -80,6 +83,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
	k.acc = acc

	config := consumergroup.NewConfig()
	config.Zookeeper.Chroot = k.ZookeeperChroot
	switch strings.ToLower(k.Offset) {
	case "oldest", "":
		config.Offsets.Initial = sarama.OffsetOldest
@@ -10,6 +10,7 @@ import (
	"net/url"
	"regexp"
	"sync"
	"time"
)

const (
@@ -120,7 +121,10 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) {
}

func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
	client := &http.Client{Transport: api.Transport}
	client := &http.Client{
		Transport: api.Transport,
		Timeout:   time.Duration(4 * time.Second),
	}

	var b bytes.Buffer
	req, err := http.NewRequest("GET", api.url.String(), &b)
@@ -10,6 +10,7 @@ import (
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
@@ -261,6 +262,15 @@ func (m *Mesos) removeGroup(j *map[string]interface{}) {
	}
}

var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{
	Transport: tr,
	Timeout:   time.Duration(4 * time.Second),
}

// This should not belong to the object
func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error {
	var jsonOut map[string]interface{}
@@ -282,7 +292,7 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error {

	ts := strconv.Itoa(m.Timeout) + "ms"

	resp, err := http.Get("http://" + a + "/metrics/snapshot?timeout=" + ts)
	resp, err := client.Get("http://" + a + "/metrics/snapshot?timeout=" + ts)

	if err != nil {
		return err
@@ -26,6 +26,9 @@ type MQTTConsumer struct {
	// Legacy metric buffer support
	MetricBuffer int

	PersistentSession bool
	ClientID          string `toml:"client_id"`

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
@@ -57,6 +60,13 @@ var sampleConfig = `
    "sensors/#",
  ]

  # if true, messages that can't be delivered while the subscriber is offline
  # will be delivered when it comes back (such as on service restart).
  # NOTE: if true, client_id MUST be set
  persistent_session = false
  # If empty, a random client ID will be generated.
  client_id = ""

  ## username and password to connect MQTT server.
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
@@ -91,6 +101,11 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
	m.Lock()
	defer m.Unlock()

	if m.PersistentSession && m.ClientID == "" {
		return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" +
			" = true, you MUST also set client_id")
	}

	m.acc = acc
	if m.QoS > 2 || m.QoS < 0 {
		return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS)
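Putting the new options together, a persistent consumer configuration might look like the sketch below; the broker address, topic, and client ID are placeholders rather than values from this changeset:

```toml
[[inputs.mqtt_consumer]]
  servers = ["localhost:1883"]
  topics = ["sensors/#"]
  ## persistent_session requires a stable, explicit client_id; the Start()
  ## check above rejects persistent_session = true with an empty client_id.
  persistent_session = true
  client_id = "telegraf-host01"
```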
@@ -166,7 +181,11 @@ func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
	opts := mqtt.NewClientOptions()

	opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
	if m.ClientID == "" {
		opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
	} else {
		opts.SetClientID(m.ClientID)
	}

	tlsCfg, err := internal.GetTLSConfig(
		m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify)
@@ -181,7 +200,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
	}

	user := m.Username
	if user == "" {
	if user != "" {
		opts.SetUsername(user)
	}
	password := m.Password
@@ -199,6 +218,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
	}
	opts.SetAutoReconnect(true)
	opts.SetKeepAlive(time.Second * 60)
	opts.SetCleanSession(!m.PersistentSession)
	return opts, nil
}

@@ -7,6 +7,8 @@ import (
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"

	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

@@ -28,6 +30,52 @@ func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) {
	return n, in
}

// Test that default client has random ID
func TestRandomClientID(t *testing.T) {
	m1 := &MQTTConsumer{
		Servers: []string{"localhost:1883"}}
	opts, err := m1.createOpts()
	assert.NoError(t, err)

	m2 := &MQTTConsumer{
		Servers: []string{"localhost:1883"}}
	opts2, err2 := m2.createOpts()
	assert.NoError(t, err2)

	assert.NotEqual(t, opts.ClientID, opts2.ClientID)
}

// Test that a configured client ID is used as-is
func TestClientID(t *testing.T) {
	m1 := &MQTTConsumer{
		Servers:  []string{"localhost:1883"},
		ClientID: "telegraf-test",
	}
	opts, err := m1.createOpts()
	assert.NoError(t, err)

	m2 := &MQTTConsumer{
		Servers:  []string{"localhost:1883"},
		ClientID: "telegraf-test",
	}
	opts2, err2 := m2.createOpts()
	assert.NoError(t, err2)

	assert.Equal(t, "telegraf-test", opts2.ClientID)
	assert.Equal(t, "telegraf-test", opts.ClientID)
}

// Test that Start() fails if client ID is not set but persistent is
func TestPersistentClientIDFail(t *testing.T) {
	m1 := &MQTTConsumer{
		Servers:           []string{"localhost:1883"},
		PersistentSession: true,
	}
	acc := testutil.Accumulator{}
	err := m1.Start(&acc)
	assert.Error(t, err)
}

// Test that the parser parses NATS messages into metrics
func TestRunParser(t *testing.T) {
	n, in := newTestMQTTConsumer()
@@ -2,8 +2,10 @@ package mysql

import (
	"database/sql"
	"net/url"
	"strconv"
	"strings"
	"time"

	_ "github.com/go-sql-driver/mysql"
	"github.com/influxdata/telegraf"
@@ -26,6 +28,8 @@ var sampleConfig = `
  servers = ["tcp(127.0.0.1:3306)/"]
`

var defaultTimeout = time.Second * time.Duration(5)

func (m *Mysql) SampleConfig() string {
	return sampleConfig
}
@@ -122,6 +126,10 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
		serv = ""
	}

	serv, err := dsnAddTimeout(serv)
	if err != nil {
		return err
	}
	db, err := sql.Open("mysql", serv)
	if err != nil {
		return err
@@ -207,6 +215,27 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
	return nil
}

func dsnAddTimeout(dsn string) (string, error) {

	// DSN "?timeout=5s" is not valid, but "/?timeout=5s" is valid ("" and "/"
	// are the same DSN)
	if dsn == "" {
		dsn = "/"
	}
	u, err := url.Parse(dsn)
	if err != nil {
		return "", err
	}
	v := u.Query()

	// Only override timeout if not already defined
	if _, ok := v["timeout"]; ok == false {
		v.Add("timeout", defaultTimeout.String())
		u.RawQuery = v.Encode()
	}
	return u.String(), nil
}

func init() {
	inputs.Add("mysql", func() telegraf.Input {
		return &Mysql{}
	})
}
@@ -84,3 +84,34 @@ func TestMysqlParseDSN(t *testing.T) {
		}
	}
}

func TestMysqlDNSAddTimeout(t *testing.T) {
	tests := []struct {
		input  string
		output string
	}{
		{
			"",
			"/?timeout=5s",
		},
		{
			"tcp(192.168.1.1:3306)/",
			"tcp(192.168.1.1:3306)/?timeout=5s",
		},
		{
			"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
			"root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=false",
		},
		{
			"root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s",
			"root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s",
		},
	}

	for _, test := range tests {
		output, _ := dsnAddTimeout(test.input)
		if output != test.output {
			t.Errorf("Expected %s, got %s\n", test.output, output)
		}
	}
}
@@ -52,7 +52,7 @@ It can also check response text.
### Tags:

- All measurements have the following tags:
    - host
    - server
    - port
    - protocol

@@ -60,7 +60,7 @@ It can also check response text.

```
$ ./telegraf -config telegraf.conf -input-filter net_response -test
net_response,host=127.0.0.1,port=22,protocol=tcp response_time=0.18070360500000002,string_found=true 1454785464182527094
net_response,host=127.0.0.1,port=2222,protocol=tcp response_time=1.090124776,string_found=false 1454784433658942325
net_response,server=192.168.2.2,port=22,protocol=tcp response_time=0.18070360500000002,string_found=true 1454785464182527094
net_response,server=192.168.2.2,port=2222,protocol=tcp response_time=1.090124776,string_found=false 1454784433658942325

```

@@ -169,7 +169,7 @@ func (c *NetResponse) Gather(acc telegraf.Accumulator) error {
		return errors.New("Bad port")
	}
	// Prepare data
	tags := map[string]string{"host": host, "port": port}
	tags := map[string]string{"server": host, "port": port}
	var fields map[string]interface{}
	// Gather data
	if c.Protocol == "tcp" {

@@ -69,7 +69,7 @@ func TestTCPOK1(t *testing.T) {
			"string_found":  true,
			"response_time": 1.0,
		},
		map[string]string{"host": "127.0.0.1",
		map[string]string{"server": "127.0.0.1",
			"port":     "2004",
			"protocol": "tcp",
		},
@@ -109,7 +109,7 @@ func TestTCPOK2(t *testing.T) {
			"string_found":  false,
			"response_time": 1.0,
		},
		map[string]string{"host": "127.0.0.1",
		map[string]string{"server": "127.0.0.1",
			"port":     "2004",
			"protocol": "tcp",
		},
@@ -164,7 +164,7 @@ func TestUDPOK1(t *testing.T) {
			"string_found":  true,
			"response_time": 1.0,
		},
		map[string]string{"host": "127.0.0.1",
		map[string]string{"server": "127.0.0.1",
			"port":     "2004",
			"protocol": "udp",
		},
plugins/inputs/nginx/README.md (new file, 46 lines)
@@ -0,0 +1,46 @@
# Telegraf Plugin: Nginx

### Configuration:

```
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
  ## An array of Nginx stub_status URI to gather stats.
  urls = ["http://localhost/server_status"]
```

### Measurements & Fields:

- Measurement
    - accepts
    - active
    - handled
    - reading
    - requests
    - waiting
    - writing

### Tags:

- All measurements have the following tags:
    - port
    - server

### Example Output:

Using this configuration:
```
[[inputs.nginx]]
  ## An array of Nginx stub_status URI to gather stats.
  urls = ["http://localhost/status"]
```

When run with:
```
./telegraf -config telegraf.conf -input-filter nginx -test
```

It produces:
```
* Plugin: nginx, Collection 1
> nginx,port=80,server=localhost accepts=605i,active=2i,handled=605i,reading=0i,requests=12132i,waiting=1i,writing=1i 1456690994701784331
```
@@ -58,7 +58,10 @@ var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{Transport: tr}
var client = &http.Client{
	Transport: tr,
	Timeout:   time.Duration(4 * time.Second),
}

func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
	resp, err := client.Get(addr.String())

@@ -84,7 +84,10 @@ var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{Transport: tr}
var client = &http.Client{
	Transport: tr,
	Timeout:   time.Duration(4 * time.Second),
}

func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error {
	u, err := buildURL(e)
plugins/inputs/ntpq/README.md (new file, 60 lines)
@@ -0,0 +1,60 @@
# ntpq Input Plugin

Get standard NTP query metrics; requires the ntpq executable.

Below is the documentation of the various headers returned from the NTP query
command when running `ntpq -p`.

- remote – The remote peer or server being synced to. “LOCAL” is this local host
(included in case there are no remote peers or servers available);
- refid – Where or what the remote peer or server is itself synchronised to;
- st (stratum) – The remote peer or server Stratum;
- t (type) – Type (u: unicast or manycast client, b: broadcast or multicast client,
l: local reference clock, s: symmetric peer, A: manycast server,
B: broadcast server, M: multicast server, see “Automatic Server Discovery“);
- when – When last polled (seconds ago, “h” hours ago, or “d” days ago);
- poll – Polling frequency: rfc5905 suggests this ranges in NTPv4 from 4 (16s)
to 17 (36h) (log2 seconds); however, observation suggests the actual displayed
value is seconds for a much smaller range of 64 (2^6) to 1024 (2^10) seconds;
- reach – An 8-bit shift register recording the last eight polls (bit set =
successful, bit reset = fail), displayed in octal (see the worked example after
this list);
- delay – Round trip communication delay to the remote peer or server (milliseconds);
- offset – Mean offset (phase) in the times reported between this local host and
the remote peer or server (RMS, milliseconds);
- jitter – Mean deviation (jitter) in the time reported for that remote peer or
server (RMS of difference of multiple time samples, milliseconds);
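As a worked example of the octal `reach` field: a bit cycles in from the right on every poll, so `377` (octal) is binary `11111111`, meaning the last eight polls all succeeded, while the `37` seen in the test fixtures further below is `00011111`, i.e. five consecutive successes since the register last cleared. A small standalone Go sketch (not part of the plugin, which stores the raw printed value such as `377` without octal decoding) makes this concrete:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, reach := range []string{"377", "37"} {
		// ntpq prints reach in octal; base 8 here mirrors that.
		v, err := strconv.ParseInt(reach, 8, 64)
		if err != nil {
			panic(err)
		}
		// Count the set bits: each one is a successful recent poll.
		n := 0
		for x := v; x != 0; x >>= 1 {
			n += int(x & 1)
		}
		fmt.Printf("reach=%s -> %08b (%d of last 8 polls succeeded)\n", reach, v, n)
	}
}
```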
### Configuration:

```toml
# Get standard NTP query metrics, requires ntpq executable
[[inputs.ntpq]]
  ## If false, set the -n ntpq flag. Can reduce metric gather times.
  dns_lookup = true
```

### Measurements & Fields:

- ntpq
    - delay (float, milliseconds)
    - jitter (float, milliseconds)
    - offset (float, milliseconds)
    - poll (int, seconds)
    - reach (int)
    - when (int, seconds)

### Tags:

- All measurements have the following tags:
    - refid
    - remote
    - type
    - stratum

### Example Output:

```
$ telegraf -config ~/ws/telegraf.conf -input-filter ntpq -test
* Plugin: ntpq, Collection 1
> ntpq,refid=.GPSs.,remote=*time.apple.com,stratum=1,type=u delay=91.797,jitter=3.735,offset=12.841,poll=64i,reach=377i,when=35i 1457960478909556134
```
plugins/inputs/ntpq/ntpq.go (new file, 202 lines)
@@ -0,0 +1,202 @@
// +build !windows

package ntpq

import (
	"bufio"
	"bytes"
	"log"
	"os/exec"
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Mapping of ntpq header names to tag keys
var tagHeaders map[string]string = map[string]string{
	"remote": "remote",
	"refid":  "refid",
	"st":     "stratum",
	"t":      "type",
}

// Mapping of the ntpq tag key to the index in the command output
var tagI map[string]int = map[string]int{
	"remote":  -1,
	"refid":   -1,
	"stratum": -1,
	"type":    -1,
}

// Mapping of float metrics to their index in the command output
var floatI map[string]int = map[string]int{
	"delay":  -1,
	"offset": -1,
	"jitter": -1,
}

// Mapping of int metrics to their index in the command output
var intI map[string]int = map[string]int{
	"when":  -1,
	"poll":  -1,
	"reach": -1,
}

type NTPQ struct {
	runQ func() ([]byte, error)

	DNSLookup bool `toml:"dns_lookup"`
}

func (n *NTPQ) Description() string {
	return "Get standard NTP query metrics, requires ntpq executable."
}

func (n *NTPQ) SampleConfig() string {
	return `
  ## If false, set the -n ntpq flag. Can reduce metric gather time.
  dns_lookup = true
`
}

func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
	out, err := n.runQ()
	if err != nil {
		return err
	}

	lineCounter := 0
	scanner := bufio.NewScanner(bytes.NewReader(out))
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) < 2 {
			continue
		}

		// If lineCounter == 0, then this is the header line
		if lineCounter == 0 {
			for i, field := range fields {
				// Check if field is a tag:
				if tagKey, ok := tagHeaders[field]; ok {
					tagI[tagKey] = i
					continue
				}

				// check if field is a float metric:
				if _, ok := floatI[field]; ok {
					floatI[field] = i
					continue
				}

				// check if field is an int metric:
				if _, ok := intI[field]; ok {
					intI[field] = i
					continue
				}
			}
		} else {
			tags := make(map[string]string)
			mFields := make(map[string]interface{})

			// Get tags from output
			for key, index := range tagI {
				if index == -1 {
					continue
				}
				tags[key] = fields[index]
			}

			// Get integer metrics from output
			for key, index := range intI {
				if index == -1 {
					continue
				}

				if key == "when" {
					when := fields[index]
					switch {
					case strings.HasSuffix(when, "h"):
						m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
						if err != nil {
							log.Printf("ERROR ntpq: parsing int: %s", fields[index])
							continue
						}
						// seconds in an hour
						mFields[key] = int64(m) * 3600
						continue
					case strings.HasSuffix(when, "d"):
						m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
						if err != nil {
							log.Printf("ERROR ntpq: parsing int: %s", fields[index])
							continue
						}
						// seconds in a day
						mFields[key] = int64(m) * 86400
						continue
					case strings.HasSuffix(when, "m"):
						m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
						if err != nil {
							log.Printf("ERROR ntpq: parsing int: %s", fields[index])
							continue
						}
						// seconds in a minute
						mFields[key] = int64(m) * 60
						continue
					}
				}

				m, err := strconv.Atoi(fields[index])
				if err != nil {
					log.Printf("ERROR ntpq: parsing int: %s", fields[index])
					continue
				}
				mFields[key] = int64(m)
			}

			// get float metrics from output
			for key, index := range floatI {
				if index == -1 {
					continue
				}

				m, err := strconv.ParseFloat(fields[index], 64)
				if err != nil {
					log.Printf("ERROR ntpq: parsing float: %s", fields[index])
					continue
				}
				mFields[key] = m
			}

			acc.AddFields("ntpq", mFields, tags)
		}

		lineCounter++
	}
	return nil
}

func (n *NTPQ) runq() ([]byte, error) {
	bin, err := exec.LookPath("ntpq")
	if err != nil {
		return nil, err
	}

	var cmd *exec.Cmd
	if n.DNSLookup {
		cmd = exec.Command(bin, "-p")
	} else {
		cmd = exec.Command(bin, "-p", "-n")
	}

	return cmd.Output()
}

func init() {
	inputs.Add("ntpq", func() telegraf.Input {
		n := &NTPQ{}
		n.runQ = n.runq
		return n
	})
}
plugins/inputs/ntpq/ntpq_test.go (new file, 422 lines)
@@ -0,0 +1,422 @@
// +build !windows

package ntpq

import (
	"fmt"
	"testing"

	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
)

func TestSingleNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(singleNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(101),
		"poll":   int64(256),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote":  "*uschi5-ntp-002.",
		"refid":   "10.177.80.46",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestBadIntNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(badIntParseNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(101),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote":  "*uschi5-ntp-002.",
		"refid":   "10.177.80.46",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestBadFloatNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(badFloatParseNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(2),
		"poll":   int64(256),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote":  "*uschi5-ntp-002.",
		"refid":   "10.177.80.46",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestDaysNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(whenDaysNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(172800),
		"poll":   int64(256),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote":  "*uschi5-ntp-002.",
		"refid":   "10.177.80.46",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestHoursNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(whenHoursNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(7200),
		"poll":   int64(256),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote":  "*uschi5-ntp-002.",
		"refid":   "10.177.80.46",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestMinutesNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(whenMinutesNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(120),
		"poll":   int64(256),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote":  "*uschi5-ntp-002.",
		"refid":   "10.177.80.46",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestBadWhenNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(whenBadNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"poll":   int64(256),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote":  "*uschi5-ntp-002.",
		"refid":   "10.177.80.46",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestMultiNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(multiNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"delay":  float64(54.033),
		"jitter": float64(449514),
		"offset": float64(243.426),
		"poll":   int64(1024),
		"reach":  int64(377),
		"when":   int64(740),
	}
	tags := map[string]string{
		"refid":   "10.177.80.37",
		"remote":  "83.137.98.96",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)

	fields = map[string]interface{}{
		"delay":  float64(60.785),
		"jitter": float64(449539),
		"offset": float64(232.597),
		"poll":   int64(1024),
		"reach":  int64(377),
		"when":   int64(739),
	}
	tags = map[string]string{
		"refid":   "10.177.80.37",
		"remote":  "81.7.16.52",
		"stratum": "2",
		"type":    "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestBadHeaderNTPQ(t *testing.T) {
	resetVars()
	tt := tester{
		ret: []byte(badHeaderNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(101),
		"poll":   int64(256),
		"reach":  int64(37),
		"delay":  float64(51.016),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote": "*uschi5-ntp-002.",
		"refid":  "10.177.80.46",
		"type":   "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestMissingDelayColumnNTPQ(t *testing.T) {
	resetVars()
	tt := tester{
		ret: []byte(missingDelayNTPQ),
		err: nil,
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, n.Gather(&acc))

	fields := map[string]interface{}{
		"when":   int64(101),
		"poll":   int64(256),
		"reach":  int64(37),
		"offset": float64(233.010),
		"jitter": float64(17.462),
	}
	tags := map[string]string{
		"remote": "*uschi5-ntp-002.",
		"refid":  "10.177.80.46",
		"type":   "u",
	}
	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}

func TestFailedNTPQ(t *testing.T) {
	tt := tester{
		ret: []byte(singleNTPQ),
		err: fmt.Errorf("Test failure"),
	}
	n := &NTPQ{
		runQ: tt.runqTest,
	}

	acc := testutil.Accumulator{}
	assert.Error(t, n.Gather(&acc))
}

type tester struct {
	ret []byte
	err error
}

func (t *tester) runqTest() ([]byte, error) {
	return t.ret, t.err
}

func resetVars() {
	// Mapping of ntpq header names to tag keys
	tagHeaders = map[string]string{
		"remote": "remote",
		"refid":  "refid",
		"st":     "stratum",
		"t":      "type",
	}

	// Mapping of the ntpq tag key to the index in the command output
	tagI = map[string]int{
		"remote":  -1,
		"refid":   -1,
		"stratum": -1,
		"type":    -1,
	}

	// Mapping of float metrics to their index in the command output
	floatI = map[string]int{
		"delay":  -1,
		"offset": -1,
		"jitter": -1,
	}

	// Mapping of int metrics to their index in the command output
	intI = map[string]int{
		"when":  -1,
		"poll":  -1,
		"reach": -1,
	}
}

var singleNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
`

var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
`

var missingDelayNTPQ = `remote refid foobar t when poll reach offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 233.010 17.462
`

var whenDaysNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2d 256 37 51.016 233.010 17.462
`

var whenHoursNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2h 256 37 51.016 233.010 17.462
`

var whenMinutesNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2m 256 37 51.016 233.010 17.462
`

var whenBadNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2q 256 37 51.016 233.010 17.462
`

var badFloatParseNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2 256 37 51.016 foobar 17.462
`

var badIntParseNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 foobar 37 51.016 233.010 17.462
`

var multiNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
83.137.98.96 10.177.80.37 2 u 740 1024 377 54.033 243.426 449514.
81.7.16.52 10.177.80.37 2 u 739 1024 377 60.785 232.597 449539.
131.188.3.221 10.177.80.37 2 u 783 1024 377 111.820 261.921 449528.
5.9.29.107 10.177.80.37 2 u 703 1024 377 205.704 160.406 449602.
91.189.94.4 10.177.80.37 2 u 673 1024 377 143.047 274.726 449445.
`
plugins/inputs/ntpq/ntpq_windows.go (new file, 3 lines)
@@ -0,0 +1,3 @@
// +build windows

package ntpq
plugins/inputs/phpfpm/child.go (new file, 331 lines)
@@ -0,0 +1,331 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package phpfpm
|
||||
|
||||
// This file implements FastCGI from the perspective of a child process.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cgi"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// request holds the state for an in-progress request. As soon as it's complete,
|
||||
// it's converted to an http.Request.
|
||||
type request struct {
|
||||
pw *io.PipeWriter
|
||||
reqId uint16
|
||||
params map[string]string
|
||||
buf [1024]byte
|
||||
rawParams []byte
|
||||
keepConn bool
|
||||
}
|
||||
|
||||
func newRequest(reqId uint16, flags uint8) *request {
|
||||
r := &request{
|
||||
reqId: reqId,
|
||||
params: map[string]string{},
|
||||
keepConn: flags&flagKeepConn != 0,
|
||||
}
|
||||
r.rawParams = r.buf[:0]
|
||||
return r
|
||||
}
|
||||
|
||||
// parseParams reads an encoded []byte into Params.
|
||||
func (r *request) parseParams() {
|
||||
text := r.rawParams
|
||||
r.rawParams = nil
|
||||
for len(text) > 0 {
|
||||
keyLen, n := readSize(text)
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
text = text[n:]
|
||||
valLen, n := readSize(text)
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
text = text[n:]
|
||||
if int(keyLen)+int(valLen) > len(text) {
|
||||
return
|
||||
}
|
||||
key := readString(text, keyLen)
|
||||
text = text[keyLen:]
|
||||
val := readString(text, valLen)
|
||||
text = text[valLen:]
|
||||
r.params[key] = val
|
||||
}
|
||||
}
|
||||
|
||||
// response implements http.ResponseWriter.
|
||||
type response struct {
|
||||
req *request
|
||||
header http.Header
|
||||
w *bufWriter
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func newResponse(c *child, req *request) *response {
|
||||
return &response{
|
||||
req: req,
|
||||
header: http.Header{},
|
||||
w: newWriter(c.conn, typeStdout, req.reqId),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *response) Header() http.Header {
|
||||
return r.header
|
||||
}
|
||||
|
||||
func (r *response) Write(data []byte) (int, error) {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
return r.w.Write(data)
|
||||
}
|
||||
|
||||
func (r *response) WriteHeader(code int) {
|
||||
if r.wroteHeader {
|
||||
return
|
||||
}
|
||||
r.wroteHeader = true
|
||||
if code == http.StatusNotModified {
|
||||
// Must not have body.
|
||||
r.header.Del("Content-Type")
|
||||
r.header.Del("Content-Length")
|
||||
r.header.Del("Transfer-Encoding")
|
||||
} else if r.header.Get("Content-Type") == "" {
|
||||
r.header.Set("Content-Type", "text/html; charset=utf-8")
|
||||
}
|
||||
|
||||
if r.header.Get("Date") == "" {
|
||||
r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
|
||||
r.header.Write(r.w)
|
||||
r.w.WriteString("\r\n")
|
||||
}
|
||||
|
||||
func (r *response) Flush() {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
r.w.Flush()
|
||||
}
|
||||
|
||||
func (r *response) Close() error {
|
||||
r.Flush()
|
||||
return r.w.Close()
|
||||
}
|
||||
|
||||
type child struct {
|
||||
conn *conn
|
||||
handler http.Handler
|
||||
|
||||
mu sync.Mutex // protects requests:
|
||||
requests map[uint16]*request // keyed by request ID
|
||||
}
|
||||
|
||||
func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child {
|
||||
return &child{
|
||||
conn: newConn(rwc),
|
||||
handler: handler,
|
||||
requests: make(map[uint16]*request),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *child) serve() {
|
||||
defer c.conn.Close()
|
||||
defer c.cleanUp()
|
||||
var rec record
|
||||
for {
|
||||
if err := rec.read(c.conn.rwc); err != nil {
|
||||
return
|
||||
}
|
||||
if err := c.handleRecord(&rec); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var errCloseConn = errors.New("fcgi: connection should be closed")
|
||||
|
||||
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
||||
|
||||
// ErrRequestAborted is returned by Read when a handler attempts to read the
|
||||
// body of a request that has been aborted by the web server.
var ErrRequestAborted = errors.New("fcgi: request aborted by web server")

// ErrConnClosed is returned by Read when a handler attempts to read the body of
// a request after the connection to the web server has been closed.
var ErrConnClosed = errors.New("fcgi: connection to web server closed")

func (c *child) handleRecord(rec *record) error {
	c.mu.Lock()
	req, ok := c.requests[rec.h.Id]
	c.mu.Unlock()
	if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
		// The spec says to ignore unknown request IDs.
		return nil
	}

	switch rec.h.Type {
	case typeBeginRequest:
		if req != nil {
			// The server is trying to begin a request with the same ID
			// as an in-progress request. This is an error.
			return errors.New("fcgi: received ID that is already in-flight")
		}

		var br beginRequest
		if err := br.read(rec.content()); err != nil {
			return err
		}
		if br.role != roleResponder {
			c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
			return nil
		}
		req = newRequest(rec.h.Id, br.flags)
		c.mu.Lock()
		c.requests[rec.h.Id] = req
		c.mu.Unlock()
		return nil
	case typeParams:
		// NOTE(eds): Technically a key-value pair can straddle the boundary
		// between two packets. We buffer until we've received all parameters.
		if len(rec.content()) > 0 {
			req.rawParams = append(req.rawParams, rec.content()...)
			return nil
		}
		req.parseParams()
		return nil
	case typeStdin:
		content := rec.content()
		if req.pw == nil {
			var body io.ReadCloser
			if len(content) > 0 {
				// body could be an io.LimitReader, but it shouldn't matter
				// as long as both sides are behaving.
				body, req.pw = io.Pipe()
			} else {
				body = emptyBody
			}
			go c.serveRequest(req, body)
		}
		if len(content) > 0 {
			// TODO(eds): This blocks until the handler reads from the pipe.
			// If the handler takes a long time, it might be a problem.
			req.pw.Write(content)
		} else if req.pw != nil {
			req.pw.Close()
		}
		return nil
	case typeGetValues:
		values := map[string]string{"FCGI_MPXS_CONNS": "1"}
		c.conn.writePairs(typeGetValuesResult, 0, values)
		return nil
	case typeData:
		// If the filter role is implemented, read the data stream here.
		return nil
	case typeAbortRequest:
		c.mu.Lock()
		delete(c.requests, rec.h.Id)
		c.mu.Unlock()
		c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
		if req.pw != nil {
			req.pw.CloseWithError(ErrRequestAborted)
		}
		if !req.keepConn {
			// connection will close upon return
			return errCloseConn
		}
		return nil
	default:
		b := make([]byte, 8)
		b[0] = byte(rec.h.Type)
		c.conn.writeRecord(typeUnknownType, 0, b)
		return nil
	}
}

func (c *child) serveRequest(req *request, body io.ReadCloser) {
	r := newResponse(c, req)
	httpReq, err := cgi.RequestFromMap(req.params)
	if err != nil {
		// there was an error reading the request
		r.WriteHeader(http.StatusInternalServerError)
		c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
	} else {
		httpReq.Body = body
		c.handler.ServeHTTP(r, httpReq)
	}
	r.Close()
	c.mu.Lock()
	delete(c.requests, req.reqId)
	c.mu.Unlock()
	c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)

	// Consume the entire body, so the host isn't still writing to
	// us when we close the socket below in the !keepConn case,
	// otherwise we'd send a RST. (golang.org/issue/4183)
	// TODO(bradfitz): also bound this copy in time. Or send
	// some sort of abort request to the host, so the host
	// can properly cut off the client sending all the data.
	// For now just bound it a little and
	io.CopyN(ioutil.Discard, body, 100<<20)
	body.Close()

	if !req.keepConn {
		c.conn.Close()
	}
}

func (c *child) cleanUp() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, req := range c.requests {
		if req.pw != nil {
			// race with call to Close in c.serveRequest doesn't matter because
			// Pipe(Reader|Writer).Close are idempotent
			req.pw.CloseWithError(ErrConnClosed)
		}
	}
}

// Serve accepts incoming FastCGI connections on the listener l, creating a new
// goroutine for each. The goroutine reads requests and then calls handler
// to reply to them.
// If l is nil, Serve accepts connections from os.Stdin.
// If handler is nil, http.DefaultServeMux is used.
func Serve(l net.Listener, handler http.Handler) error {
	if l == nil {
		var err error
		l, err = net.FileListener(os.Stdin)
		if err != nil {
			return err
		}
		defer l.Close()
	}
	if handler == nil {
		handler = http.DefaultServeMux
	}
	for {
		rw, err := l.Accept()
		if err != nil {
			return err
		}
		c := newChild(rw, handler)
		go c.serve()
	}
}
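// Illustrative only, not part of this diff: serving an http.Handler over
// FastCGI with the Serve function above. The listener address and the
// handler are hypothetical.
//
//	l, err := net.Listen("tcp", "127.0.0.1:9000")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintln(w, "hello over FastCGI")
//	})))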
@@ -17,11 +17,6 @@ import (
	"errors"
	"io"
	"sync"

	"net"
	"strconv"

	"strings"
)

// recType is a record type, as defined by
@@ -277,74 +272,3 @@ func (w *streamWriter) Close() error {
	// send empty record to close the stream
	return w.c.writeRecord(w.recType, w.reqId, nil)
}

func NewClient(h string, args ...interface{}) (fcgi *conn, err error) {
	var con net.Conn
	if len(args) != 1 {
		err = errors.New("fcgi: not enough params")
		return
	}
	switch args[0].(type) {
	case int:
		addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
		con, err = net.Dial("tcp", addr)
	case string:
		laddr := net.UnixAddr{Name: args[0].(string), Net: h}
		con, err = net.DialUnix(h, nil, &laddr)
	default:
		err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
	}
	fcgi = &conn{
		rwc: con,
	}
	return
}

func (client *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) {
	defer client.rwc.Close()
	var reqId uint16 = 1

	err = client.writeBeginRequest(reqId, uint16(roleResponder), 0)
	if err != nil {
		return
	}

	err = client.writePairs(typeParams, reqId, env)
	if err != nil {
		return
	}

	if len(requestData) > 0 {
		if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
			return
		}
	}

	rec := &record{}
	var err1 error

	// receive until EOF or FCGI_END_REQUEST
READ_LOOP:
	for {
		err1 = rec.read(client.rwc)
		if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
			if err1 != io.EOF {
				err = err1
			}
			break
		}

		switch {
		case rec.h.Type == typeStdout:
			retout = append(retout, rec.content()...)
		case rec.h.Type == typeStderr:
			reterr = append(reterr, rec.content()...)
		case rec.h.Type == typeEndRequest:
			fallthrough
		default:
			break READ_LOOP
		}
	}

	return
}
86 plugins/inputs/phpfpm/fcgi_client.go Normal file
@@ -0,0 +1,86 @@
package phpfpm

import (
	"errors"
	"io"
	"net"
	"strconv"
	"strings"
)

// Create an fcgi client
func newFcgiClient(h string, args ...interface{}) (*conn, error) {
	var con net.Conn
	if len(args) != 1 {
		return nil, errors.New("fcgi: not enough params")
	}

	var err error
	switch args[0].(type) {
	case int:
		addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
		con, err = net.Dial("tcp", addr)
	case string:
		laddr := net.UnixAddr{Name: args[0].(string), Net: h}
		con, err = net.DialUnix(h, nil, &laddr)
	default:
		err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
	}
	fcgi := &conn{
		rwc: con,
	}

	return fcgi, err
}

func (client *conn) Request(
	env map[string]string,
	requestData string,
) (retout []byte, reterr []byte, err error) {
	defer client.rwc.Close()
	var reqId uint16 = 1

	err = client.writeBeginRequest(reqId, uint16(roleResponder), 0)
	if err != nil {
		return
	}

	err = client.writePairs(typeParams, reqId, env)
	if err != nil {
		return
	}

	if len(requestData) > 0 {
		if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
			return
		}
	}

	rec := &record{}
	var err1 error

	// receive until EOF or FCGI_END_REQUEST
READ_LOOP:
	for {
		err1 = rec.read(client.rwc)
		if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
			if err1 != io.EOF {
				err = err1
			}
			break
		}

		switch {
		case rec.h.Type == typeStdout:
			retout = append(retout, rec.content()...)
		case rec.h.Type == typeStderr:
			reterr = append(reterr, rec.content()...)
		case rec.h.Type == typeEndRequest:
			fallthrough
		default:
			break READ_LOOP
		}
	}

	return
}
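// A minimal usage sketch for the client above (hypothetical address, port
// and socket path; the env keys shown are typical FastCGI parameters for a
// php-fpm status request, not taken verbatim from this diff):
//
//	fcgi, err := newFcgiClient("127.0.0.1", 9000) // int arg => TCP port
//	// or: fcgi, err := newFcgiClient("unix", "/var/run/php5-fpm.sock")
//	if err != nil {
//		return err
//	}
//	env := map[string]string{
//		"SCRIPT_NAME":     "/status",
//		"SCRIPT_FILENAME": "status",
//		"REQUEST_METHOD":  "GET",
//	}
//	stdout, stderr, err := fcgi.Request(env, "")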
280 plugins/inputs/phpfpm/fcgi_test.go Normal file
@@ -0,0 +1,280 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package phpfpm

import (
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"net/http"
	"testing"
)

var sizeTests = []struct {
	size  uint32
	bytes []byte
}{
	{0, []byte{0x00}},
	{127, []byte{0x7F}},
	{128, []byte{0x80, 0x00, 0x00, 0x80}},
	{1000, []byte{0x80, 0x00, 0x03, 0xE8}},
	{33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}},
}

func TestSize(t *testing.T) {
	b := make([]byte, 4)
	for i, test := range sizeTests {
		n := encodeSize(b, test.size)
		if !bytes.Equal(b[:n], test.bytes) {
			t.Errorf("%d expected %x, encoded %x", i, test.bytes, b)
		}
		size, n := readSize(test.bytes)
		if size != test.size {
			t.Errorf("%d expected %d, read %d", i, test.size, size)
		}
		if len(test.bytes) != n {
			t.Errorf("%d did not consume all the bytes", i)
		}
	}
}
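// The vectors above exercise FastCGI's variable-length size encoding for
// name-value pairs: a size below 128 fits in a single byte, while larger
// sizes occupy four big-endian bytes with the top bit of the first byte
// set (e.g. 1000 = 0x000003E8 is encoded as 0x80 0x00 0x03 0xE8).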
var streamTests = []struct {
	desc    string
	recType recType
	reqId   uint16
	content []byte
	raw     []byte
}{
	{"single record", typeStdout, 1, nil,
		[]byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0},
	},
	// this data will have to be split into two records
	{"two records", typeStdin, 300, make([]byte, 66000),
		bytes.Join([][]byte{
			// header for the first record
			{1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0},
			make([]byte, 65536),
			// header for the second
			{1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0},
			make([]byte, 472),
			// header for the empty record
			{1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0},
		},
			nil),
	},
}
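// The raw bytes above follow the 8-byte FastCGI record header layout
// {version, type, reqIdB1, reqIdB0, lengthB1, lengthB0, padding, reserved}:
// for the 66000-byte stdin stream, the first record carries 0xFFFF = 65535
// content bytes plus 1 padding byte, the second carries 0x01D1 = 465 content
// bytes plus 7 padding bytes (65535 + 465 = 66000), and an empty record
// closes the stream.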
type nilCloser struct {
	io.ReadWriter
}

func (c *nilCloser) Close() error { return nil }

func TestStreams(t *testing.T) {
	var rec record
outer:
	for _, test := range streamTests {
		buf := bytes.NewBuffer(test.raw)
		var content []byte
		for buf.Len() > 0 {
			if err := rec.read(buf); err != nil {
				t.Errorf("%s: error reading record: %v", test.desc, err)
				continue outer
			}
			content = append(content, rec.content()...)
		}
		if rec.h.Type != test.recType {
			t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
			continue
		}
		if rec.h.Id != test.reqId {
			t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
			continue
		}
		if !bytes.Equal(content, test.content) {
			t.Errorf("%s: read wrong content", test.desc)
			continue
		}
		buf.Reset()
		c := newConn(&nilCloser{buf})
		w := newWriter(c, test.recType, test.reqId)
		if _, err := w.Write(test.content); err != nil {
			t.Errorf("%s: error writing record: %v", test.desc, err)
			continue
		}
		if err := w.Close(); err != nil {
			t.Errorf("%s: error closing stream: %v", test.desc, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.raw) {
			t.Errorf("%s: wrote wrong content", test.desc)
		}
	}
}

type writeOnlyConn struct {
	buf []byte
}

func (c *writeOnlyConn) Write(p []byte) (int, error) {
	c.buf = append(c.buf, p...)
	return len(p), nil
}

func (c *writeOnlyConn) Read(p []byte) (int, error) {
	return 0, errors.New("conn is write-only")
}

func (c *writeOnlyConn) Close() error {
	return nil
}

func TestGetValues(t *testing.T) {
	var rec record
	rec.h.Type = typeGetValues

	wc := new(writeOnlyConn)
	c := newChild(wc, nil)
	err := c.handleRecord(&rec)
	if err != nil {
		t.Fatalf("handleRecord: %v", err)
	}

	const want = "\x01\n\x00\x00\x00\x12\x06\x00" +
		"\x0f\x01FCGI_MPXS_CONNS1" +
		"\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00"
	if got := string(wc.buf); got != want {
		t.Errorf(" got: %q\nwant: %q\n", got, want)
	}
}

func nameValuePair11(nameData, valueData string) []byte {
	return bytes.Join(
		[][]byte{
			{byte(len(nameData)), byte(len(valueData))},
			[]byte(nameData),
			[]byte(valueData),
		},
		nil,
	)
}

func makeRecord(
	recordType recType,
	requestId uint16,
	contentData []byte,
) []byte {
	requestIdB1 := byte(requestId >> 8)
	requestIdB0 := byte(requestId)

	contentLength := len(contentData)
	contentLengthB1 := byte(contentLength >> 8)
	contentLengthB0 := byte(contentLength)
	return bytes.Join([][]byte{
		{1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1,
			contentLengthB0, 0, 0},
		contentData,
	},
		nil)
}

// a series of FastCGI records that start a request and begin sending the
// request body
var streamBeginTypeStdin = bytes.Join([][]byte{
	// set up request 1
	makeRecord(typeBeginRequest, 1,
		[]byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}),
	// add required parameters to request 1
	makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")),
	makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")),
	makeRecord(typeParams, 1, nil),
	// begin sending body of request 1
	makeRecord(typeStdin, 1, []byte("0123456789abcdef")),
},
	nil)

var cleanUpTests = []struct {
	input []byte
	err   error
}{
	// confirm that child.handleRecord closes req.pw after aborting req
	{
		bytes.Join([][]byte{
			streamBeginTypeStdin,
			makeRecord(typeAbortRequest, 1, nil),
		},
			nil),
		ErrRequestAborted,
	},
	// confirm that child.serve closes all pipes after error reading record
	{
		bytes.Join([][]byte{
			streamBeginTypeStdin,
			nil,
		},
			nil),
		ErrConnClosed,
	},
}

type nopWriteCloser struct {
	io.ReadWriter
}

func (nopWriteCloser) Close() error {
	return nil
}

// Test that child.serve closes the bodies of aborted requests and closes the
// bodies of all requests before returning. Causes deadlock if either condition
// isn't met. See issue 6934.
func TestChildServeCleansUp(t *testing.T) {
	for _, tt := range cleanUpTests {
		input := make([]byte, len(tt.input))
		copy(input, tt.input)
		rc := nopWriteCloser{bytes.NewBuffer(input)}
		done := make(chan bool)
		c := newChild(rc, http.HandlerFunc(func(
			w http.ResponseWriter,
			r *http.Request,
		) {
			// block on reading body of request
			_, err := io.Copy(ioutil.Discard, r.Body)
			if err != tt.err {
				t.Errorf("Expected %#v, got %#v", tt.err, err)
			}
			// not reached if body of request isn't closed
			done <- true
		}))
		go c.serve()
		// wait for body of request to be closed or all goroutines to block
		<-done
	}
}

type rwNopCloser struct {
	io.Reader
	io.Writer
}

func (rwNopCloser) Close() error {
	return nil
}

// Verifies it doesn't crash. Issue 11824.
func TestMalformedParams(t *testing.T) {
	input := []byte{
		// beginRequest, requestId=1, contentLength=8, role=1, keepConn=1
		1, 1, 0, 1, 0, 8, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
		// params, requestId=1, contentLength=10, k1Len=50, v1Len=50 (malformed, wrong length)
		1, 4, 0, 1, 0, 10, 0, 0, 50, 50, 3, 4, 5, 6, 7, 8, 9, 10,
		// end of params
		1, 4, 0, 1, 0, 0, 0, 0,
	}
	rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard}
	c := newChild(rw, http.DefaultServeMux)
	c.serve()
}
@@ -112,6 +112,7 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
		statusPath string
	)

	var err error
	if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
		u, err := url.Parse(addr)
		if err != nil {
@@ -120,7 +121,7 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
		socketAddr := strings.Split(u.Host, ":")
		fcgiIp := socketAddr[0]
		fcgiPort, _ := strconv.Atoi(socketAddr[1])
		fcgi, _ = NewClient(fcgiIp, fcgiPort)
		fcgi, err = newFcgiClient(fcgiIp, fcgiPort)
	} else {
		socketAddr := strings.Split(addr, ":")
		if len(socketAddr) >= 2 {
@@ -134,8 +135,13 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
		if _, err := os.Stat(socketPath); os.IsNotExist(err) {
			return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err)
		}
		fcgi, _ = NewClient("unix", socketPath)
		fcgi, err = newFcgiClient("unix", socketPath)
	}

	if err != nil {
		return err
	}

	return g.gatherFcgi(fcgi, statusPath, acc)
}

@@ -4,20 +4,22 @@ import (
	"bytes"
	"database/sql"
	"fmt"
	"regexp"
	"sort"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"

	_ "github.com/lib/pq"
	"github.com/lib/pq"
)

type Postgresql struct {
	Address        string
	Databases      []string
	OrderedColumns []string
	AllColumns     []string
	Address          string
	Databases        []string
	OrderedColumns   []string
	AllColumns       []string
	sanitizedAddress string
}

var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}
@@ -133,6 +135,23 @@ type scanner interface {
	Scan(dest ...interface{}) error
}

var passwordKVMatcher, _ = regexp.Compile("password=\\S+ ?")

func (p *Postgresql) SanitizedAddress() (_ string, err error) {
	var canonicalizedAddress string
	if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") {
		canonicalizedAddress, err = pq.ParseURL(p.Address)
		if err != nil {
			return p.sanitizedAddress, err
		}
	} else {
		canonicalizedAddress = p.Address
	}
	p.sanitizedAddress = passwordKVMatcher.ReplaceAllString(canonicalizedAddress, "")

	return p.sanitizedAddress, err
}
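// Illustration only (made-up DSN): given
//   "host=localhost user=pqotest password=s3cret dbname=app sslmode=disable"
// SanitizedAddress returns
//   "host=localhost user=pqotest dbname=app sslmode=disable"
// because passwordKVMatcher strips the "password=..." pair, so credentials
// never end up in the "server" tag.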

func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error {
	var columnVars []interface{}
	var dbname bytes.Buffer
@@ -165,7 +184,13 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error {
		dbname.WriteString("postgres")
	}

	tags := map[string]string{"server": p.Address, "db": dbname.String()}
	var tagAddress string
	tagAddress, err = p.SanitizedAddress()
	if err != nil {
		return err
	}

	tags := map[string]string{"server": tagAddress, "db": dbname.String()}

	fields := make(map[string]interface{})
	for col, val := range columnMap {

@@ -35,6 +35,10 @@ The above configuration would result in output like:
# Measurements
Note: prefix can be set by the user, per process.

Threads related measurement names:
- procstat_[prefix_]num_threads value=5

File descriptor related measurement names:
- procstat_[prefix_]num_fds value=4

@@ -52,6 +52,7 @@ func NewSpecProcessor(
}

func (p *SpecProcessor) pushMetrics() {
	p.pushNThreadsStats()
	p.pushFDStats()
	p.pushCtxStats()
	p.pushIOStats()
@@ -60,6 +61,15 @@ func (p *SpecProcessor) pushMetrics() {
	p.flush()
}

func (p *SpecProcessor) pushNThreadsStats() error {
	numThreads, err := p.proc.NumThreads()
	if err != nil {
		return fmt.Errorf("NumThreads error: %s\n", err)
	}
	p.add("num_threads", numThreads)
	return nil
}

func (p *SpecProcessor) pushFDStats() error {
	fds, err := p.proc.NumFDs()
	if err != nil {

@@ -10,6 +10,7 @@ import (
	"io"
	"net/http"
	"sync"
	"time"
)

type Prometheus struct {
@@ -51,8 +52,17 @@ func (g *Prometheus) Gather(acc telegraf.Accumulator) error {
	return outerr
}

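// ResponseHeaderTimeout bounds how long we wait for the scrape target's
// response headers, while the client-level Timeout below caps the entire
// request, body included, so a stalled endpoint cannot block Gather forever.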
var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{
	Transport: tr,
	Timeout:   time.Duration(4 * time.Second),
}

func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
	resp, err := http.Get(url)
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", url, err)
	}

@@ -122,7 +122,11 @@ func (r *RabbitMQ) Description() string {

func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error {
	if r.Client == nil {
		r.Client = &http.Client{}
		tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
		r.Client = &http.Client{
			Transport: tr,
			Timeout:   time.Duration(4 * time.Second),
		}
	}

	var errChan = make(chan error, len(gatherFunctions))

@@ -177,8 +177,11 @@ func (r *Raindrops) getTags(addr *url.URL) map[string]string {

func init() {
	inputs.Add("raindrops", func() telegraf.Input {
		return &Raindrops{http_client: &http.Client{Transport: &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
		}}}
		return &Raindrops{http_client: &http.Client{
			Transport: &http.Transport{
				ResponseHeaderTimeout: time.Duration(3 * time.Second),
			},
			Timeout: time.Duration(4 * time.Second),
		}}
	})
}

86 plugins/inputs/redis/README.md Normal file
@@ -0,0 +1,86 @@
# Telegraf Plugin: Redis

### Configuration:

```
# Read Redis's basic status information
[[inputs.redis]]
  ## specify servers via a url matching:
  ##  [protocol://][:password]@address[:port]
  ##  e.g.
  ##    tcp://localhost:6379
  ##    tcp://:password@192.168.99.100
  ##
  ## If no servers are specified, then localhost is used as the host.
  ## If no port is specified, 6379 is used
  servers = ["tcp://localhost:6379"]
```

### Measurements & Fields:

- Measurement
    - uptime_in_seconds
    - connected_clients
    - used_memory
    - used_memory_rss
    - used_memory_peak
    - used_memory_lua
    - rdb_changes_since_last_save
    - total_connections_received
    - total_commands_processed
    - instantaneous_ops_per_sec
    - instantaneous_input_kbps
    - instantaneous_output_kbps
    - sync_full
    - sync_partial_ok
    - sync_partial_err
    - expired_keys
    - evicted_keys
    - keyspace_hits
    - keyspace_misses
    - pubsub_channels
    - pubsub_patterns
    - latest_fork_usec
    - connected_slaves
    - master_repl_offset
    - repl_backlog_active
    - repl_backlog_size
    - repl_backlog_histlen
    - mem_fragmentation_ratio
    - used_cpu_sys
    - used_cpu_user
    - used_cpu_sys_children
    - used_cpu_user_children

### Tags:

- All measurements have the following tags:
    - port
    - server

### Example Output:

Using this configuration:
```
[[inputs.redis]]
  ## specify servers via a url matching:
  ##  [protocol://][:password]@address[:port]
  ##  e.g.
  ##    tcp://localhost:6379
  ##    tcp://:password@192.168.99.100
  ##
  ## If no servers are specified, then localhost is used as the host.
  ## If no port is specified, 6379 is used
  servers = ["tcp://localhost:6379"]
```

When run with:
```
./telegraf -config telegraf.conf -input-filter redis -test
```

It produces:
```
* Plugin: redis, Collection 1
> redis,port=6379,server=localhost clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i 1457052084987848383
```
@@ -9,6 +9,7 @@ import (
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
@@ -30,6 +31,8 @@ var sampleConfig = `
  servers = ["tcp://localhost:6379"]
`

var defaultTimeout = 5 * time.Second

func (r *Redis) SampleConfig() string {
	return sampleConfig
}
@@ -120,12 +123,15 @@ func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
		addr.Host = addr.Host + ":" + defaultPort
	}

	c, err := net.Dial("tcp", addr.Host)
	c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
	if err != nil {
		return fmt.Errorf("Unable to connect to redis server '%s': %s", addr.Host, err)
	}
	defer c.Close()

	// Extend connection
	c.SetDeadline(time.Now().Add(defaultTimeout))

	if addr.User != nil {
		pwd, set := addr.User.Password()
		if set && pwd != "" {

@@ -5,6 +5,7 @@ import (
	"fmt"
	"net/http"
	"net/url"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
@@ -20,7 +21,12 @@ type Riak struct {

// NewRiak returns a new instance of Riak with a default http client
func NewRiak() *Riak {
	return &Riak{client: http.DefaultClient}
	tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
	client := &http.Client{
		Transport: tr,
		Timeout:   time.Duration(4 * time.Second),
	}
	return &Riak{client: client}
}

// Type riakStats represents the data that is received from Riak

@@ -49,7 +49,7 @@ func (s *Sensors) Gather(acc telegraf.Accumulator) error {
	var found bool

	for _, sensor := range s.Sensors {
		parts := strings.SplitN(":", sensor, 2)
		parts := strings.SplitN(sensor, ":", 2)

		if parts[0] == chipName {
			if parts[1] == "*" || parts[1] == featureLabel {

549 plugins/inputs/snmp/README.md Normal file
@@ -0,0 +1,549 @@
# SNMP Input Plugin

The SNMP input plugin gathers metrics from SNMP agents.

### Configuration:


#### Very simple example

In this example, the plugin will gather the value of this OID:

- `.1.3.6.1.2.1.2.2.1.4.1`

```toml
# Very Simple Example
[[inputs.snmp]]

  [[inputs.snmp.host]]
    address = "127.0.0.1:161"
    # SNMP community
    community = "public" # default public
    # SNMP version (1, 2 or 3)
    # Version 3 not supported yet
    version = 2 # default 2
    # Simple list of OIDs to get, in addition to "collect"
    get_oids = [".1.3.6.1.2.1.2.2.1.4.1"]
```


#### Simple example

In this example, Telegraf gathers the values of the OIDs:

- named **ifnumber**
- named **interface_speed**

With the **inputs.snmp.get** section, the plugin gets the OID number:

- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*

As you can see, *ifSpeed* is not a valid OID. In order to get
the valid OID, the plugin uses `snmptranslate_file` to match the OID:

- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`

The plugin will also append `instance` to the corresponding OID:

- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`

In this example, the plugin will gather the values of these OIDs:

- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`


```toml
# Simple example
[[inputs.snmp]]
  ## Use 'oids.txt' file to translate oids to names
  ## To generate 'oids.txt' you need to run:
  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  ## Or if you have another MIB folder with custom MIBs
  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  snmptranslate_file = "/tmp/oids.txt"

  [[inputs.snmp.host]]
    address = "127.0.0.1:161"
    # SNMP community
    community = "public" # default public
    # SNMP version (1, 2 or 3)
    # Version 3 not supported yet
    version = 2 # default 2
    # Which get/bulk do you want to collect for this host
    collect = ["ifnumber", "interface_speed"]

  [[inputs.snmp.get]]
    name = "ifnumber"
    oid = ".1.3.6.1.2.1.2.1.0"

  [[inputs.snmp.get]]
    name = "interface_speed"
    oid = "ifSpeed"
    instance = "1"
```


#### Simple bulk example

In this example, Telegraf gathers the values of the OIDs:

- named **ifnumber**
- named **interface_speed**
- named **if_out_octets**

With the **inputs.snmp.get** section, the plugin gets the OID numbers:

- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*

With the **inputs.snmp.bulk** section, the plugin gets the OID number:

- **if_out_octets** => *ifOutOctets*

As you can see, *ifSpeed* and *ifOutOctets* are not valid OIDs.
In order to get the valid OIDs, the plugin uses `snmptranslate_file`
to match them:

- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
- **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16`

Also, the plugin will append `instance` to the corresponding OID:

- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`

And since **if_out_octets** is a bulk request, the plugin will gather all
OIDs in the table:

- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`

In this example, the plugin will gather the values of these OIDs:

- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`
- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`


```toml
# Simple bulk example
[[inputs.snmp]]
  ## Use 'oids.txt' file to translate oids to names
  ## To generate 'oids.txt' you need to run:
  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  ## Or if you have another MIB folder with custom MIBs
  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  snmptranslate_file = "/tmp/oids.txt"

  [[inputs.snmp.host]]
    address = "127.0.0.1:161"
    # SNMP community
    community = "public" # default public
    # SNMP version (1, 2 or 3)
    # Version 3 not supported yet
    version = 2 # default 2
    # Which get/bulk do you want to collect for this host
    collect = ["interface_speed", "if_number", "if_out_octets"]

  [[inputs.snmp.get]]
    name = "interface_speed"
    oid = "ifSpeed"
    instance = "1"

  [[inputs.snmp.get]]
    name = "if_number"
    oid = "ifNumber"

  [[inputs.snmp.bulk]]
    name = "if_out_octets"
    oid = "ifOutOctets"
```


#### Table example

In this example, we remove the collect attribute from the host section,
but you can still use it in combination with the following part.

Note: this example is like a bulk request, but using a different
configuration.

Telegraf gathers the values of the OIDs of the table:

- named **iftable1**

With the **inputs.snmp.table** section, the plugin gets the OID number:

- **iftable1** => `.1.3.6.1.2.1.31.1.1.1`

Since **iftable1** is a table, the plugin will gather all
OIDs in the table and in the subtables:

- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`

```toml
# Table example
[[inputs.snmp]]
  ## Use 'oids.txt' file to translate oids to names
  ## To generate 'oids.txt' you need to run:
  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  ## Or if you have another MIB folder with custom MIBs
  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  snmptranslate_file = "/tmp/oids.txt"

  [[inputs.snmp.host]]
    address = "127.0.0.1:161"
    # SNMP community
    community = "public" # default public
    # SNMP version (1, 2 or 3)
    # Version 3 not supported yet
    version = 2 # default 2
    # Which get/bulk do you want to collect for this host
    # Which table do you want to collect
    [[inputs.snmp.host.table]]
      name = "iftable1"

  # table with neither mapping nor subtables
  # This is like a bulk request
  [[inputs.snmp.table]]
    name = "iftable1"
    oid = ".1.3.6.1.2.1.31.1.1.1"
```


#### Table with subtable example

In this example, we remove the collect attribute from the host section,
but you can still use it in combination with the following part.

Note: this example is like a bulk request, but using a different
configuration.

Telegraf gathers the values of the OIDs of the table:

- named **iftable2**

With the **inputs.snmp.table** section *AND* the **sub_tables** attribute,
the plugin will get OIDs from subtables:

- **iftable2** => `.1.3.6.1.2.1.2.2.1.13`

Since **iftable2** is a table, the plugin will gather all
OIDs in subtables:

- `.1.3.6.1.2.1.2.2.1.13.1`
- `.1.3.6.1.2.1.2.2.1.13.2`
- `.1.3.6.1.2.1.2.2.1.13.3`
- `.1.3.6.1.2.1.2.2.1.13.4`
- `.1.3.6.1.2.1.2.2.1.13....`


```toml
# Table with subtable example
[[inputs.snmp]]
  ## Use 'oids.txt' file to translate oids to names
  ## To generate 'oids.txt' you need to run:
  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  ## Or if you have another MIB folder with custom MIBs
  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  snmptranslate_file = "/tmp/oids.txt"

  [[inputs.snmp.host]]
    address = "127.0.0.1:161"
    # SNMP community
    community = "public" # default public
    # SNMP version (1, 2 or 3)
    # Version 3 not supported yet
    version = 2 # default 2
    # Which table do you want to collect
    [[inputs.snmp.host.table]]
      name = "iftable2"

  # table without mapping but with subtables
  [[inputs.snmp.table]]
    name = "iftable2"
    sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
    # note
    # oid attribute is useless
```


#### Table with mapping example

In this example, we remove the collect attribute from the host section,
but you can still use it in combination with the following part.

Telegraf gathers the values of the OIDs of the table:

- named **iftable3**

With the **inputs.snmp.table** section, the plugin gets the OID number:

- **iftable3** => `.1.3.6.1.2.1.31.1.1.1`

Since **iftable3** is a table, the plugin will gather all
OIDs in the table and in the subtables:

- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`

But the **include_instances** attribute will filter which OIDs
are gathered. As you can see, there is another attribute, `mapping_table`.
`include_instances` and `mapping_table` allow building a hash table
to filter only the OIDs you want.
Let's say we have the following data on the SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has the value `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has the value `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has the value `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has the value `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has the value `eth1`

The plugin will build the following hash table:

| instance name | instance id |
|---------------|-------------|
| `enp5s0`      | `1`         |
| `enp5s1`      | `2`         |
| `enp5s2`      | `3`         |
| `eth0`        | `4`         |
| `eth1`        | `5`         |

With the **include_instances** attribute, the plugin will gather
the following OIDs:

- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.2.1`
- `.1.3.6.1.2.1.31.1.1.1.2.5`
- `.1.3.6.1.2.1.31.1.1.1.3.1`
- `.1.3.6.1.2.1.31.1.1.1.3.5`
- `.1.3.6.1.2.1.31.1.1.1.4.1`
- `.1.3.6.1.2.1.31.1.1.1.4.5`
- `.1.3.6.1.2.1.31.1.1.1.5.1`
- `.1.3.6.1.2.1.31.1.1.1.5.5`
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `...`

Note: the plugin will add the instance name as the tag *instance*.

```toml
# Simple table with mapping example
[[inputs.snmp]]
  ## Use 'oids.txt' file to translate oids to names
  ## To generate 'oids.txt' you need to run:
  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  ## Or if you have another MIB folder with custom MIBs
  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  snmptranslate_file = "/tmp/oids.txt"

  [[inputs.snmp.host]]
    address = "127.0.0.1:161"
    # SNMP community
    community = "public" # default public
    # SNMP version (1, 2 or 3)
    # Version 3 not supported yet
    version = 2 # default 2
    # Which table do you want to collect
    [[inputs.snmp.host.table]]
      name = "iftable3"
      include_instances = ["enp5s0", "eth1"]

  # table with mapping but without subtables
  [[inputs.snmp.table]]
    name = "iftable3"
    oid = ".1.3.6.1.2.1.31.1.1.1"
    # if empty, get all instances
    mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
    # if empty, get all subtables
```
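For reference, a minimal Go sketch of the instance filtering this implies
(a hypothetical helper, mirroring the `include_instances` semantics above
and the `exclude_instances` option visible in the plugin source, not the
plugin's actual code):

```go
// keepInstance reports whether an instance name read from the mapping
// table should be collected, given the include/exclude lists.
func keepInstance(name string, include, exclude []string) bool {
	keep := len(include) == 0 // an empty include list means "take everything"
	for _, inst := range include {
		if inst == name {
			keep = true
		}
	}
	for _, inst := range exclude {
		if inst == name {
			keep = false
		}
	}
	return keep
}
```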

#### Table with both mapping and subtable example

In this example, we remove the collect attribute from the host section,
but you can still use it in combination with the following part.

Telegraf gathers the values of the OIDs of the table:

- named **iftable4**

With the **inputs.snmp.table** section *AND* the **sub_tables** attribute,
the plugin will get OIDs from subtables:

- **iftable4** => `.1.3.6.1.2.1.31.1.1.1`

Since **iftable4** is a table, the plugin will gather all
OIDs in the table and in the subtables:

- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.2`
- `.1.3.6.1.2.1.31.1.1.1.6.3`
- `.1.3.6.1.2.1.31.1.1.1.6.4`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.2`
- `.1.3.6.1.2.1.31.1.1.1.10.3`
- `.1.3.6.1.2.1.31.1.1.1.10.4`
- `.1.3.6.1.2.1.31.1.1.1.10....`

But the **include_instances** attribute will filter which OIDs
are gathered. As you can see, there is another attribute, `mapping_table`.
`include_instances` and `mapping_table` allow building a hash table
to filter only the OIDs you want.
Let's say we have the following data on the SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has the value `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has the value `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has the value `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has the value `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has the value `eth1`

The plugin will build the following hash table:

| instance name | instance id |
|---------------|-------------|
| `enp5s0`      | `1`         |
| `enp5s1`      | `2`         |
| `enp5s2`      | `3`         |
| `eth0`        | `4`         |
| `eth1`        | `5`         |

With the **include_instances** attribute, the plugin will gather
the following OIDs:

- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.5`

Note: the plugin will add the instance name as the tag *instance*.


```toml
# Table with both mapping and subtable example
[[inputs.snmp]]
  ## Use 'oids.txt' file to translate oids to names
  ## To generate 'oids.txt' you need to run:
  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  ## Or if you have another MIB folder with custom MIBs
  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  snmptranslate_file = "/tmp/oids.txt"

  [[inputs.snmp.host]]
    address = "127.0.0.1:161"
    # SNMP community
    community = "public" # default public
    # SNMP version (1, 2 or 3)
    # Version 3 not supported yet
    version = 2 # default 2
    # Which table do you want to collect
    [[inputs.snmp.host.table]]
      name = "iftable4"
      include_instances = ["enp5s0", "eth1"]

  # table with both mapping and subtables
  [[inputs.snmp.table]]
    name = "iftable4"
    # if empty, get all instances
    mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
    # if empty, get all subtables
    # sub_tables do not have to be "real" subtables
    sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
    # note
    # oid attribute is useless

  # SNMP SUBTABLES
  [[plugins.snmp.subtable]]
    name = "bytes_recv"
    oid = ".1.3.6.1.2.1.31.1.1.1.6"
    unit = "octets"

  [[plugins.snmp.subtable]]
    name = "bytes_send"
    oid = ".1.3.6.1.2.1.31.1.1.1.10"
    unit = "octets"
```

#### Configuration notes

- In the **plugins.snmp.table** section, the `oid` attribute is useless if
  the `sub_tables` attribute is defined

- In the **plugins.snmp.subtable** section, you can put a name from `snmptranslate_file`
  as the `oid` attribute instead of a valid OID

### Measurements & Fields:

With the last example (Table with both mapping and subtable example):

- ifHCOutOctets
    - ifHCOutOctets
- ifInDiscards
    - ifInDiscards
- ifHCInOctets
    - ifHCInOctets

### Tags:

With the last example (Table with both mapping and subtable example):

- ifHCOutOctets
    - host
    - instance
    - unit
- ifInDiscards
    - host
    - instance
- ifHCInOctets
    - host
    - instance
    - unit

### Example Output:

With the last example (Table with both mapping and subtable example):

```
ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901
ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264
ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312
```
@@ -20,7 +20,16 @@ type Snmp struct {
|
||||
Host []Host
|
||||
Get []Data
|
||||
Bulk []Data
|
||||
Table []Table
|
||||
Subtable []Subtable
|
||||
SnmptranslateFile string
|
||||
|
||||
nameToOid map[string]string
|
||||
initNode Node
|
||||
subTableMap map[string]Subtable
|
||||
|
||||
// TODO change as unexportable
|
||||
//OidInstanceMapping map[string]map[string]string
|
||||
}
|
||||
|
||||
type Host struct {
|
||||
@@ -36,9 +45,54 @@ type Host struct {
|
||||
Collect []string
|
||||
// easy get oids
|
||||
GetOids []string
|
||||
// Table
|
||||
Table []HostTable
|
||||
// Oids
|
||||
getOids []Data
|
||||
bulkOids []Data
|
||||
tables []HostTable
|
||||
// array of processed oids
|
||||
// to skip oid duplication
|
||||
processedOids []string
|
||||
}
|
||||
|
||||
type Table struct {
|
||||
// name = "iftable"
|
||||
Name string
|
||||
// oid = ".1.3.6.1.2.1.31.1.1.1"
|
||||
Oid string
|
||||
//if empty get all instances
|
||||
//mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
|
||||
MappingTable string
|
||||
// if empty get all subtables
|
||||
// sub_tables could be not "real subtables"
|
||||
//sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
|
||||
SubTables []string
|
||||
}
|
||||
|
||||
type HostTable struct {
|
||||
// name = "iftable"
|
||||
Name string
|
||||
// Includes only these instances
|
||||
// include_instances = ["eth0", "eth1"]
|
||||
IncludeInstances []string
|
||||
// Excludes only these instances
|
||||
// exclude_instances = ["eth20", "eth21"]
|
||||
ExcludeInstances []string
|
||||
// From Table struct
|
||||
oid string
|
||||
mappingTable string
|
||||
subTables []string
|
||||
}
|
||||
|
||||
// TODO find better names
|
||||
type Subtable struct {
|
||||
//name = "bytes_send"
|
||||
Name string
|
||||
//oid = ".1.3.6.1.2.1.31.1.1.1.10"
|
||||
Oid string
|
||||
//unit = "octets"
|
||||
Unit string
|
||||
}
|
||||
|
||||
type Data struct {
|
||||
@@ -63,13 +117,8 @@ type Node struct {
|
||||
subnodes map[string]Node
|
||||
}
|
||||
|
||||
var initNode = Node{
|
||||
id: "1",
|
||||
name: "",
|
||||
subnodes: make(map[string]Node),
|
||||
}
|
||||
|
||||
var NameToOid = make(map[string]string)
|
||||
// TODO move this var to snmp struct
|
||||
var OidInstanceMapping = make(map[string]map[string]string)
|
||||
|
||||
var sampleConfig = `
|
||||
## Use 'oids.txt' file to translate oids to names
|
||||
@@ -113,7 +162,7 @@ var sampleConfig = `
|
||||
[[inputs.snmp.get]]
|
||||
name = "interface_speed"
|
||||
oid = "ifSpeed"
|
||||
instance = 0
|
||||
instance = "0"
|
||||
|
||||
[[inputs.snmp.get]]
|
||||
name = "sysuptime"
|
||||
@@ -129,6 +178,52 @@ var sampleConfig = `
|
||||
name = "ifoutoctets"
|
||||
max_repetition = 127
|
||||
oid = "ifOutOctets"
|
||||
|
||||
|
||||
[[inputs.snmp.host]]
|
||||
address = "192.168.2.13:161"
|
||||
#address = "127.0.0.1:161"
|
||||
community = "public"
|
||||
version = 2
|
||||
timeout = 2.0
|
||||
retries = 2
|
||||
#collect = ["mybulk", "sysservices", "sysdescr", "systype"]
|
||||
collect = ["sysuptime" ]
|
||||
[[inputs.snmp.host.table]]
|
||||
name = "iftable3"
|
||||
include_instances = ["enp5s0", "eth1"]
|
||||
|
||||
# SNMP TABLEs
|
||||
# table without mapping neither subtables
|
||||
[[inputs.snmp.table]]
|
||||
name = "iftable1"
|
||||
oid = ".1.3.6.1.2.1.31.1.1.1"
|
||||
|
||||
# table without mapping but with subtables
|
||||
[[inputs.snmp.table]]
|
||||
name = "iftable2"
|
||||
oid = ".1.3.6.1.2.1.31.1.1.1"
|
||||
sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
|
||||
|
||||
# table with mapping but without subtables
|
||||
[[inputs.snmp.table]]
|
||||
name = "iftable3"
|
||||
oid = ".1.3.6.1.2.1.31.1.1.1"
|
||||
# if empty. get all instances
|
||||
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
|
||||
# if empty, get all subtables
|
||||
|
||||
# table with both mapping and subtables
|
||||
[[inputs.snmp.table]]
|
||||
name = "iftable4"
|
||||
oid = ".1.3.6.1.2.1.31.1.1.1"
|
||||
# if empty get all instances
|
||||
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
|
||||
# if empty get all subtables
|
||||
# sub_tables could be not "real subtables"
|
||||
sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
|
||||
|
||||
|
||||
`
|
||||
|
||||
// SampleConfig returns sample configuration message
|
||||
@@ -189,8 +284,24 @@ func findnodename(node Node, ids []string) (string, string) {
|
||||
}
|
||||
|
||||
func (s *Snmp) Gather(acc telegraf.Accumulator) error {
|
||||
// TODO put this in cache on first run
|
||||
// Create subtables mapping
|
||||
if len(s.subTableMap) == 0 {
|
||||
s.subTableMap = make(map[string]Subtable)
|
||||
for _, sb := range s.Subtable {
|
||||
s.subTableMap[sb.Name] = sb
|
||||
}
|
||||
}
|
||||
// TODO put this in cache on first run
|
||||
// Create oid tree
|
||||
if s.SnmptranslateFile != "" && len(initNode.subnodes) == 0 {
|
||||
if s.SnmptranslateFile != "" && len(s.initNode.subnodes) == 0 {
|
||||
s.nameToOid = make(map[string]string)
|
||||
s.initNode = Node{
|
||||
id: "1",
|
||||
name: "",
|
||||
subnodes: make(map[string]Node),
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(s.SnmptranslateFile)
|
||||
if err != nil {
|
||||
log.Printf("Reading SNMPtranslate file error: %s", err)
|
||||
@@ -202,8 +313,8 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
|
||||
if oids[2] != "" {
|
||||
oid_name := oids[1]
|
||||
oid := oids[2]
|
||||
fillnode(initNode, oid_name, strings.Split(string(oid), "."))
|
||||
NameToOid[oid_name] = oid
|
||||
fillnode(s.initNode, oid_name, strings.Split(string(oid), "."))
|
||||
s.nameToOid[oid_name] = oid
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -227,7 +338,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
|
||||
// Get Easy GET oids
|
||||
for _, oidstring := range host.GetOids {
|
||||
oid := Data{}
|
||||
if val, ok := NameToOid[oidstring]; ok {
|
||||
if val, ok := s.nameToOid[oidstring]; ok {
|
||||
// TODO should we add the 0 instance ?
|
||||
oid.Name = oidstring
|
||||
oid.Oid = val
|
||||
@@ -248,7 +359,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
|
||||
// Get GET oids
|
||||
for _, oid := range s.Get {
|
||||
if oid.Name == oid_name {
|
||||
if val, ok := NameToOid[oid.Oid]; ok {
|
||||
if val, ok := s.nameToOid[oid.Oid]; ok {
|
||||
// TODO should we add the 0 instance ?
|
||||
if oid.Instance != "" {
|
||||
oid.rawOid = "." + val + "." + oid.Instance
|
||||
@@ -264,7 +375,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
|
||||
// Get GETBULK oids
|
||||
for _, oid := range s.Bulk {
|
||||
if oid.Name == oid_name {
|
||||
if val, ok := NameToOid[oid.Oid]; ok {
|
||||
if val, ok := s.nameToOid[oid.Oid]; ok {
|
||||
oid.rawOid = "." + val
|
||||
} else {
|
||||
oid.rawOid = oid.Oid
|
||||
@@ -273,18 +384,219 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Table
|
||||
for _, hostTable := range host.Table {
|
||||
for _, snmpTable := range s.Table {
|
||||
if hostTable.Name == snmpTable.Name {
|
||||
table := hostTable
|
||||
table.oid = snmpTable.Oid
|
||||
table.mappingTable = snmpTable.MappingTable
|
||||
table.subTables = snmpTable.SubTables
|
||||
host.tables = append(host.tables, table)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Launch Mapping
|
||||
// TODO put this in cache on first run
|
||||
// TODO save mapping and computed oids
|
||||
// to do it only the first time
|
||||
// only if len(s.OidInstanceMapping) == 0
|
||||
if len(OidInstanceMapping) >= 0 {
|
||||
if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Launch Get requests
|
||||
if err := host.SNMPGet(acc); err != nil {
|
||||
if err := host.SNMPGet(acc, s.initNode); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := host.SNMPBulk(acc); err != nil {
|
||||
if err := host.SNMPBulk(acc, s.initNode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Host) SNMPGet(acc telegraf.Accumulator) error {
|
||||
func (h *Host) SNMPMap(acc telegraf.Accumulator, nameToOid map[string]string, subTableMap map[string]Subtable) error {
	// Get snmp client
	snmpClient, err := h.GetSNMPClient()
	if err != nil {
		return err
	}
	// Close the connection when done
	defer snmpClient.Conn.Close()
	// Prepare OIDs
	for _, table := range h.tables {
		// We don't have mapping
		if table.mappingTable == "" {
			if len(table.subTables) == 0 {
				// If we have neither a mapping table
				// nor a subtables list,
				// this is just a bulk request
				oid := Data{}
				oid.Oid = table.oid
				if val, ok := nameToOid[oid.Oid]; ok {
					oid.rawOid = "." + val
				} else {
					oid.rawOid = oid.Oid
				}
				h.bulkOids = append(h.bulkOids, oid)
			} else {
				// If we don't have a mapping table
				// but we do have subtables,
				// this is a bunch of bulk requests
				// For each subtable ...
				for _, sb := range table.subTables {
					// ... we create a new Data (oid) object
					oid := Data{}
					// Looking for more information about this subtable
					ssb, exists := subTableMap[sb]
					if exists {
						// We found a subtable section in config files
						oid.Oid = ssb.Oid
						oid.rawOid = ssb.Oid
						oid.Unit = ssb.Unit
					} else {
						// We did NOT find a subtable section in config files
						oid.Oid = sb
						oid.rawOid = sb
					}
					// TODO check oid validity

					// Add the new oid to the bulkOids list
					h.bulkOids = append(h.bulkOids, oid)
				}
			}
		} else {
			// We have a mapping table;
			// we need to query it to get the mapping
			// between instance ids and instance names
			oid_asked := table.mappingTable
			oid_next := oid_asked
			need_more_requests := true
			// Set max repetition
			maxRepetition := uint8(32)
			// Launch requests
			for need_more_requests {
				// Launch request
				result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition)
				if err3 != nil {
					return err3
				}

				lastOid := ""
				for _, variable := range result.Variables {
					lastOid = variable.Name
					if strings.HasPrefix(variable.Name, oid_asked) {
						switch variable.Type {
						// handle instance names
						case gosnmp.OctetString:
							// Check if the instance is in the included instances
							getInstances := true
							if len(table.IncludeInstances) > 0 {
								getInstances = false
								for _, instance := range table.IncludeInstances {
									if instance == string(variable.Value.([]byte)) {
										getInstances = true
									}
								}
							}
							// Check if the instance is in the excluded instances
							if len(table.ExcludeInstances) > 0 {
								getInstances = true
								for _, instance := range table.ExcludeInstances {
									if instance == string(variable.Value.([]byte)) {
										getInstances = false
									}
								}
							}
							// We don't want this instance
							if !getInstances {
								continue
							}

							// remove the table oid from the complete oid
							// in order to get the current instance id
							key := strings.Replace(variable.Name, oid_asked, "", 1)

							if len(table.subTables) == 0 {
								// We have a mapping table
								// but no subtables;
								// this is just a bulk request

								// Building mapping table
								mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))}
								_, exists := OidInstanceMapping[table.oid]
								if exists {
									OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte))
								} else {
									OidInstanceMapping[table.oid] = mapping
								}

								// Add the table oid to the bulk oid list
								oid := Data{}
								oid.Oid = table.oid
								if val, ok := nameToOid[oid.Oid]; ok {
									oid.rawOid = "." + val
								} else {
									oid.rawOid = oid.Oid
								}
								h.bulkOids = append(h.bulkOids, oid)
							} else {
								// We have a mapping table
								// and some subtables;
								// this is a bunch of get requests.
								// This is the best case :)

								// For each subtable ...
								for _, sb := range table.subTables {
									// ... we create a new Data (oid) object
									oid := Data{}
									// Looking for more information about this subtable
									ssb, exists := subTableMap[sb]
									if exists {
										// We found a subtable section in config files
										oid.Oid = ssb.Oid + key
										oid.rawOid = ssb.Oid + key
										oid.Unit = ssb.Unit
										oid.Instance = string(variable.Value.([]byte))
									} else {
										// We did NOT find a subtable section in config files
										oid.Oid = sb + key
										oid.rawOid = sb + key
										oid.Instance = string(variable.Value.([]byte))
									}
									// TODO check oid validity

									// Add the new oid to the getOids list
									h.getOids = append(h.getOids, oid)
								}
							}
						default:
						}
					} else {
						break
					}
				}
				// Determine if we need more requests
				if strings.HasPrefix(lastOid, oid_asked) {
					need_more_requests = true
					oid_next = lastOid
				} else {
					need_more_requests = false
				}
			}
		}
	}
	// Mapping finished

	// Create new oids based on the mapping

	return nil
}

func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error {
	// Get snmp client
	snmpClient, err := h.GetSNMPClient()
	if err != nil {
@@ -317,7 +629,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator) error {
			return err3
		}
		// Handle response
		_, err = h.HandleResponse(oidsList, result, acc)
		_, err = h.HandleResponse(oidsList, result, acc, initNode)
		if err != nil {
			return err
		}
@@ -325,7 +637,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator) error {
	return nil
}

func (h *Host) SNMPBulk(acc telegraf.Accumulator) error {
func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error {
	// Get snmp client
	snmpClient, err := h.GetSNMPClient()
	if err != nil {
@@ -360,7 +672,7 @@ func (h *Host) SNMPBulk(acc telegraf.Accumulator) error {
			return err3
		}
		// Handle response
		last_oid, err := h.HandleResponse(oidsList, result, acc)
		last_oid, err := h.HandleResponse(oidsList, result, acc, initNode)
		if err != nil {
			return err
		}
@@ -412,12 +724,19 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) {
	return snmpClient, nil
}

func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc telegraf.Accumulator) (string, error) {
func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc telegraf.Accumulator, initNode Node) (string, error) {
	var lastOid string
	for _, variable := range result.Variables {
		lastOid = variable.Name
	// Remove unwanted oid
	nextresult:
		// Get only oid wanted
		for oid_key, oid := range oids {
			// Skip oids already processed
			for _, processedOid := range h.processedOids {
				if variable.Name == processedOid {
					break nextresult
				}
			}
			if strings.HasPrefix(variable.Name, oid_key) {
				switch variable.Type {
				// handle Metrics
@@ -431,11 +750,27 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a
					// Get name and instance
					var oid_name string
					var instance string
					// Get oidname and instannce from translate file
					// Get oidname and instance from translate file
					oid_name, instance = findnodename(initNode,
						strings.Split(string(variable.Name[1:]), "."))

					if instance != "" {
						// Set instance tag
						// From mapping table
						mapping, inMappingNoSubTable := OidInstanceMapping[oid_key]
						if inMappingNoSubTable {
							// filter if the instance is not in
							// the OidInstanceMapping mapping map
							if instance_name, exists := mapping[instance]; exists {
								tags["instance"] = instance_name
							} else {
								continue
							}
						} else if oid.Instance != "" {
							// From config files
							tags["instance"] = oid.Instance
						} else if instance != "" {
							// Using last id of the current oid, ie:
							// with .1.3.6.1.2.1.31.1.1.1.10.3
							// instance is 3
							tags["instance"] = instance
						}

@@ -453,6 +788,7 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a
					fields := make(map[string]interface{})
					fields[string(field_name)] = variable.Value

					h.processedOids = append(h.processedOids, variable.Name)
					acc.AddFields(field_name, fields, tags)
				case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
					// Oid not found

64 plugins/inputs/system/KERNEL_README.md Normal file
@@ -0,0 +1,64 @@
# Kernel Input Plugin

This plugin is only available on Linux.

The kernel plugin gathers info about the kernel that doesn't fit into other
plugins. In general, it is the statistics available in `/proc/stat` that are
not covered by other plugins.

The metrics are documented in `man proc` under the `/proc/stat` section.

```
/proc/stat
kernel/system statistics. Varies with architecture. Common entries include:

page 5741 1808
The number of pages the system paged in and the number that were paged out (from disk).

swap 1 0
The number of swap pages that have been brought in and out.

intr 1462898
This line shows counts of interrupts serviced since boot time, for each of
the possible system interrupts. The first column is the total of all
interrupts serviced; each subsequent column is the total for a particular interrupt.

ctxt 115315
The number of context switches that the system underwent.

btime 769041601
boot time, in seconds since the Epoch, 1970-01-01 00:00:00 +0000 (UTC).

processes 86031
Number of forks since boot.
```
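
To poke at these values directly, the following minimal standalone sketch (not part of the plugin) scans `/proc/stat` and prints the `btime` entry:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/stat")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		// btime is followed by a single value: boot time in epoch seconds
		if len(fields) == 2 && fields[0] == "btime" {
			fmt.Println("boot time (epoch seconds):", fields[1])
		}
	}
}
```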

### Configuration:

```toml
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration
```

### Measurements & Fields:

- kernel
    - boot_time (integer, seconds since epoch, `btime`)
    - context_switches (integer, `ctxt`)
    - disk_pages_in (integer, `page (0)`)
    - disk_pages_out (integer, `page (1)`)
    - interrupts (integer, `intr`)
    - processes_forked (integer, `processes`)

### Tags:

None

### Example Output:

```
$ telegraf -config ~/ws/telegraf.conf -input-filter kernel -test
* Plugin: kernel, Collection 1
> kernel boot_time=1457505775i,context_switches=2626618i,disk_pages_in=5741i,disk_pages_out=1808i,interrupts=1472736i,processes_forked=10673i 1457613402960879816
```

58 plugins/inputs/system/PROCESSES_README.md Normal file
@@ -0,0 +1,58 @@
# Processes Input Plugin

This plugin gathers info about the total number of processes and groups
them by status (zombie, sleeping, running, etc.)

On Linux this plugin requires access to procfs (/proc); on other OSes
it requires the ability to execute `ps`.

### Configuration:

```toml
# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration
```

### Measurements & Fields:

- processes
    - blocked (aka disk sleep or uninterruptible sleep)
    - running
    - sleeping
    - stopped
    - total
    - zombie
    - wait (freebsd only)
    - idle (bsd only)
    - paging (linux only)
    - total_threads (linux only)

### Process State Mappings

Different OSes use slightly different state codes for their processes; these
codes are documented in `man ps`. The table below maps the major OS state
codes to Telegraf metrics (a small illustrative sketch follows the table):

```
Linux  FreeBSD  Darwin  meaning
  R       R       R     running
  S       S       S     sleeping
  Z       Z       Z     zombie
  T       T       T     stopped
 none     I       I     idle (sleeping for longer than about 20 seconds)
  D      D,L      U     blocked (waiting in uninterruptible sleep, or locked)
  W       W      none   paging (linux kernel < 2.6 only), wait (freebsd)
```
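
As a rough illustration of applying this mapping, the sketch below translates a single state byte into the metric name from the table above. This is illustrative only, not the plugin's implementation:

```go
package main

import "fmt"

// stateField maps the first byte of a process state code to the
// metric name used in the table above (Linux-style codes assumed).
func stateField(code byte) string {
	switch code {
	case 'R':
		return "running"
	case 'S':
		return "sleeping"
	case 'Z':
		return "zombie"
	case 'T':
		return "stopped"
	case 'D':
		return "blocked"
	case 'W':
		return "paging"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(stateField('S')) // sleeping
}
```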

### Tags:

None

### Example Output:

```
$ telegraf -config ~/ws/telegraf.conf -input-filter processes -test
* Plugin: processes, Collection 1
> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,paging=0i,total_threads=687i 1457478636980905042
```

35 plugins/inputs/system/SYSTEM_README.md Normal file
@@ -0,0 +1,35 @@
# System Input Plugin

The system plugin gathers general stats on system load, uptime,
and number of users logged in. It is basically equivalent
to the unix `uptime` command.

### Configuration:

```toml
# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration
```

### Measurements & Fields:

- system
    - load1 (float)
    - load15 (float)
    - load5 (float)
    - n_users (integer)
    - uptime (integer, seconds)
    - uptime_format (string)

### Tags:

None

### Example Output:

```
$ telegraf -config ~/ws/telegraf.conf -input-filter system -test
* Plugin: system, Collection 1
> system load1=2.05,load15=2.38,load5=2.03,n_users=4i,uptime=239043i,uptime_format="2 days, 18:24" 1457546165399253452
```
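
The `format_uptime` helper that produces `uptime_format` is not shown in this diff; as a labeled assumption, a formatter yielding strings like `"2 days, 18:24"` could look roughly like this sketch:

```go
package main

import "fmt"

// formatUptime renders an uptime in seconds as "N days, HH:MM".
// This is a sketch of what such a helper might do, not the code
// from this commit.
func formatUptime(uptime uint64) string {
	days := uptime / 86400
	hours := (uptime % 86400) / 3600
	minutes := (uptime % 3600) / 60
	if days == 0 {
		return fmt.Sprintf("%d:%02d", hours, minutes)
	}
	unit := "days"
	if days == 1 {
		unit = "day"
	}
	return fmt.Sprintf("%d %s, %d:%02d", days, unit, hours, minutes)
}

func main() {
	fmt.Println(formatUptime(239043)) // "2 days, 18:24"
}
```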

110 plugins/inputs/system/kernel.go Normal file
@@ -0,0 +1,110 @@
// +build linux

package system

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// /proc/stat file line prefixes to gather stats on:
var (
	interrupts       = []byte("intr")
	context_switches = []byte("ctxt")
	processes_forked = []byte("processes")
	disk_pages       = []byte("page")
	boot_time        = []byte("btime")
)

type Kernel struct {
	statFile string
}

func (k *Kernel) Description() string {
	return "Get kernel statistics from /proc/stat"
}

func (k *Kernel) SampleConfig() string { return "" }

func (k *Kernel) Gather(acc telegraf.Accumulator) error {
	data, err := k.getProcStat()
	if err != nil {
		return err
	}

	fields := make(map[string]interface{})

	dataFields := bytes.Fields(data)
	for i, field := range dataFields {
		switch {
		case bytes.Equal(field, interrupts):
			m, err := strconv.Atoi(string(dataFields[i+1]))
			if err != nil {
				return err
			}
			fields["interrupts"] = int64(m)
		case bytes.Equal(field, context_switches):
			m, err := strconv.Atoi(string(dataFields[i+1]))
			if err != nil {
				return err
			}
			fields["context_switches"] = int64(m)
		case bytes.Equal(field, processes_forked):
			m, err := strconv.Atoi(string(dataFields[i+1]))
			if err != nil {
				return err
			}
			fields["processes_forked"] = int64(m)
		case bytes.Equal(field, boot_time):
			m, err := strconv.Atoi(string(dataFields[i+1]))
			if err != nil {
				return err
			}
			fields["boot_time"] = int64(m)
		case bytes.Equal(field, disk_pages):
			in, err := strconv.Atoi(string(dataFields[i+1]))
			if err != nil {
				return err
			}
			out, err := strconv.Atoi(string(dataFields[i+2]))
			if err != nil {
				return err
			}
			fields["disk_pages_in"] = int64(in)
			fields["disk_pages_out"] = int64(out)
		}
	}

	acc.AddFields("kernel", fields, map[string]string{})

	return nil
}

func (k *Kernel) getProcStat() ([]byte, error) {
	if _, err := os.Stat(k.statFile); os.IsNotExist(err) {
		return nil, fmt.Errorf("kernel: %s does not exist!", k.statFile)
	} else if err != nil {
		return nil, err
	}

	data, err := ioutil.ReadFile(k.statFile)
	if err != nil {
		return nil, err
	}

	return data, nil
}

func init() {
	inputs.Add("kernel", func() telegraf.Input {
		return &Kernel{
			statFile: "/proc/stat",
		}
	})
}

164 plugins/inputs/system/kernel_test.go Normal file
@@ -0,0 +1,164 @@
// +build linux

package system

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
)

func TestFullProcFile(t *testing.T) {
	tmpfile := makeFakeStatFile([]byte(statFile_Full))
	defer os.Remove(tmpfile)

	k := Kernel{
		statFile: tmpfile,
	}

	acc := testutil.Accumulator{}
	err := k.Gather(&acc)
	assert.NoError(t, err)

	fields := map[string]interface{}{
		"boot_time":        int64(1457505775),
		"context_switches": int64(2626618),
		"disk_pages_in":    int64(5741),
		"disk_pages_out":   int64(1808),
		"interrupts":       int64(1472736),
		"processes_forked": int64(10673),
	}
	acc.AssertContainsFields(t, "kernel", fields)
}

func TestPartialProcFile(t *testing.T) {
	tmpfile := makeFakeStatFile([]byte(statFile_Partial))
	defer os.Remove(tmpfile)

	k := Kernel{
		statFile: tmpfile,
	}

	acc := testutil.Accumulator{}
	err := k.Gather(&acc)
	assert.NoError(t, err)

	fields := map[string]interface{}{
		"boot_time":        int64(1457505775),
		"context_switches": int64(2626618),
		"disk_pages_in":    int64(5741),
		"disk_pages_out":   int64(1808),
		"interrupts":       int64(1472736),
	}
	acc.AssertContainsFields(t, "kernel", fields)
}

func TestInvalidProcFile1(t *testing.T) {
	tmpfile := makeFakeStatFile([]byte(statFile_Invalid))
	defer os.Remove(tmpfile)

	k := Kernel{
		statFile: tmpfile,
	}

	acc := testutil.Accumulator{}
	err := k.Gather(&acc)
	assert.Error(t, err)
}

func TestInvalidProcFile2(t *testing.T) {
	tmpfile := makeFakeStatFile([]byte(statFile_Invalid2))
	defer os.Remove(tmpfile)

	k := Kernel{
		statFile: tmpfile,
	}

	acc := testutil.Accumulator{}
	err := k.Gather(&acc)
	assert.Error(t, err)
}

func TestNoProcFile(t *testing.T) {
	tmpfile := makeFakeStatFile([]byte(statFile_Invalid2))
	os.Remove(tmpfile)

	k := Kernel{
		statFile: tmpfile,
	}

	acc := testutil.Accumulator{}
	err := k.Gather(&acc)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "does not exist")
}

const statFile_Full = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
btime 1457505775
processes 10673
procs_running 2
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
page 5741 1808
swap 1 0
`

const statFile_Partial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
btime 1457505775
procs_running 2
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
page 5741 1808
`

// missing btime measurement
const statFile_Invalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
btime
processes 10673
procs_running 2
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
page 5741 1808
swap 1 0
`

// missing second page measurement
const statFile_Invalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
processes 10673
procs_running 2
page 5741
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
`

func makeFakeStatFile(content []byte) string {
	tmpfile, err := ioutil.TempFile("", "kerneltest")
	if err != nil {
		panic(err)
	}

	if _, err := tmpfile.Write(content); err != nil {
		panic(err)
	}
	if err := tmpfile.Close(); err != nil {
		panic(err)
	}

	return tmpfile.Name()
}

216 plugins/inputs/system/processes.go Normal file
@@ -0,0 +1,216 @@
// +build !windows

package system

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path"
	"runtime"
	"strconv"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Processes struct {
	execPS       func() ([]byte, error)
	readProcFile func(statFile string) ([]byte, error)

	forcePS   bool
	forceProc bool
}

func (p *Processes) Description() string {
	return "Get the number of processes and group them by status"
}

func (p *Processes) SampleConfig() string { return "" }

func (p *Processes) Gather(acc telegraf.Accumulator) error {
	// Get an empty map of metric fields
	fields := getEmptyFields()

	// Decide if we will use 'ps' to get stats (use procfs otherwise)
	usePS := true
	if runtime.GOOS == "linux" {
		usePS = false
	}
	if p.forcePS {
		usePS = true
	} else if p.forceProc {
		usePS = false
	}

	// Gather stats from 'ps' or procfs
	if usePS {
		if err := p.gatherFromPS(fields); err != nil {
			return err
		}
	} else {
		if err := p.gatherFromProc(fields); err != nil {
			return err
		}
	}

	acc.AddFields("processes", fields, nil)
	return nil
}

// Gets empty fields of metrics based on the OS
func getEmptyFields() map[string]interface{} {
	fields := map[string]interface{}{
		"blocked":  int64(0),
		"zombies":  int64(0),
		"stopped":  int64(0),
		"running":  int64(0),
		"sleeping": int64(0),
		"total":    int64(0),
	}
	switch runtime.GOOS {
	case "freebsd":
		fields["idle"] = int64(0)
		fields["wait"] = int64(0)
	case "darwin":
		fields["idle"] = int64(0)
	case "openbsd":
		fields["idle"] = int64(0)
	case "linux":
		fields["paging"] = int64(0)
		fields["total_threads"] = int64(0)
	}
	return fields
}

// exec `ps` to get all process states
func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
	out, err := p.execPS()
	if err != nil {
		return err
	}

	for i, status := range bytes.Fields(out) {
		if i == 0 && string(status) == "STAT" {
			// This is a header, skip it
			continue
		}
		switch status[0] {
		case 'W':
			fields["wait"] = fields["wait"].(int64) + int64(1)
		case 'U', 'D', 'L':
			// Also known as uninterruptible sleep or disk sleep
			fields["blocked"] = fields["blocked"].(int64) + int64(1)
		case 'Z':
			fields["zombies"] = fields["zombies"].(int64) + int64(1)
		case 'T':
			fields["stopped"] = fields["stopped"].(int64) + int64(1)
		case 'R':
			fields["running"] = fields["running"].(int64) + int64(1)
		case 'S':
			fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
		case 'I':
			fields["idle"] = fields["idle"].(int64) + int64(1)
		default:
			log.Printf("processes: Unknown state [ %s ] from ps",
				string(status[0]))
		}
		fields["total"] = fields["total"].(int64) + int64(1)
	}
	return nil
}

// get process states from /proc/(pid)/stat files
func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
	files, err := ioutil.ReadDir("/proc")
	if err != nil {
		return err
	}

	for _, file := range files {
		if !file.IsDir() {
			continue
		}

		statFile := path.Join("/proc", file.Name(), "stat")
		data, err := p.readProcFile(statFile)
		if err != nil {
			return err
		}
		if data == nil {
			continue
		}

		stats := bytes.Fields(data)
		if len(stats) < 3 {
			return fmt.Errorf("Something is terribly wrong with %s", statFile)
		}
		switch stats[2][0] {
		case 'R':
			fields["running"] = fields["running"].(int64) + int64(1)
		case 'S':
			fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
		case 'D':
			fields["blocked"] = fields["blocked"].(int64) + int64(1)
		case 'Z':
			fields["zombies"] = fields["zombies"].(int64) + int64(1)
		case 'T', 't':
			fields["stopped"] = fields["stopped"].(int64) + int64(1)
		case 'W':
			fields["paging"] = fields["paging"].(int64) + int64(1)
		default:
			log.Printf("processes: Unknown state [ %s ] in file %s",
				string(stats[2][0]), statFile)
		}
		fields["total"] = fields["total"].(int64) + int64(1)

		threads, err := strconv.Atoi(string(stats[19]))
		if err != nil {
			log.Printf("processes: Error parsing thread count: %s", err)
			continue
		}
		fields["total_threads"] = fields["total_threads"].(int64) + int64(threads)
	}
	return nil
}

func readProcFile(statFile string) ([]byte, error) {
	if _, err := os.Stat(statFile); os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}

	data, err := ioutil.ReadFile(statFile)
	if err != nil {
		return nil, err
	}

	return data, nil
}

func execPS() ([]byte, error) {
	bin, err := exec.LookPath("ps")
	if err != nil {
		return nil, err
	}

	out, err := exec.Command(bin, "axo", "state").Output()
	if err != nil {
		return nil, err
	}

	return out, err
}

func init() {
	inputs.Add("processes", func() telegraf.Input {
		return &Processes{
			execPS:       execPS,
			readProcFile: readProcFile,
		}
	})
}

151 plugins/inputs/system/processes_test.go Normal file
@@ -0,0 +1,151 @@
package system

import (
	"fmt"
	"runtime"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestProcesses(t *testing.T) {
	processes := &Processes{
		execPS:       execPS,
		readProcFile: readProcFile,
	}
	var acc testutil.Accumulator

	err := processes.Gather(&acc)
	require.NoError(t, err)

	assert.True(t, acc.HasIntField("processes", "running"))
	assert.True(t, acc.HasIntField("processes", "sleeping"))
	assert.True(t, acc.HasIntField("processes", "stopped"))
	assert.True(t, acc.HasIntField("processes", "total"))
	total, ok := acc.Get("processes")
	require.True(t, ok)
	assert.True(t, total.Fields["total"].(int64) > 0)
}

func TestFromPS(t *testing.T) {
	processes := &Processes{
		execPS:  testExecPS,
		forcePS: true,
	}

	var acc testutil.Accumulator
	err := processes.Gather(&acc)
	require.NoError(t, err)

	fields := getEmptyFields()
	fields["blocked"] = int64(4)
	fields["zombies"] = int64(1)
	fields["running"] = int64(4)
	fields["sleeping"] = int64(34)
	fields["total"] = int64(43)

	acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
}

func TestFromPSError(t *testing.T) {
	processes := &Processes{
		execPS:  testExecPSError,
		forcePS: true,
	}

	var acc testutil.Accumulator
	err := processes.Gather(&acc)
	require.Error(t, err)
}

func TestFromProcFiles(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("This test only runs on linux")
	}
	tester := tester{}
	processes := &Processes{
		readProcFile: tester.testProcFile,
		forceProc:    true,
	}

	var acc testutil.Accumulator
	err := processes.Gather(&acc)
	require.NoError(t, err)

	fields := getEmptyFields()
	fields["sleeping"] = tester.calls
	fields["total_threads"] = tester.calls * 2
	fields["total"] = tester.calls

	acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
}

func testExecPS() ([]byte, error) {
	return []byte(testPSOut), nil
}

// struct for counting calls to testProcFile
type tester struct {
	calls int64
}

func (t *tester) testProcFile(_ string) ([]byte, error) {
	t.calls++
	return []byte(fmt.Sprintf(testProcStat, "S", "2")), nil
}

func testExecPSError() ([]byte, error) {
	return []byte(testPSOut), fmt.Errorf("ERROR!")
}

const testPSOut = `
STAT
S
S
S
S
R
R
S
S
Ss
Ss
S
SNs
Ss
Ss
S
R+
S
U
S
S
S
S
Ss
S+
Ss
S
S+
S+
Ss
S+
Ss
S
R+
Ss
S
S+
S+
Ss
L
U
Z
D
S+
`

const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
`
@@ -31,11 +31,17 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
		return err
	}

	users, err := host.Users()
	if err != nil {
		return err
	}

	fields := map[string]interface{}{
		"load1":         loadavg.Load1,
		"load5":         loadavg.Load5,
		"load15":        loadavg.Load15,
		"uptime":        hostinfo.Uptime,
		"n_users":       len(users),
		"uptime_format": format_uptime(hostinfo.Uptime),
	}
	acc.AddFields("system", fields, nil)

30 plugins/inputs/tcp_listener/README.md Normal file
@@ -0,0 +1,30 @@
# TCP listener service input plugin

The TCP listener is a service input plugin that listens for messages on a TCP
socket and adds those messages to InfluxDB.
The plugin expects messages in the
[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

### Configuration:

This is a sample configuration for the plugin.

```toml
# Generic TCP listener
[[inputs.tcp_listener]]
  ## Address and port to host TCP listener on
  service_address = ":8094"

  ## Number of TCP messages allowed to queue up. Once filled, the
  ## TCP listener will start dropping packets.
  allowed_pending_messages = 10000

  ## Maximum number of concurrent TCP connections to allow
  max_tcp_connections = 250

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```
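
Any TCP client that writes newline-delimited lines in the configured data format can feed the listener. As a quick sketch (the address assumes the sample configuration above):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Assumes service_address = ":8094" from the sample config.
	conn, err := net.Dial("tcp", "localhost:8094")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One metric in InfluxDB line protocol, newline-terminated.
	fmt.Fprint(conn, "cpu_load_short,host=server01 value=12.0\n")
}
```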

264 plugins/inputs/tcp_listener/tcp_listener.go Normal file
@@ -0,0 +1,264 @@
package tcp_listener

import (
	"bufio"
	"fmt"
	"log"
	"net"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)

type TcpListener struct {
	ServiceAddress         string
	AllowedPendingMessages int
	MaxTCPConnections      int `toml:"max_tcp_connections"`

	sync.Mutex
	// Lock for preventing a data race during resource cleanup
	cleanup sync.Mutex
	wg      sync.WaitGroup

	in   chan []byte
	done chan struct{}
	// accept channel tracks how many active connections there are, if there
	// is an available bool in accept, then we are below the maximum and can
	// accept the connection
	accept chan bool

	// track the listener here so we can close it in Stop()
	listener *net.TCPListener
	// track current connections so we can close them in Stop()
	conns map[string]*net.TCPConn

	parser parsers.Parser
	acc    telegraf.Accumulator
}

var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
	"You may want to increase allowed_pending_messages in the config\n"

const sampleConfig = `
  ## Address and port to host TCP listener on
  service_address = ":8094"

  ## Number of TCP messages allowed to queue up. Once filled, the
  ## TCP listener will start dropping packets.
  allowed_pending_messages = 10000

  ## Maximum number of concurrent TCP connections to allow
  max_tcp_connections = 250

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`

func (t *TcpListener) SampleConfig() string {
	return sampleConfig
}

func (t *TcpListener) Description() string {
	return "Generic TCP listener"
}

// All the work is done in the Start() function, so this is just a dummy
// function.
func (t *TcpListener) Gather(_ telegraf.Accumulator) error {
	return nil
}

func (t *TcpListener) SetParser(parser parsers.Parser) {
	t.parser = parser
}

// Start starts the tcp listener service.
func (t *TcpListener) Start(acc telegraf.Accumulator) error {
	t.Lock()
	defer t.Unlock()

	t.acc = acc
	t.in = make(chan []byte, t.AllowedPendingMessages)
	t.done = make(chan struct{})
	t.accept = make(chan bool, t.MaxTCPConnections)
	t.conns = make(map[string]*net.TCPConn)
	for i := 0; i < t.MaxTCPConnections; i++ {
		t.accept <- true
	}

	// Start listener
	var err error
	address, _ := net.ResolveTCPAddr("tcp", t.ServiceAddress)
	t.listener, err = net.ListenTCP("tcp", address)
	if err != nil {
		log.Fatalf("ERROR: ListenTCP - %s", err)
		return err
	}
	log.Println("TCP server listening on: ", t.listener.Addr().String())

	t.wg.Add(2)
	go t.tcpListen()
	go t.tcpParser()

	log.Printf("Started TCP listener service on %s\n", t.ServiceAddress)
	return nil
}

// Stop cleans up all resources
func (t *TcpListener) Stop() {
	t.Lock()
	defer t.Unlock()
	close(t.done)
	t.listener.Close()

	// Close all open TCP connections
	//  - get all conns from the t.conns map and put into slice
	//  - this is so the forget() function doesn't conflict with looping
	//    over the t.conns map
	var conns []*net.TCPConn
	t.cleanup.Lock()
	for _, conn := range t.conns {
		conns = append(conns, conn)
	}
	t.cleanup.Unlock()
	for _, conn := range conns {
		conn.Close()
	}

	t.wg.Wait()
	close(t.in)
	log.Println("Stopped TCP listener service on ", t.ServiceAddress)
}

// tcpListen listens for incoming TCP connections.
func (t *TcpListener) tcpListen() error {
	defer t.wg.Done()

	for {
		select {
		case <-t.done:
			return nil
		default:
			// Accept connection:
			conn, err := t.listener.AcceptTCP()
			if err != nil {
				return err
			}

			log.Printf("Received TCP Connection from %s", conn.RemoteAddr())

			select {
			case <-t.accept:
				// not over connection limit, handle the connection properly.
				t.wg.Add(1)
				// generate a random id for this TCPConn
				id := internal.RandomString(6)
				t.remember(id, conn)
				go t.handler(conn, id)
			default:
				// We are over the connection limit, refuse & close.
				t.refuser(conn)
			}
		}
	}
}

// refuser refuses a TCP connection
func (t *TcpListener) refuser(conn *net.TCPConn) {
	// Tell the connection why we are closing.
	fmt.Fprintf(conn, "Telegraf maximum concurrent TCP connections (%d)"+
		" reached, closing.\nYou may want to increase max_tcp_connections in"+
		" the Telegraf tcp listener configuration.\n", t.MaxTCPConnections)
	conn.Close()
	log.Printf("Refused TCP Connection from %s", conn.RemoteAddr())
	log.Printf("WARNING: Maximum TCP Connections reached, you may want to" +
		" adjust max_tcp_connections")
}

// handler handles a single TCP Connection
func (t *TcpListener) handler(conn *net.TCPConn, id string) {
	// connection cleanup function
	defer func() {
		t.wg.Done()
		conn.Close()
		log.Printf("Closed TCP Connection from %s", conn.RemoteAddr())
		// Add one connection potential back to channel when this one closes
		t.accept <- true
		t.forget(id)
	}()

	scanner := bufio.NewScanner(conn)
	for {
		select {
		case <-t.done:
			return
		default:
			if !scanner.Scan() {
				return
			}
			buf := scanner.Bytes()
			select {
			case t.in <- buf:
			default:
				log.Printf(dropwarn, string(buf))
			}
		}
	}
}

// tcpParser parses the incoming tcp byte packets
func (t *TcpListener) tcpParser() error {
	defer t.wg.Done()
	for {
		select {
		case <-t.done:
			return nil
		case packet := <-t.in:
			if len(packet) == 0 {
				continue
			}
			metrics, err := t.parser.Parse(packet)
			if err == nil {
				t.storeMetrics(metrics)
			} else {
				log.Printf("Malformed packet: [%s], Error: %s\n",
					string(packet), err)
			}
		}
	}
}

func (t *TcpListener) storeMetrics(metrics []telegraf.Metric) error {
	t.Lock()
	defer t.Unlock()
	for _, m := range metrics {
		t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
	return nil
}

// forget a TCP connection
func (t *TcpListener) forget(id string) {
	t.cleanup.Lock()
	defer t.cleanup.Unlock()
	delete(t.conns, id)
}

// remember a TCP connection
func (t *TcpListener) remember(id string, conn *net.TCPConn) {
	t.cleanup.Lock()
	defer t.cleanup.Unlock()
	t.conns[id] = conn
}

func init() {
	inputs.Add("tcp_listener", func() telegraf.Input {
		return &TcpListener{}
	})
}
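
A note on the design: the `accept` channel above acts as a counting semaphore. `Start()` seeds it with `MaxTCPConnections` tokens, `tcpListen()` takes one token per accepted connection, and `handler()` returns the token when the connection closes. The standalone sketch below (not part of this commit) shows the same pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxConns = 2

	// Seed the semaphore with one token per allowed connection.
	sem := make(chan bool, maxConns)
	for i := 0; i < maxConns; i++ {
		sem <- true
	}

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		select {
		case <-sem:
			// Token acquired: handle the "connection".
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				fmt.Println("handling connection", id)
				sem <- true // return the token when done
			}(i)
		default:
			// No token available: refuse.
			fmt.Println("refusing connection", i)
		}
	}
	wg.Wait()
}
```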

259 plugins/inputs/tcp_listener/tcp_listener_test.go Normal file
@@ -0,0 +1,259 @@
package tcp_listener

import (
	"fmt"
	"net"
	"testing"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"

	testMsgs = `
cpu_load_short,host=server02 value=12.0 1422568543702900257
cpu_load_short,host=server03 value=12.0 1422568543702900257
cpu_load_short,host=server04 value=12.0 1422568543702900257
cpu_load_short,host=server05 value=12.0 1422568543702900257
cpu_load_short,host=server06 value=12.0 1422568543702900257
`
)

func newTestTcpListener() (*TcpListener, chan []byte) {
	in := make(chan []byte, 1500)
	listener := &TcpListener{
		ServiceAddress:         ":8194",
		AllowedPendingMessages: 10000,
		MaxTCPConnections:      250,
		in:                     in,
		done:                   make(chan struct{}),
	}
	return listener, in
}

func TestConnectTCP(t *testing.T) {
	listener := TcpListener{
		ServiceAddress:         ":8194",
		AllowedPendingMessages: 10000,
		MaxTCPConnections:      250,
	}
	listener.parser, _ = parsers.NewInfluxParser()

	acc := &testutil.Accumulator{}
	require.NoError(t, listener.Start(acc))
	defer listener.Stop()

	time.Sleep(time.Millisecond * 25)
	conn, err := net.Dial("tcp", "127.0.0.1:8194")
	require.NoError(t, err)

	// send single message to socket
	fmt.Fprintf(conn, testMsg)
	time.Sleep(time.Millisecond * 15)
	acc.AssertContainsTaggedFields(t, "cpu_load_short",
		map[string]interface{}{"value": float64(12)},
		map[string]string{"host": "server01"},
	)

	// send multiple messages to socket
	fmt.Fprintf(conn, testMsgs)
	time.Sleep(time.Millisecond * 15)
	hostTags := []string{"server02", "server03",
		"server04", "server05", "server06"}
	for _, hostTag := range hostTags {
		acc.AssertContainsTaggedFields(t, "cpu_load_short",
			map[string]interface{}{"value": float64(12)},
			map[string]string{"host": hostTag},
		)
	}
}

// Test that MaxTCPConnections is respected
func TestConcurrentConns(t *testing.T) {
	listener := TcpListener{
		ServiceAddress:         ":8195",
		AllowedPendingMessages: 10000,
		MaxTCPConnections:      2,
	}
	listener.parser, _ = parsers.NewInfluxParser()

	acc := &testutil.Accumulator{}
	require.NoError(t, listener.Start(acc))
	defer listener.Stop()

	time.Sleep(time.Millisecond * 25)
	_, err := net.Dial("tcp", "127.0.0.1:8195")
	assert.NoError(t, err)
	_, err = net.Dial("tcp", "127.0.0.1:8195")
	assert.NoError(t, err)

	// Connection over the limit:
	conn, err := net.Dial("tcp", "127.0.0.1:8195")
	assert.NoError(t, err)
	net.Dial("tcp", "127.0.0.1:8195")
	buf := make([]byte, 1500)
	n, err := conn.Read(buf)
	assert.NoError(t, err)
	assert.Equal(t,
		"Telegraf maximum concurrent TCP connections (2) reached, closing.\n"+
			"You may want to increase max_tcp_connections in"+
			" the Telegraf tcp listener configuration.\n",
		string(buf[:n]))

	_, err = conn.Write([]byte(testMsg))
	assert.NoError(t, err)
	time.Sleep(time.Millisecond * 10)
	assert.Zero(t, acc.NFields())
}

// Test that MaxTCPConnections is respected when max==1
func TestConcurrentConns1(t *testing.T) {
	listener := TcpListener{
		ServiceAddress:         ":8196",
		AllowedPendingMessages: 10000,
		MaxTCPConnections:      1,
	}
	listener.parser, _ = parsers.NewInfluxParser()

	acc := &testutil.Accumulator{}
	require.NoError(t, listener.Start(acc))
	defer listener.Stop()

	time.Sleep(time.Millisecond * 25)
	_, err := net.Dial("tcp", "127.0.0.1:8196")
	assert.NoError(t, err)

	// Connection over the limit:
	conn, err := net.Dial("tcp", "127.0.0.1:8196")
	assert.NoError(t, err)
	net.Dial("tcp", "127.0.0.1:8196")
	buf := make([]byte, 1500)
	n, err := conn.Read(buf)
	assert.NoError(t, err)
	assert.Equal(t,
		"Telegraf maximum concurrent TCP connections (1) reached, closing.\n"+
			"You may want to increase max_tcp_connections in"+
			" the Telegraf tcp listener configuration.\n",
		string(buf[:n]))

	_, err = conn.Write([]byte(testMsg))
	assert.NoError(t, err)
	time.Sleep(time.Millisecond * 10)
	assert.Zero(t, acc.NFields())
}

// Test that concurrent connections are closed cleanly on Stop()
func TestCloseConcurrentConns(t *testing.T) {
	listener := TcpListener{
		ServiceAddress:         ":8195",
		AllowedPendingMessages: 10000,
		MaxTCPConnections:      2,
	}
	listener.parser, _ = parsers.NewInfluxParser()

	acc := &testutil.Accumulator{}
	require.NoError(t, listener.Start(acc))

	time.Sleep(time.Millisecond * 25)
	_, err := net.Dial("tcp", "127.0.0.1:8195")
	assert.NoError(t, err)
	_, err = net.Dial("tcp", "127.0.0.1:8195")
	assert.NoError(t, err)

	listener.Stop()
}

func TestRunParser(t *testing.T) {
	var testmsg = []byte(testMsg)

	listener, in := newTestTcpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewInfluxParser()
	listener.wg.Add(1)
	go listener.tcpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)
	listener.Gather(&acc)

	if a := acc.NFields(); a != 1 {
		t.Errorf("got %v, expected %v", a, 1)
	}

	acc.AssertContainsTaggedFields(t, "cpu_load_short",
		map[string]interface{}{"value": float64(12)},
		map[string]string{"host": "server01"},
	)
}

func TestRunParserInvalidMsg(t *testing.T) {
	var testmsg = []byte("cpu_load_short")

	listener, in := newTestTcpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewInfluxParser()
	listener.wg.Add(1)
	go listener.tcpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)

	if a := acc.NFields(); a != 0 {
		t.Errorf("got %v, expected %v", a, 0)
	}
}

func TestRunParserGraphiteMsg(t *testing.T) {
	var testmsg = []byte("cpu.load.graphite 12 1454780029")

	listener, in := newTestTcpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
	listener.wg.Add(1)
	go listener.tcpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)
	listener.Gather(&acc)

	acc.AssertContainsFields(t, "cpu_load_graphite",
		map[string]interface{}{"value": float64(12)})
}

func TestRunParserJSONMsg(t *testing.T) {
	var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n")

	listener, in := newTestTcpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil)
	listener.wg.Add(1)
	go listener.tcpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)
	listener.Gather(&acc)

	acc.AssertContainsFields(t, "udp_json_test",
		map[string]interface{}{
			"a":   float64(5),
			"b_c": float64(6),
		})
}
91 plugins/inputs/udp_listener/README.md Normal file
@@ -0,0 +1,91 @@
# UDP listener service input plugin

The UDP listener is a service input plugin that listens for messages on a UDP
socket and adds those messages to InfluxDB.
The plugin expects messages in the
[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

### Configuration:

This is a sample configuration for the plugin.

```toml
[[inputs.udp_listener]]
  ## Address and port to host UDP listener on
  service_address = ":8092"

  ## Number of UDP messages allowed to queue up. Once filled, the
  ## UDP listener will start dropping packets.
  allowed_pending_messages = 10000

  ## UDP packet size for the server to listen for. This will depend
  ## on the size of the packets that the client is sending, which is
  ## usually 1500 bytes.
  udp_packet_size = 1500

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```
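
Any UDP client that sends datagrams in the configured data format can feed the listener. As a quick sketch (the address assumes the sample configuration above):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Assumes service_address = ":8092" from the sample config.
	conn, err := net.Dial("udp", "localhost:8092")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One metric per datagram; keep it within udp_packet_size bytes.
	fmt.Fprint(conn, "cpu_load_short,host=server01 value=12.0")
}
```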

## A Note on UDP OS Buffer Sizes

Some OSes (most notably, Linux) place very restrictive limits on the performance
of UDP protocols. It is _highly_ recommended that you increase these OS limits to
at least 8MB before trying to run large amounts of UDP traffic to your instance.
8MB is just a recommendation, and can be adjusted higher.

### Linux
Check the current UDP/IP receive buffer limit & default by typing the following
commands:

```
sysctl net.core.rmem_max
sysctl net.core.rmem_default
```

If the values are less than 8388608 bytes you should add the following lines to
the /etc/sysctl.conf file:

```
net.core.rmem_max=8388608
net.core.rmem_default=8388608
```

Changes to /etc/sysctl.conf do not take effect until reboot.
To update the values immediately, type the following commands as root:

```
sysctl -w net.core.rmem_max=8388608
sysctl -w net.core.rmem_default=8388608
```

### BSD/Darwin

On BSD/Darwin systems you need to add about 15% padding to the kernel limit
for the socket buffer. Meaning if you want an 8MB buffer (8388608 bytes) you
need to set the kernel limit to `8388608*1.15 = 9646900`. This is not documented
anywhere, but can be seen
[in the kernel source here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64)

Check the current UDP/IP buffer limit by typing the following command:

```
sysctl kern.ipc.maxsockbuf
```

If the value is less than 9646900 bytes you should add the following lines
to the /etc/sysctl.conf file (create it if necessary):

```
kern.ipc.maxsockbuf=9646900
```

Changes to /etc/sysctl.conf do not take effect until reboot.
To update the values immediately, type the following commands as root:

```
sysctl -w kern.ipc.maxsockbuf=9646900
```
154 plugins/inputs/udp_listener/udp_listener.go Normal file
@@ -0,0 +1,154 @@
package udp_listener

import (
	"log"
	"net"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)

type UdpListener struct {
	ServiceAddress         string
	UDPPacketSize          int `toml:"udp_packet_size"`
	AllowedPendingMessages int
	sync.Mutex

	in   chan []byte
	done chan struct{}

	parser parsers.Parser

	// Keep the accumulator in this struct
	acc telegraf.Accumulator
}

const UDP_PACKET_SIZE int = 1500

var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
	"You may want to increase allowed_pending_messages in the config\n"

const sampleConfig = `
  ## Address and port to host UDP listener on
  service_address = ":8092"

  ## Number of UDP messages allowed to queue up. Once filled, the
  ## UDP listener will start dropping packets.
  allowed_pending_messages = 10000

  ## UDP packet size for the server to listen for. This will depend
  ## on the size of the packets that the client is sending, which is
  ## usually 1500 bytes, but can be as large as 65,535 bytes.
  udp_packet_size = 1500

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`

func (u *UdpListener) SampleConfig() string {
	return sampleConfig
}

func (u *UdpListener) Description() string {
	return "Generic UDP listener"
}

// All the work is done in the Start() function, so this is just a dummy
// function.
func (u *UdpListener) Gather(_ telegraf.Accumulator) error {
	return nil
}

func (u *UdpListener) SetParser(parser parsers.Parser) {
	u.parser = parser
}

func (u *UdpListener) Start(acc telegraf.Accumulator) error {
	u.Lock()
	defer u.Unlock()

	u.acc = acc
	u.in = make(chan []byte, u.AllowedPendingMessages)
	u.done = make(chan struct{})

	go u.udpListen()
	go u.udpParser()

	log.Printf("Started UDP listener service on %s\n", u.ServiceAddress)
	return nil
}

func (u *UdpListener) Stop() {
	u.Lock()
	defer u.Unlock()
	close(u.done)
	close(u.in)
	log.Println("Stopped UDP listener service on ", u.ServiceAddress)
}

func (u *UdpListener) udpListen() error {
	address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress)
	listener, err := net.ListenUDP("udp", address)
	if err != nil {
		log.Fatalf("ERROR: ListenUDP - %s", err)
	}
	defer listener.Close()
	log.Println("UDP server listening on: ", listener.LocalAddr().String())

	for {
		select {
		case <-u.done:
			return nil
		default:
			buf := make([]byte, u.UDPPacketSize)
			n, _, err := listener.ReadFromUDP(buf)
			if err != nil {
				log.Printf("ERROR: %s\n", err.Error())
			}

			select {
			case u.in <- buf[:n]:
			default:
				log.Printf(dropwarn, string(buf[:n]))
			}
		}
	}
}

func (u *UdpListener) udpParser() error {
	for {
		select {
		case <-u.done:
			return nil
		case packet := <-u.in:
			metrics, err := u.parser.Parse(packet)
			if err == nil {
				u.storeMetrics(metrics)
			} else {
				log.Printf("Malformed packet: [%s], Error: %s\n", packet, err)
			}
		}
	}
}

func (u *UdpListener) storeMetrics(metrics []telegraf.Metric) error {
	u.Lock()
	defer u.Unlock()
	for _, m := range metrics {
		u.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
	return nil
}

func init() {
	inputs.Add("udp_listener", func() telegraf.Input {
		return &UdpListener{
			UDPPacketSize: UDP_PACKET_SIZE,
		}
	})
}

112 plugins/inputs/udp_listener/udp_listener_test.go Normal file
@@ -0,0 +1,112 @@
package udp_listener

import (
	"io/ioutil"
	"log"
	"testing"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/testutil"
)

func newTestUdpListener() (*UdpListener, chan []byte) {
	in := make(chan []byte, 1500)
	listener := &UdpListener{
		ServiceAddress:         ":8125",
		UDPPacketSize:          1500,
		AllowedPendingMessages: 10000,
		in:                     in,
		done:                   make(chan struct{}),
	}
	return listener, in
}

func TestRunParser(t *testing.T) {
	log.SetOutput(ioutil.Discard)
	var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257")

	listener, in := newTestUdpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewInfluxParser()
	go listener.udpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)
	listener.Gather(&acc)

	if a := acc.NFields(); a != 1 {
		t.Errorf("got %v, expected %v", a, 1)
	}

	acc.AssertContainsTaggedFields(t, "cpu_load_short",
		map[string]interface{}{"value": float64(12)},
		map[string]string{"host": "server01"},
	)
}

func TestRunParserInvalidMsg(t *testing.T) {
	log.SetOutput(ioutil.Discard)
	var testmsg = []byte("cpu_load_short")

	listener, in := newTestUdpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewInfluxParser()
	go listener.udpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)

	if a := acc.NFields(); a != 0 {
		t.Errorf("got %v, expected %v", a, 0)
	}
}

func TestRunParserGraphiteMsg(t *testing.T) {
	log.SetOutput(ioutil.Discard)
	var testmsg = []byte("cpu.load.graphite 12 1454780029")

	listener, in := newTestUdpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
	go listener.udpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)
	listener.Gather(&acc)

	acc.AssertContainsFields(t, "cpu_load_graphite",
		map[string]interface{}{"value": float64(12)})
}

func TestRunParserJSONMsg(t *testing.T) {
	log.SetOutput(ioutil.Discard)
	var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n")

	listener, in := newTestUdpListener()
	acc := testutil.Accumulator{}
	listener.acc = &acc
	defer close(listener.done)

	listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil)
	go listener.udpParser()

	in <- testmsg
	time.Sleep(time.Millisecond * 25)
	listener.Gather(&acc)

	acc.AssertContainsFields(t, "udp_json_test",
		map[string]interface{}{
			"a":   float64(5),
			"b_c": float64(6),
		})
}
@@ -67,6 +67,9 @@ func (z *Zookeeper) gatherServer(address string, acc telegraf.Accumulator) error
 	}
 	defer c.Close()

+	// Extend connection
+	c.SetDeadline(time.Now().Add(defaultTimeout))
+
 	fmt.Fprintf(c, "%s\n", "mntr")
 	rdr := bufio.NewReader(c)
 	scanner := bufio.NewScanner(rdr)
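The added SetDeadline call is the standard way to bound a blocking request/response exchange on a net.Conn. A sketch of the pattern in isolation (the dial step and the helper name are illustrative; the hunk itself begins after the connection already exists):

```go
// mntrLines sends Zookeeper's "mntr" command and returns the response lines.
// This helper is a sketch, not part of the plugin; it assumes imports of
// bufio, fmt, net, and time.
func mntrLines(address string, timeout time.Duration) ([]string, error) {
    c, err := net.DialTimeout("tcp", address, timeout)
    if err != nil {
        return nil, err
    }
    defer c.Close()

    // Without a deadline, a stalled server could block the scanner forever.
    c.SetDeadline(time.Now().Add(timeout))

    fmt.Fprintf(c, "%s\n", "mntr")
    var lines []string
    scanner := bufio.NewScanner(bufio.NewReader(c))
    for scanner.Scan() {
        lines = append(lines, scanner.Text())
    }
    return lines, scanner.Err()
}
```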
@@ -18,15 +18,16 @@ import (

type InfluxDB struct {
	// URL is only for backwards compatibility
-	URL        string
-	URLs       []string `toml:"urls"`
-	Username   string
-	Password   string
-	Database   string
-	UserAgent  string
-	Precision  string
-	Timeout    internal.Duration
-	UDPPayload int `toml:"udp_payload"`
+	URL             string
+	URLs            []string `toml:"urls"`
+	Username        string
+	Password        string
+	Database        string
+	UserAgent       string
+	Precision       string
+	RetentionPolicy string
+	Timeout         internal.Duration
+	UDPPayload      int `toml:"udp_payload"`

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`

@@ -46,10 +47,12 @@ var sampleConfig = `
  ## this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://localhost:8086"] # required
-  ## The target database for metrics (telegraf will create it if not exists)
+  ## The target database for metrics (telegraf will create it if not exists).
  database = "telegraf" # required
+  ## Retention policy to write to.
+  retention_policy = "default"
  ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-  ## note: using "s" precision greatly improves InfluxDB compression
+  ## note: using "s" precision greatly improves InfluxDB compression.
  precision = "s"

  ## Write timeout (for the InfluxDB client), formatted as a string.

@@ -129,6 +132,7 @@ func (i *InfluxDB) Connect() error {

		if e != nil {
			log.Println("Database creation failed: " + e.Error())
+			continue
		}

		conns = append(conns, c)

@@ -156,9 +160,16 @@ func (i *InfluxDB) Description() string {
// Choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful. If all servers fail, return error.
func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
+	if len(i.conns) == 0 {
+		err := i.Connect()
+		if err != nil {
+			return err
+		}
+	}
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
-		Database:  i.Database,
-		Precision: i.Precision,
+		Database:        i.Database,
+		Precision:       i.Precision,
+		RetentionPolicy: i.RetentionPolicy,
	})
	if err != nil {
		return err

@@ -180,6 +191,12 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
			break
		}
	}

+	// If all of the writes failed, create a new connection array so that
+	// i.Connect() will be called on the next gather.
+	if err != nil {
+		i.conns = make([]client.Client, 0)
+	}
	return err
}
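On the client side, the new option is a single extra field on BatchPointsConfig. A minimal sketch using the same influxdb client as above (the helper name and the database/policy values are illustrative):

```go
// writeWithRetentionPolicy batches points against a named retention policy.
func writeWithRetentionPolicy(c client.Client, rp string) error {
    bp, err := client.NewBatchPoints(client.BatchPointsConfig{
        Database:        "telegraf",
        Precision:       "s",
        RetentionPolicy: rp, // mirrors retention_policy in the config
    })
    if err != nil {
        return err
    }
    // ... points would be added to bp here ...
    return c.Write(bp)
}
```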
@@ -4,19 +4,24 @@ import (
	"bytes"
	"encoding/json"
	"fmt"
+	"io/ioutil"
+	"log"
	"net/http"
+	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf/plugins/serializers/graphite"
)

type Librato struct {
-	ApiUser   string
-	ApiToken  string
-	SourceTag string
-	Timeout   internal.Duration
+	ApiUser      string
+	ApiToken     string
+	Debug        bool
+	NameFromTags bool
+	SourceTag    string
+	Timeout      internal.Duration

	apiUrl string
	client *http.Client

@@ -32,9 +37,12 @@ var sampleConfig = `
  ## Librato API token
  api_token = "my-secret-token" # required.

-  ## Tag Field to populate source attribute (optional)
-  ## This is typically the _hostname_ from which the metric was obtained.
-  source_tag = "hostname"
+  ### Debug
+  # debug = false
+
+  ### Tag Field to populate source attribute (optional)
+  ### This is typically the _hostname_ from which the metric was obtained.
+  source_tag = "host"

  ## Connection timeout.
  # timeout = "5s"

@@ -82,17 +90,27 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
			for _, gauge := range gauges {
				tempGauges = append(tempGauges, gauge)
				metricCounter++
+				if l.Debug {
+					log.Printf("[DEBUG] Got a gauge: %v\n", gauge)
+				}
			}
		} else {
			log.Printf("unable to build Gauge for %s, skipping\n", m.Name())
+			if l.Debug {
+				log.Printf("[DEBUG] Couldn't build gauge: %v\n", err)
+			}
		}
	}

	lmetrics.Gauges = make([]*Gauge, metricCounter)
	copy(lmetrics.Gauges, tempGauges[0:])
-	metricsBytes, err := json.Marshal(metrics)
+	metricsBytes, err := json.Marshal(lmetrics)
	if err != nil {
		return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
+	} else {
+		if l.Debug {
+			log.Printf("[DEBUG] Librato request: %v\n", string(metricsBytes))
+		}
	}
	req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
	if err != nil {

@@ -103,8 +121,21 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {

	resp, err := l.client.Do(req)
	if err != nil {
+		if l.Debug {
+			log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error())
+		}
		return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
+	} else {
+		if l.Debug {
+			htmlData, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				log.Printf("[DEBUG] Couldn't get response! (%v)\n", err)
+			} else {
+				log.Printf("[DEBUG] Librato response: %v\n", string(htmlData))
+			}
+		}
	}

	defer resp.Body.Close()

	if resp.StatusCode != 200 {

@@ -122,11 +153,20 @@ func (l *Librato) Description() string {
	return "Configuration for Librato API to send metrics to."
}

+func (l *Librato) buildGaugeName(m telegraf.Metric, fieldName string) string {
+	// Use the GraphiteSerializer
+	graphiteSerializer := graphite.GraphiteSerializer{}
+	serializedMetric := graphiteSerializer.SerializeBucketName(m, fieldName)
+
+	// Deal with slash characters:
+	return strings.Replace(serializedMetric, "/", "-", -1)
+}
+
func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
	gauges := []*Gauge{}
	for fieldName, value := range m.Fields() {
		gauge := &Gauge{
-			Name:        m.Name() + "_" + fieldName,
+			Name:        l.buildGaugeName(m, fieldName),
			MeasureTime: m.Time().Unix(),
		}
		if err := gauge.setValue(value); err != nil {

@@ -142,6 +182,10 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
				l.SourceTag)
		}
	}
	gauges = append(gauges, gauge)
	}
+	if l.Debug {
+		fmt.Printf("[DEBUG] Built gauges: %v\n", gauges)
+	}
	return gauges, nil
}
@@ -9,9 +9,9 @@ import (
	"testing"
	"time"

-	"github.com/influxdata/telegraf/testutil"
-
	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/serializers/graphite"
+	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

@@ -28,6 +28,14 @@ func fakeLibrato() *Librato {
	return l
}

+func BuildTags(t *testing.T) {
+	testMetric := testutil.TestMetric(0.0, "test1")
+	graphiteSerializer := graphite.GraphiteSerializer{}
+	tags, err := graphiteSerializer.Serialize(testMetric)
+	fmt.Printf("Tags: %v", tags)
+	require.NoError(t, err)
+}
+
func TestUriOverride(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)

@@ -78,7 +86,7 @@ func TestBuildGauge(t *testing.T) {
		{
			testutil.TestMetric(0.0, "test1"),
			&Gauge{
-				Name:        "test1",
+				Name:        "value1.test1.value",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       0.0,
			},

@@ -87,7 +95,7 @@ func TestBuildGauge(t *testing.T) {
		{
			testutil.TestMetric(1.0, "test2"),
			&Gauge{
-				Name:        "test2",
+				Name:        "value1.test2.value",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       1.0,
			},

@@ -96,7 +104,7 @@ func TestBuildGauge(t *testing.T) {
		{
			testutil.TestMetric(10, "test3"),
			&Gauge{
-				Name:        "test3",
+				Name:        "value1.test3.value",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       10.0,
			},

@@ -105,7 +113,7 @@ func TestBuildGauge(t *testing.T) {
		{
			testutil.TestMetric(int32(112345), "test4"),
			&Gauge{
-				Name:        "test4",
+				Name:        "value1.test4.value",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       112345.0,
			},

@@ -114,7 +122,7 @@ func TestBuildGauge(t *testing.T) {
		{
			testutil.TestMetric(int64(112345), "test5"),
			&Gauge{
-				Name:        "test5",
+				Name:        "value1.test5.value",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       112345.0,
			},

@@ -123,7 +131,7 @@ func TestBuildGauge(t *testing.T) {
		{
			testutil.TestMetric(float32(11234.5), "test6"),
			&Gauge{
-				Name:        "test6",
+				Name:        "value1.test6.value",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       11234.5,
			},

@@ -132,7 +140,7 @@ func TestBuildGauge(t *testing.T) {
		{
			testutil.TestMetric("11234.5", "test7"),
			&Gauge{
-				Name:        "test7",
+				Name:        "value1.test7.value",
				MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       11234.5,
			},

@@ -163,13 +171,13 @@ func TestBuildGauge(t *testing.T) {
func TestBuildGaugeWithSource(t *testing.T) {
	pt1, _ := telegraf.NewMetric(
		"test1",
-		map[string]string{"hostname": "192.168.0.1"},
+		map[string]string{"hostname": "192.168.0.1", "tag1": "value1"},
		map[string]interface{}{"value": 0.0},
		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
	)
	pt2, _ := telegraf.NewMetric(
		"test2",
-		map[string]string{"hostnam": "192.168.0.1"},
+		map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"},
		map[string]interface{}{"value": 1.0},
		time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
	)

@@ -182,7 +190,7 @@ func TestBuildGaugeWithSource(t *testing.T) {
		{
			pt1,
			&Gauge{
-				Name:        "test1",
+				Name:        "192_168_0_1.value1.test1.value",
				MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       0.0,
				Source:      "192.168.0.1",

@@ -192,7 +200,7 @@ func TestBuildGaugeWithSource(t *testing.T) {
		{
			pt2,
			&Gauge{
-				Name:        "test2",
+				Name:        "192_168_0_1.value1.test1.value",
				MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(),
				Value:       1.0,
			},
@@ -172,7 +172,7 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
	}

	user := m.Username
-	if user == "" {
+	if user != "" {
		opts.SetUsername(user)
	}
	password := m.Password
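The one-character fix above inverts a guard that previously set the username only when it was empty, so real credentials were silently dropped. For context, a sketch of the corrected shape of the block (the symmetric SetPassword guard is an assumption based on the `password := m.Password` line that follows in the source):

```go
user := m.Username
if user != "" {
    opts.SetUsername(user)
}
password := m.Password
if password != "" {
    opts.SetPassword(password)
}
```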
@@ -14,39 +14,49 @@ type GraphiteSerializer struct {

func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) {
	out := []string{}
-	// Get name
-	name := metric.Name()

	// Convert UnixNano to Unix timestamps
	timestamp := metric.UnixNano() / 1000000000
-	tag_str := buildTags(metric)

	for field_name, value := range metric.Fields() {
		// Convert value
		value_str := fmt.Sprintf("%#v", value)
		// Write graphite metric
		var graphitePoint string
-		if name == field_name {
-			graphitePoint = fmt.Sprintf("%s.%s %s %d",
-				tag_str,
-				strings.Replace(name, ".", "_", -1),
-				value_str,
-				timestamp)
-		} else {
-			graphitePoint = fmt.Sprintf("%s.%s.%s %s %d",
-				tag_str,
-				strings.Replace(name, ".", "_", -1),
-				strings.Replace(field_name, ".", "_", -1),
-				value_str,
-				timestamp)
-		}
-		if s.Prefix != "" {
-			graphitePoint = fmt.Sprintf("%s.%s", s.Prefix, graphitePoint)
-		}
+		graphitePoint = fmt.Sprintf("%s %s %d",
+			s.SerializeBucketName(metric, field_name),
+			value_str,
+			timestamp)
		out = append(out, graphitePoint)
	}
	return out, nil
}

+func (s *GraphiteSerializer) SerializeBucketName(metric telegraf.Metric, field_name string) string {
+	// Get the metric name
+	name := metric.Name()
+
+	tag_str := buildTags(metric)
+
+	// Write graphite metric
+	var serializedBucketName string
+	if name == field_name {
+		serializedBucketName = fmt.Sprintf("%s.%s",
+			tag_str,
+			strings.Replace(name, ".", "_", -1))
+	} else {
+		serializedBucketName = fmt.Sprintf("%s.%s.%s",
+			tag_str,
+			strings.Replace(name, ".", "_", -1),
+			strings.Replace(field_name, ".", "_", -1))
+	}
+	if s.Prefix != "" {
+		serializedBucketName = fmt.Sprintf("%s.%s", s.Prefix, serializedBucketName)
+	}
+	return serializedBucketName
+}
+
func buildTags(metric telegraf.Metric) string {
	var keys []string
	tags := metric.Tags()
@@ -119,3 +119,62 @@ func TestSerializeMetricPrefix(t *testing.T) {
	sort.Strings(expS)
	assert.Equal(t, expS, mS)
}
+
+func TestSerializeBucketNameNoHost(t *testing.T) {
+	now := time.Now()
+	tags := map[string]string{
+		"cpu":        "cpu0",
+		"datacenter": "us-west-2",
+	}
+	fields := map[string]interface{}{
+		"usage_idle": float64(91.5),
+	}
+	m, err := telegraf.NewMetric("cpu", tags, fields, now)
+	assert.NoError(t, err)
+
+	s := GraphiteSerializer{}
+	mS := s.SerializeBucketName(m, "usage_idle")
+
+	expS := fmt.Sprintf("cpu0.us-west-2.cpu.usage_idle")
+	assert.Equal(t, expS, mS)
+}
+
+func TestSerializeBucketNameHost(t *testing.T) {
+	now := time.Now()
+	tags := map[string]string{
+		"host":       "localhost",
+		"cpu":        "cpu0",
+		"datacenter": "us-west-2",
+	}
+	fields := map[string]interface{}{
+		"usage_idle": float64(91.5),
+	}
+	m, err := telegraf.NewMetric("cpu", tags, fields, now)
+	assert.NoError(t, err)
+
+	s := GraphiteSerializer{}
+	mS := s.SerializeBucketName(m, "usage_idle")
+
+	expS := fmt.Sprintf("localhost.cpu0.us-west-2.cpu.usage_idle")
+	assert.Equal(t, expS, mS)
+}
+
+func TestSerializeBucketNamePrefix(t *testing.T) {
+	now := time.Now()
+	tags := map[string]string{
+		"host":       "localhost",
+		"cpu":        "cpu0",
+		"datacenter": "us-west-2",
+	}
+	fields := map[string]interface{}{
+		"usage_idle": float64(91.5),
+	}
+	m, err := telegraf.NewMetric("cpu", tags, fields, now)
+	assert.NoError(t, err)
+
+	s := GraphiteSerializer{Prefix: "prefix"}
+	mS := s.SerializeBucketName(m, "usage_idle")
+
+	expS := fmt.Sprintf("prefix.localhost.cpu0.us-west-2.cpu.usage_idle")
+	assert.Equal(t, expS, mS)
+}
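Factoring the naming logic into SerializeBucketName lets other outputs (such as the Librato change above) reuse the `[prefix.]tags.measurement.field` scheme without producing a full Graphite line. A usage sketch consistent with the tests above (the tag and prefix values are illustrative):

```go
// What SerializeBucketName yields for a typical metric.
m, _ := telegraf.NewMetric("cpu",
    map[string]string{"host": "localhost", "cpu": "cpu0"},
    map[string]interface{}{"usage_idle": float64(91.5)},
    time.Now(),
)
s := graphite.GraphiteSerializer{Prefix: "telegraf"}
fmt.Println(s.SerializeBucketName(m, "usage_idle"))
// Output: telegraf.localhost.cpu0.cpu.usage_idle
```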
566 scripts/build.py
@@ -1,11 +1,4 @@
-#!/usr/bin/env python
-#
-# This is the Telegraf build script.
-#
-# Current caveats:
-#   - Does not checkout the correct commit/branch (for now, you will need to do so manually)
-#   - Has external dependencies for packaging (fpm) and uploading (boto)
-#
+#!/usr/bin/python -u

import sys
import os
@@ -19,7 +12,12 @@ import re

debug = False

-# PACKAGING VARIABLES
+################
+#### Telegraf Variables
+################
+
+# Packaging variables
PACKAGE_NAME = "telegraf"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/telegraf"
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
@@ -34,6 +32,14 @@ DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf"
POSTINST_SCRIPT = "scripts/post-install.sh"
PREINST_SCRIPT = "scripts/pre-install.sh"

+# Default AWS S3 bucket for uploads
+DEFAULT_BUCKET = "get.influxdb.org/telegraf"
+
+CONFIGURATION_FILES = [
+    CONFIG_DIR + '/telegraf.conf',
+    LOGROTATE_DIR + '/telegraf',
+]
+
# META-PACKAGE VARIABLES
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/telegraf"
@@ -43,7 +49,8 @@ DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."

# SCRIPT START
prereqs = [ 'git', 'go' ]
-optional_prereqs = [ 'fpm', 'rpmbuild' ]
+go_vet_command = "go tool vet -composites=true ./"
+optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]

fpm_common_args = "-f -s dir --log error \
--vendor {} \
@@ -66,28 +73,83 @@ fpm_common_args = "-f -s dir --log error \
    DESCRIPTION)

targets = {
-    'telegraf' : './cmd/telegraf/telegraf.go',
+    'telegraf' : './cmd/telegraf',
}

supported_builds = {
-    'darwin': [ "amd64", "i386" ],
-    'windows': [ "amd64", "i386" ],
-    'linux': [ "amd64", "i386", "arm" ]
+    "darwin": [ "amd64" ],
+    "windows": [ "amd64", "i386" ],
+    "linux": [ "amd64", "i386", "armhf", "armel", "arm64" ],
+    "freebsd": [ "amd64" ]
}

supported_packages = {
    "darwin": [ "tar", "zip" ],
-    "linux": [ "deb", "rpm", "tar", "zip" ],
+    "linux": [ "deb", "rpm", "tar" ],
    "windows": [ "zip" ],
+    "freebsd": [ "tar" ]
}
+
+supported_tags = {
+    # "linux": {
+    #     "amd64": ["sensors"]
+    # }
+}
+
+prereq_cmds = {
+    # "linux": "sudo apt-get install lm-sensors libsensors4-dev"
+}
+
+################
+#### Telegraf Functions
+################
+
+def create_package_fs(build_root):
+    print("Creating a filesystem hierarchy from directory: {}".format(build_root))
+    # Using [1:] for the path names due to them being absolute
+    # (will overwrite previous paths, per 'os.path.join' documentation)
+    dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
+    for d in dirs:
+        create_dir(os.path.join(build_root, d))
+        os.chmod(os.path.join(build_root, d), 0o755)
+
+def package_scripts(build_root, windows=False):
+    print("Copying scripts and sample configuration to build directory")
+    if windows:
+        shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
+        os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
+    else:
+        shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
+        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
+        shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
+        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
+        shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
+        os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
+        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
+        os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
+
+def run_generate():
+    # NOOP for Telegraf
+    return True
+
+def go_get(branch, update=False, no_stash=False):
+    if not check_path_for("gdm"):
+        print("Downloading `gdm`...")
+        get_command = "go get github.com/sparrc/gdm"
+        run(get_command)
+    print("Retrieving dependencies with `gdm`...")
+    run("{}/bin/gdm restore -f Godeps_windows".format(os.environ.get("GOPATH")))
+    run("{}/bin/gdm restore".format(os.environ.get("GOPATH")))
+    return True
+
+def run_tests(race, parallel, timeout, no_vet):
+    # Currently a NOOP for Telegraf
+    return True
+
+################
+#### All Telegraf-specific content above this line
+################

def run(command, allow_failure=False, shell=False):
    out = None
    if debug:
@@ -98,6 +160,8 @@ def run(command, allow_failure=False, shell=False):
        else:
            out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
        out = out.decode("utf8")
+        if debug:
+            print("[DEBUG] command output: {}".format(out))
    except subprocess.CalledProcessError as e:
        print("")
        print("")
@@ -127,16 +191,32 @@ def run(command, allow_failure=False, shell=False):
    else:
        return out

-def create_temp_dir(prefix=None):
+def create_temp_dir(prefix = None):
    if prefix is None:
-        return tempfile.mkdtemp(prefix="telegraf-build.")
+        return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME))
    else:
        return tempfile.mkdtemp(prefix=prefix)

+def get_current_version_tag():
+    version = run("git describe --always --tags --abbrev=0").strip()
+    return version
+
def get_current_version():
-    command = "git describe --always --tags --abbrev=0"
-    out = run(command)
-    return out.strip()
+    version_tag = get_current_version_tag()
+    if version_tag[0] == 'v':
+        # Remove leading 'v' and possible '-rc\d+'
+        version = re.sub(r'-rc\d+', '', version_tag[1:])
+    else:
+        version = re.sub(r'-rc\d+', '', version_tag)
+    return version
+
+def get_current_rc():
+    rc = None
+    version_tag = get_current_version_tag()
+    matches = re.match(r'.*-rc(\d+)', version_tag)
+    if matches:
+        rc, = matches.groups(1)
+    return rc

def get_current_commit(short=False):
    command = None
@@ -181,56 +261,61 @@ def check_path_for(b):
    if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
        return full_path

-def check_environ(build_dir = None):
-    print("\nChecking environment:")
+def check_environ(build_dir=None):
+    print("")
+    print("Checking environment:")
    for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
-        print("\t- {} -> {}".format(v, os.environ.get(v)))
+        print("- {} -> {}".format(v, os.environ.get(v)))

    cwd = os.getcwd()
-    if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
-        print("\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.")
+    if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
+        print("!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.")

def check_prereqs():
-    print("\nChecking for dependencies:")
+    print("")
+    print("Checking for dependencies:")
    for req in prereqs:
        path = check_path_for(req)
-        if path is None:
-            path = '?'
-        print("\t- {} -> {}".format(req, path))
+        if path:
+            print("- {} -> {}".format(req, path))
+        else:
+            print("- {} -> ?".format(req))
    for req in optional_prereqs:
        path = check_path_for(req)
-        if path is None:
-            path = '?'
-        print("\t- {} (optional) -> {}".format(req, path))
+        if path:
+            print("- {} (optional) -> {}".format(req, path))
+        else:
+            print("- {} (optional) -> ?".format(req))
+    print("")
+    return True

def upload_packages(packages, bucket_name=None, nightly=False):
    if debug:
-        print("[DEBUG] upload_packags: {}".format(packages))
+        print("[DEBUG] upload_packages: {}".format(packages))
    try:
        import boto
        from boto.s3.key import Key
    except ImportError:
-        print "!! Cannot upload packages without the 'boto' python library."
+        print("!! Cannot upload packages without the 'boto' Python library.")
        return 1
-    print("Uploading packages to S3...")
+    print("")
+    print("Connecting to S3...".format(bucket_name))
    c = boto.connect_s3()
    if bucket_name is None:
-        bucket_name = 'get.influxdb.org/telegraf'
+        bucket_name = DEFAULT_BUCKET
    bucket = c.get_bucket(bucket_name.split('/')[0])
-    print("\t - Using bucket: {}".format(bucket_name))
+    print("Using bucket: {}".format(bucket_name))
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
-            # bucket/telegraf). Assuming forward-slashes as path
+            # bucket/folder). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        if bucket.get_key(name) is None or nightly:
-            print("\t - Uploading {} to {}...".format(name, bucket_name))
+            print("Uploading {}...".format(name))
            sys.stdout.flush()
            k = Key(bucket)
            k.key = name
            if nightly:

@@ -239,8 +324,10 @@ def upload_packages(packages, bucket_name=None, nightly=False):
            n = k.set_contents_from_filename(p, replace=False)
            k.make_public()
        else:
-            print("\t - Not uploading {}, already exists.".format(p))
+            print("!! Not uploading package {}, as it already exists.".format(p))
+    print("")
    return 0

def build(version=None,
          branch=None,
@@ -251,22 +338,18 @@ def build(version=None,
          rc=None,
          race=False,
          clean=False,
-          outdir=".",
-          goarm_version="6"):
-    print("-------------------------")
-    print("")
-    print("Build plan:")
-    print("\t- version: {}".format(version))
+          outdir="."):
+    print("\n-------------------------\n")
+    print("Build Plan:")
+    print("- version: {}".format(version))
    if rc:
-        print("\t- release candidate: {}".format(rc))
-    print("\t- commit: {}".format(commit))
-    print("\t- branch: {}".format(branch))
-    print("\t- platform: {}".format(platform))
-    print("\t- arch: {}".format(arch))
-    if arch == 'arm' and goarm_version:
-        print("\t- ARM version: {}".format(goarm_version))
-    print("\t- nightly? {}".format(str(nightly).lower()))
-    print("\t- race enabled? {}".format(str(race).lower()))
+        print("- release candidate: {}".format(rc))
+    print("- commit: {}".format(get_current_commit(short=True)))
+    print("- branch: {}".format(get_current_branch()))
+    print("- platform: {}".format(platform))
+    print("- arch: {}".format(arch))
+    print("- nightly? {}".format(str(nightly).lower()))
+    print("- race enabled? {}".format(str(race).lower()))
    print("")

    if not os.path.exists(outdir):
@@ -280,45 +363,49 @@ def build(version=None,
        # If a release candidate, update the version information accordingly
        version = "{}rc{}".format(version, rc)

-    # Set architecture to something that Go expects
-    if arch == 'i386':
-        arch = '386'
-    elif arch == 'x86_64':
-        arch = 'amd64'
-
    print("Starting build...")
-    tmp_build_dir = create_temp_dir()
    for b, c in targets.items():
-        if platform == 'windows':
-            b = b + '.exe'
-        print("\t- Building '{}'...".format(os.path.join(outdir, b)))
+        print("Building '{}'...".format(os.path.join(outdir, b)))
        build_command = ""
-        build_command += "GOOS={} GOARCH={} ".format(platform, arch)
-        if arch == "arm" and goarm_version:
-            if goarm_version not in ["5", "6", "7", "arm64"]:
-                print("!! Invalid ARM build version: {}".format(goarm_version))
-            build_command += "GOARM={} ".format(goarm_version)
-        build_command += "go build -o {} ".format(os.path.join(outdir, b))
+        if "arm" in arch:
+            build_command += "GOOS={} GOARCH={} ".format(platform, "arm")
+        else:
+            if arch == 'i386':
+                arch = '386'
+            elif arch == 'x86_64':
+                arch = 'amd64'
+            build_command += "GOOS={} GOARCH={} ".format(platform, arch)
+        if "arm" in arch:
+            if arch == "armel":
+                build_command += "GOARM=5 "
+            elif arch == "armhf" or arch == "arm":
+                build_command += "GOARM=6 "
+            elif arch == "arm64":
+                build_command += "GOARM=7 "
+            else:
+                print("!! Invalid ARM architecture specified: {}".format(arch))
+                print("Please specify either 'armel', 'armhf', or 'arm64'")
+                return 1
+        if platform == 'windows':
+            build_command += "go build -o {} ".format(os.path.join(outdir, b + '.exe'))
+        else:
+            build_command += "go build -o {} ".format(os.path.join(outdir, b))
        if race:
            build_command += "-race "
+        if platform in supported_tags:
+            if arch in supported_tags[platform]:
+                build_tags = supported_tags[platform][arch]
+                for build_tag in build_tags:
+                    build_command += "-tags "+build_tag+" "
        go_version = get_go_version()
        if "1.4" in go_version:
-            build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat())
-            build_command += "-X main.Version {} ".format(version)
-            build_command += "-X main.Branch {} ".format(get_current_branch())
-            build_command += "-X main.Commit {}\" ".format(get_current_commit())
+            build_command += "-ldflags=\"-X main.Version {} -X main.Branch {} -X main.Commit {}\" ".format(version,
+                                                                                                           get_current_branch(),
+                                                                                                           get_current_commit())
        else:
-            build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat())
-            build_command += "-X main.Version={} ".format(version)
-            build_command += "-X main.Branch={} ".format(get_current_branch())
-            build_command += "-X main.Commit={}\" ".format(get_current_commit())
+            # With Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
+            build_command += "-ldflags=\"-X main.Version={} -X main.Branch={} -X main.Commit={}\" ".format(version,
+                                                                                                           get_current_branch(),
+                                                                                                           get_current_commit())
        build_command += c
        run(build_command, shell=True)
+    print("")
+    return 0

def create_dir(path):
    try:
@@ -343,35 +430,12 @@ def copy_file(fr, to):
    except OSError as e:
        print(e)

-def create_package_fs(build_root):
-    print("\t- Creating a filesystem hierarchy from directory: {}".format(build_root))
-    # Using [1:] for the path names due to them being absolute
-    # (will overwrite previous paths, per 'os.path.join' documentation)
-    dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
-    for d in dirs:
-        create_dir(os.path.join(build_root, d))
-        os.chmod(os.path.join(build_root, d), 0o755)
-
-def package_scripts(build_root, windows=False):
-    print("\t- Copying scripts and sample configuration to build directory")
-    if windows:
-        shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
-        os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
-    else:
-        shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
-        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
-        shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
-        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
-        shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
-        os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
-        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
-        os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
-
-def go_get():
-    print("Retrieving Go dependencies...")
-    run("go get github.com/sparrc/gdm")
-    run("gdm restore -f Godeps_windows")
-    run("gdm restore")
+def generate_md5_from_file(path):
+    m = hashlib.md5()
+    with open(path, 'rb') as f:
+        for chunk in iter(lambda: f.read(4096), b""):
+            m.update(chunk)
+    return m.hexdigest()

-def generate_md5_from_file(path):
-    m = hashlib.md5()
@@ -383,101 +447,119 @@ def generate_md5_from_file(path):
-            m.update(data)
-    return m.hexdigest()

-def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iteration=1):
+def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
    outfiles = []
    tmp_build_dir = create_temp_dir()
+    if debug:
+        print("[DEBUG] build_output = {}".format(build_output))
    try:
-        print("-------------------------")
-        print("")
+        print("-------------------------\n")
        print("Packaging...")
-        for p in build_output:
+        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
-            create_dir(os.path.join(tmp_build_dir, p))
-            for a in build_output[p]:
-                current_location = build_output[p][a]
-                # Create second-level directory displaying the architecture (amd64, etc)p
-                build_root = os.path.join(tmp_build_dir, p, a)
+            create_dir(os.path.join(tmp_build_dir, platform))
+            for arch in build_output[platform]:
+                # Create second-level directory displaying the architecture (amd64, etc)
+                current_location = build_output[platform][arch]
+
+                # Create directory tree to mimic file system of package
+                build_root = os.path.join(tmp_build_dir,
+                                          platform,
+                                          arch,
+                                          '{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
                create_dir(build_root)
-                if p == 'windows':
-                    package_scripts(build_root, windows=True)
-                else:
-                    create_package_fs(build_root)
-                    # Copy in packaging and miscellaneous scripts
-                    package_scripts(build_root)
-                # Copy newly-built binaries to packaging directory
-                for b in targets:
-                    if p == 'windows':
-                        b = b + '.exe'
-                    fr = os.path.join(current_location, b)
-                    to = os.path.join(build_root, b)
-                    print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to))
+                create_package_fs(build_root)
+
+                # Copy packaging scripts to build directory
+                package_scripts(build_root)
+
+                for binary in targets:
+                    # Copy newly-built binaries to packaging directory
+                    if platform == 'windows':
+                        binary = binary + '.exe'
+                    # Where the binary currently is located
+                    fr = os.path.join(current_location, binary)
+                    # Where the binary should go in the package filesystem
+                    to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
+                    if debug:
+                        print("[{}][{}] - Moving from '{}' to '{}'".format(platform,
+                                                                           arch,
+                                                                           fr,
+                                                                           to))
                    copy_file(fr, to)
-                # Package the directory structure
-                for package_type in supported_packages[p]:
-                    print("\t- Packaging directory '{}' as '{}'...".format(build_root, package_type))
-                    name = "telegraf"
+
+                for package_type in supported_packages[platform]:
+                    # Package the directory structure for each package type for the platform
+                    print("Packaging directory '{}' as '{}'...".format(build_root, package_type))
+                    name = PACKAGE_NAME
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
-                    current_location = build_output[p][a]
+                    package_build_root = build_root
+                    current_location = build_output[platform][arch]
                    if rc is not None:
                        # Set iteration to 0 since it's a release candidate
                        package_iteration = "0.rc{}".format(rc)
+
+                    if package_type in ['zip', 'tar']:
+                        # For tars and zips, start the packaging one folder above
+                        # the build root (to include the package name)
+                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                        if nightly:
-                            name = '{}-nightly_{}_{}'.format(name, p, a)
+                            name = '{}-nightly_{}_{}'.format(name,
+                                                             platform,
+                                                             arch)
                        else:
-                            name = '{}-{}-{}_{}_{}'.format(name, package_version, package_iteration, p, a)
-                    if package_type == 'tar':
-                        # Add `tar.gz` to path to reduce package size
-                        current_location = os.path.join(current_location, name + '.tar.gz')
-                    if rc is not None:
-                        package_iteration = "0.rc{}".format(rc)
-                    saved_a = a
-                    if pkg_arch is not None:
-                        a = pkg_arch
-                    if a == '386':
-                        a = 'i386'
-                    if package_type == 'zip':
-                        zip_command = "cd {} && zip {}.zip ./*".format(
-                            build_root,
-                            name)
-                        run(zip_command, shell=True)
-                        run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True)
-                        outfile = os.path.join(current_location, name+".zip")
-                        outfiles.append(outfile)
-                        print("\t\tMD5 = {}".format(generate_md5_from_file(outfile)))
+                            name = '{}-{}-{}_{}_{}'.format(name,
+                                                           package_version,
+                                                           package_iteration,
+                                                           platform,
+                                                           arch)
+
+                    current_location = os.path.join(os.getcwd(), current_location)
+                    if package_type == 'tar':
+                        tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(build_root, name)
+                        run(tar_command, shell=True)
+                        run("mv {}.tar.gz {}".format(os.path.join(build_root, name), current_location), shell=True)
+                        outfile = os.path.join(current_location, name + ".tar.gz")
+                        outfiles.append(outfile)
+                        print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)))
+                    elif package_type == 'zip':
+                        zip_command = "cd {} && zip -r {}.zip ./*".format(build_root, name)
+                        run(zip_command, shell=True)
+                        run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True)
+                        outfile = os.path.join(current_location, name + ".zip")
+                        outfiles.append(outfile)
+                        print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)))
                    else:
-                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
-                            fpm_common_args,
-                            name,
-                            a,
-                            package_type,
-                            package_version,
-                            package_iteration,
-                            build_root,
-                            current_location)
-                        if pkg_arch is not None:
-                            a = saved_a
+                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(fpm_common_args,
+                                                                                                                     name,
+                                                                                                                     arch,
+                                                                                                                     package_type,
+                                                                                                                     package_version,
+                                                                                                                     package_iteration,
+                                                                                                                     package_build_root,
+                                                                                                                     current_location)
+                        if debug:
+                            fpm_command += "--verbose "
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils "
-                            fpm_command += "--depends lsof"
+                            fpm_command += "--depends lsof "
                        out = run(fpm_command, shell=True)
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
-                            print("[ COULD NOT DETERMINE OUTPUT ]")
+                            print("!! Could not determine output from packaging command.")
                        else:
                            # Strip nightly version (the unix epoch) from filename
-                            if nightly and package_type in ['deb', 'rpm']:
+                            if nightly:
                                outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
                            outfiles.append(os.path.join(os.getcwd(), outfile))
                            # Display MD5 hash for generated package
-                            print("\t\tMD5 = {}".format(generate_md5_from_file(outfile)))
+                            print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)))
+        print("")
        if debug:
            print("[DEBUG] package outfiles: {}".format(outfiles))
@@ -491,11 +573,9 @@ def print_usage():
    print("")
    print("Options:")
    print("\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build.")
-    print("\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all")
-    print("\t --goarm=<arm version> \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6")
+    print("\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386|i386, arm, or all")
    print("\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all")
    print("\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag.")
-    print("\t --pkgarch=<package-arch> \n\t\t- Package architecture if different from <arch>")
    print("\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP).")
    print("\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP).")
    print("\t --rc=<rc number> \n\t\t- Whether or not the build is a release candidate (affects version information).")
@@ -503,9 +583,13 @@ def print_usage():
    print("\t --race \n\t\t- Whether the produced build should have race detection enabled.")
    print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).")
    print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).")
    print("\t --update \n\t\t- Whether dependencies should be updated prior to building.")
    print("\t --test \n\t\t- Run Go tests. Will not produce a build.")
    print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.")
+    print("\t --generate \n\t\t- Run `go generate`.")
    print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.")
    print("\t --clean \n\t\t- Clean the build output directory prior to creating build.")
+    print("\t --no-get \n\t\t- Do not run `go get` before building.")
+    print("\t --bucket=<S3 bucket>\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).")
+    print("\t --debug \n\t\t- Displays debug output.")
    print("")
@@ -514,17 +598,18 @@ def print_package_summary(packages):
    print(packages)

def main():
+    global debug
+
    # Command-line arguments
    outdir = "build"
    commit = None
    target_platform = None
    target_arch = None
-    package_arch = None
    nightly = False
    race = False
    branch = None
    version = get_current_version()
-    rc = None
+    rc = get_current_rc()
    package = False
    update = False
    clean = False
@@ -534,15 +619,15 @@ def main():
    timeout = None
    iteration = 1
    no_vet = False
-    goarm_version = "6"
    run_get = True
    upload_bucket = None
-    global debug
+    generate = False
+    no_stash = False

    for arg in sys.argv[1:]:
        if '--outdir' in arg:
            # Output directory. If none is specified, then builds will be placed in the same directory.
-            output_dir = arg.split("=")[1]
+            outdir = arg.split("=")[1]
        if '--commit' in arg:
            # Commit to build from. If none is specified, then it will build from the most recent commit.
            commit = arg.split("=")[1]
@@ -558,9 +643,6 @@ def main():
        elif '--version' in arg:
            # Version to assign to this build (0.9.5, etc)
            version = arg.split("=")[1]
-        elif '--pkgarch' in arg:
-            # Package architecture if different from <arch> (armhf, etc)
-            package_arch = arg.split("=")[1]
        elif '--rc' in arg:
            # Signifies that this is a release candidate build.
            rc = arg.split("=")[1]
@@ -570,12 +652,20 @@ def main():
        elif '--package' in arg:
            # Signifies that packages should be built.
            package = True
+            # If packaging do not allow stashing of local changes
+            no_stash = True
        elif '--nightly' in arg:
            # Signifies that this is a nightly build.
            nightly = True
        elif '--update' in arg:
            # Signifies that dependencies should be updated.
            update = True
+        elif '--upload' in arg:
+            # Signifies that the resulting packages should be uploaded to S3
+            upload = True
+        elif '--test' in arg:
+            # Run tests and exit
+            test = True
        elif '--parallel' in arg:
            # Set parallel for tests.
            parallel = int(arg.split("=")[1])
@@ -589,14 +679,19 @@ def main():
            iteration = arg.split("=")[1]
        elif '--no-vet' in arg:
            no_vet = True
-        elif '--goarm' in arg:
-            # Signifies GOARM flag to pass to build command when compiling for ARM
-            goarm_version = arg.split("=")[1]
+        elif '--no-get' in arg:
+            run_get = False
+        elif '--bucket' in arg:
+            # The bucket to upload the packages to, relies on boto
+            upload_bucket = arg.split("=")[1]
        elif '--no-stash' in arg:
-            # Do not stash uncommitted changes
+            # Fail if uncommitted changes exist
            no_stash = True
+        elif '--generate' in arg:
+            generate = True
        elif '--debug' in arg:
-            print "[DEBUG] Using debug output"
+            print("[DEBUG] Using debug output")
            debug = True
        elif '--help' in arg:
            print_usage()
@@ -606,54 +701,69 @@ def main():
            print_usage()
            return 1

-    if nightly and rc:
-        print("!! Cannot be both nightly and a release candidate! Stopping.")
-        return 1
-
    if nightly:
+        if rc:
+            print("!! Cannot be both nightly and a release candidate! Stopping.")
+            return 1
-        # In order to support nightly builds on the repository, we are adding the epoch timestamp
+        # In order to cleanly delineate nightly version, we are adding the epoch timestamp
        # to the version so that version numbers are always greater than the previous nightly.
-        version = "{}.n{}".format(version, int(time.time()))
+        version = "{}~n{}".format(version, int(time.time()))
+        iteration = 0
+    elif rc:
        iteration = 0

    # Pre-build checks
    check_environ()
-    check_prereqs()
+    if not check_prereqs():
+        return 1

    if not commit:
        commit = get_current_commit(short=True)
    if not branch:
        branch = get_current_branch()
    if not target_arch:
-        if 'arm' in get_system_arch():
+        system_arch = get_system_arch()
+        if 'arm' in system_arch:
            # Prevent uname from reporting ARM arch (eg 'armv7l')
            target_arch = "arm"
        else:
-            target_arch = get_system_arch()
-    if not target_platform:
+            target_arch = system_arch
+            if target_arch == '386':
+                target_arch = 'i386'
+            elif target_arch == 'x86_64':
+                target_arch = 'amd64'
+    if target_platform:
+        if target_platform not in supported_builds and target_platform != 'all':
+            print("! Invalid build platform: {}".format(target_platform))
+            return 1
+    else:
        target_platform = get_system_platform()
-    if rc or nightly:
-        # If a release candidate or nightly, set iteration to 0 (instead of 1)
-        iteration = 0
-
-    if target_arch == '386':
-        target_arch = 'i386'
-    elif target_arch == 'x86_64':
-        target_arch = 'amd64'

    build_output = {}

-    go_get()
+    if generate:
+        if not run_generate():
+            return 1
+
+    if run_get:
+        if not go_get(branch, update=update, no_stash=no_stash):
+            return 1
+
+    if test:
+        if not run_tests(race, parallel, timeout, no_vet):
+            return 1
+        return 0

    platforms = []
    single_build = True
    if target_platform == 'all':
-        platforms = list(supported_builds.keys())
+        platforms = supported_builds.keys()
        single_build = False
    else:
        platforms = [target_platform]

    for platform in platforms:
+        if platform in prereq_cmds:
+            run(prereq_cmds[platform])
        build_output.update( { platform : {} } )
        archs = []
        if target_arch == "all":
@@ -661,32 +771,34 @@ def main():
            archs = supported_builds.get(platform)
        else:
            archs = [target_arch]

        for arch in archs:
            od = outdir
            if not single_build:
                od = os.path.join(outdir, platform, arch)
-            build(version=version,
-                  branch=branch,
-                  commit=commit,
-                  platform=platform,
-                  arch=arch,
-                  nightly=nightly,
-                  rc=rc,
-                  race=race,
-                  clean=clean,
-                  outdir=od,
-                  goarm_version=goarm_version)
+            if build(version=version,
+                     branch=branch,
+                     commit=commit,
+                     platform=platform,
+                     arch=arch,
+                     nightly=nightly,
+                     rc=rc,
+                     race=race,
+                     clean=clean,
+                     outdir=od):
+                return 1
            build_output.get(platform).update( { arch : od } )

    # Build packages
    if package:
        if not check_path_for("fpm"):
-            print("!! Cannot package without command 'fpm'. Stopping.")
+            print("!! Cannot package without command 'fpm'.")
            return 1
-        packages = build_packages(build_output, version, package_arch, nightly=nightly, rc=rc, iteration=iteration)
-        # Optionally upload to S3
+
+        packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration)
        if upload:
            upload_packages(packages, bucket_name=upload_bucket, nightly=nightly)
+    print("Done!")
    return 0

if __name__ == '__main__':
@@ -68,7 +68,7 @@ telegraf -sample-config > $tmpdir/config.toml
exit_if_fail telegraf -config $tmpdir/config.toml \
    -test -input-filter cpu:mem

-mv $GOPATH/bin/telegraf $CIRCLE_ARTIFACTS
+cat $GOPATH/bin/telegraf | gzip > $CIRCLE_ARTIFACTS/telegraf.gz

eval "git describe --exact-match HEAD"
if [ $? -eq 0 ]; then
@@ -77,5 +77,6 @@ if [ $? -eq 0 ]; then
    echo $tag
    exit_if_fail ./scripts/build.py --package --version=$tag --platform=linux --arch=all --upload
    exit_if_fail ./scripts/build.py --package --version=$tag --platform=windows --arch=all --upload
+    exit_if_fail ./scripts/build.py --package --version=$tag --platform=freebsd --arch=all --upload
    mv build $CIRCLE_ARTIFACTS
fi
@@ -28,7 +28,9 @@ if [[ $? -ne 0 ]]; then
    useradd --system -U -M telegraf -s /bin/false -d /etc/telegraf
fi

+test -d $LOG_DIR || mkdir -p $LOG_DIR
chown -R -L telegraf:telegraf $LOG_DIR
+chmod 755 $LOG_DIR

# Remove legacy symlink, if it exists
if [[ -L /etc/init.d/telegraf ]]; then