Compare commits

...

111 Commits

Author SHA1 Message Date
Cameron Sparr
59568f5311 Release 0.11.1 2016-03-16 14:45:35 -06:00
Cameron Sparr
822706367b provide args for telegraf for consistency with influxd:
- telegraf version
- telegraf config

closes #857
2016-03-16 14:22:01 -06:00
HUANG Wei
f8e9fafda3 Add reload configuration for telegraf service scripts.
closes #794
2016-03-16 11:20:46 -06:00
Cameron Sparr
c2bb9db012 Changelog update 2016-03-16 11:20:28 -06:00
Pierre Fersing
e4e7d7fbfc Improved install script for packaged telegraf:
* Start/stop service on Debian/Ubuntu
* Disable init-script/Systemd-unit on package removal

closes #747
2016-03-16 11:17:28 -06:00
Cameron Sparr
035e4cf90a Fix bug with httpjson client pointer receiver
fixes #859
2016-03-16 10:57:15 -06:00
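
This fix is an instance of the classic Go pointer-receiver pitfall: a method declared on a value receiver mutates a copy of the struct, so any state it stores (such as a cached HTTP client) is silently discarded. A minimal sketch of the bug class, with hypothetical names rather than the plugin's actual code:

package main

import "fmt"

type httpClient struct{ initialized bool }

// Bug: value receiver — the assignment lands on a copy and is lost.
func (c httpClient) initByValue() { c.initialized = true }

// Fix: pointer receiver — the assignment mutates the shared struct.
func (c *httpClient) initByPointer() { c.initialized = true }

func main() {
	c := &httpClient{}
	c.initByValue()
	fmt.Println(c.initialized) // false: the write was lost
	c.initByPointer()
	fmt.Println(c.initialized) // true
}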
Cameron Sparr
18fff4a3f5 Merge pull request #858 from LordFPL/patch-1
(very) Little error in changelog
2016-03-16 09:28:30 -06:00
Ross McDonald
675b6dc305 Corrected issue with windows builds where the correct configuration
and filesystem would not be used.

closes #852
closes #854
2016-03-16 09:27:09 -06:00
LordFPL
4071c78b2b (very) Little error in changelog
We are not going into the past, no? ;)
2016-03-16 08:57:33 +01:00
Cameron Sparr
2cb32a683e README fixes for 0.11.0 2016-03-15 11:19:40 +00:00
Cameron Sparr
b6dc9c004b Release 0.11.0 2016-03-14 17:19:46 +00:00
Cameron Sparr
4ea0c707c1 Input plugin for running ntp queries
see #235
2016-03-14 16:53:37 +00:00
Thomas Menard
2fbcb5c6d8 Fix postgresql password exposure in metrics
Fix the password exposure in the metrics or tags.

closes #821
closes #845
2016-03-14 11:00:30 +00:00
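
The underlying idea is to scrub credentials from the connection string before it is used as a server tag. A rough sketch of that idea under the assumption of a key=value DSN (not the plugin's actual code):

package main

import (
	"fmt"
	"regexp"
)

// Blank out the password component of a key=value DSN before tagging.
var passwordRe = regexp.MustCompile(`password=\S+`)

func sanitizeDSN(dsn string) string {
	return passwordRe.ReplaceAllString(dsn, "password=******")
}

func main() {
	dsn := "host=localhost user=telegraf password=s3cret dbname=postgres"
	fmt.Println(sanitizeDSN(dsn))
	// Output: host=localhost user=telegraf password=****** dbname=postgres
}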
Cameron Sparr
a4d60d9750 Update Godeps_windows
closes #839
2016-03-14 10:49:28 +00:00
Cameron Sparr
d3925890b1 github wh: return from eventHandler when err != nil
closes #837
2016-03-14 10:34:58 +00:00
Cameron Sparr
8c6c144f28 influxdb output: If all write fails, trigger a reconnect
closes #836
2016-03-14 10:28:01 +00:00
Cameron Sparr
db8c24cc7b Add a "kernel" plugin for /proc/stat statistics
see #235
2016-03-11 14:50:45 +01:00
Thibault Cohen
ecbbb8426f Fix #828
closes #828
closes #829
2016-03-11 12:21:29 +01:00
Thibault Cohen
0752879fc8 SNMP fix concurrency issue
closes #823
2016-03-10 12:04:29 +01:00
Chris Goller
3f2a04b25b Fix build-for-docker Makefile target syntax.
closes #819
2016-03-09 22:58:29 +01:00
Cameron Sparr
aa15e7916e processes: Fix zombie process procfs panic
fixes #822
2016-03-09 22:55:26 +01:00
Cameron Sparr
7b09623fa8 Add number of users to 'system' plugin
see #235
2016-03-09 19:27:22 +01:00
Cameron Sparr
2f45b8b7f5 Cross platform support for the 'processes' plugin
closes #798
2016-03-09 15:47:37 +01:00
Thibault Cohen
5ffa2a30be Add processes status stats in system input plugin 2016-03-09 15:47:37 +01:00
Cameron Sparr
b102ae141a CONFIGURATION drop->fielddrop 2016-03-09 15:46:37 +01:00
Cameron Sparr
845abcdd77 Only log the overwritten metric warning on 1st overwrite per buffer
see #807
2016-03-09 14:44:32 +01:00
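
The change is a rate-limiting pattern: keep a per-buffer flag, warn on the first overwrite, and re-arm the flag when the buffer is flushed. A hypothetical sketch of that pattern (not the actual output-buffer code):

package main

import "fmt"

// buffer warns only on the first overwrite per flush cycle.
type buffer struct {
	metrics chan string
	warned  bool
}

func (b *buffer) add(m string) {
	select {
	case b.metrics <- m:
	default: // full: drop the oldest metric, warning once per buffer
		if !b.warned {
			fmt.Println("WARNING: overwriting cached metrics")
			b.warned = true
		}
		<-b.metrics
		b.metrics <- m
	}
}

func (b *buffer) flush() {
	for len(b.metrics) > 0 {
		fmt.Println("write:", <-b.metrics)
	}
	b.warned = false // re-arm the warning for the next buffer
}

func main() {
	b := &buffer{metrics: make(chan string, 2)}
	for i := 0; i < 4; i++ {
		b.add(fmt.Sprintf("m%d", i)) // m2 and m3 overwrite, one warning
	}
	b.flush()
}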
Cameron Sparr
805db7ca50 Break out fcgi code into orig Go files, don't ignore errs
closes #816
2016-03-09 13:44:11 +01:00
Prune Sebastien THOMAS
bd3d0c330f parsed with gofmt
closes #776
2016-03-07 18:48:02 +01:00
Prune Sebastien THOMAS
0060df9877 added zookeeper_chroot option
Added a plugin option, zookeeper_chroot, to set the Kafka endpoint in Zookeeper, which may not be / (the default).
This chroot is then configured in the consumergroup config.Zookeeper.Chroot.

This works around the fact that the plugin does not handle URLs like "zookeeper_server:port/chroot".
Since the peers are stored in an array, it makes no sense for them to be URLs: peers should all be members of the same cluster, so they all share the same chroot.
2016-03-07 18:46:10 +01:00
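
Assuming the wvanbergen/kafka consumergroup API vendored in the Godeps list below, wiring the new option through would look roughly like this (group, topic, and peer names are made up):

package main

import (
	"log"

	"github.com/wvanbergen/kafka/consumergroup"
)

func main() {
	config := consumergroup.NewConfig()
	// The chroot lives on the Zookeeper config, not on the peer addresses:
	// all peers belong to the same cluster, so they share one chroot.
	config.Zookeeper.Chroot = "/kafka"

	cg, err := consumergroup.JoinConsumerGroup(
		"telegraf_metrics_consumers", // consumer group name
		[]string{"telegraf"},         // topics
		[]string{"zookeeper:2181"},   // peers, without a /chroot suffix
		config,
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cg.Close()
}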
Thibault Cohen
cd66e203bd Improve procstat
closes #799
2016-03-07 17:57:32 +01:00
Cameron Sparr
240f99478a Prevent Inf and NaN from being added, and unit test Accumulator
closes #803
2016-03-07 15:46:23 +01:00
Cameron Sparr
41534c73f0 mqtt_consumer: option to set persistent session and client ID
closes #797
2016-03-07 14:34:42 +01:00
Matt Morrison
6139a69fa8 [SNMP Input] SNMPMap() loops forever if table has more than 32 entries
closes #800
closes #801
2016-03-07 12:18:55 +01:00
Cameron Sparr
3cca312e61 Adding a TCP input listener
closes #481
2016-03-07 12:10:28 +01:00
Cameron Sparr
7e312797ec Grammar corrections and consistency for output-list, input-list
closes #788
2016-03-07 11:42:01 +01:00
张光权
fe44fa648a Fix the incorrect indent of input-list help message 2016-03-07 11:33:03 +01:00
张光权
3249030257 add flags '-input-list' and '-output-list' for telegraf command 2016-03-07 11:33:03 +01:00
张光权
8f98c20c51 Add flag '-usage-list' to print all plugin inputs for telegraf 2016-03-07 11:33:03 +01:00
Thibault Cohen
1c76d5d096 Improve docker input plugin
closes #754
2016-03-07 11:24:32 +01:00
Cameron Sparr
35f1e28809 Merge pull request #790 from arthtux/master
Add README.md for redis
2016-03-04 17:46:12 +00:00
Arthur Deschamps
20999979de Update redis.go 2016-03-04 07:22:54 -05:00
arthtux
c6706a86f1 add README.md for redis 2016-03-03 20:20:03 -05:00
Ross McDonald
b4b1866286 Removed test functionality from build script.
closes #708
closes #713
2016-03-03 21:37:35 +00:00
Ross McDonald
28eb9b4c29 Fixed issue where binary wasn't copied to packaging directory correctly. 2016-03-03 21:34:57 +00:00
Ross McDonald
0a9accccc1 Added permissions check to post-install script due to issues with RPMs having the incorrect permissions on the log directory. 2016-03-03 21:34:57 +00:00
Ross McDonald
c3d220175f Removed i386 as a target for darwin, as it currently doesn't compile. 2016-03-03 21:34:57 +00:00
Ross McDonald
095c90ad22 Re-added zip package output format. Modified zip and tar packaging process to use the base 'tar' and 'zip' commands, instead of 'fpm'. 2016-03-03 21:34:57 +00:00
Ross McDonald
a77bfecb02 Updates to build script to improve ARM builds and other functionality. 2016-03-03 21:34:57 +00:00
Thibault Cohen
72027b5b3c Add README.md for snmp input plugin
closes #735
closes #773
closes #540
2016-03-03 15:54:34 +00:00
Thibault Cohen
e5503c56ad Fix #773 2016-03-03 15:50:36 +00:00
Thibault Cohen
ee7b225272 Add snmp table feature #540 2016-03-03 15:50:36 +00:00
Cameron Sparr
03d37725a9 dns_query unit tests, require that field exists 2016-03-03 15:44:16 +00:00
Pierre Fersing
29d1cbb673 Reduce metric_buffer_limit to 1000
closes #780
2016-03-03 15:32:44 +00:00
Auke Willem Oosterhoff
e81278b800 Fix bug in sample code.
closes #784
closes #785
2016-03-03 15:31:37 +00:00
Manuel Sangoi
e5482a5725 Do not ignore username option for mqtt output 2016-03-03 15:27:37 +00:00
Pascal Larin
8464be691e Username not set for mqtt_consumer plugin
The username parameter for the mqtt_consumer plugin was not passed to the client because of an incorrect empty check.

closes #781
2016-03-03 12:17:19 +00:00
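
The bug class here is an empty check with inverted effect: credentials are only forwarded when the configured value is empty. A self-contained illustration with hypothetical types (not the plugin's actual code):

package main

import "fmt"

type clientOptions struct{ username string }

func (o *clientOptions) SetUsername(u string) { o.username = u }

func main() {
	configured := "telegraf"
	opts := &clientOptions{}

	// Buggy: the inverted check means a real username is never set.
	if configured == "" {
		opts.SetUsername(configured)
	}
	fmt.Printf("buggy: %q\n", opts.username) // ""

	// Fixed: set credentials only when one was actually configured.
	if configured != "" {
		opts.SetUsername(configured)
	}
	fmt.Printf("fixed: %q\n", opts.username) // "telegraf"
}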
Cameron Sparr
ed9937bbd8 Update all dependency hashes 2016-03-02 10:23:01 +00:00
Cameron Sparr
b2a4d4a018 Allow ssl option specification for httpjson plugin
closes #769
2016-03-01 18:17:19 +00:00
Cameron Sparr
74aaf4f75b Add udp listener to readme list of plugins 2016-03-01 15:46:29 +00:00
Cameron Sparr
2945f9daa9 Changelog update 2016-03-01 15:11:37 +00:00
Cameron Sparr
3b496ab3d8 udp listener: add os buffer size notes & change default port
- using 8092 as the default port because it's similar to the rest of
  the TICK stack (InfluxDB, for example, uses 8083, 8086, 8088, etc.).
  didn't want to use 8125 because that conflicts with statsd.

closes #758
2016-03-01 15:01:07 +00:00
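
For orientation, a minimal UDP listener on the new default port looks roughly like this (a sketch, not the plugin's implementation; the commit's OS buffer note applies because the effective datagram backlog is bounded by the kernel receive buffer):

package main

import (
	"log"
	"net"
)

func main() {
	conn, err := net.ListenPacket("udp", ":8092") // new default port
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	buf := make([]byte, 64*1024) // one datagram per read
	for {
		n, addr, err := conn.ReadFrom(buf)
		if err != nil {
			log.Println("read error:", err)
			continue
		}
		log.Printf("%d bytes from %s: %s", n, addr, buf[:n])
	}
}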
Andrea Leopardi
e1f30aeff9 Add a README for the UDP listener input plugin 2016-03-01 11:24:49 +01:00
Andrea Leopardi
a92e73231d Add tests for the udp_listener input plugin 2016-03-01 11:24:49 +01:00
Aleksei Magusev and Andrea Leopardi
8d91115623 Add generic UDP listener service input 2016-03-01 11:24:49 +01:00
Cameron Sparr
9af8d6912a Remove naoina/toml dependency, use influxdata/toml
closes #745
2016-03-01 10:17:02 +00:00
Pierre Fersing
fe43fb47e1 Fix test
closes #771
2016-03-01 09:44:52 +00:00
Pierre Fersing
ca3a80fbe1 Fix invalid DSN after dsnAddTimeout and "" DSN 2016-03-01 09:43:28 +00:00
Pierre Fersing
f0747e76da Fix newly added test 2016-03-01 09:43:28 +00:00
Pierre Fersing
7416d6ea71 Improve timeout in input plugins 2016-03-01 09:43:28 +00:00
Dirk Pahl
ea7cbc781e Create a FreeBSD build
closes #766
2016-03-01 09:38:58 +00:00
Cameron Sparr
3568fb9f93 Support specifying influxdb retention policy
closes #692
2016-02-29 18:10:32 +00:00
Cameron Sparr
43b7ce4f6d Merge pull request #764 from arthtux/master
Readme for nginx plugin
2016-02-29 11:36:32 +00:00
bastard
baa38d6266 Fixing Librato plugin
closes #722
2016-02-29 11:35:45 +00:00
arthtux
1677960caa correct nginx README 2016-02-28 15:41:16 -05:00
arthtux
0fab573c98 add nginx description 2016-02-28 15:38:46 -05:00
Cameron Sparr
04a8e5b888 influxdb output: try to connect on write if there are no conns 2016-02-26 16:26:43 +00:00
Cameron Sparr
6284e2011c Fix sensor plugin, was splitting on ":" incorrectly
closes #748
2016-02-26 15:21:05 +00:00
Cameron Sparr
a97c93abe4 add usage_percent into docker readme
closes #726
2016-02-26 15:12:37 +00:00
Cameron Sparr
664816383a Update readme to 0.10.4.1 2016-02-24 10:05:10 -07:00
Cameron Sparr
fc4cb1654c Fix deb and rpm packages
closes #752
closes #750
2016-02-24 09:12:14 -07:00
Cameron Sparr
f1fa915985 Release 0.10.4 w/ windows builds 2016-02-23 16:00:39 -07:00
Cameron Sparr
11482a75a1 Changelog update, field and name drop and pass params 2016-02-23 15:47:31 -07:00
Matt Heath
e983d35c25 Add support for multiple field names for timers
closes #737
2016-02-23 15:43:29 -07:00
Cameron Sparr
85c4f753ad modify Windows default conf to use win perf over WMI 2016-02-23 15:40:02 -07:00
Cameron Sparr
1847ce3f3d Experimental windows build process changes 2016-02-23 13:52:24 -07:00
Cameron Sparr
83c27cc7b1 dns query: Don't use mjasion.pl for unit tests, check errs 2016-02-23 12:30:18 -07:00
Cameron Sparr
3e8f96a463 httpjson: add unit test to verify that POST params get passed 2016-02-23 11:01:23 -07:00
Cameron Sparr
69e4f16b13 Fix bad http GET parameter encoding, add unit test 2016-02-23 10:07:56 -07:00
Cameron Sparr
918c3fb260 httpjson test real response from issue #729 2016-02-23 09:34:01 -07:00
Cameron Sparr
54ee44839c Put arm deb and rpm downloads on readme 2016-02-22 16:59:45 -07:00
Cameron Sparr
8362aa9d66 Some windows build script fixes 2016-02-22 15:12:35 -07:00
Cameron Sparr
2a6ff16819 Fix up config panic points for naoina/toml support
closes #736
2016-02-22 14:44:33 -07:00
Pierre Fersing
47ad73cc89 Ignore boring filesystems from disk plugin
Modern Linux has lots of boring filesystems (tmpfs on /dev, devpts on
/dev/pts, lots of cgroups on /sys/fs/cgroup/*, ...).

* Ignore filesystems with 0 bytes (this covers cgroup, devpts, and others).
* Add IgnoreFS to ignore additional filesystems by their type. Add tmpfs and
  devtmpfs as default ignored types.
2016-02-22 14:34:26 -07:00
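
The filter rule reduces to two predicates: a total size of zero, or a filesystem type listed in ignore_fs. A hypothetical sketch of that rule (not the disk plugin's actual code):

package main

import "fmt"

type partition struct {
	fstype string
	total  uint64 // bytes
}

// boring reports whether a partition should be skipped: zero-byte
// pseudo-filesystems, or any type the user listed in ignore_fs.
func boring(p partition, ignoreFS map[string]bool) bool {
	return p.total == 0 || ignoreFS[p.fstype]
}

func main() {
	ignore := map[string]bool{"tmpfs": true, "devtmpfs": true}
	for _, p := range []partition{
		{"ext4", 500e9},
		{"tmpfs", 8e9},
		{"cgroup", 0},
	} {
		fmt.Println(p.fstype, "boring:", boring(p, ignore))
	}
}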
Aurélien DEHAY
9687f71a17 README updated for pgrep user support
closes #724
2016-02-22 14:33:37 -07:00
Aurélien DEHAY
ed684be18d Adding pgrep user support 2016-02-22 14:32:04 -07:00
Cameron Sparr
5aef725c13 Change pass/drop to namepass/namedrop for outputs
closes #730
2016-02-22 13:35:06 -07:00
Thibault Cohen
d00550c45f Add metric pass/drop filter 2016-02-22 12:11:33 -07:00
Cameron Sparr
9ce8d78835 Set running output quiet mode in agent connect func
closes #701
2016-02-22 11:42:02 -07:00
Cameron Sparr
29016822fd Sensors input currently only available if built from source 2016-02-21 16:35:56 -07:00
Marcin Jasion
bb50d7edb4 dns_query plugin fixups:
- renamed plugin to dns_query
- domains are optional
- new record types

closes #694
2016-02-21 16:33:04 -07:00
Marcin Jasion
d43d6f2b13 renamed plugin to dns_query and value to query_time_ms
small polishings

added more record types - AAAA and ANY
2016-02-21 16:21:11 -07:00
Marcin Jasion
636dc27ead Dns query input plugin 2016-02-21 16:21:11 -07:00
Cameron Sparr
a18f535f21 Circle script: unset GOGC so it uses default 2016-02-21 16:00:41 -07:00
Cameron Sparr
6994d4a712 Turn GOGC on for packaging, use go 1.5.3 2016-02-21 10:41:46 -07:00
Cameron Sparr
c9d0ae7cf3 Circle script: create packages if commit is tagged 2016-02-20 12:47:31 -07:00
Jason Coene
9edc25999e Minor formatting improvements
closes #727
2016-02-19 16:18:06 -07:00
Jason Coene
53c130b704 Add riak plugin 2016-02-19 16:16:50 -07:00
Cameron Sparr
e4e174981d Skip snmp tests that require docker in short mode 2016-02-19 16:15:14 -07:00
Cameron Sparr
584a52ac21 InfluxDB output should not default to 'no timeout' for http writes
default to 5s instead, since even if it times out we will cache the
points and move on

closes #685
2016-02-19 15:38:51 -07:00
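
The reasoning: Go's http.Client has a zero Timeout by default, meaning no timeout at all, so one stalled InfluxDB write could block a flush indefinitely; a 5s cap bounds each write, and timed-out points simply stay in the buffer for the next flush. A sketch of the client setting (the endpoint URL is illustrative):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Timeout 0 would mean "wait forever"; 5s bounds each write attempt.
	client := &http.Client{Timeout: 5 * time.Second}

	resp, err := client.Get("http://localhost:8086/ping")
	if err != nil {
		fmt.Println("write failed or timed out; points remain buffered:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}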
Cameron Sparr
f9b5767dae Provide default args: percpu=true and totalcpu=true for cpu plugin
Also if outputs.file is empty, write to stdout

closes #720
2016-02-19 11:56:33 -07:00
Cameron Sparr
3179829fa5 Update changelog for 0.10.3 2016-02-18 17:18:43 -07:00
116 changed files with 7995 additions and 667 deletions

CHANGELOG.md

@@ -1,4 +1,77 @@
## v0.10.3 [unreleased]
## v0.11.1 [unreleased]
### Features
- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref!
- [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou!
### Bugfixes
- [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix
- [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic
## v0.11.0 [2016-03-15]
### Release Notes
### Features
- [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies
- [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugins. Thanks @PierreF!
- [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide!
- [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration.
- [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert!
- [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert!
- [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug!
- [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener.
- [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions.
- [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert!
- [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998!
- [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert!
- [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin.
- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.)
- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics.
### Bugfixes
- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":"
- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty!
- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert!
- [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78!
- [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi!
- [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert!
- [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package
- [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory
- [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable.
- [#828](https://github.com/influxdata/telegraf/issues/828): fix net_response plugin overwriting host tag.
- [#821](https://github.com/influxdata/telegraf/issues/821): Remove postgres password from server tag. Thanks @menardorama!
## v0.10.4.1
### Release Notes
- Bug in the build script broke deb and rpm packages.
### Bugfixes
- [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken
- [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken
## v0.10.4 [2016-02-24]
### Release Notes
- The pass/drop parameters have been renamed to fieldpass/fielddrop parameters,
to more accurately indicate their purpose.
- There are also now namedrop/namepass parameters for passing/dropping based
on the metric _name_.
- Experimental windows builds now available.
### Features
- [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene!
- [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion!
- [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel!
- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF!
- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath!
### Bugfixes
- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldn't print in quiet mode.
- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters.
## v0.10.3 [2016-02-18]
### Release Notes
- Users of the `exec` and `kafka_consumer` (and the new `nats_consumer`

CONTRIBUTING.md

@@ -80,7 +80,7 @@ func (s *Simple) SampleConfig() string {
return "ok = true # indicate if everything is fine"
}
func (s *Simple) Gather(acc inputs.Accumulator) error {
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.Add("state", "pretty good", nil)
} else {

Godeps

@@ -1,52 +1,54 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

Godeps_windows

@@ -1,56 +1,53 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb a9552fdd91361819a792f337e5d9998859732a67
github.com/influxdb/influxdb a9552fdd91361819a792f337e5d9998859732a67
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

Makefile

@@ -22,8 +22,8 @@ build-windows:
./cmd/telegraf/telegraf.go
build-for-docker:
CGO_ENABLED=0 GOOS=linux go -o telegraf -ldflags \
"-X main.Version=$(VERSION)" \
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.Version=$(VERSION)" \
./cmd/telegraf/telegraf.go
# Build with race detector
@@ -92,14 +92,17 @@ docker-kill:
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
# Run full unit tests using docker containers (includes setup and teardown)
test: docker-kill docker-run
test: vet docker-kill docker-run
# Sleeping for kafka leadership election, TSDB setup, etc.
sleep 60
# SUCCESS, running tests
go test -race ./...
# Run "short" unit tests
test-short:
test-short: vet
go test -short ./...
.PHONY: test
vet:
go vet ./...
.PHONY: test test-short vet build default

README.md

@@ -27,8 +27,12 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/)
### Linux deb and rpm Packages:
Latest:
* http://get.influxdb.org/telegraf/telegraf_0.10.2-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1.x86_64.rpm
* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.x86_64.rpm
Latest (arm):
* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_armhf.deb
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.armhf.rpm
0.2.x:
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
@@ -52,9 +56,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`.
### Linux tarballs:
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_arm.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_armhf.tar.gz
0.2.x:
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
@@ -66,13 +70,13 @@ Latest:
To install the full directory structure with config file, run:
```
sudo tar -C / -zxvf ./telegraf-0.10.2-1_linux_amd64.tar.gz
sudo tar -C / -zxvf ./telegraf-0.11.1-1_linux_amd64.tar.gz
```
To extract only the binary, run:
```
tar -zxvf telegraf-0.10.2-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
tar -zxvf telegraf-0.11.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```
### Ansible Role:
@@ -86,6 +90,12 @@ brew update
brew install telegraf
```
### Windows Binaries (EXPERIMENTAL)
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_amd64.zip
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_i386.zip
### From Source:
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
@@ -157,6 +167,7 @@ Currently implemented sources:
* bcache
* couchdb
* disque
* dns query time
* docker
* dovecot
* elasticsearch
@@ -175,6 +186,7 @@ Currently implemented sources:
* net_response
* nginx
* nsq
* ntpq
* phpfpm
* phusion passenger
* ping
@@ -187,12 +199,13 @@ Currently implemented sources:
* raindrops
* redis
* rethinkdb
* riak
* sensors (only available if built from source)
* snmp
* sql server (microsoft)
* twemproxy
* zfs
* zookeeper
* sensors
* snmp
* win_perf_counters (windows performance counters)
* system
* cpu
@@ -202,10 +215,14 @@ Currently implemented sources:
* disk
* diskio
* swap
* processes
* kernel (/proc/stat)
Telegraf can also collect metrics via the following service plugins:
* statsd
* udp_listener
* tcp_listener
* mqtt_consumer
* kafka_consumer
* nats_consumer

agent/accumulator.go

@@ -43,6 +43,11 @@ func (ac *accumulator) Add(
) {
fields := make(map[string]interface{})
fields["value"] = value
if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
}
ac.AddFields(measurement, fields, tags, t...)
}
@@ -56,6 +61,10 @@ func (ac *accumulator) AddFields(
return
}
if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
}
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
return
}
@@ -92,11 +101,10 @@ func (ac *accumulator) AddFields(
for k, v := range fields {
// Filter out any filtered fields
if ac.inputConfig != nil {
if !ac.inputConfig.Filter.ShouldPass(k) {
if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
continue
}
}
result[k] = v
// Validate uint64 and float64 fields
switch val := v.(type) {
@@ -107,6 +115,7 @@ func (ac *accumulator) AddFields(
} else {
result[k] = int64(9223372036854775807)
}
continue
case float64:
// NaNs are invalid values in influxdb, skip measurement
if math.IsNaN(val) || math.IsInf(val, 0) {
@@ -118,6 +127,8 @@ func (ac *accumulator) AddFields(
continue
}
}
result[k] = v
}
fields = nil
if len(result) == 0 {
@@ -159,5 +170,8 @@ func (ac *accumulator) setDefaultTags(tags map[string]string) {
}
func (ac *accumulator) addDefaultTag(key, value string) {
if ac.defaultTags == nil {
ac.defaultTags = make(map[string]string)
}
ac.defaultTags[key] = value
}

agent/accumulator_test.go (new file)

@@ -0,0 +1,302 @@
package agent
import (
"fmt"
"math"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/models"
"github.com/stretchr/testify/assert"
)
func TestAdd(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
actual)
}
func TestAddDefaultTags(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,default=tag value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
actual)
}
func TestAddFields(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": float64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
actual)
}
// Test that all Inf fields get dropped, and not added to metrics channel
func TestAddInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": inf,
"nusage": ninf,
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
assert.Len(t, a.metrics, 0)
// test that non-inf fields are kept and not dropped
fields["notinf"] = float64(100)
a.AddFields("acctest", fields, map[string]string{})
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest notinf=100")
}
// Test that nan fields are dropped and not added
func TestAddNaNFields(t *testing.T) {
nan := math.NaN()
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": nan,
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
assert.Len(t, a.metrics, 0)
// test that non-nan fields are kept and not dropped
fields["notnan"] = float64(100)
a.AddFields("acctest", fields, map[string]string{})
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest notnan=100")
}
func TestAddUint64Fields(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": uint64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
actual)
}
func TestAddUint64Overflow(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": uint64(9223372036854775808),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=9223372036854775807i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
actual)
}
func TestAddInts(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", int(101), map[string]string{})
a.Add("acctest", int32(101), map[string]string{"acc": "test"})
a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,default=tag value=101i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
actual)
}
func TestAddFloats(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", float32(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
actual)
}
func TestAddStrings(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", "test", map[string]string{"acc": "test"})
a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
actual)
}
func TestAddBools(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", true, map[string]string{"acc": "test"})
a.Add("acctest", false, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
actual)
}

agent/agent.go

@@ -44,6 +44,8 @@ func NewAgent(config *config.Config) (*Agent, error) {
// Connect connects to all configured outputs
func (a *Agent) Connect() error {
for _, o := range a.Config.Outputs {
o.Quiet = a.Config.Agent.Quiet
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
if err := ot.Start(); err != nil {

circle.yml

@@ -4,14 +4,17 @@ machine:
post:
- sudo service zookeeper stop
- go version
- go version | grep 1.5.2 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz
- go version | grep 1.5.3 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.5.3.linux-amd64.tar.gz
- go version
dependencies:
override:
- docker info
post:
- gem install fpm
- sudo apt-get install -y rpm python-boto
test:
override:

cmd/telegraf/telegraf.go

@@ -11,8 +11,9 @@ import (
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
)
@@ -30,11 +31,14 @@ var fSampleConfig = flag.Bool("sample-config", false,
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fInputFilters = flag.String("input-filter", "",
"filter the inputs to enable, separator is :")
var fInputList = flag.Bool("input-list", false,
"print available output plugins.")
var fOutputFilters = flag.String("output-filter", "",
"filter the outputs to enable, separator is :")
var fOutputList = flag.Bool("output-list", false,
"print available output plugins.")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fInputFiltersLegacy = flag.String("filter", "",
"filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
@@ -59,7 +63,9 @@ The flags are:
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-input-list print all the available inputs
-output-filter filter the output plugins to enable, separator is :
-output-list print all the available outputs
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
@@ -90,8 +96,9 @@ func main() {
reload <- false
flag.Usage = func() { usageExit(0) }
flag.Parse()
args := flag.Args()
if flag.NFlag() == 0 {
if flag.NFlag() == 0 && len(args) == 0 {
usageExit(0)
}
@@ -115,6 +122,34 @@ func main() {
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
if len(args) > 0 {
switch args[0] {
case "version":
v := fmt.Sprintf("Telegraf - Version %s", Version)
fmt.Println(v)
return
case "config":
config.PrintSampleConfig(inputFilters, outputFilters)
return
}
}
if *fOutputList {
fmt.Println("Available Output Plugins:")
for k, _ := range outputs.Outputs {
fmt.Printf(" %s\n", k)
}
return
}
if *fInputList {
fmt.Println("Available Input Plugins:")
for k, _ := range inputs.Inputs {
fmt.Printf(" %s\n", k)
}
return
}
if *fVersion {
v := fmt.Sprintf("Telegraf - Version %s", Version)
fmt.Println(v)

CONFIGURATION.md

@@ -58,10 +58,14 @@ you can configure that here.
There are also filters that can be configured per input:
* **pass**: An array of strings that is used to filter metrics generated by the
* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and if it matches, the measurement is emitted.
* **namedrop**: The inverse of namepass; if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted.
* **drop**: The inverse of pass, if a field name matches, it is not emitted.
* **fielddrop**: The inverse of fieldpass; if a field name matches, it is not emitted.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag name, and if it matches the measurement is emitted.
@@ -93,7 +97,7 @@ fields which begin with `time_`.
percpu = true
totalcpu = false
# filter all fields beginning with 'time_'
drop = ["time_*"]
fielddrop = ["time_*"]
```
#### Input Config: tagpass and tagdrop
@@ -102,7 +106,7 @@ fields which begin with `time_`.
[[inputs.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
fielddrop = ["cpu_time"]
# Don't collect CPU data for cpu6 & cpu7
[inputs.cpu.tagdrop]
cpu = [ "cpu6", "cpu7" ]
@@ -117,18 +121,32 @@ fields which begin with `time_`.
path = [ "/opt", "/home*" ]
```
#### Input Config: pass and drop
#### Input Config: fieldpass and fielddrop
```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
percpu = false
totalcpu = true
drop = ["usage_guest", "usage_steal"]
fielddrop = ["usage_guest", "usage_steal"]
# Only store inode related metrics for disks
[[inputs.disk]]
pass = ["inodes*"]
fieldpass = ["inodes*"]
```
#### Input Config: namepass and namedrop
```toml
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"]
namedrop = ["container_*"]
# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"]
namepass = ["rest_client_*"]
```
#### Input config: prefix, suffix, and override
@@ -181,7 +199,7 @@ to avoid measurement collisions:
percpu = true
totalcpu = false
name_override = "percpu_usage"
drop = ["cpu_time*"]
fielddrop = ["cpu_time*"]
```
## `[outputs.xxx]` Configuration
@@ -191,7 +209,7 @@ configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.
Outputs also support the same configurable options as inputs
(pass, drop, tagpass, tagdrop)
(namepass, namedrop, tagpass, tagdrop)
```toml
[[outputs.influxdb]]
@@ -199,14 +217,14 @@ Outputs also support the same configurable options as inputs
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
drop = ["aerospike*"]
namedrop = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
pass = ["aerospike*"]
namepass = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]

etc/telegraf.conf

@@ -24,7 +24,7 @@
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 10000
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
@@ -56,27 +56,36 @@
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
## The full HTTP or UDP endpoint URL for your InfluxDB instance.
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
## The target database for metrics (telegraf will create it if not exists).
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
## Retention policy to write to.
retention_policy = "default"
## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
## note: using "s" precision greatly improves InfluxDB compression.
precision = "s"
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
###############################################################################
# INPUTS #
@@ -89,7 +98,7 @@
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["time_*"]
fielddrop = ["time_*"]
# Read metrics about disk usage by mount point
[[inputs.disk]]
@@ -97,6 +106,10 @@
# Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points=["/"]
# Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# present on /run, /var/run, /dev/shm or /dev).
ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
# By default, telegraf will gather stats for all devices including
@@ -106,10 +119,18 @@
# Uncomment the following line if you do not need disk serial numbers.
# skip_serial_number = true
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration

etc/telegraf_windows.conf (new file)

@@ -0,0 +1,164 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on Windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which uses a lot of system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# Processor usage, an alternative to the native collectors; reports per-core stats.
ObjectName = "Processor"
Instances = ["*"]
Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
Measurement = "win_cpu"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
[[inputs.win_perf_counters.object]]
# Disk times and queues
ObjectName = "LogicalDisk"
Instances = ["*"]
Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
Measurement = "win_disk"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
[[inputs.win_perf_counters.object]]
ObjectName = "System"
Counters = ["Context Switches/sec","System Calls/sec"]
Instances = ["------"]
Measurement = "win_system"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
[[inputs.win_perf_counters.object]]
# Example query where the Instance portion must be removed to get data back, such as from the Memory object.
ObjectName = "Memory"
Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
Measurement = "win_mem"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)
# Read metrics about cpu usage
#[[inputs.cpu]]
## Whether to report per-cpu stats or not
#percpu = true
## Whether to report total system cpu stats or not
#totalcpu = true
## Comment this line if you want the raw CPU time metrics
#fielddrop = ["time_*"]
# Read metrics about disk usage by mount point
#[[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
## mount_points=["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
#ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
#[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
## devices = ["sda", "sdb"]
## Uncomment the following line if you do not need disk serial numbers.
## skip_serial_number = true
# Read metrics about memory usage
#[[inputs.mem]]
# no configuration
# Read metrics about swap memory usage
#[[inputs.swap]]
# no configuration

View File

@@ -19,7 +19,7 @@ import (
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/config"
"github.com/naoina/toml/ast"
"github.com/influxdata/toml/ast"
)
// Config specifies the URL/user/password for the database that telegraf
@@ -159,7 +159,7 @@ var header = `# Telegraf Configuration
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 10000
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
@@ -185,25 +185,22 @@ var header = `# Telegraf Configuration
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
#
# OUTPUTS:
#
`
var pluginHeader = `
###############################################################################
# INPUTS #
###############################################################################
#
# INPUTS:
#
`
var serviceInputHeader = `
###############################################################################
# SERVICE INPUTS #
###############################################################################
#
# SERVICE INPUTS:
#
`
// PrintSampleConfig prints the sample config
@@ -429,7 +426,6 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
ro.MetricBufferLimit = c.Agent.MetricBufferLimit
}
ro.FlushBufferWhenFull = c.Agent.FlushBufferWhenFull
ro.Quiet = c.Agent.Quiet
c.Outputs = append(c.Outputs, ro)
return nil
}
@@ -478,18 +474,19 @@ func (c *Config) addInput(name string, table *ast.Table) error {
return nil
}
// buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig to be used for prefix
// filtering on tags and measurements
func buildFilter(tbl *ast.Table) internal_models.Filter {
f := internal_models.Filter{}
if node, ok := tbl.Fields["pass"]; ok {
if node, ok := tbl.Fields["namepass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.Pass = append(f.Pass, str.Value)
f.NamePass = append(f.NamePass, str.Value)
f.IsActive = true
}
}
@@ -497,12 +494,12 @@ func buildFilter(tbl *ast.Table) internal_models.Filter {
}
}
if node, ok := tbl.Fields["drop"]; ok {
if node, ok := tbl.Fields["namedrop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.Drop = append(f.Drop, str.Value)
f.NameDrop = append(f.NameDrop, str.Value)
f.IsActive = true
}
}
@@ -510,6 +507,38 @@ func buildFilter(tbl *ast.Table) internal_models.Filter {
}
}
fields := []string{"pass", "fieldpass"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldPass = append(f.FieldPass, str.Value)
f.IsActive = true
}
}
}
}
}
}
fields = []string{"drop", "fielddrop"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldDrop = append(f.FieldDrop, str.Value)
f.IsActive = true
}
}
}
}
}
}
if node, ok := tbl.Fields["tagpass"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
@@ -548,6 +577,10 @@ func buildFilter(tbl *ast.Table) internal_models.Filter {
}
}
delete(tbl.Fields, "namedrop")
delete(tbl.Fields, "namepass")
delete(tbl.Fields, "fielddrop")
delete(tbl.Fields, "fieldpass")
delete(tbl.Fields, "drop")
delete(tbl.Fields, "pass")
delete(tbl.Fields, "tagdrop")
@@ -717,5 +750,12 @@ func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, er
Name: name,
Filter: buildFilter(tbl),
}
// Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
if len(oc.Filter.FieldDrop) > 0 {
oc.Filter.NameDrop = oc.Filter.FieldDrop
}
if len(oc.Filter.FieldPass) > 0 {
oc.Filter.NamePass = oc.Filter.FieldPass
}
return oc, nil
}
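Read linearly, the two loops above are the whole backwards-compatibility story: the legacy `pass`/`drop` keys and the newer `fieldpass`/`fielddrop` keys funnel into the same field filter. A minimal standalone sketch of that lookup, with a plain map standing in for the parsed `ast.Table` (the map contents here are hypothetical):

```
package main

import "fmt"

// filter mirrors the relevant internal_models.Filter fields from the diff.
type filter struct {
	FieldPass []string
	FieldDrop []string
	IsActive  bool
}

func main() {
	// Stand-in for the parsed TOML table; the real code walks *ast.Table nodes.
	tbl := map[string][]string{
		"pass": {"some", "strings"}, // legacy key
		"drop": {"other", "stuff"},  // legacy key
	}

	f := filter{}
	// Accept both the legacy and the current key names, as buildFilter does.
	for _, key := range []string{"pass", "fieldpass"} {
		if vals, ok := tbl[key]; ok {
			f.FieldPass = append(f.FieldPass, vals...)
			f.IsActive = true
		}
	}
	for _, key := range []string{"drop", "fielddrop"} {
		if vals, ok := tbl[key]; ok {
			f.FieldDrop = append(f.FieldDrop, vals...)
			f.IsActive = true
		}
	}
	fmt.Printf("%+v\n", f) // {FieldPass:[some strings] FieldDrop:[other stuff] IsActive:true}
}
```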

View File

@@ -23,8 +23,10 @@ func TestConfig_LoadSingleInput(t *testing.T) {
mConfig := &internal_models.InputConfig{
Name: "memcached",
Filter: internal_models.Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "badtag",
@@ -66,8 +68,10 @@ func TestConfig_LoadDirectory(t *testing.T) {
mConfig := &internal_models.InputConfig{
Name: "memcached",
Filter: internal_models.Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "badtag",

View File

@@ -1,7 +1,9 @@
[[inputs.memcached]]
servers = ["localhost"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
goodtag = ["mytag"]

View File

@@ -1,5 +1,7 @@
[[inputs.memcached]]
servers = ["192.168.1.1"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"

View File

@@ -15,8 +15,11 @@ type TagFilter struct {
// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
Drop []string
Pass []string
NameDrop []string
NamePass []string
FieldDrop []string
FieldPass []string
TagDrop []TagFilter
TagPass []TagFilter
@@ -25,17 +28,17 @@ type Filter struct {
}
func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
if f.ShouldPass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
return true
}
return false
}
// ShouldPass returns true if the metric should pass, false if should drop
// ShouldNamePass returns true if the metric should pass, false if it should drop
// based on the namedrop/namepass filter parameters
func (f Filter) ShouldPass(key string) bool {
if f.Pass != nil {
for _, pat := range f.Pass {
func (f Filter) ShouldNamePass(key string) bool {
if f.NamePass != nil {
for _, pat := range f.NamePass {
// TODO remove HasPrefix check, leaving it for now for legacy support.
// Cam, 2015-12-07
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
@@ -45,8 +48,36 @@ func (f Filter) ShouldPass(key string) bool {
return false
}
if f.Drop != nil {
for _, pat := range f.Drop {
if f.NameDrop != nil {
for _, pat := range f.NameDrop {
// TODO remove HasPrefix check, leaving it for now for legacy support.
// Cam, 2015-12-07
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
return false
}
}
return true
}
return true
}
// ShouldFieldsPass returns true if the metric should pass, false if it should drop
// based on the fielddrop/fieldpass filter parameters
func (f Filter) ShouldFieldsPass(key string) bool {
if f.FieldPass != nil {
for _, pat := range f.FieldPass {
// TODO remove HasPrefix check, leaving it for now for legacy support.
// Cam, 2015-12-07
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
return true
}
}
return false
}
if f.FieldDrop != nil {
for _, pat := range f.FieldDrop {
// TODO remove HasPrefix check, leaving it for now for legacy support.
// Cam, 2015-12-07
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {

View File

@@ -18,15 +18,15 @@ func TestFilter_Empty(t *testing.T) {
}
for _, measurement := range measurements {
if !f.ShouldPass(measurement) {
if !f.ShouldFieldsPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
}
func TestFilter_Pass(t *testing.T) {
func TestFilter_NamePass(t *testing.T) {
f := Filter{
Pass: []string{"foo*", "cpu_usage_idle"},
NamePass: []string{"foo*", "cpu_usage_idle"},
}
passes := []string{
@@ -45,21 +45,21 @@ func TestFilter_Pass(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldPass(measurement) {
if !f.ShouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldPass(measurement) {
if f.ShouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
}
func TestFilter_Drop(t *testing.T) {
func TestFilter_NameDrop(t *testing.T) {
f := Filter{
Drop: []string{"foo*", "cpu_usage_idle"},
NameDrop: []string{"foo*", "cpu_usage_idle"},
}
drops := []string{
@@ -78,13 +78,79 @@ func TestFilter_Drop(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldPass(measurement) {
if !f.ShouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldPass(measurement) {
if f.ShouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
}
func TestFilter_FieldPass(t *testing.T) {
f := Filter{
FieldPass: []string{"foo*", "cpu_usage_idle"},
}
passes := []string{
"foo",
"foo_bar",
"foo.bar",
"foo-bar",
"cpu_usage_idle",
}
drops := []string{
"bar",
"barfoo",
"bar_foo",
"cpu_usage_busy",
}
for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
}
func TestFilter_FieldDrop(t *testing.T) {
f := Filter{
FieldDrop: []string{"foo*", "cpu_usage_idle"},
}
drops := []string{
"foo",
"foo_bar",
"foo.bar",
"foo-bar",
"cpu_usage_idle",
}
passes := []string{
"bar",
"barfoo",
"bar_foo",
"cpu_usage_busy",
}
for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}

View File

@@ -10,7 +10,7 @@ import (
const (
// Default number of metrics kept between flushes.
DEFAULT_METRIC_BUFFER_LIMIT = 10000
DEFAULT_METRIC_BUFFER_LIMIT = 1000
// Limit how many full metric buffers are kept due to failed writes.
FULL_METRIC_BUFFERS_LIMIT = 100
@@ -82,9 +82,11 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
}
}
} else {
log.Printf("WARNING: overwriting cached metrics, you may want to " +
"increase the metric_buffer_limit setting in your [agent] " +
"config if you do not wish to overwrite metrics.\n")
if ro.overwriteI == 0 {
log.Printf("WARNING: overwriting cached metrics, you may want to " +
"increase the metric_buffer_limit setting in your [agent] " +
"config if you do not wish to overwrite metrics.\n")
}
if ro.overwriteI == len(ro.metrics) {
ro.overwriteI = 0
}
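The overwrite-index logic is easier to see outside the diff. A minimal sketch of the same behavior, with strings standing in for `telegraf.Metric` values:

```
package main

import (
	"fmt"
	"log"
)

// buffer sketches the RunningOutput behavior from the diff: once the metric
// slice is full, new metrics overwrite old ones at overwriteI, and the
// warning fires only on the very first overwrite (overwriteI goes back to 0
// when the buffer is successfully flushed).
type buffer struct {
	metrics    []string
	overwriteI int
}

func (b *buffer) add(m string) {
	if b.overwriteI == 0 {
		log.Println("WARNING: overwriting cached metrics; consider raising metric_buffer_limit")
	}
	if b.overwriteI == len(b.metrics) {
		b.overwriteI = 0
	}
	b.metrics[b.overwriteI] = m
	b.overwriteI++
}

func main() {
	b := &buffer{metrics: make([]string, 3)} // pretend the buffer is already full
	for i := 0; i < 5; i++ {
		b.add(fmt.Sprintf("m%d", i)) // warns once, on the first overwrite
	}
	fmt.Println(b.metrics) // [m3 m4 m2]
}
```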

View File

@@ -30,8 +30,6 @@ The example plugin gathers metrics about example things
### Example Output:
Give an example `-test` output here
```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455

View File

@@ -6,6 +6,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
@@ -28,6 +29,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
@@ -40,13 +42,16 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/raindrops"
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"

View File

@@ -58,7 +58,10 @@ var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{Transport: tr}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
resp, err := client.Get(addr.String())
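This two-layer timeout is the pattern the changeset applies across the HTTP-based inputs below (couchdb, elasticsearch, haproxy, httpjson, influxdb, jolokia, mailchimp, mesos): `ResponseHeaderTimeout` bounds the wait for response headers at the transport, while `Client.Timeout` bounds the entire exchange including the body. A standalone sketch (the URL is hypothetical):

```
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Bound the wait for response headers at the transport, and the whole
	// request (dial + headers + body) at the client.
	tr := &http.Transport{
		ResponseHeaderTimeout: 3 * time.Second,
	}
	client := &http.Client{
		Transport: tr,
		Timeout:   4 * time.Second,
	}
	resp, err := client.Get("http://localhost/server-status?auto")
	if err != nil {
		fmt.Println("request failed or timed out:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```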

View File

@@ -10,6 +10,7 @@ import (
"reflect"
"strings"
"sync"
"time"
)
// Schema:
@@ -112,9 +113,18 @@ func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
}
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error {
response, error := http.Get(host)
response, error := client.Get(host)
if error != nil {
return error
}

View File

@@ -9,6 +9,7 @@ import (
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -30,6 +31,8 @@ var sampleConfig = `
servers = ["localhost"]
`
var defaultTimeout = 5 * time.Second
func (r *Disque) SampleConfig() string {
return sampleConfig
}
@@ -107,7 +110,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
addr.Host = addr.Host + ":" + defaultPort
}
c, err := net.Dial("tcp", addr.Host)
c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
if err != nil {
return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err)
}
@@ -132,6 +135,9 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
g.c = c
}
// Extend connection
g.c.SetDeadline(time.Now().Add(defaultTimeout))
g.c.Write([]byte("info\r\n"))
r := bufio.NewReader(g.c)
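The TCP-based inputs get the analogous treatment: `net.DialTimeout` bounds the connect, and `SetDeadline` bounds each subsequent use of the connection. A self-contained sketch of the pattern (the address assumes Disque's default port; adjust as needed):

```
package main

import (
	"bufio"
	"fmt"
	"net"
	"time"
)

func main() {
	timeout := 5 * time.Second

	// Bound the connect itself...
	c, err := net.DialTimeout("tcp", "localhost:7711", timeout)
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer c.Close()

	// ...and bound each use of the connection. The deadline is absolute, so
	// it must be extended before every read/write cycle, which is what the
	// "Extend connection" lines in the diffs do.
	c.SetDeadline(time.Now().Add(timeout))
	fmt.Fprint(c, "info\r\n")
	line, err := bufio.NewReader(c).ReadString('\n')
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Print(line)
}
```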

View File

@@ -0,0 +1,51 @@
# DNS Query Input Plugin
The DNS query plugin gathers DNS query times in milliseconds, like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\))
### Configuration:
```
# Sample Config:
[[inputs.dns_query]]
## servers to query
servers = ["8.8.8.8"] # required
## Domains or subdomains to query. "." (root) is default
domains = ["."] # optional
## Query record type. Possible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS"
record_type = "A" # optional
## DNS server port. Default is 53
port = 53 # optional
## Query timeout in seconds. Default is 2 seconds
timeout = 2 # optional
```
To query more than one record type, define the plugin multiple times:
```
[[inputs.dns_query]]
domains = ["mjasion.pl"]
servers = ["8.8.8.8", "8.8.4.4"]
record_type = "A"
[[inputs.dns_query]]
domains = ["mjasion.pl"]
servers = ["8.8.8.8", "8.8.4.4"]
record_type = "MX"
```
### Tags:
- server
- domain
- record_type
### Example output:
```
./telegraf -config telegraf.conf -input-filter dns_query -test
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
```

View File

@@ -0,0 +1,159 @@
package dns_query
import (
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/miekg/dns"
"net"
"strconv"
"time"
)
type DnsQuery struct {
// Domains or subdomains to query
Domains []string
// Server to query
Servers []string
// Record type
RecordType string `toml:"record_type"`
// DNS server port number
Port int
// DNS query timeout in seconds; a zero value falls back to the 2s default
Timeout int
}
var sampleConfig = `
## servers to query
servers = ["8.8.8.8"] # required
## Domains or subdomains to query. "." (root) is default
domains = ["."] # optional
## Query record type. Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default is "NS"
record_type = "A" # optional
## DNS server port. Default is 53
port = 53 # optional
## Query timeout in seconds. Default is 2 seconds
timeout = 2 # optional
`
func (d *DnsQuery) SampleConfig() string {
return sampleConfig
}
func (d *DnsQuery) Description() string {
return "Query given DNS server and gives statistics"
}
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
d.setDefaultValues()
for _, domain := range d.Domains {
for _, server := range d.Servers {
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
if err != nil {
return err
}
tags := map[string]string{
"server": server,
"domain": domain,
"record_type": d.RecordType,
}
fields := map[string]interface{}{"query_time_ms": dnsQueryTime}
acc.AddFields("dns_query", fields, tags)
}
}
return nil
}
func (d *DnsQuery) setDefaultValues() {
if len(d.RecordType) == 0 {
d.RecordType = "NS"
}
if len(d.Domains) == 0 {
d.Domains = []string{"."}
d.RecordType = "NS"
}
if d.Port == 0 {
d.Port = 53
}
if d.Timeout == 0 {
d.Timeout = 2
}
}
func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error) {
dnsQueryTime := float64(0)
c := new(dns.Client)
c.ReadTimeout = time.Duration(d.Timeout) * time.Second
m := new(dns.Msg)
recordType, err := d.parseRecordType()
if err != nil {
return dnsQueryTime, err
}
m.SetQuestion(dns.Fqdn(domain), recordType)
m.RecursionDesired = true
r, rtt, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port)))
if err != nil {
return dnsQueryTime, err
}
if r.Rcode != dns.RcodeSuccess {
return dnsQueryTime, fmt.Errorf("Invalid answer name %s after %s query for %s", domain, d.RecordType, domain)
}
dnsQueryTime = float64(rtt.Nanoseconds()) / 1e6
return dnsQueryTime, nil
}
func (d *DnsQuery) parseRecordType() (uint16, error) {
var recordType uint16
var err error
switch d.RecordType {
case "A":
recordType = dns.TypeA
case "AAAA":
recordType = dns.TypeAAAA
case "ANY":
recordType = dns.TypeANY
case "CNAME":
recordType = dns.TypeCNAME
case "MX":
recordType = dns.TypeMX
case "NS":
recordType = dns.TypeNS
case "PTR":
recordType = dns.TypePTR
case "SOA":
recordType = dns.TypeSOA
case "SPF":
recordType = dns.TypeSPF
case "SRV":
recordType = dns.TypeSRV
case "TXT":
recordType = dns.TypeTXT
default:
err = fmt.Errorf("Record type %s not recognized", d.RecordType)
}
return recordType, err
}
func init() {
inputs.Add("dns_query", func() telegraf.Input {
return &DnsQuery{}
})
}
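The plugin's timing comes directly from the miekg/dns client: `Exchange` returns the measured round-trip time alongside the answer. A minimal standalone query mirroring `getDnsQueryTime` (`ReadTimeout` matches the field used in the diff above; later library versions expose different timeout fields):

```
package main

import (
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.ReadTimeout = 2 * time.Second // the same default the plugin applies

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)
	m.RecursionDesired = true

	// Exchange returns the answer plus the measured round-trip time;
	// the plugin converts rtt to milliseconds for query_time_ms.
	r, rtt, err := c.Exchange(m, net.JoinHostPort("8.8.8.8", strconv.Itoa(53)))
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	if r.Rcode != dns.RcodeSuccess {
		fmt.Println("bad rcode:", r.Rcode)
		return
	}
	fmt.Printf("query_time_ms=%f\n", float64(rtt.Nanoseconds())/1e6)
}
```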

View File

@@ -0,0 +1,195 @@
package dns_query
import (
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/miekg/dns"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var servers = []string{"8.8.8.8"}
var domains = []string{"google.com"}
func TestGathering(t *testing.T) {
var dnsConfig = DnsQuery{
Servers: servers,
Domains: domains,
}
var acc testutil.Accumulator
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
queryTime, _ := metric.Fields["query_time_ms"].(float64)
assert.NotEqual(t, 0, queryTime)
}
func TestGatheringMxRecord(t *testing.T) {
var dnsConfig = DnsQuery{
Servers: servers,
Domains: domains,
}
var acc testutil.Accumulator
dnsConfig.RecordType = "MX"
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
queryTime, _ := metric.Fields["query_time_ms"].(float64)
assert.NotEqual(t, 0, queryTime)
}
func TestGatheringRootDomain(t *testing.T) {
var dnsConfig = DnsQuery{
Servers: servers,
Domains: []string{"."},
RecordType: "MX",
}
var acc testutil.Accumulator
tags := map[string]string{
"server": "8.8.8.8",
"domain": ".",
"record_type": "MX",
}
fields := map[string]interface{}{}
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
queryTime, _ := metric.Fields["query_time_ms"].(float64)
fields["query_time_ms"] = queryTime
acc.AssertContainsTaggedFields(t, "dns_query", fields, tags)
}
func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
var dnsConfig = DnsQuery{
Servers: servers,
Domains: domains,
}
var acc testutil.Accumulator
tags := map[string]string{
"server": "8.8.8.8",
"domain": "google.com",
"record_type": "NS",
}
fields := map[string]interface{}{}
err := dnsConfig.Gather(&acc)
assert.NoError(t, err)
metric, ok := acc.Get("dns_query")
require.True(t, ok)
queryTime, _ := metric.Fields["query_time_ms"].(float64)
fields["query_time_ms"] = queryTime
acc.AssertContainsTaggedFields(t, "dns_query", fields, tags)
}
func TestGatheringTimeout(t *testing.T) {
var dnsConfig = DnsQuery{
Servers: servers,
Domains: domains,
}
var acc testutil.Accumulator
dnsConfig.Port = 60054
dnsConfig.Timeout = 1
var err error
channel := make(chan error, 1)
go func() {
channel <- dnsConfig.Gather(&acc)
}()
select {
case res := <-channel:
err = res
case <-time.After(time.Second * 2):
err = nil
}
assert.Error(t, err)
assert.Contains(t, err.Error(), "i/o timeout")
}
func TestSettingDefaultValues(t *testing.T) {
dnsConfig := DnsQuery{}
dnsConfig.setDefaultValues()
assert.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"")
assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'")
assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53")
assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2")
dnsConfig = DnsQuery{Domains: []string{"."}}
dnsConfig.setDefaultValues()
assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'")
}
func TestRecordTypeParser(t *testing.T) {
var dnsConfig = DnsQuery{}
var recordType uint16
dnsConfig.RecordType = "A"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeA, recordType)
dnsConfig.RecordType = "AAAA"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeAAAA, recordType)
dnsConfig.RecordType = "ANY"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeANY, recordType)
dnsConfig.RecordType = "CNAME"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeCNAME, recordType)
dnsConfig.RecordType = "MX"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeMX, recordType)
dnsConfig.RecordType = "NS"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeNS, recordType)
dnsConfig.RecordType = "PTR"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypePTR, recordType)
dnsConfig.RecordType = "SOA"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeSOA, recordType)
dnsConfig.RecordType = "SPF"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeSPF, recordType)
dnsConfig.RecordType = "SRV"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeSRV, recordType)
dnsConfig.RecordType = "TXT"
recordType, _ = dnsConfig.parseRecordType()
assert.Equal(t, dns.TypeTXT, recordType)
}
func TestRecordTypeParserError(t *testing.T) {
var dnsConfig = DnsQuery{}
var err error
dnsConfig.RecordType = "nil"
_, err = dnsConfig.parseRecordType()
assert.Error(t, err)
}

View File

@@ -74,6 +74,7 @@ on the availability of per-cpu stats on your system.
- usage_in_usermode
- usage_system
- usage_total
- usage_percent
- docker_net
- rx_dropped
- rx_bytes
@@ -94,18 +95,50 @@ on the availability of per-cpu stats on your system.
- io_serviced_recursive_sync
- io_serviced_recursive_total
- io_serviced_recursive_write
- docker
- n_used_file_descriptors
- n_cpus
- n_containers
- n_images
- n_goroutines
- n_listener_events
- memory_total
- pool_blocksize
- docker_data
- available
- total
- used
- docker_metadata
- available
- total
- used
### Tags:
- Tags by measurement:
- docker (memory_total)
- unit=bytes
- docker (pool_blocksize)
- unit=bytes
- docker_data
- unit=bytes
- docker_metadata
- unit=bytes
- docker_cpu specific:
- cont_id (container ID)
- cont_image (container image)
- cont_name (container name)
- cpu
- docker_net specific:
- cont_id (container ID)
- cont_image (container image)
- cont_name (container name)
- network
- docker_blkio specific:
- cont_id (container ID)
- cont_image (container image)
- cont_name (container name)
- device
### Example Output:
@@ -113,6 +146,16 @@ on the availability of per-cpu stats on your system.
```
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978
> docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978
> docker,unit=bytes memory_total=18935443456i 1456926671065383978
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\

View File

@@ -1,8 +1,11 @@
package system
import (
"encoding/json"
"fmt"
"log"
"regexp"
"strconv"
"strings"
"sync"
"time"
@@ -17,9 +20,29 @@ type Docker struct {
Endpoint string
ContainerNames []string
client *docker.Client
client DockerClient
}
// DockerClient wraps the Docker client methods the plugin needs,
// defined as an interface so tests can substitute a fake.
type DockerClient interface {
Info() (*docker.Env, error)
ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error)
Stats(opts docker.StatsOptions) error
}
const (
KB = 1000
MB = 1000 * KB
GB = 1000 * MB
TB = 1000 * GB
PB = 1000 * TB
)
var (
sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
)
var sampleConfig = `
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
@@ -58,12 +81,20 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
d.client = c
}
// Get daemon info
err := d.gatherInfo(acc)
if err != nil {
fmt.Println(err.Error())
}
// List containers
opts := docker.ListContainersOptions{}
containers, err := d.client.ListContainers(opts)
if err != nil {
return err
}
// Get container data
var wg sync.WaitGroup
wg.Add(len(containers))
for _, container := range containers {
@@ -81,6 +112,76 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
return nil
}
func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// Init vars
var driverStatus [][]string
dataFields := make(map[string]interface{})
metadataFields := make(map[string]interface{})
now := time.Now()
// Get info from docker daemon
info, err := d.client.Info()
if err != nil {
return err
}
fields := map[string]interface{}{
"n_cpus": info.GetInt64("NCPU"),
"n_used_file_descriptors": info.GetInt64("NFd"),
"n_containers": info.GetInt64("Containers"),
"n_images": info.GetInt64("Images"),
"n_goroutines": info.GetInt64("NGoroutines"),
"n_listener_events": info.GetInt64("NEventsListener"),
}
// Add metrics
acc.AddFields("docker",
fields,
nil,
now)
acc.AddFields("docker",
map[string]interface{}{"memory_total": info.GetInt64("MemTotal")},
map[string]string{"unit": "bytes"},
now)
// Get storage metrics
driverStatusRaw := []byte(info.Get("DriverStatus"))
json.Unmarshal(driverStatusRaw, &driverStatus)
for _, rawData := range driverStatus {
// Try to convert string to int (bytes)
value, err := parseSize(rawData[1])
if err != nil {
continue
}
name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1))
if name == "pool_blocksize" {
// pool blocksize
acc.AddFields("docker",
map[string]interface{}{"pool_blocksize": value},
map[string]string{"unit": "bytes"},
now)
} else if strings.HasPrefix(name, "data_space_") {
// data space
field_name := strings.TrimPrefix(name, "data_space_")
dataFields[field_name] = value
} else if strings.HasPrefix(name, "metadata_space_") {
// metadata space
field_name := strings.TrimPrefix(name, "metadata_space_")
metadataFields[field_name] = value
}
}
if len(dataFields) > 0 {
acc.AddFields("docker_data",
dataFields,
map[string]string{"unit": "bytes"},
now)
}
if len(metadataFields) > 0 {
acc.AddFields("docker_metadata",
metadataFields,
map[string]string{"unit": "bytes"},
now)
}
return nil
}
func (d *Docker) gatherContainer(
container docker.APIContainers,
acc telegraf.Accumulator,
@@ -334,6 +435,27 @@ func sliceContains(in string, sl []string) bool {
return false
}
// Parses the human-readable size string into the amount it represents.
func parseSize(sizeStr string) (int64, error) {
matches := sizeRegex.FindStringSubmatch(sizeStr)
if len(matches) != 4 {
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
}
size, err := strconv.ParseFloat(matches[1], 64)
if err != nil {
return -1, err
}
uMap := map[string]int64{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
unitPrefix := strings.ToLower(matches[3])
if mul, ok := uMap[unitPrefix]; ok {
size *= float64(mul)
}
return int64(size), nil
}
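// Quick sanity check for parseSize, using DriverStatus values from the fake
// daemon in the test file further down; this example function is a
// hypothetical illustration and assumes it compiles in the same package.
func Example_parseSize() {
	for _, s := range []string{"65.54 kB", "17.3 GB", "36.53 GB"} {
		n, _ := parseSize(s)
		fmt.Println(n)
	}
	// Output:
	// 65540
	// 17300000000
	// 36530000000
}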
func init() {
inputs.Add("docker", func() telegraf.Input {
return &Docker{}

View File

@@ -1,12 +1,14 @@
package system
import (
"encoding/json"
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/fsouza/go-dockerclient"
"github.com/stretchr/testify/require"
)
func TestDockerGatherContainerStats(t *testing.T) {
@@ -194,3 +196,186 @@ func testStats() *docker.Stats {
return stats
}
type FakeDockerClient struct {
}
func (d FakeDockerClient) Info() (*docker.Env, error) {
env := docker.Env{"Containers=108", "OomKillDisable=false", "SystemTime=2016-02-24T00:55:09.15073105-05:00", "NEventsListener=0", "ID=5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD", "Debug=false", "LoggingDriver=json-file", "KernelVersion=4.3.0-1-amd64", "IndexServerAddress=https://index.docker.io/v1/", "MemTotal=3840757760", "Images=199", "CpuCfsQuota=true", "Name=absol", "SwapLimit=false", "IPv4Forwarding=true", "ExecutionDriver=native-0.2", "InitSha1=23a51f3c916d2b5a3bbb31caf301fd2d14edd518", "ExperimentalBuild=false", "CpuCfsPeriod=true", "RegistryConfig={\"IndexConfigs\":{\"docker.io\":{\"Mirrors\":null,\"Name\":\"docker.io\",\"Official\":true,\"Secure\":true}},\"InsecureRegistryCIDRs\":[\"127.0.0.0/8\"],\"Mirrors\":null}", "OperatingSystem=Linux Mint LMDE (containerized)", "BridgeNfIptables=true", "HttpsProxy=", "Labels=null", "MemoryLimit=false", "DriverStatus=[[\"Pool Name\",\"docker-8:1-1182287-pool\"],[\"Pool Blocksize\",\"65.54 kB\"],[\"Backing Filesystem\",\"extfs\"],[\"Data file\",\"/dev/loop0\"],[\"Metadata file\",\"/dev/loop1\"],[\"Data Space Used\",\"17.3 GB\"],[\"Data Space Total\",\"107.4 GB\"],[\"Data Space Available\",\"36.53 GB\"],[\"Metadata Space Used\",\"20.97 MB\"],[\"Metadata Space Total\",\"2.147 GB\"],[\"Metadata Space Available\",\"2.127 GB\"],[\"Udev Sync Supported\",\"true\"],[\"Deferred Removal Enabled\",\"false\"],[\"Data loop file\",\"/var/lib/docker/devicemapper/devicemapper/data\"],[\"Metadata loop file\",\"/var/lib/docker/devicemapper/devicemapper/metadata\"],[\"Library Version\",\"1.02.115 (2016-01-25)\"]]", "NFd=19", "HttpProxy=", "Driver=devicemapper", "NGoroutines=39", "InitPath=/usr/lib/docker.io/dockerinit", "NCPU=4", "DockerRootDir=/var/lib/docker", "NoProxy=", "BridgeNfIp6tables=true"}
return &env, nil
}
func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) {
container1 := docker.APIContainers{
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
Image: "quay.io/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941930,
Status: "Up 4 hours",
Ports: []docker.APIPort{
docker.APIPort{
PrivatePort: 7001,
PublicPort: 0,
Type: "tcp",
},
docker.APIPort{
PrivatePort: 4001,
PublicPort: 0,
Type: "tcp",
},
docker.APIPort{
PrivatePort: 2380,
PublicPort: 0,
Type: "tcp",
},
docker.APIPort{
PrivatePort: 2379,
PublicPort: 2379,
Type: "tcp",
IP: "0.0.0.0",
},
},
SizeRw: 0,
SizeRootFs: 0,
Names: []string{"/etcd"},
}
container2 := docker.APIContainers{
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
Image: "quay.io/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941933,
Status: "Up 4 hours",
Ports: []docker.APIPort{
docker.APIPort{
PrivatePort: 7002,
PublicPort: 0,
Type: "tcp",
},
docker.APIPort{
PrivatePort: 4002,
PublicPort: 0,
Type: "tcp",
},
docker.APIPort{
PrivatePort: 2381,
PublicPort: 0,
Type: "tcp",
},
docker.APIPort{
PrivatePort: 2382,
PublicPort: 2382,
Type: "tcp",
IP: "0.0.0.0",
},
},
SizeRw: 0,
SizeRootFs: 0,
Names: []string{"/etcd2"},
}
containers := []docker.APIContainers{container1, container2}
return containers, nil
//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
}
func (d FakeDockerClient) Stats(opts docker.StatsOptions) error {
jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
var stat docker.Stats
json.Unmarshal([]byte(jsonStat), &stat)
opts.Stats <- &stat
return nil
}
func TestDockerGatherInfo(t *testing.T) {
var acc testutil.Accumulator
client := FakeDockerClient{}
d := Docker{client: client}
err := d.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"docker",
map[string]interface{}{
"n_listener_events": int64(0),
"n_cpus": int64(4),
"n_used_file_descriptors": int64(19),
"n_containers": int64(108),
"n_images": int64(199),
"n_goroutines": int64(39),
},
map[string]string{},
)
acc.AssertContainsTaggedFields(t,
"docker_data",
map[string]interface{}{
"used": int64(17300000000),
"total": int64(107400000000),
"available": int64(36530000000),
},
map[string]string{
"unit": "bytes",
},
)
acc.AssertContainsTaggedFields(t,
"docker_cpu",
map[string]interface{}{
"usage_total": uint64(1231652),
},
map[string]string{
"cont_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
"cont_name": "etcd2",
"cont_image": "quay.io/coreos/etcd:v2.2.2",
"cpu": "cpu3",
},
)
acc.AssertContainsTaggedFields(t,
"docker_mem",
map[string]interface{}{
"total_pgpgout": uint64(0),
"usage_percent": float64(0),
"rss": uint64(0),
"total_writeback": uint64(0),
"active_anon": uint64(0),
"total_pgmafault": uint64(0),
"total_rss": uint64(0),
"total_unevictable": uint64(0),
"active_file": uint64(0),
"total_mapped_file": uint64(0),
"pgpgin": uint64(0),
"total_active_file": uint64(0),
"total_active_anon": uint64(0),
"total_cache": uint64(0),
"inactive_anon": uint64(0),
"pgmajfault": uint64(0),
"total_inactive_anon": uint64(0),
"total_rss_huge": uint64(0),
"rss_huge": uint64(0),
"hierarchical_memory_limit": uint64(0),
"pgpgout": uint64(0),
"unevictable": uint64(0),
"total_inactive_file": uint64(0),
"writeback": uint64(0),
"total_pgfault": uint64(0),
"total_pgpgin": uint64(0),
"cache": uint64(0),
"mapped_file": uint64(0),
"inactive_file": uint64(0),
"max_usage": uint64(0),
"fail_count": uint64(0),
"pgfault": uint64(0),
"usage": uint64(0),
"limit": uint64(18935443456),
},
map[string]string{
"cont_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
"cont_name": "etcd2",
"cont_image": "quay.io/coreos/etcd:v2.2.2",
},
)
}
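The reason the diff swaps the concrete `*docker.Client` for the `DockerClient` interface is exactly this test: a hand-rolled fake can return canned daemon data with no Docker running. The same pattern in miniature (the single-method interface here is illustrative, not the real one):

```
package main

import "fmt"

// infoClient is a narrow stand-in for the three-method DockerClient
// interface from the diff; any type with Info() satisfies it.
type infoClient interface {
	Info() (string, error)
}

type realClient struct{} // would wrap *docker.Client

func (realClient) Info() (string, error) { return "live daemon", nil }

type fakeClient struct{} // hand-rolled test double, like FakeDockerClient

func (fakeClient) Info() (string, error) { return "canned test data", nil }

func gather(c infoClient) {
	info, err := c.Info()
	if err != nil {
		fmt.Println("info failed:", err)
		return
	}
	fmt.Println("gathered:", info)
}

func main() {
	gather(realClient{})
	gather(fakeClient{})
}
```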

View File

@@ -34,6 +34,8 @@ var sampleConfig = `
domains = []
`
var defaultTimeout = time.Second * time.Duration(5)
func (d *Dovecot) SampleConfig() string { return sampleConfig }
const defaultPort = "24242"
@@ -74,12 +76,15 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[s
return fmt.Errorf("Error: %s on url %s\n", err, addr)
}
c, err := net.Dial("tcp", addr)
c, err := net.DialTimeout("tcp", addr, defaultTimeout)
if err != nil {
return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err)
}
defer c.Close()
// Extend connection
c.SetDeadline(time.Now().Add(defaultTimeout))
c.Write([]byte("EXPORT\tdomain\n\n"))
var buf bytes.Buffer
io.Copy(&buf, c)

View File

@@ -81,7 +81,12 @@ type Elasticsearch struct {
// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
return &Elasticsearch{client: http.DefaultClient}
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
return &Elasticsearch{client: client}
}
// SampleConfig returns sample configuration for this plugin.

View File

@@ -34,6 +34,9 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
return res, nil
}
func (t *transportMock) CancelRequest(_ *http.Request) {
}
func TestElasticsearch(t *testing.T) {
es := NewElasticsearch()
es.Servers = []string{"http://example.com:9200"}

View File

@@ -73,14 +73,17 @@ func (gh *GithubWebhooks) Stop() {
// Handles the / route
func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
eventType := r.Header.Get("X-Github-Event") // avoid panicking when the header is absent
data, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
e, err := NewEvent(data, eventType)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
gh.Lock()
gh.events = append(gh.events, e)

View File

@@ -129,8 +129,11 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
if g.client == nil {
client := &http.Client{}
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
g.client = client
}

View File

@@ -1,7 +1,6 @@
package httpjson
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
@@ -12,6 +11,7 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
@@ -23,7 +23,17 @@ type HttpJson struct {
TagKeys []string
Parameters map[string]string
Headers map[string]string
client HTTPClient
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
client HTTPClient
}
type HTTPClient interface {
@@ -36,16 +46,27 @@ type HTTPClient interface {
// http.Response: HTTP response object
// error : Any error that may have occurred
MakeRequest(req *http.Request) (*http.Response, error)
SetHTTPClient(client *http.Client)
HTTPClient() *http.Client
}
type RealHTTPClient struct {
client *http.Client
}
func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
func (c *RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
return c.client.Do(req)
}
func (c *RealHTTPClient) SetHTTPClient(client *http.Client) {
c.client = client
}
func (c *RealHTTPClient) HTTPClient() *http.Client {
return c.client
}
var sampleConfig = `
## NOTE: This plugin only reads numerical measurements; strings and booleans
## will be ignored.
@@ -77,6 +98,13 @@ var sampleConfig = `
# [inputs.httpjson.headers]
# X-Auth-Token = "my-xauth-token"
# apiVersion = "v1"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
`
func (h *HttpJson) SampleConfig() string {
@@ -91,6 +119,23 @@ func (h *HttpJson) Description() string {
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
if h.client.HTTPClient() == nil {
tlsCfg, err := internal.GetTLSConfig(
h.SSLCert, h.SSLKey, h.SSLCA, h.InsecureSkipVerify)
if err != nil {
return err
}
tr := &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
TLSClientConfig: tlsCfg,
}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
h.client.SetHTTPClient(client)
}
errorChannel := make(chan error, len(h.Servers))
for _, server := range h.Servers {
@@ -182,15 +227,14 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
}
params := url.Values{}
data := url.Values{}
switch {
case h.Method == "GET":
requestURL.RawQuery = params.Encode()
params := requestURL.Query()
for k, v := range h.Parameters {
params.Add(k, v)
}
requestURL.RawQuery = params.Encode()
case h.Method == "POST":
requestURL.RawQuery = ""
@@ -200,7 +244,8 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
}
// Create + send request
req, err := http.NewRequest(h.Method, requestURL.String(), bytes.NewBufferString(data.Encode()))
req, err := http.NewRequest(h.Method, requestURL.String(),
strings.NewReader(data.Encode()))
if err != nil {
return "", -1, err
}
@@ -244,6 +289,8 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
func init() {
inputs.Add("httpjson", func() telegraf.Input {
return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
return &HttpJson{
client: &RealHTTPClient{},
}
})
}
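`internal.GetTLSConfig` itself is not shown in this changeset; a plausible standalone equivalent of what such a helper does with the `ssl_ca`/`ssl_cert`/`ssl_key` options looks like this (a sketch under that assumption, not the actual helper):

```
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

// getTLSConfig sketches what a helper like internal.GetTLSConfig plausibly
// does with the ssl_* options; the real helper may differ in details.
func getTLSConfig(cert, key, ca string, insecure bool) (*tls.Config, error) {
	cfg := &tls.Config{InsecureSkipVerify: insecure}
	if ca != "" {
		pem, err := ioutil.ReadFile(ca)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(pem) {
			return nil, fmt.Errorf("could not parse CA file %q", ca)
		}
		cfg.RootCAs = pool
	}
	if cert != "" && key != "" {
		pair, err := tls.LoadX509KeyPair(cert, key)
		if err != nil {
			return nil, err
		}
		cfg.Certificates = []tls.Certificate{pair}
	}
	return cfg, nil
}

func main() {
	cfg, err := getTLSConfig("", "", "", true)
	fmt.Println(cfg.InsecureSkipVerify, err) // true <nil>
}
```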

View File

@@ -1,8 +1,10 @@
package httpjson
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
@@ -27,6 +29,75 @@ const validJSON = `
"another_list": [4]
}`
const validJSON2 = `{
"user":{
"hash_rate":0,
"expected_24h_rewards":0,
"total_rewards":0.000595109232,
"paid_rewards":0,
"unpaid_rewards":0.000595109232,
"past_24h_rewards":0,
"total_work":"5172625408",
"blocks_found":0
},
"workers":{
"brminer.1":{
"hash_rate":0,
"hash_rate_24h":0,
"valid_shares":"6176",
"stale_shares":"0",
"invalid_shares":"0",
"rewards":4.5506464e-5,
"rewards_24h":0,
"reset_time":1455409950
},
"brminer.2":{
"hash_rate":0,
"hash_rate_24h":0,
"valid_shares":"0",
"stale_shares":"0",
"invalid_shares":"0",
"rewards":0,
"rewards_24h":0,
"reset_time":1455936726
},
"brminer.3":{
"hash_rate":0,
"hash_rate_24h":0,
"valid_shares":"0",
"stale_shares":"0",
"invalid_shares":"0",
"rewards":0,
"rewards_24h":0,
"reset_time":1455936733
}
},
"pool":{
"hash_rate":114100000,
"active_users":843,
"total_work":"5015346808842682368",
"pps_ratio":1.04,
"pps_rate":7.655e-9
},
"network":{
"hash_rate":1426117703,
"block_number":944895,
"time_per_block":156,
"difficulty":51825.72835216,
"next_difficulty":51916.15249019,
"retarget_time":95053
},
"market":{
"ltc_btc":0.00798,
"ltc_usd":3.37801,
"ltc_eur":3.113,
"ltc_gbp":2.32807,
"ltc_rub":241.796,
"ltc_cny":21.3883,
"btc_usd":422.852
}
}`
const validJSONTags = `
{
"value": 15,
@@ -54,7 +125,7 @@ type mockHTTPClient struct {
// Mock implementation of MakeRequest. Usually returns an http.Response with
// hard-coded responseBody and statusCode. However, if the request uses a
// nonstandard method, it uses status code 405 (method not allowed)
func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
resp := http.Response{}
resp.StatusCode = c.statusCode
@@ -76,6 +147,13 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
return &resp, nil
}
func (c *mockHTTPClient) SetHTTPClient(_ *http.Client) {
}
func (c *mockHTTPClient) HTTPClient() *http.Client {
return nil
}
// Generates a pointer to an HttpJson object that uses a mock HTTP client.
// Parameters:
// response : Body of the response that the mock HTTP client should return
@@ -86,7 +164,7 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
func genMockHttpJson(response string, statusCode int) []*HttpJson {
return []*HttpJson{
&HttpJson{
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
client: &mockHTTPClient{responseBody: response, statusCode: statusCode},
Servers: []string{
"http://server1.example.com/metrics/",
"http://server2.example.com/metrics/",
@@ -103,7 +181,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson {
},
},
&HttpJson{
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
client: &mockHTTPClient{responseBody: response, statusCode: statusCode},
Servers: []string{
"http://server3.example.com/metrics/",
"http://server4.example.com/metrics/",
@@ -149,6 +227,222 @@ func TestHttpJson200(t *testing.T) {
}
}
// Test that GET Parameters from the url string are applied properly
func TestHttpJsonGET_URL(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
key := r.FormValue("api_key")
assert.Equal(t, "mykey", key)
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, validJSON2)
}))
defer ts.Close()
a := HttpJson{
Servers: []string{ts.URL + "?api_key=mykey"},
Name: "",
Method: "GET",
client: &RealHTTPClient{client: &http.Client{}},
}
var acc testutil.Accumulator
err := a.Gather(&acc)
require.NoError(t, err)
// remove response_time from gathered fields because it's non-deterministic
delete(acc.Metrics[0].Fields, "response_time")
fields := map[string]interface{}{
"market_btc_usd": float64(422.852),
"market_ltc_btc": float64(0.00798),
"market_ltc_cny": float64(21.3883),
"market_ltc_eur": float64(3.113),
"market_ltc_gbp": float64(2.32807),
"market_ltc_rub": float64(241.796),
"market_ltc_usd": float64(3.37801),
"network_block_number": float64(944895),
"network_difficulty": float64(51825.72835216),
"network_hash_rate": float64(1.426117703e+09),
"network_next_difficulty": float64(51916.15249019),
"network_retarget_time": float64(95053),
"network_time_per_block": float64(156),
"pool_active_users": float64(843),
"pool_hash_rate": float64(1.141e+08),
"pool_pps_rate": float64(7.655e-09),
"pool_pps_ratio": float64(1.04),
"user_blocks_found": float64(0),
"user_expected_24h_rewards": float64(0),
"user_hash_rate": float64(0),
"user_paid_rewards": float64(0),
"user_past_24h_rewards": float64(0),
"user_total_rewards": float64(0.000595109232),
"user_unpaid_rewards": float64(0.000595109232),
"workers_brminer.1_hash_rate": float64(0),
"workers_brminer.1_hash_rate_24h": float64(0),
"workers_brminer.1_reset_time": float64(1.45540995e+09),
"workers_brminer.1_rewards": float64(4.5506464e-05),
"workers_brminer.1_rewards_24h": float64(0),
"workers_brminer.2_hash_rate": float64(0),
"workers_brminer.2_hash_rate_24h": float64(0),
"workers_brminer.2_reset_time": float64(1.455936726e+09),
"workers_brminer.2_rewards": float64(0),
"workers_brminer.2_rewards_24h": float64(0),
"workers_brminer.3_hash_rate": float64(0),
"workers_brminer.3_hash_rate_24h": float64(0),
"workers_brminer.3_reset_time": float64(1.455936733e+09),
"workers_brminer.3_rewards": float64(0),
"workers_brminer.3_rewards_24h": float64(0),
}
acc.AssertContainsFields(t, "httpjson", fields)
}
// Test that GET Parameters are applied properly
func TestHttpJsonGET(t *testing.T) {
params := map[string]string{
"api_key": "mykey",
}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
key := r.FormValue("api_key")
assert.Equal(t, "mykey", key)
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, validJSON2)
}))
defer ts.Close()
a := HttpJson{
Servers: []string{ts.URL},
Name: "",
Method: "GET",
Parameters: params,
client: &RealHTTPClient{client: &http.Client{}},
}
var acc testutil.Accumulator
err := a.Gather(&acc)
require.NoError(t, err)
// remove response_time from gathered fields because it's non-deterministic
delete(acc.Metrics[0].Fields, "response_time")
fields := map[string]interface{}{
"market_btc_usd": float64(422.852),
"market_ltc_btc": float64(0.00798),
"market_ltc_cny": float64(21.3883),
"market_ltc_eur": float64(3.113),
"market_ltc_gbp": float64(2.32807),
"market_ltc_rub": float64(241.796),
"market_ltc_usd": float64(3.37801),
"network_block_number": float64(944895),
"network_difficulty": float64(51825.72835216),
"network_hash_rate": float64(1.426117703e+09),
"network_next_difficulty": float64(51916.15249019),
"network_retarget_time": float64(95053),
"network_time_per_block": float64(156),
"pool_active_users": float64(843),
"pool_hash_rate": float64(1.141e+08),
"pool_pps_rate": float64(7.655e-09),
"pool_pps_ratio": float64(1.04),
"user_blocks_found": float64(0),
"user_expected_24h_rewards": float64(0),
"user_hash_rate": float64(0),
"user_paid_rewards": float64(0),
"user_past_24h_rewards": float64(0),
"user_total_rewards": float64(0.000595109232),
"user_unpaid_rewards": float64(0.000595109232),
"workers_brminer.1_hash_rate": float64(0),
"workers_brminer.1_hash_rate_24h": float64(0),
"workers_brminer.1_reset_time": float64(1.45540995e+09),
"workers_brminer.1_rewards": float64(4.5506464e-05),
"workers_brminer.1_rewards_24h": float64(0),
"workers_brminer.2_hash_rate": float64(0),
"workers_brminer.2_hash_rate_24h": float64(0),
"workers_brminer.2_reset_time": float64(1.455936726e+09),
"workers_brminer.2_rewards": float64(0),
"workers_brminer.2_rewards_24h": float64(0),
"workers_brminer.3_hash_rate": float64(0),
"workers_brminer.3_hash_rate_24h": float64(0),
"workers_brminer.3_reset_time": float64(1.455936733e+09),
"workers_brminer.3_rewards": float64(0),
"workers_brminer.3_rewards_24h": float64(0),
}
acc.AssertContainsFields(t, "httpjson", fields)
}
// Test that POST Parameters are applied properly
func TestHttpJsonPOST(t *testing.T) {
params := map[string]string{
"api_key": "mykey",
}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
assert.NoError(t, err)
assert.Equal(t, "api_key=mykey", string(body))
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, validJSON2)
}))
defer ts.Close()
a := HttpJson{
Servers: []string{ts.URL},
Name: "",
Method: "POST",
Parameters: params,
client: &RealHTTPClient{client: &http.Client{}},
}
var acc testutil.Accumulator
err := a.Gather(&acc)
require.NoError(t, err)
// remove response_time from gathered fields because it's non-deterministic
delete(acc.Metrics[0].Fields, "response_time")
fields := map[string]interface{}{
"market_btc_usd": float64(422.852),
"market_ltc_btc": float64(0.00798),
"market_ltc_cny": float64(21.3883),
"market_ltc_eur": float64(3.113),
"market_ltc_gbp": float64(2.32807),
"market_ltc_rub": float64(241.796),
"market_ltc_usd": float64(3.37801),
"network_block_number": float64(944895),
"network_difficulty": float64(51825.72835216),
"network_hash_rate": float64(1.426117703e+09),
"network_next_difficulty": float64(51916.15249019),
"network_retarget_time": float64(95053),
"network_time_per_block": float64(156),
"pool_active_users": float64(843),
"pool_hash_rate": float64(1.141e+08),
"pool_pps_rate": float64(7.655e-09),
"pool_pps_ratio": float64(1.04),
"user_blocks_found": float64(0),
"user_expected_24h_rewards": float64(0),
"user_hash_rate": float64(0),
"user_paid_rewards": float64(0),
"user_past_24h_rewards": float64(0),
"user_total_rewards": float64(0.000595109232),
"user_unpaid_rewards": float64(0.000595109232),
"workers_brminer.1_hash_rate": float64(0),
"workers_brminer.1_hash_rate_24h": float64(0),
"workers_brminer.1_reset_time": float64(1.45540995e+09),
"workers_brminer.1_rewards": float64(4.5506464e-05),
"workers_brminer.1_rewards_24h": float64(0),
"workers_brminer.2_hash_rate": float64(0),
"workers_brminer.2_hash_rate_24h": float64(0),
"workers_brminer.2_reset_time": float64(1.455936726e+09),
"workers_brminer.2_rewards": float64(0),
"workers_brminer.2_rewards_24h": float64(0),
"workers_brminer.3_hash_rate": float64(0),
"workers_brminer.3_hash_rate_24h": float64(0),
"workers_brminer.3_reset_time": float64(1.455936733e+09),
"workers_brminer.3_rewards": float64(0),
"workers_brminer.3_rewards_24h": float64(0),
}
acc.AssertContainsFields(t, "httpjson", fields)
}
// Test response to HTTP 500
func TestHttpJson500(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 500)


@@ -7,6 +7,7 @@ import (
"net/http"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -70,6 +71,15 @@ type point struct {
Values map[string]interface{} `json:"values"`
}
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
// Gathers data from a particular URL
// Parameters:
// acc : The telegraf Accumulator to use
@@ -81,7 +91,7 @@ func (i *InfluxDB) gatherURL(
acc telegraf.Accumulator,
url string,
) error {
resp, err := http.Get(url)
resp, err := client.Get(url)
if err != nil {
return err
}
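This diff, and the matching changes to the jolokia, mesos, nginx, nsq, prometheus, rabbitmq, raindrops, and riak plugins below, all introduce the same guard: a shared `http.Client` whose transport bounds the wait for response headers at 3s and whose `Timeout` caps the whole request at 4s, so one stalled endpoint cannot wedge a collection cycle. A minimal self-contained sketch of the pattern (the URL is only a placeholder):
```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Bound the wait for response headers at the transport level...
	tr := &http.Transport{
		ResponseHeaderTimeout: 3 * time.Second,
	}
	// ...and cap the entire request (connect, headers, body read) at the
	// client level, so a stalled server cannot block a Gather cycle forever.
	client := &http.Client{
		Transport: tr,
		Timeout:   4 * time.Second,
	}

	resp, err := client.Get("http://localhost:8086/debug/vars") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```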


@@ -7,6 +7,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -160,6 +161,11 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
func init() {
inputs.Add("jolokia", func() telegraf.Input {
return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}}
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
return &Jolokia{jClient: &JolokiaClientImpl{client: client}}
})
}


@@ -14,10 +14,11 @@ import (
)
type Kafka struct {
ConsumerGroup string
Topics []string
ZookeeperPeers []string
Consumer *consumergroup.ConsumerGroup
ConsumerGroup string
Topics []string
ZookeeperPeers []string
ZookeeperChroot string
Consumer *consumergroup.ConsumerGroup
// Legacy metric buffer support
MetricBuffer int
@@ -48,6 +49,8 @@ var sampleConfig = `
topics = ["telegraf"]
## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
## Zookeeper Chroot
zookeeper_chroot = "/"
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
@@ -80,6 +83,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
k.acc = acc
config := consumergroup.NewConfig()
config.Zookeeper.Chroot = k.ZookeeperChroot
switch strings.ToLower(k.Offset) {
case "oldest", "":
config.Offsets.Initial = sarama.OffsetOldest


@@ -10,6 +10,7 @@ import (
"net/url"
"regexp"
"sync"
"time"
)
const (
@@ -120,7 +121,10 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) {
}
func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
client := &http.Client{Transport: api.Transport}
client := &http.Client{
Transport: api.Transport,
Timeout: time.Duration(4 * time.Second),
}
var b bytes.Buffer
req, err := http.NewRequest("GET", api.url.String(), &b)


@@ -10,6 +10,7 @@ import (
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -261,6 +262,15 @@ func (m *Mesos) removeGroup(j *map[string]interface{}) {
}
}
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
// This should not belong to the object
func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error {
var jsonOut map[string]interface{}
@@ -282,7 +292,7 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error {
ts := strconv.Itoa(m.Timeout) + "ms"
resp, err := http.Get("http://" + a + "/metrics/snapshot?timeout=" + ts)
resp, err := client.Get("http://" + a + "/metrics/snapshot?timeout=" + ts)
if err != nil {
return err


@@ -26,6 +26,9 @@ type MQTTConsumer struct {
// Legacy metric buffer support
MetricBuffer int
PersistentSession bool
ClientID string `toml:"client_id"`
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
@@ -57,6 +60,13 @@ var sampleConfig = `
"sensors/#",
]
# if true, messages that can't be delivered while the subscriber is offline
# will be delivered when it comes back (such as on service restart).
# NOTE: if true, client_id MUST be set
persistent_session = false
# If empty, a random client ID will be generated.
client_id = ""
## username and password to connect MQTT server.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
@@ -91,6 +101,11 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
m.Lock()
defer m.Unlock()
if m.PersistentSession && m.ClientID == "" {
return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" +
" = true, you MUST also set client_id")
}
m.acc = acc
if m.QoS > 2 || m.QoS < 0 {
return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS)
@@ -166,7 +181,11 @@ func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
opts := mqtt.NewClientOptions()
opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
if m.ClientID == "" {
opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
} else {
opts.SetClientID(m.ClientID)
}
tlsCfg, err := internal.GetTLSConfig(
m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify)
@@ -181,7 +200,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
}
user := m.Username
if user == "" {
if user != "" {
opts.SetUsername(user)
}
password := m.Password
@@ -199,6 +218,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
}
opts.SetAutoReconnect(true)
opts.SetKeepAlive(time.Second * 60)
opts.SetCleanSession(!m.PersistentSession)
return opts, nil
}


@@ -7,6 +7,8 @@ import (
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
@@ -28,6 +30,52 @@ func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) {
return n, in
}
// Test that default client has random ID
func TestRandomClientID(t *testing.T) {
m1 := &MQTTConsumer{
Servers: []string{"localhost:1883"}}
opts, err := m1.createOpts()
assert.NoError(t, err)
m2 := &MQTTConsumer{
Servers: []string{"localhost:1883"}}
opts2, err2 := m2.createOpts()
assert.NoError(t, err2)
assert.NotEqual(t, opts.ClientID, opts2.ClientID)
}
// Test that a user-provided client ID is used as-is
func TestClientID(t *testing.T) {
m1 := &MQTTConsumer{
Servers: []string{"localhost:1883"},
ClientID: "telegraf-test",
}
opts, err := m1.createOpts()
assert.NoError(t, err)
m2 := &MQTTConsumer{
Servers: []string{"localhost:1883"},
ClientID: "telegraf-test",
}
opts2, err2 := m2.createOpts()
assert.NoError(t, err2)
assert.Equal(t, "telegraf-test", opts2.ClientID)
assert.Equal(t, "telegraf-test", opts.ClientID)
}
// Test that Start() fails if client ID is not set but persistent is
func TestPersistentClientIDFail(t *testing.T) {
m1 := &MQTTConsumer{
Servers: []string{"localhost:1883"},
PersistentSession: true,
}
acc := testutil.Accumulator{}
err := m1.Start(&acc)
assert.Error(t, err)
}
// Test that the parser parses MQTT messages into metrics
func TestRunParser(t *testing.T) {
n, in := newTestMQTTConsumer()


@@ -2,8 +2,10 @@ package mysql
import (
"database/sql"
"net/url"
"strconv"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/influxdata/telegraf"
@@ -26,6 +28,8 @@ var sampleConfig = `
servers = ["tcp(127.0.0.1:3306)/"]
`
var defaultTimeout = time.Second * time.Duration(5)
func (m *Mysql) SampleConfig() string {
return sampleConfig
}
@@ -122,6 +126,10 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
serv = ""
}
serv, err := dsnAddTimeout(serv)
if err != nil {
return err
}
db, err := sql.Open("mysql", serv)
if err != nil {
return err
@@ -207,6 +215,27 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
return nil
}
func dsnAddTimeout(dsn string) (string, error) {
// DSN "?timeout=5s" is not valid, but "/?timeout=5s" is valid ("" and "/"
// are the same DSN)
if dsn == "" {
dsn = "/"
}
u, err := url.Parse(dsn)
if err != nil {
return "", err
}
v := u.Query()
// Only override timeout if not already defined
if _, ok := v["timeout"]; !ok {
v.Add("timeout", defaultTimeout.String())
u.RawQuery = v.Encode()
}
return u.String(), nil
}
func init() {
inputs.Add("mysql", func() telegraf.Input {
return &Mysql{}


@@ -84,3 +84,34 @@ func TestMysqlParseDSN(t *testing.T) {
}
}
}
func TestMysqlDSNAddTimeout(t *testing.T) {
tests := []struct {
input string
output string
}{
{
"",
"/?timeout=5s",
},
{
"tcp(192.168.1.1:3306)/",
"tcp(192.168.1.1:3306)/?timeout=5s",
},
{
"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
"root:passwd@tcp(192.168.1.1:3306)/?timeout=5s&tls=false",
},
{
"root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s",
"root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s",
},
}
for _, test := range tests {
output, _ := dsnAddTimeout(test.input)
if output != test.output {
t.Errorf("Expected %s, got %s\n", test.output, output)
}
}
}


@@ -52,7 +52,7 @@ It can also check response text.
### Tags:
- All measurements have the following tags:
- host
- server
- port
- protocol
@@ -60,7 +60,7 @@ It can also check response text.
```
$ ./telegraf -config telegraf.conf -input-filter net_response -test
net_response,host=127.0.0.1,port=22,protocol=tcp response_time=0.18070360500000002,string_found=true 1454785464182527094
net_response,host=127.0.0.1,port=2222,protocol=tcp response_time=1.090124776,string_found=false 1454784433658942325
net_response,server=192.168.2.2,port=22,protocol=tcp response_time=0.18070360500000002,string_found=true 1454785464182527094
net_response,server=192.168.2.2,port=2222,protocol=tcp response_time=1.090124776,string_found=false 1454784433658942325
```


@@ -169,7 +169,7 @@ func (c *NetResponse) Gather(acc telegraf.Accumulator) error {
return errors.New("Bad port")
}
// Prepare data
tags := map[string]string{"host": host, "port": port}
tags := map[string]string{"server": host, "port": port}
var fields map[string]interface{}
// Gather data
if c.Protocol == "tcp" {


@@ -69,7 +69,7 @@ func TestTCPOK1(t *testing.T) {
"string_found": true,
"response_time": 1.0,
},
map[string]string{"host": "127.0.0.1",
map[string]string{"server": "127.0.0.1",
"port": "2004",
"protocol": "tcp",
},
@@ -109,7 +109,7 @@ func TestTCPOK2(t *testing.T) {
"string_found": false,
"response_time": 1.0,
},
map[string]string{"host": "127.0.0.1",
map[string]string{"server": "127.0.0.1",
"port": "2004",
"protocol": "tcp",
},
@@ -164,7 +164,7 @@ func TestUDPOK1(t *testing.T) {
"string_found": true,
"response_time": 1.0,
},
map[string]string{"host": "127.0.0.1",
map[string]string{"server": "127.0.0.1",
"port": "2004",
"protocol": "udp",
},


@@ -0,0 +1,46 @@
# Telegraf Plugin: Nginx
### Configuration:
```
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
## An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/server_status"]
```
### Measurements & Fields:
- Measurement
- accepts
- active
- handled
- reading
- requests
- waiting
- writing
### Tags:
- All measurements have the following tags:
- port
- server
### Example Output:
Using this configuration:
```
[[inputs.nginx]]
## An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
```
When run with:
```
./telegraf -config telegraf.conf -input-filter nginx -test
```
It produces:
```
* Plugin: nginx, Collection 1
> nginx,port=80,server=localhost accepts=605i,active=2i,handled=605i,reading=0i,requests=12132i,waiting=1i,writing=1i 1456690994701784331
```


@@ -58,7 +58,10 @@ var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{Transport: tr}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
resp, err := client.Get(addr.String())


@@ -84,7 +84,10 @@ var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{Transport: tr}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error {
u, err := buildURL(e)


@@ -0,0 +1,60 @@
# ntpq Input Plugin
Get standard NTP query metrics, requires ntpq executable.
Below is the documentation of the various headers returned from the NTP query
command when running `ntpq -p`.
- remote The remote peer or server being synced to. “LOCAL” is this local host
(included in case there are no remote peers or servers available);
- refid Where or what the remote peer or server is itself synchronised to;
- st (stratum) The remote peer or server Stratum
- t (type) Type (u: unicast or manycast client, b: broadcast or multicast client,
l: local reference clock, s: symmetric peer, A: manycast server,
B: broadcast server, M: multicast server, see “Automatic Server Discovery“);
- when When last polled (seconds ago, “h” hours ago, or “d” days ago);
- poll Polling frequency: RFC 5905 suggests this ranges in NTPv4 from 4 (16s)
to 17 (36h) (log2 seconds); however, observation suggests the actual displayed
value is seconds, for a much smaller range of 64 (2^6) to 1024 (2^10) seconds;
- reach An 8-bit left-shift shift register value recording polls (bit set =
successful, bit reset = fail) displayed in octal (see the sketch after this list);
- delay Round trip communication delay to the remote peer or server (milliseconds);
- offset Mean offset (phase) in the times reported between this local host and
the remote peer or server (RMS, milliseconds);
- jitter Mean deviation (jitter) in the time reported for that remote peer or
server (RMS of difference of multiple time samples, milliseconds);
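Because `reach` is an octal shift register, counting its set bits yields how many of the last eight polls succeeded. A minimal sketch of that interpretation (an illustrative helper only; the plugin itself stores `reach` as a plain integer):
```go
package main

import (
	"fmt"
	"strconv"
)

// successfulPolls interprets the octal "reach" column: an 8-bit shift
// register in which each set bit records one successful poll out of
// the last eight.
func successfulPolls(reach string) (int, error) {
	v, err := strconv.ParseUint(reach, 8, 16) // the column is printed in octal
	if err != nil {
		return 0, err
	}
	count := 0
	for ; v != 0; v >>= 1 {
		count += int(v & 1)
	}
	return count, nil
}

func main() {
	n, _ := successfulPolls("377") // octal 377 == binary 11111111
	fmt.Println(n)                 // 8: all of the last eight polls succeeded
}
```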
### Configuration:
```toml
# Get standard NTP query metrics, requires ntpq executable
[[inputs.ntpq]]
## If false, set the -n ntpq flag. Can reduce metric gather times.
dns_lookup = true
```
### Measurements & Fields:
- ntpq
- delay (float, milliseconds)
- jitter (float, milliseconds)
- offset (float, milliseconds)
- poll (int, seconds)
- reach (int)
- when (int, seconds)
### Tags:
- All measurements have the following tags:
- refid
- remote
- type
- stratum
### Example Output:
```
$ telegraf -config ~/ws/telegraf.conf -input-filter ntpq -test
* Plugin: ntpq, Collection 1
> ntpq,refid=.GPSs.,remote=*time.apple.com,stratum=1,type=u delay=91.797,jitter=3.735,offset=12.841,poll=64i,reach=377i,when=35i 1457960478909556134
```
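Note that the `when` column switches from plain seconds to `m`/`h`/`d` suffixes for long intervals; the plugin (see `ntpq.go` below) normalizes all of these to seconds. A compact sketch of the same conversion:
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// whenToSeconds normalizes an ntpq "when" column value to seconds,
// handling the m/h/d suffixes ntpq uses for long poll intervals.
func whenToSeconds(when string) (int64, error) {
	mult := int64(1)
	switch {
	case strings.HasSuffix(when, "m"):
		mult, when = 60, strings.TrimSuffix(when, "m")
	case strings.HasSuffix(when, "h"):
		mult, when = 3600, strings.TrimSuffix(when, "h")
	case strings.HasSuffix(when, "d"):
		mult, when = 86400, strings.TrimSuffix(when, "d")
	}
	n, err := strconv.ParseInt(when, 10, 64)
	if err != nil {
		return 0, err
	}
	return n * mult, nil
}

func main() {
	for _, w := range []string{"101", "2m", "2h", "2d"} {
		s, _ := whenToSeconds(w)
		fmt.Printf("%s -> %ds\n", w, s) // 101s, 120s, 7200s, 172800s
	}
}
```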

plugins/inputs/ntpq/ntpq.go (new file)

@@ -0,0 +1,202 @@
// +build !windows
package ntpq
import (
"bufio"
"bytes"
"log"
"os/exec"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Mapping of ntpq header names to tag keys
var tagHeaders map[string]string = map[string]string{
"remote": "remote",
"refid": "refid",
"st": "stratum",
"t": "type",
}
// Mapping of the ntpq tag key to the index in the command output
var tagI map[string]int = map[string]int{
"remote": -1,
"refid": -1,
"stratum": -1,
"type": -1,
}
// Mapping of float metrics to their index in the command output
var floatI map[string]int = map[string]int{
"delay": -1,
"offset": -1,
"jitter": -1,
}
// Mapping of int metrics to their index in the command output
var intI map[string]int = map[string]int{
"when": -1,
"poll": -1,
"reach": -1,
}
type NTPQ struct {
runQ func() ([]byte, error)
DNSLookup bool `toml:"dns_lookup"`
}
func (n *NTPQ) Description() string {
return "Get standard NTP query metrics, requires ntpq executable."
}
func (n *NTPQ) SampleConfig() string {
return `
## If false, set the -n ntpq flag. Can reduce metric gather time.
dns_lookup = true
`
}
func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
out, err := n.runQ()
if err != nil {
return err
}
lineCounter := 0
scanner := bufio.NewScanner(bytes.NewReader(out))
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
if len(fields) < 2 {
continue
}
// If lineCounter == 0, then this is the header line
if lineCounter == 0 {
for i, field := range fields {
// Check if field is a tag:
if tagKey, ok := tagHeaders[field]; ok {
tagI[tagKey] = i
continue
}
// check if field is a float metric:
if _, ok := floatI[field]; ok {
floatI[field] = i
continue
}
// check if field is an int metric:
if _, ok := intI[field]; ok {
intI[field] = i
continue
}
}
} else {
tags := make(map[string]string)
mFields := make(map[string]interface{})
// Get tags from output
for key, index := range tagI {
if index == -1 {
continue
}
tags[key] = fields[index]
}
// Get integer metrics from output
for key, index := range intI {
if index == -1 {
continue
}
if key == "when" {
when := fields[index]
switch {
case strings.HasSuffix(when, "h"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
if err != nil {
log.Printf("ERROR ntpq: parsing int: %s", fields[index])
continue
}
// seconds in an hour
mFields[key] = int64(m) * 3600
continue
case strings.HasSuffix(when, "d"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
if err != nil {
log.Printf("ERROR ntpq: parsing int: %s", fields[index])
continue
}
// seconds in a day
mFields[key] = int64(m) * 86400
continue
case strings.HasSuffix(when, "m"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
if err != nil {
log.Printf("ERROR ntpq: parsing int: %s", fields[index])
continue
}
// seconds in a minute
mFields[key] = int64(m) * 60
continue
}
}
m, err := strconv.Atoi(fields[index])
if err != nil {
log.Printf("ERROR ntpq: parsing int: %s", fields[index])
continue
}
mFields[key] = int64(m)
}
// get float metrics from output
for key, index := range floatI {
if index == -1 {
continue
}
m, err := strconv.ParseFloat(fields[index], 64)
if err != nil {
log.Printf("ERROR ntpq: parsing float: %s", fields[index])
continue
}
mFields[key] = m
}
acc.AddFields("ntpq", mFields, tags)
}
lineCounter++
}
return nil
}
func (n *NTPQ) runq() ([]byte, error) {
bin, err := exec.LookPath("ntpq")
if err != nil {
return nil, err
}
var cmd *exec.Cmd
if n.DNSLookup {
cmd = exec.Command(bin, "-p")
} else {
cmd = exec.Command(bin, "-p", "-n")
}
return cmd.Output()
}
func init() {
inputs.Add("ntpq", func() telegraf.Input {
n := &NTPQ{}
n.runQ = n.runq
return n
})
}


@@ -0,0 +1,422 @@
// +build !windows
package ntpq
import (
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
func TestSingleNTPQ(t *testing.T) {
tt := tester{
ret: []byte(singleNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(101),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestBadIntNTPQ(t *testing.T) {
tt := tester{
ret: []byte(badIntParseNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(101),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestBadFloatNTPQ(t *testing.T) {
tt := tester{
ret: []byte(badFloatParseNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(2),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestDaysNTPQ(t *testing.T) {
tt := tester{
ret: []byte(whenDaysNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(172800),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestHoursNTPQ(t *testing.T) {
tt := tester{
ret: []byte(whenHoursNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(720),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestMinutesNTPQ(t *testing.T) {
tt := tester{
ret: []byte(whenMinutesNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(120),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestBadWhenNTPQ(t *testing.T) {
tt := tester{
ret: []byte(whenBadNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestMultiNTPQ(t *testing.T) {
tt := tester{
ret: []byte(multiNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"delay": float64(54.033),
"jitter": float64(449514),
"offset": float64(243.426),
"poll": int64(1024),
"reach": int64(377),
"when": int64(740),
}
tags := map[string]string{
"refid": "10.177.80.37",
"remote": "83.137.98.96",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
fields = map[string]interface{}{
"delay": float64(60.785),
"jitter": float64(449539),
"offset": float64(232.597),
"poll": int64(1024),
"reach": int64(377),
"when": int64(739),
}
tags = map[string]string{
"refid": "10.177.80.37",
"remote": "81.7.16.52",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestBadHeaderNTPQ(t *testing.T) {
resetVars()
tt := tester{
ret: []byte(badHeaderNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(101),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestMissingDelayColumnNTPQ(t *testing.T) {
resetVars()
tt := tester{
ret: []byte(missingDelayNTPQ),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(101),
"poll": int64(256),
"reach": int64(37),
"offset": float64(233.010),
"jitter": float64(17.462),
}
tags := map[string]string{
"remote": "*uschi5-ntp-002.",
"refid": "10.177.80.46",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestFailedNTPQ(t *testing.T) {
tt := tester{
ret: []byte(singleNTPQ),
err: fmt.Errorf("Test failure"),
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.Error(t, n.Gather(&acc))
}
type tester struct {
ret []byte
err error
}
func (t *tester) runqTest() ([]byte, error) {
return t.ret, t.err
}
func resetVars() {
// Mapping of ntpq header names to tag keys
tagHeaders = map[string]string{
"remote": "remote",
"refid": "refid",
"st": "stratum",
"t": "type",
}
// Mapping of the ntpq tag key to the index in the command output
tagI = map[string]int{
"remote": -1,
"refid": -1,
"stratum": -1,
"type": -1,
}
// Mapping of float metrics to their index in the command output
floatI = map[string]int{
"delay": -1,
"offset": -1,
"jitter": -1,
}
// Mapping of int metrics to their index in the command output
intI = map[string]int{
"when": -1,
"poll": -1,
"reach": -1,
}
}
var singleNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
`
var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
`
var missingDelayNTPQ = `remote refid foobar t when poll reach offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 233.010 17.462
`
var whenDaysNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2d 256 37 51.016 233.010 17.462
`
var whenHoursNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2h 256 37 51.016 233.010 17.462
`
var whenMinutesNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2m 256 37 51.016 233.010 17.462
`
var whenBadNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2q 256 37 51.016 233.010 17.462
`
var badFloatParseNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 2 256 37 51.016 foobar 17.462
`
var badIntParseNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 foobar 37 51.016 233.010 17.462
`
var multiNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
83.137.98.96 10.177.80.37 2 u 740 1024 377 54.033 243.426 449514.
81.7.16.52 10.177.80.37 2 u 739 1024 377 60.785 232.597 449539.
131.188.3.221 10.177.80.37 2 u 783 1024 377 111.820 261.921 449528.
5.9.29.107 10.177.80.37 2 u 703 1024 377 205.704 160.406 449602.
91.189.94.4 10.177.80.37 2 u 673 1024 377 143.047 274.726 449445.
`


@@ -0,0 +1,3 @@
// +build windows
package ntpq


@@ -0,0 +1,331 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package phpfpm
// This file implements FastCGI from the perspective of a child process.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/cgi"
"os"
"strings"
"sync"
"time"
)
// request holds the state for an in-progress request. As soon as it's complete,
// it's converted to an http.Request.
type request struct {
pw *io.PipeWriter
reqId uint16
params map[string]string
buf [1024]byte
rawParams []byte
keepConn bool
}
func newRequest(reqId uint16, flags uint8) *request {
r := &request{
reqId: reqId,
params: map[string]string{},
keepConn: flags&flagKeepConn != 0,
}
r.rawParams = r.buf[:0]
return r
}
// parseParams reads an encoded []byte into Params.
func (r *request) parseParams() {
text := r.rawParams
r.rawParams = nil
for len(text) > 0 {
keyLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
valLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
if int(keyLen)+int(valLen) > len(text) {
return
}
key := readString(text, keyLen)
text = text[keyLen:]
val := readString(text, valLen)
text = text[valLen:]
r.params[key] = val
}
}
// response implements http.ResponseWriter.
type response struct {
req *request
header http.Header
w *bufWriter
wroteHeader bool
}
func newResponse(c *child, req *request) *response {
return &response{
req: req,
header: http.Header{},
w: newWriter(c.conn, typeStdout, req.reqId),
}
}
func (r *response) Header() http.Header {
return r.header
}
func (r *response) Write(data []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
return r.w.Write(data)
}
func (r *response) WriteHeader(code int) {
if r.wroteHeader {
return
}
r.wroteHeader = true
if code == http.StatusNotModified {
// Must not have body.
r.header.Del("Content-Type")
r.header.Del("Content-Length")
r.header.Del("Transfer-Encoding")
} else if r.header.Get("Content-Type") == "" {
r.header.Set("Content-Type", "text/html; charset=utf-8")
}
if r.header.Get("Date") == "" {
r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
}
fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
r.header.Write(r.w)
r.w.WriteString("\r\n")
}
func (r *response) Flush() {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
r.w.Flush()
}
func (r *response) Close() error {
r.Flush()
return r.w.Close()
}
type child struct {
conn *conn
handler http.Handler
mu sync.Mutex // protects requests:
requests map[uint16]*request // keyed by request ID
}
func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child {
return &child{
conn: newConn(rwc),
handler: handler,
requests: make(map[uint16]*request),
}
}
func (c *child) serve() {
defer c.conn.Close()
defer c.cleanUp()
var rec record
for {
if err := rec.read(c.conn.rwc); err != nil {
return
}
if err := c.handleRecord(&rec); err != nil {
return
}
}
}
var errCloseConn = errors.New("fcgi: connection should be closed")
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// ErrRequestAborted is returned by Read when a handler attempts to read the
// body of a request that has been aborted by the web server.
var ErrRequestAborted = errors.New("fcgi: request aborted by web server")
// ErrConnClosed is returned by Read when a handler attempts to read the body of
// a request after the connection to the web server has been closed.
var ErrConnClosed = errors.New("fcgi: connection to web server closed")
func (c *child) handleRecord(rec *record) error {
c.mu.Lock()
req, ok := c.requests[rec.h.Id]
c.mu.Unlock()
if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
// The spec says to ignore unknown request IDs.
return nil
}
switch rec.h.Type {
case typeBeginRequest:
if req != nil {
// The server is trying to begin a request with the same ID
// as an in-progress request. This is an error.
return errors.New("fcgi: received ID that is already in-flight")
}
var br beginRequest
if err := br.read(rec.content()); err != nil {
return err
}
if br.role != roleResponder {
c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
return nil
}
req = newRequest(rec.h.Id, br.flags)
c.mu.Lock()
c.requests[rec.h.Id] = req
c.mu.Unlock()
return nil
case typeParams:
// NOTE(eds): Technically a key-value pair can straddle the boundary
// between two packets. We buffer until we've received all parameters.
if len(rec.content()) > 0 {
req.rawParams = append(req.rawParams, rec.content()...)
return nil
}
req.parseParams()
return nil
case typeStdin:
content := rec.content()
if req.pw == nil {
var body io.ReadCloser
if len(content) > 0 {
// body could be an io.LimitReader, but it shouldn't matter
// as long as both sides are behaving.
body, req.pw = io.Pipe()
} else {
body = emptyBody
}
go c.serveRequest(req, body)
}
if len(content) > 0 {
// TODO(eds): This blocks until the handler reads from the pipe.
// If the handler takes a long time, it might be a problem.
req.pw.Write(content)
} else if req.pw != nil {
req.pw.Close()
}
return nil
case typeGetValues:
values := map[string]string{"FCGI_MPXS_CONNS": "1"}
c.conn.writePairs(typeGetValuesResult, 0, values)
return nil
case typeData:
// If the filter role is implemented, read the data stream here.
return nil
case typeAbortRequest:
c.mu.Lock()
delete(c.requests, rec.h.Id)
c.mu.Unlock()
c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
if req.pw != nil {
req.pw.CloseWithError(ErrRequestAborted)
}
if !req.keepConn {
// connection will close upon return
return errCloseConn
}
return nil
default:
b := make([]byte, 8)
b[0] = byte(rec.h.Type)
c.conn.writeRecord(typeUnknownType, 0, b)
return nil
}
}
func (c *child) serveRequest(req *request, body io.ReadCloser) {
r := newResponse(c, req)
httpReq, err := cgi.RequestFromMap(req.params)
if err != nil {
// there was an error reading the request
r.WriteHeader(http.StatusInternalServerError)
c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
} else {
httpReq.Body = body
c.handler.ServeHTTP(r, httpReq)
}
r.Close()
c.mu.Lock()
delete(c.requests, req.reqId)
c.mu.Unlock()
c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
// Consume the entire body, so the host isn't still writing to
// us when we close the socket below in the !keepConn case,
// otherwise we'd send a RST. (golang.org/issue/4183)
// TODO(bradfitz): also bound this copy in time. Or send
// some sort of abort request to the host, so the host
// can properly cut off the client sending all the data.
// For now just bound it a little and
io.CopyN(ioutil.Discard, body, 100<<20)
body.Close()
if !req.keepConn {
c.conn.Close()
}
}
func (c *child) cleanUp() {
c.mu.Lock()
defer c.mu.Unlock()
for _, req := range c.requests {
if req.pw != nil {
// race with call to Close in c.serveRequest doesn't matter because
// Pipe(Reader|Writer).Close are idempotent
req.pw.CloseWithError(ErrConnClosed)
}
}
}
// Serve accepts incoming FastCGI connections on the listener l, creating a new
// goroutine for each. The goroutine reads requests and then calls handler
// to reply to them.
// If l is nil, Serve accepts connections from os.Stdin.
// If handler is nil, http.DefaultServeMux is used.
func Serve(l net.Listener, handler http.Handler) error {
if l == nil {
var err error
l, err = net.FileListener(os.Stdin)
if err != nil {
return err
}
defer l.Close()
}
if handler == nil {
handler = http.DefaultServeMux
}
for {
rw, err := l.Accept()
if err != nil {
return err
}
c := newChild(rw, handler)
go c.serve()
}
}


@@ -17,11 +17,6 @@ import (
"errors"
"io"
"sync"
"net"
"strconv"
"strings"
)
// recType is a record type, as defined by
@@ -277,74 +272,3 @@ func (w *streamWriter) Close() error {
// send empty record to close the stream
return w.c.writeRecord(w.recType, w.reqId, nil)
}
func NewClient(h string, args ...interface{}) (fcgi *conn, err error) {
var con net.Conn
if len(args) != 1 {
err = errors.New("fcgi: not enough params")
return
}
switch args[0].(type) {
case int:
addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
con, err = net.Dial("tcp", addr)
case string:
laddr := net.UnixAddr{Name: args[0].(string), Net: h}
con, err = net.DialUnix(h, nil, &laddr)
default:
err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
}
fcgi = &conn{
rwc: con,
}
return
}
func (client *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) {
defer client.rwc.Close()
var reqId uint16 = 1
err = client.writeBeginRequest(reqId, uint16(roleResponder), 0)
if err != nil {
return
}
err = client.writePairs(typeParams, reqId, env)
if err != nil {
return
}
if len(requestData) > 0 {
if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
return
}
}
rec := &record{}
var err1 error
// receive until EOF or FCGI_END_REQUEST
READ_LOOP:
for {
err1 = rec.read(client.rwc)
if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
if err1 != io.EOF {
err = err1
}
break
}
switch {
case rec.h.Type == typeStdout:
retout = append(retout, rec.content()...)
case rec.h.Type == typeStderr:
reterr = append(reterr, rec.content()...)
case rec.h.Type == typeEndRequest:
fallthrough
default:
break READ_LOOP
}
}
return
}


@@ -0,0 +1,86 @@
package phpfpm
import (
"errors"
"io"
"net"
"strconv"
"strings"
)
// Create an fcgi client
func newFcgiClient(h string, args ...interface{}) (*conn, error) {
var con net.Conn
if len(args) != 1 {
return nil, errors.New("fcgi: not enough params")
}
var err error
switch args[0].(type) {
case int:
addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
con, err = net.Dial("tcp", addr)
case string:
laddr := net.UnixAddr{Name: args[0].(string), Net: h}
con, err = net.DialUnix(h, nil, &laddr)
default:
err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
}
fcgi := &conn{
rwc: con,
}
return fcgi, err
}
func (client *conn) Request(
env map[string]string,
requestData string,
) (retout []byte, reterr []byte, err error) {
defer client.rwc.Close()
var reqId uint16 = 1
err = client.writeBeginRequest(reqId, uint16(roleResponder), 0)
if err != nil {
return
}
err = client.writePairs(typeParams, reqId, env)
if err != nil {
return
}
if len(requestData) > 0 {
if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
return
}
}
rec := &record{}
var err1 error
// receive until EOF or FCGI_END_REQUEST
READ_LOOP:
for {
err1 = rec.read(client.rwc)
if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
if err1 != io.EOF {
err = err1
}
break
}
switch {
case rec.h.Type == typeStdout:
retout = append(retout, rec.content()...)
case rec.h.Type == typeStderr:
reterr = append(reterr, rec.content()...)
case rec.h.Type == typeEndRequest:
fallthrough
default:
break READ_LOOP
}
}
return
}


@@ -0,0 +1,280 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package phpfpm
import (
"bytes"
"errors"
"io"
"io/ioutil"
"net/http"
"testing"
)
var sizeTests = []struct {
size uint32
bytes []byte
}{
{0, []byte{0x00}},
{127, []byte{0x7F}},
{128, []byte{0x80, 0x00, 0x00, 0x80}},
{1000, []byte{0x80, 0x00, 0x03, 0xE8}},
{33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}},
}
func TestSize(t *testing.T) {
b := make([]byte, 4)
for i, test := range sizeTests {
n := encodeSize(b, test.size)
if !bytes.Equal(b[:n], test.bytes) {
t.Errorf("%d expected %x, encoded %x", i, test.bytes, b)
}
size, n := readSize(test.bytes)
if size != test.size {
t.Errorf("%d expected %d, read %d", i, test.size, size)
}
if len(test.bytes) != n {
t.Errorf("%d did not consume all the bytes", i)
}
}
}
var streamTests = []struct {
desc string
recType recType
reqId uint16
content []byte
raw []byte
}{
{"single record", typeStdout, 1, nil,
[]byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0},
},
// this data will have to be split into two records
{"two records", typeStdin, 300, make([]byte, 66000),
bytes.Join([][]byte{
// header for the first record
{1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0},
make([]byte, 65536),
// header for the second
{1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0},
make([]byte, 472),
// header for the empty record
{1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0},
},
nil),
},
}
type nilCloser struct {
io.ReadWriter
}
func (c *nilCloser) Close() error { return nil }
func TestStreams(t *testing.T) {
var rec record
outer:
for _, test := range streamTests {
buf := bytes.NewBuffer(test.raw)
var content []byte
for buf.Len() > 0 {
if err := rec.read(buf); err != nil {
t.Errorf("%s: error reading record: %v", test.desc, err)
continue outer
}
content = append(content, rec.content()...)
}
if rec.h.Type != test.recType {
t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
continue
}
if rec.h.Id != test.reqId {
t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
continue
}
if !bytes.Equal(content, test.content) {
t.Errorf("%s: read wrong content", test.desc)
continue
}
buf.Reset()
c := newConn(&nilCloser{buf})
w := newWriter(c, test.recType, test.reqId)
if _, err := w.Write(test.content); err != nil {
t.Errorf("%s: error writing record: %v", test.desc, err)
continue
}
if err := w.Close(); err != nil {
t.Errorf("%s: error closing stream: %v", test.desc, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.raw) {
t.Errorf("%s: wrote wrong content", test.desc)
}
}
}
type writeOnlyConn struct {
buf []byte
}
func (c *writeOnlyConn) Write(p []byte) (int, error) {
c.buf = append(c.buf, p...)
return len(p), nil
}
func (c *writeOnlyConn) Read(p []byte) (int, error) {
return 0, errors.New("conn is write-only")
}
func (c *writeOnlyConn) Close() error {
return nil
}
func TestGetValues(t *testing.T) {
var rec record
rec.h.Type = typeGetValues
wc := new(writeOnlyConn)
c := newChild(wc, nil)
err := c.handleRecord(&rec)
if err != nil {
t.Fatalf("handleRecord: %v", err)
}
const want = "\x01\n\x00\x00\x00\x12\x06\x00" +
"\x0f\x01FCGI_MPXS_CONNS1" +
"\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00"
if got := string(wc.buf); got != want {
t.Errorf(" got: %q\nwant: %q\n", got, want)
}
}
func nameValuePair11(nameData, valueData string) []byte {
return bytes.Join(
[][]byte{
{byte(len(nameData)), byte(len(valueData))},
[]byte(nameData),
[]byte(valueData),
},
nil,
)
}
func makeRecord(
recordType recType,
requestId uint16,
contentData []byte,
) []byte {
requestIdB1 := byte(requestId >> 8)
requestIdB0 := byte(requestId)
contentLength := len(contentData)
contentLengthB1 := byte(contentLength >> 8)
contentLengthB0 := byte(contentLength)
return bytes.Join([][]byte{
{1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1,
contentLengthB0, 0, 0},
contentData,
},
nil)
}
// a series of FastCGI records that start a request and begin sending the
// request body
var streamBeginTypeStdin = bytes.Join([][]byte{
// set up request 1
makeRecord(typeBeginRequest, 1,
[]byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}),
// add required parameters to request 1
makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")),
makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")),
makeRecord(typeParams, 1, nil),
// begin sending body of request 1
makeRecord(typeStdin, 1, []byte("0123456789abcdef")),
},
nil)
var cleanUpTests = []struct {
input []byte
err error
}{
// confirm that child.handleRecord closes req.pw after aborting req
{
bytes.Join([][]byte{
streamBeginTypeStdin,
makeRecord(typeAbortRequest, 1, nil),
},
nil),
ErrRequestAborted,
},
// confirm that child.serve closes all pipes after error reading record
{
bytes.Join([][]byte{
streamBeginTypeStdin,
nil,
},
nil),
ErrConnClosed,
},
}
type nopWriteCloser struct {
io.ReadWriter
}
func (nopWriteCloser) Close() error {
return nil
}
// Test that child.serve closes the bodies of aborted requests and closes the
// bodies of all requests before returning. Causes deadlock if either condition
// isn't met. See issue 6934.
func TestChildServeCleansUp(t *testing.T) {
for _, tt := range cleanUpTests {
input := make([]byte, len(tt.input))
copy(input, tt.input)
rc := nopWriteCloser{bytes.NewBuffer(input)}
done := make(chan bool)
c := newChild(rc, http.HandlerFunc(func(
w http.ResponseWriter,
r *http.Request,
) {
// block on reading body of request
_, err := io.Copy(ioutil.Discard, r.Body)
if err != tt.err {
t.Errorf("Expected %#v, got %#v", tt.err, err)
}
// not reached if body of request isn't closed
done <- true
}))
go c.serve()
// wait for body of request to be closed or all goroutines to block
<-done
}
}
type rwNopCloser struct {
io.Reader
io.Writer
}
func (rwNopCloser) Close() error {
return nil
}
// Verifies it doesn't crash. Issue 11824.
func TestMalformedParams(t *testing.T) {
input := []byte{
// beginRequest, requestId=1, contentLength=8, role=1, keepConn=1
1, 1, 0, 1, 0, 8, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
// params, requestId=1, contentLength=10, k1Len=50, v1Len=50 (malformed, wrong length)
1, 4, 0, 1, 0, 10, 0, 0, 50, 50, 3, 4, 5, 6, 7, 8, 9, 10,
// end of params
1, 4, 0, 1, 0, 0, 0, 0,
}
rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard}
c := newChild(rw, http.DefaultServeMux)
c.serve()
}


@@ -112,6 +112,7 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
statusPath string
)
var err error
if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
u, err := url.Parse(addr)
if err != nil {
@@ -120,7 +121,7 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
socketAddr := strings.Split(u.Host, ":")
fcgiIp := socketAddr[0]
fcgiPort, _ := strconv.Atoi(socketAddr[1])
fcgi, _ = NewClient(fcgiIp, fcgiPort)
fcgi, err = newFcgiClient(fcgiIp, fcgiPort)
} else {
socketAddr := strings.Split(addr, ":")
if len(socketAddr) >= 2 {
@@ -134,8 +135,13 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
if _, err := os.Stat(socketPath); os.IsNotExist(err) {
return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err)
}
fcgi, _ = NewClient("unix", socketPath)
fcgi, err = newFcgiClient("unix", socketPath)
}
if err != nil {
return err
}
return g.gatherFcgi(fcgi, statusPath, acc)
}


@@ -4,20 +4,22 @@ import (
"bytes"
"database/sql"
"fmt"
"regexp"
"sort"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/lib/pq"
"github.com/lib/pq"
)
type Postgresql struct {
Address string
Databases []string
OrderedColumns []string
AllColumns []string
Address string
Databases []string
OrderedColumns []string
AllColumns []string
sanitizedAddress string
}
var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}
@@ -133,6 +135,23 @@ type scanner interface {
Scan(dest ...interface{}) error
}
var passwordKVMatcher = regexp.MustCompile("password=\\S+ ?")
func (p *Postgresql) SanitizedAddress() (_ string, err error) {
var canonicalizedAddress string
if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") {
canonicalizedAddress, err = pq.ParseURL(p.Address)
if err != nil {
return p.sanitizedAddress, err
}
} else {
canonicalizedAddress = p.Address
}
p.sanitizedAddress = passwordKVMatcher.ReplaceAllString(canonicalizedAddress, "")
return p.sanitizedAddress, err
}
func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error {
var columnVars []interface{}
var dbname bytes.Buffer
@@ -165,7 +184,13 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error {
dbname.WriteString("postgres")
}
tags := map[string]string{"server": p.Address, "db": dbname.String()}
var tagAddress string
tagAddress, err = p.SanitizedAddress()
if err != nil {
return err
}
tags := map[string]string{"server": tagAddress, "db": dbname.String()}
fields := make(map[string]interface{})
for col, val := range columnMap {
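Rough usage sketch of the sanitization above: URL-style addresses are first expanded to key/value form by `pq.ParseURL`, so a single `password=...` matcher covers both address styles. The DSN below is a made-up example, and the exact key order of the output depends on the lib/pq version:
```go
package main

import (
	"fmt"
	"regexp"

	"github.com/lib/pq"
)

var passwordKVMatcher = regexp.MustCompile(`password=\S+ ?`)

func main() {
	// URL form is canonicalized into "key=value" pairs first...
	kv, err := pq.ParseURL("postgres://telegraf:secret@localhost:5432/metrics?sslmode=disable")
	if err != nil {
		panic(err)
	}
	// ...so one regexp strips the password no matter which style was configured.
	fmt.Println(passwordKVMatcher.ReplaceAllString(kv, ""))
	// e.g. "dbname=metrics host=localhost port=5432 sslmode=disable user=telegraf"
}
```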


@@ -7,7 +7,8 @@ individual process using their /proc data.
The plugin will tag processes by their PID and their process name.
Processes can be specified either by pid file or by executable name. Procstat
Processes can be specified either by pid file, by executable name, by command
line pattern matching, or by username (in this order of priority). Procstat
plugin will use `pgrep` when executable name is provided to obtain the pid.
The Procstat plugin will transmit IO, memory, CPU, and file descriptor related
measurements for every process specified. A prefix can be set to isolate
@@ -34,6 +35,10 @@ The above configuration would result in output like:
# Measurements
Note: prefix can be set by the user, per process.
Threads related measurement names:
- procstat_[prefix_]num_threads value=5
File descriptor related measurement names:
- procstat_[prefix_]num_fds value=4


@@ -19,6 +19,7 @@ type Procstat struct {
Exe string
Pattern string
Prefix string
User string
pidmap map[int32]*process.Process
}
@@ -37,6 +38,8 @@ var sampleConfig = `
# exe = "nginx"
## pattern as argument for pgrep (ie, pgrep -f <pattern>)
# pattern = "nginx"
## user as argument for pgrep (ie, pgrep -u <user>)
# user = "nginx"
## Field name prefix
prefix = ""
@@ -53,8 +56,8 @@ func (_ *Procstat) Description() string {
func (p *Procstat) Gather(acc telegraf.Accumulator) error {
err := p.createProcesses()
if err != nil {
log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
p.Exe, p.PidFile, p.Pattern, err.Error())
log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
} else {
for _, proc := range p.pidmap {
p := NewSpecProcessor(p.Prefix, acc, proc)
@@ -103,6 +106,8 @@ func (p *Procstat) getAllPids() ([]int32, error) {
pids, err = pidsFromExe(p.Exe)
} else if p.Pattern != "" {
pids, err = pidsFromPattern(p.Pattern)
} else if p.User != "" {
pids, err = pidsFromUser(p.User)
} else {
err = fmt.Errorf("Either exe, pid_file or pattern has to be specified")
}
@@ -175,6 +180,30 @@ func pidsFromPattern(pattern string) ([]int32, error) {
return out, outerr
}
func pidsFromUser(user string) ([]int32, error) {
var out []int32
var outerr error
bin, err := exec.LookPath("pgrep")
if err != nil {
return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
}
pgrep, err := exec.Command(bin, "-u", user).Output()
if err != nil {
return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
} else {
pids := strings.Fields(string(pgrep))
for _, pid := range pids {
ipid, err := strconv.Atoi(pid)
if err == nil {
out = append(out, int32(ipid))
} else {
outerr = err
}
}
}
return out, outerr
}
func init() {
inputs.Add("procstat", func() telegraf.Input {
return NewProcstat()

View File

@@ -52,6 +52,7 @@ func NewSpecProcessor(
}
func (p *SpecProcessor) pushMetrics() {
p.pushNThreadsStats()
p.pushFDStats()
p.pushCtxStats()
p.pushIOStats()
@@ -60,6 +61,15 @@ func (p *SpecProcessor) pushMetrics() {
p.flush()
}
func (p *SpecProcessor) pushNThreadsStats() error {
numThreads, err := p.proc.NumThreads()
if err != nil {
return fmt.Errorf("NumThreads error: %s\n", err)
}
p.add("num_threads", numThreads)
return nil
}
func (p *SpecProcessor) pushFDStats() error {
fds, err := p.proc.NumFDs()
if err != nil {

View File

@@ -10,6 +10,7 @@ import (
"io"
"net/http"
"sync"
"time"
)
type Prometheus struct {
@@ -51,8 +52,17 @@ func (g *Prometheus) Gather(acc telegraf.Accumulator) error {
return outerr
}
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
resp, err := http.Get(url)
resp, err := client.Get(url)
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", url, err)
}


@@ -122,7 +122,11 @@ func (r *RabbitMQ) Description() string {
func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error {
if r.Client == nil {
r.Client = &http.Client{}
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
r.Client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
}
var errChan = make(chan error, len(gatherFunctions))


@@ -177,8 +177,11 @@ func (r *Raindrops) getTags(addr *url.URL) map[string]string {
func init() {
inputs.Add("raindrops", func() telegraf.Input {
return &Raindrops{http_client: &http.Client{Transport: &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}}}
return &Raindrops{http_client: &http.Client{
Transport: &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
},
Timeout: time.Duration(4 * time.Second),
}}
})
}


@@ -0,0 +1,86 @@
# Telegraf Plugin: Redis
### Configuration:
```
# Read Redis's basic status information
[[inputs.redis]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## tcp://localhost:6379
## tcp://:password@192.168.99.100
##
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 6379 is used
servers = ["tcp://localhost:6379"]
```
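The server string is ordinary URL syntax, so the optional password can be pulled out with `net/url`; a minimal sketch, assuming the `tcp://` form shown above (the plugin uses the extracted password to authenticate before gathering `INFO`):
```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("tcp://:mypassword@192.168.99.100:6379")
	if err != nil {
		panic(err)
	}
	fmt.Println("host:", u.Host) // 192.168.99.100:6379
	if u.User != nil {
		if pwd, set := u.User.Password(); set {
			fmt.Println("password:", pwd) // mypassword
		}
	}
}
```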
### Measurements & Fields:
- Measurement
- uptime_in_seconds
- connected_clients
- used_memory
- used_memory_rss
- used_memory_peak
- used_memory_lua
- rdb_changes_since_last_save
- total_connections_received
- total_commands_processed
- instantaneous_ops_per_sec
- instantaneous_input_kbps
- instantaneous_output_kbps
- sync_full
- sync_partial_ok
- sync_partial_err
- expired_keys
- evicted_keys
- keyspace_hits
- keyspace_misses
- pubsub_channels
- pubsub_patterns
- latest_fork_usec
- connected_slaves
- master_repl_offset
- repl_backlog_active
- repl_backlog_size
- repl_backlog_histlen
- mem_fragmentation_ratio
- used_cpu_sys
- used_cpu_user
- used_cpu_sys_children
- used_cpu_user_children
### Tags:
- All measurements have the following tags:
- port
- server
### Example Output:
Using this configuration:
```
[[inputs.redis]]
## specify servers via a url matching:
## [protocol://][:password]@address[:port]
## e.g.
## tcp://localhost:6379
## tcp://:password@192.168.99.100
##
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 6379 is used
servers = ["tcp://localhost:6379"]
```
When run with:
```
./telegraf -config telegraf.conf -input-filter redis -test
```
It produces:
```
* Plugin: redis, Collection 1
> redis,port=6379,server=localhost clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i 1457052084987848383
```


@@ -9,6 +9,7 @@ import (
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -30,6 +31,8 @@ var sampleConfig = `
servers = ["tcp://localhost:6379"]
`
var defaultTimeout = 5 * time.Second
func (r *Redis) SampleConfig() string {
return sampleConfig
}
@@ -120,12 +123,15 @@ func (r *Redis) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
addr.Host = addr.Host + ":" + defaultPort
}
c, err := net.Dial("tcp", addr.Host)
c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
if err != nil {
return fmt.Errorf("Unable to connect to redis server '%s': %s", addr.Host, err)
}
defer c.Close()
// Extend connection
c.SetDeadline(time.Now().Add(defaultTimeout))
if addr.User != nil {
pwd, set := addr.User.Password()
if set && pwd != "" {


@@ -0,0 +1,76 @@
# Riak Plugin
The Riak plugin gathers metrics from one or more riak instances.
### Configuration:
```toml
# Description
[[inputs.riak]]
# Specify a list of one or more riak http servers
servers = ["http://localhost:8098"]
```
### Measurements & Fields:
Riak provides one measurement named "riak", with the following fields:
- cpu_avg1
- cpu_avg15
- cpu_avg5
- memory_code
- memory_ets
- memory_processes
- memory_system
- memory_total
- node_get_fsm_objsize_100
- node_get_fsm_objsize_95
- node_get_fsm_objsize_99
- node_get_fsm_objsize_mean
- node_get_fsm_objsize_median
- node_get_fsm_siblings_100
- node_get_fsm_siblings_95
- node_get_fsm_siblings_99
- node_get_fsm_siblings_mean
- node_get_fsm_siblings_median
- node_get_fsm_time_100
- node_get_fsm_time_95
- node_get_fsm_time_99
- node_get_fsm_time_mean
- node_get_fsm_time_median
- node_gets
- node_gets_total
- node_put_fsm_time_100
- node_put_fsm_time_95
- node_put_fsm_time_99
- node_put_fsm_time_mean
- node_put_fsm_time_median
- node_puts
- node_puts_total
- pbc_active
- pbc_connects
- pbc_connects_total
- vnode_gets
- vnode_gets_total
- vnode_index_reads
- vnode_index_reads_total
- vnode_index_writes
- vnode_index_writes_total
- vnode_puts
- vnode_puts_total
Measurements of time (such as node_get_fsm_time_mean) are measured in nanoseconds.
### Tags:
All measurements have the following tags:
- server (the host:port of the given server address, ex. `127.0.0.1:8098`)
- nodename (the internal node name received, ex. `riak@127.0.0.1`)
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter riak -test
> riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i 1455913392622482332
```

202
plugins/inputs/riak/riak.go Normal file
View File

@@ -0,0 +1,202 @@
package riak
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Type Riak gathers statistics from one or more Riak instances
type Riak struct {
// Servers is a slice of servers as http addresses (ex. http://127.0.0.1:8098)
Servers []string
client *http.Client
}
// NewRiak returns a new instance of Riak with a default http client
func NewRiak() *Riak {
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
return &Riak{client: client}
}
// Type riakStats represents the data that is received from Riak
type riakStats struct {
CpuAvg1 int64 `json:"cpu_avg1"`
CpuAvg15 int64 `json:"cpu_avg15"`
CpuAvg5 int64 `json:"cpu_avg5"`
MemoryCode int64 `json:"memory_code"`
MemoryEts int64 `json:"memory_ets"`
MemoryProcesses int64 `json:"memory_processes"`
MemorySystem int64 `json:"memory_system"`
MemoryTotal int64 `json:"memory_total"`
NodeGetFsmObjsize100 int64 `json:"node_get_fsm_objsize_100"`
NodeGetFsmObjsize95 int64 `json:"node_get_fsm_objsize_95"`
NodeGetFsmObjsize99 int64 `json:"node_get_fsm_objsize_99"`
NodeGetFsmObjsizeMean int64 `json:"node_get_fsm_objsize_mean"`
NodeGetFsmObjsizeMedian int64 `json:"node_get_fsm_objsize_median"`
NodeGetFsmSiblings100 int64 `json:"node_get_fsm_siblings_100"`
NodeGetFsmSiblings95 int64 `json:"node_get_fsm_siblings_95"`
NodeGetFsmSiblings99 int64 `json:"node_get_fsm_siblings_99"`
NodeGetFsmSiblingsMean int64 `json:"node_get_fsm_siblings_mean"`
NodeGetFsmSiblingsMedian int64 `json:"node_get_fsm_siblings_median"`
NodeGetFsmTime100 int64 `json:"node_get_fsm_time_100"`
NodeGetFsmTime95 int64 `json:"node_get_fsm_time_95"`
NodeGetFsmTime99 int64 `json:"node_get_fsm_time_99"`
NodeGetFsmTimeMean int64 `json:"node_get_fsm_time_mean"`
NodeGetFsmTimeMedian int64 `json:"node_get_fsm_time_median"`
NodeGets int64 `json:"node_gets"`
NodeGetsTotal int64 `json:"node_gets_total"`
Nodename string `json:"nodename"`
NodePutFsmTime100 int64 `json:"node_put_fsm_time_100"`
NodePutFsmTime95 int64 `json:"node_put_fsm_time_95"`
NodePutFsmTime99 int64 `json:"node_put_fsm_time_99"`
NodePutFsmTimeMean int64 `json:"node_put_fsm_time_mean"`
NodePutFsmTimeMedian int64 `json:"node_put_fsm_time_median"`
NodePuts int64 `json:"node_puts"`
NodePutsTotal int64 `json:"node_puts_total"`
PbcActive int64 `json:"pbc_active"`
PbcConnects int64 `json:"pbc_connects"`
PbcConnectsTotal int64 `json:"pbc_connects_total"`
VnodeGets int64 `json:"vnode_gets"`
VnodeGetsTotal int64 `json:"vnode_gets_total"`
VnodeIndexReads int64 `json:"vnode_index_reads"`
VnodeIndexReadsTotal int64 `json:"vnode_index_reads_total"`
VnodeIndexWrites int64 `json:"vnode_index_writes"`
VnodeIndexWritesTotal int64 `json:"vnode_index_writes_total"`
VnodePuts int64 `json:"vnode_puts"`
VnodePutsTotal int64 `json:"vnode_puts_total"`
}
// A sample configuration to only gather stats from localhost, default port.
const sampleConfig = `
# Specify a list of one or more riak http servers
servers = ["http://localhost:8098"]
`
// Returns a sample configuration for the plugin
func (r *Riak) SampleConfig() string {
return sampleConfig
}
// Returns a description of the plugin
func (r *Riak) Description() string {
return "Read metrics one or many Riak servers"
}
// Reads stats from all configured servers.
func (r *Riak) Gather(acc telegraf.Accumulator) error {
// Default to a single server at localhost (default port) if none specified
if len(r.Servers) == 0 {
r.Servers = []string{"http://127.0.0.1:8098"}
}
// Range over all servers, gathering stats. Returns early in case of any error.
for _, s := range r.Servers {
if err := r.gatherServer(s, acc); err != nil {
return err
}
}
return nil
}
// Gathers stats from a single server, adding them to the accumulator
func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
// Parse the given URL to extract the server tag
u, err := url.Parse(s)
if err != nil {
return fmt.Errorf("riak unable to parse given server url %s: %s", s, err)
}
// Perform the GET request to the riak /stats endpoint
resp, err := r.client.Get(s + "/stats")
if err != nil {
return err
}
defer resp.Body.Close()
// Successful responses will always return status code 200
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("riak responded with unexepcted status code %d", resp.StatusCode)
}
// Decode the response JSON into a new stats struct
stats := &riakStats{}
if err := json.NewDecoder(resp.Body).Decode(stats); err != nil {
return fmt.Errorf("unable to decode riak response: %s", err)
}
// Build a map of tags
tags := map[string]string{
"nodename": stats.Nodename,
"server": u.Host,
}
// Build a map of field values
fields := map[string]interface{}{
"cpu_avg1": stats.CpuAvg1,
"cpu_avg15": stats.CpuAvg15,
"cpu_avg5": stats.CpuAvg5,
"memory_code": stats.MemoryCode,
"memory_ets": stats.MemoryEts,
"memory_processes": stats.MemoryProcesses,
"memory_system": stats.MemorySystem,
"memory_total": stats.MemoryTotal,
"node_get_fsm_objsize_100": stats.NodeGetFsmObjsize100,
"node_get_fsm_objsize_95": stats.NodeGetFsmObjsize95,
"node_get_fsm_objsize_99": stats.NodeGetFsmObjsize99,
"node_get_fsm_objsize_mean": stats.NodeGetFsmObjsizeMean,
"node_get_fsm_objsize_median": stats.NodeGetFsmObjsizeMedian,
"node_get_fsm_siblings_100": stats.NodeGetFsmSiblings100,
"node_get_fsm_siblings_95": stats.NodeGetFsmSiblings95,
"node_get_fsm_siblings_99": stats.NodeGetFsmSiblings99,
"node_get_fsm_siblings_mean": stats.NodeGetFsmSiblingsMean,
"node_get_fsm_siblings_median": stats.NodeGetFsmSiblingsMedian,
"node_get_fsm_time_100": stats.NodeGetFsmTime100,
"node_get_fsm_time_95": stats.NodeGetFsmTime95,
"node_get_fsm_time_99": stats.NodeGetFsmTime99,
"node_get_fsm_time_mean": stats.NodeGetFsmTimeMean,
"node_get_fsm_time_median": stats.NodeGetFsmTimeMedian,
"node_gets": stats.NodeGets,
"node_gets_total": stats.NodeGetsTotal,
"node_put_fsm_time_100": stats.NodePutFsmTime100,
"node_put_fsm_time_95": stats.NodePutFsmTime95,
"node_put_fsm_time_99": stats.NodePutFsmTime99,
"node_put_fsm_time_mean": stats.NodePutFsmTimeMean,
"node_put_fsm_time_median": stats.NodePutFsmTimeMedian,
"node_puts": stats.NodePuts,
"node_puts_total": stats.NodePutsTotal,
"pbc_active": stats.PbcActive,
"pbc_connects": stats.PbcConnects,
"pbc_connects_total": stats.PbcConnectsTotal,
"vnode_gets": stats.VnodeGets,
"vnode_gets_total": stats.VnodeGetsTotal,
"vnode_index_reads": stats.VnodeIndexReads,
"vnode_index_reads_total": stats.VnodeIndexReadsTotal,
"vnode_index_writes": stats.VnodeIndexWrites,
"vnode_index_writes_total": stats.VnodeIndexWritesTotal,
"vnode_puts": stats.VnodePuts,
"vnode_puts_total": stats.VnodePutsTotal,
}
// Accumulate the tags and values
acc.AddFields("riak", fields, tags)
return nil
}
func init() {
inputs.Add("riak", func() telegraf.Input {
return NewRiak()
})
}

View File

@@ -0,0 +1,275 @@
package riak
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestRiak(t *testing.T) {
// Create a test server with the const response JSON
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, response)
}))
defer ts.Close()
// Parse the URL of the test server, used to verify the expected host
u, err := url.Parse(ts.URL)
require.NoError(t, err)
// Create a new Riak instance with our given test server
riak := NewRiak()
riak.Servers = []string{ts.URL}
// Create a test accumulator
acc := &testutil.Accumulator{}
// Gather data from the test server
err = riak.Gather(acc)
require.NoError(t, err)
// Expect the correct values for all known keys
expectFields := map[string]interface{}{
"cpu_avg1": int64(504),
"cpu_avg15": int64(294),
"cpu_avg5": int64(325),
"memory_code": int64(12329143),
"memory_ets": int64(17330176),
"memory_processes": int64(58454730),
"memory_system": int64(120401678),
"memory_total": int64(178856408),
"node_get_fsm_objsize_100": int64(73596),
"node_get_fsm_objsize_95": int64(36663),
"node_get_fsm_objsize_99": int64(51552),
"node_get_fsm_objsize_mean": int64(13241),
"node_get_fsm_objsize_median": int64(10365),
"node_get_fsm_siblings_100": int64(1),
"node_get_fsm_siblings_95": int64(1),
"node_get_fsm_siblings_99": int64(1),
"node_get_fsm_siblings_mean": int64(1),
"node_get_fsm_siblings_median": int64(1),
"node_get_fsm_time_100": int64(230445),
"node_get_fsm_time_95": int64(24259),
"node_get_fsm_time_99": int64(96653),
"node_get_fsm_time_mean": int64(6851),
"node_get_fsm_time_median": int64(2368),
"node_gets": int64(1116),
"node_gets_total": int64(1026058217),
"node_put_fsm_time_100": int64(267390),
"node_put_fsm_time_95": int64(38286),
"node_put_fsm_time_99": int64(84422),
"node_put_fsm_time_mean": int64(10832),
"node_put_fsm_time_median": int64(4085),
"node_puts": int64(1155),
"node_puts_total": int64(444895769),
"pbc_active": int64(360),
"pbc_connects": int64(120),
"pbc_connects_total": int64(66793268),
"vnode_gets": int64(14629),
"vnode_gets_total": int64(3748432761),
"vnode_index_reads": int64(20),
"vnode_index_reads_total": int64(3438296),
"vnode_index_writes": int64(4293),
"vnode_index_writes_total": int64(1515986619),
"vnode_puts": int64(4308),
"vnode_puts_total": int64(1519062272),
}
// Expect the correct values for all tags
expectTags := map[string]string{
"nodename": "riak@127.0.0.1",
"server": u.Host,
}
acc.AssertContainsTaggedFields(t, "riak", expectFields, expectTags)
}
var response = `
{
"riak_kv_stat_ts": 1455908558,
"vnode_gets": 14629,
"vnode_gets_total": 3748432761,
"vnode_puts": 4308,
"vnode_puts_total": 1519062272,
"vnode_index_refreshes": 0,
"vnode_index_refreshes_total": 0,
"vnode_index_reads": 20,
"vnode_index_reads_total": 3438296,
"vnode_index_writes": 4293,
"vnode_index_writes_total": 1515986619,
"vnode_index_writes_postings": 1,
"vnode_index_writes_postings_total": 265613,
"vnode_index_deletes": 0,
"vnode_index_deletes_total": 0,
"vnode_index_deletes_postings": 0,
"vnode_index_deletes_postings_total": 1,
"node_gets": 1116,
"node_gets_total": 1026058217,
"node_get_fsm_siblings_mean": 1,
"node_get_fsm_siblings_median": 1,
"node_get_fsm_siblings_95": 1,
"node_get_fsm_siblings_99": 1,
"node_get_fsm_siblings_100": 1,
"node_get_fsm_objsize_mean": 13241,
"node_get_fsm_objsize_median": 10365,
"node_get_fsm_objsize_95": 36663,
"node_get_fsm_objsize_99": 51552,
"node_get_fsm_objsize_100": 73596,
"node_get_fsm_time_mean": 6851,
"node_get_fsm_time_median": 2368,
"node_get_fsm_time_95": 24259,
"node_get_fsm_time_99": 96653,
"node_get_fsm_time_100": 230445,
"node_puts": 1155,
"node_puts_total": 444895769,
"node_put_fsm_time_mean": 10832,
"node_put_fsm_time_median": 4085,
"node_put_fsm_time_95": 38286,
"node_put_fsm_time_99": 84422,
"node_put_fsm_time_100": 267390,
"read_repairs": 2,
"read_repairs_total": 7918375,
"coord_redirs_total": 118238575,
"executing_mappers": 0,
"precommit_fail": 0,
"postcommit_fail": 0,
"index_fsm_create": 0,
"index_fsm_create_error": 0,
"index_fsm_active": 0,
"list_fsm_create": 0,
"list_fsm_create_error": 0,
"list_fsm_active": 0,
"pbc_active": 360,
"pbc_connects": 120,
"pbc_connects_total": 66793268,
"late_put_fsm_coordinator_ack": 152,
"node_get_fsm_active": 1,
"node_get_fsm_active_60s": 1029,
"node_get_fsm_in_rate": 21,
"node_get_fsm_out_rate": 21,
"node_get_fsm_rejected": 0,
"node_get_fsm_rejected_60s": 0,
"node_get_fsm_rejected_total": 0,
"node_put_fsm_active": 69,
"node_put_fsm_active_60s": 1053,
"node_put_fsm_in_rate": 30,
"node_put_fsm_out_rate": 31,
"node_put_fsm_rejected": 0,
"node_put_fsm_rejected_60s": 0,
"node_put_fsm_rejected_total": 0,
"read_repairs_primary_outofdate_one": 4,
"read_repairs_primary_outofdate_count": 14761552,
"read_repairs_primary_notfound_one": 0,
"read_repairs_primary_notfound_count": 65879,
"read_repairs_fallback_outofdate_one": 0,
"read_repairs_fallback_outofdate_count": 23761,
"read_repairs_fallback_notfound_one": 0,
"read_repairs_fallback_notfound_count": 455697,
"leveldb_read_block_error": 0,
"riak_pipe_stat_ts": 1455908558,
"pipeline_active": 0,
"pipeline_create_count": 0,
"pipeline_create_one": 0,
"pipeline_create_error_count": 0,
"pipeline_create_error_one": 0,
"cpu_nprocs": 362,
"cpu_avg1": 504,
"cpu_avg5": 325,
"cpu_avg15": 294,
"mem_total": 33695432704,
"mem_allocated": 33454874624,
"nodename": "riak@127.0.0.1",
"connected_nodes": [],
"sys_driver_version": "2.0",
"sys_global_heaps_size": 0,
"sys_heap_type": "private",
"sys_logical_processors": 8,
"sys_otp_release": "R15B01",
"sys_process_count": 2201,
"sys_smp_support": true,
"sys_system_version": "Erlang R15B01 (erts-5.9.1) [source] [64-bit] [smp:8:8] [async-threads:64] [kernel-poll:true]",
"sys_system_architecture": "x86_64-unknown-linux-gnu",
"sys_threads_enabled": true,
"sys_thread_pool_size": 64,
"sys_wordsize": 8,
"ring_members": [
"riak@127.0.0.1"
],
"ring_num_partitions": 256,
"ring_ownership": "[{'riak@127.0.0.1',256}]",
"ring_creation_size": 256,
"storage_backend": "riak_kv_eleveldb_backend",
"erlydtl_version": "0.7.0",
"riak_control_version": "1.4.12-0-g964c5db",
"cluster_info_version": "1.2.4",
"riak_search_version": "1.4.12-0-g7fe0e00",
"merge_index_version": "1.3.2-0-gcb38ee7",
"riak_kv_version": "1.4.12-0-gc6bbd66",
"sidejob_version": "0.2.0",
"riak_api_version": "1.4.12-0-gd9e1cc8",
"riak_pipe_version": "1.4.12-0-g986a226",
"riak_core_version": "1.4.10",
"bitcask_version": "1.6.8-0-gea14cb0",
"basho_stats_version": "1.0.3",
"webmachine_version": "1.10.4-0-gfcff795",
"mochiweb_version": "1.5.1p6",
"inets_version": "5.9",
"erlang_js_version": "1.2.2",
"runtime_tools_version": "1.8.8",
"os_mon_version": "2.2.9",
"riak_sysmon_version": "1.1.3",
"ssl_version": "5.0.1",
"public_key_version": "0.15",
"crypto_version": "2.1",
"sasl_version": "2.2.1",
"lager_version": "2.0.1",
"goldrush_version": "0.1.5",
"compiler_version": "4.8.1",
"syntax_tools_version": "1.6.8",
"stdlib_version": "1.18.1",
"kernel_version": "2.15.1",
"memory_total": 178856408,
"memory_processes": 58454730,
"memory_processes_used": 58371238,
"memory_system": 120401678,
"memory_atom": 586345,
"memory_atom_used": 563485,
"memory_binary": 48677920,
"memory_code": 12329143,
"memory_ets": 17330176,
"riak_core_stat_ts": 1455908559,
"ignored_gossip_total": 0,
"rings_reconciled_total": 5459,
"rings_reconciled": 0,
"gossip_received": 6,
"rejected_handoffs": 94,
"handoff_timeouts": 0,
"dropped_vnode_requests_total": 0,
"converge_delay_min": 0,
"converge_delay_max": 0,
"converge_delay_mean": 0,
"converge_delay_last": 0,
"rebalance_delay_min": 0,
"rebalance_delay_max": 0,
"rebalance_delay_mean": 0,
"rebalance_delay_last": 0,
"riak_kv_vnodes_running": 16,
"riak_kv_vnodeq_min": 0,
"riak_kv_vnodeq_median": 0,
"riak_kv_vnodeq_mean": 0,
"riak_kv_vnodeq_max": 0,
"riak_kv_vnodeq_total": 0,
"riak_pipe_vnodes_running": 16,
"riak_pipe_vnodeq_min": 0,
"riak_pipe_vnodeq_median": 0,
"riak_pipe_vnodeq_mean": 0,
"riak_pipe_vnodeq_max": 0,
"riak_pipe_vnodeq_total": 0
}
`

View File

@@ -49,7 +49,7 @@ func (s *Sensors) Gather(acc telegraf.Accumulator) error {
var found bool
for _, sensor := range s.Sensors {
parts := strings.SplitN(":", sensor, 2)
parts := strings.SplitN(sensor, ":", 2)
if parts[0] == chipName {
if parts[1] == "*" || parts[1] == featureLabel {

View File

@@ -0,0 +1,549 @@
# SNMP Input Plugin
The SNMP input plugin gathers metrics from SNMP agents.
### Configuration:
#### Very simple example
In this example, the plugin will gather the value of this OID:
- `.1.3.6.1.2.1.2.2.1.4.1`
```toml
# Very Simple Example
[[inputs.snmp]]
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Simple list of OIDs to get, in addition to "collect"
get_oids = [".1.3.6.1.2.1.2.2.1.4.1"]
```
#### Simple example
In this example, Telegraf gathers the values of two OIDs:
- named **ifnumber**
- named **interface_speed**
From the **inputs.snmp.get** section, the plugin gets the OID numbers:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*
As you can see, *ifSpeed* is not a valid OID. To obtain the valid OID,
the plugin uses `snmptranslate_file` to look it up:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
The plugin will also append `instance` to the corresponding OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`
So in this example, the plugin will gather the values of these OIDs:
- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`
```toml
# Simple example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["ifnumber", "interface_speed"]
[[inputs.snmp.get]]
name = "ifnumber"
oid = ".1.3.6.1.2.1.2.1.0"
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "1"
```
#### Simple bulk example
In this example, Telegraf gathers the values of three OIDs:
- named **ifnumber**
- named **interface_speed**
- named **if_out_octets**
From the **inputs.snmp.get** section, the plugin gets the OID numbers:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*
From the **inputs.snmp.bulk** section, the plugin gets the OID number:
- **if_out_octets** => *ifOutOctets*
As you can see, *ifSpeed* and *ifOutOctets* are not valid OIDs.
To obtain the valid OIDs, the plugin uses `snmptranslate_file`
to look them up:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
- **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16`
Also, the plugin will append `instance` to the corresponding OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`
Since **if_out_octets** is a bulk request, the plugin will gather all
OIDs in the table:
- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`
So in this example, the plugin will gather the values of these OIDs:
- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`
- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`
```toml
# Simple bulk example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["interface_speed", "if_number", "if_out_octets"]
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "1"
[[inputs.snmp.get]]
name = "if_number"
oid = "ifNumber"
[[inputs.snmp.bulk]]
name = "if_out_octets"
oid = "ifOutOctets"
```
#### Table example
In this example, we omit the `collect` attribute from the host section,
but you can still use it in combination with the following parts.
Note: this example behaves like a bulk request, but uses a
different configuration.
Telegraf gathers the values of the OIDs in the table:
- named **iftable1**
From the **inputs.snmp.table** section, the plugin gets the OID number:
- **iftable1** => `.1.3.6.1.2.1.31.1.1.1`
Because **iftable1** is a table, the plugin will gather all
OIDs in the table and in its subtables:
- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`
```toml
# Table example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable1"
# table with neither mapping nor subtables
# This is like a bulk request
[[inputs.snmp.table]]
name = "iftable1"
oid = ".1.3.6.1.2.1.31.1.1.1"
```
#### Table with subtable example
In this example, we omit the `collect` attribute from the host section,
but you can still use it in combination with the following parts.
Note: this example behaves like a bulk request, but uses a
different configuration.
Telegraf gathers the values of the OIDs in the table:
- named **iftable2**
Using the **inputs.snmp.table** section *and* the **sub_tables** attribute,
the plugin will get OIDs from the subtables:
- **iftable2** => `.1.3.6.1.2.1.2.2.1.13`
Because **iftable2** is a table, the plugin will gather all
OIDs in its subtables:
- `.1.3.6.1.2.1.2.2.1.13.1`
- `.1.3.6.1.2.1.2.2.1.13.2`
- `.1.3.6.1.2.1.2.2.1.13.3`
- `.1.3.6.1.2.1.2.2.1.13.4`
- `.1.3.6.1.2.1.2.2.1.13....`
```toml
# Table with subtable example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable2"
# table without mapping but with subtables
[[inputs.snmp.table]]
name = "iftable2"
sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
# note
# oid attribute is useless
```
#### Table with mapping example
In this example, we omit the `collect` attribute from the host section,
but you can still use it in combination with the following parts.
Telegraf gathers the values of the OIDs in the table:
- named **iftable3**
From the **inputs.snmp.table** section, the plugin gets the OID number:
- **iftable3** => `.1.3.6.1.2.1.31.1.1.1`
Because **iftable3** is a table, the plugin will gather all
OIDs in the table and in its subtables:
- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`
The **include_instances** attribute filters which OIDs
will be gathered. As you can see, there is another attribute, `mapping_table`.
Together, `include_instances` and `mapping_table` build a hash table
that keeps only the OIDs you want.
Let's say we have the following data on the SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1`
The plugin will build the following hash table:
| instance name | instance id |
|---------------|-------------|
| `enp5s0` | `1` |
| `enp5s1` | `2` |
| `enp5s2` | `3` |
| `eth0` | `4` |
| `eth1` | `5` |
With the **include_instances** attribute, the plugin will gather
the following OIDs:
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.2.1`
- `.1.3.6.1.2.1.31.1.1.1.2.5`
- `.1.3.6.1.2.1.31.1.1.1.3.1`
- `.1.3.6.1.2.1.31.1.1.1.3.5`
- `.1.3.6.1.2.1.31.1.1.1.4.1`
- `.1.3.6.1.2.1.31.1.1.1.4.5`
- `.1.3.6.1.2.1.31.1.1.1.5.1`
- `.1.3.6.1.2.1.31.1.1.1.5.5`
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `...`
Note: the plugin will add the instance name as the *instance* tag
(a sketch of this filtering logic follows the example below).
```toml
# Simple table with mapping example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable3"
include_instances = ["enp5s0", "eth1"]
# table with mapping but without subtables
[[inputs.snmp.table]]
name = "iftable3"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty. get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
```
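As referenced above, a minimal sketch of the include-instance filtering, assuming the id-to-name hash table has already been built from the `mapping_table` walk (function and variable names are illustrative, not the plugin's):

```go
package main

import "fmt"

// filterInstances keeps only the instance ids whose names appear in
// includeInstances, mirroring the include_instances behaviour.
func filterInstances(mapping map[string]string, includeInstances []string) map[string]string {
	kept := make(map[string]string)
	for id, name := range mapping {
		for _, want := range includeInstances {
			if name == want {
				kept[id] = name
			}
		}
	}
	return kept
}

func main() {
	// Hash table built from the .1.3.6.1.2.1.31.1.1.1.1.<id> values.
	mapping := map[string]string{
		"1": "enp5s0", "2": "enp5s1", "3": "enp5s2", "4": "eth0", "5": "eth1",
	}
	fmt.Println(filterInstances(mapping, []string{"enp5s0", "eth1"}))
	// -> map[1:enp5s0 5:eth1]
}
```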
#### Table with both mapping and subtable example
In this example, we omit the `collect` attribute from the host section,
but you can still use it in combination with the following parts.
Telegraf gathers the values of the OIDs in the table:
- named **iftable4**
Using the **inputs.snmp.table** section *and* the **sub_tables** attribute,
the plugin will get OIDs from the subtables:
- **iftable4** => `.1.3.6.1.2.1.31.1.1.1`
Because **iftable4** is a table, the plugin will gather all
OIDs in the table and in its subtables:
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.2`
- `.1.3.6.1.2.1.31.1.1.1.6.3`
- `.1.3.6.1.2.1.31.1.1.1.6.4`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.2`
- `.1.3.6.1.2.1.31.1.1.1.10.3`
- `.1.3.6.1.2.1.31.1.1.1.10.4`
- `.1.3.6.1.2.1.31.1.1.1.10....`
The **include_instances** attribute filters which OIDs
will be gathered. As you can see, there is another attribute, `mapping_table`.
Together, `include_instances` and `mapping_table` build a hash table
that keeps only the OIDs you want.
Let's say we have the following data on the SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1`
The plugin will build the following hash table:
| instance name | instance id |
|---------------|-------------|
| `enp5s0` | `1` |
| `enp5s1` | `2` |
| `enp5s2` | `3` |
| `eth0` | `4` |
| `eth1` | `5` |
With the **include_instances** attribute, the plugin will gather
the following OIDs:
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.5`
Note: the plugin will add the instance name as the *instance* tag.
```toml
# Table with both mapping and subtable example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable4"
include_instances = ["enp5s0", "eth1"]
# table with both mapping and subtables
[[inputs.snmp.table]]
name = "iftable4"
# if empty get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty get all subtables
# sub_tables could be not "real subtables"
sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
# note
# oid attribute is useless
# SNMP SUBTABLES
[[inputs.snmp.subtable]]
name = "bytes_recv"
oid = ".1.3.6.1.2.1.31.1.1.1.6"
unit = "octets"
[[inputs.snmp.subtable]]
name = "bytes_send"
oid = ".1.3.6.1.2.1.31.1.1.1.10"
unit = "octets"
```
#### Configuration notes
- In the **inputs.snmp.table** section, the `oid` attribute is ignored if
the `sub_tables` attribute is defined
- In the **inputs.snmp.subtable** section, you can use a name from `snmptranslate_file`
as the `oid` attribute instead of a valid OID
### Measurements & Fields:
With the last example (Table with both mapping and subtable example):
- ifHCOutOctets
    - ifHCOutOctets
- ifInDiscards
    - ifInDiscards
- ifHCInOctets
    - ifHCInOctets
### Tags:
With the last example (Table with both mapping and subtable example):
- ifHCOutOctets
    - host
    - instance
    - unit
- ifInDiscards
    - host
    - instance
- ifHCInOctets
    - host
    - instance
    - unit
### Example Output:
With the last example (Table with both mapping and subtable example):
```
ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901
ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264
ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312
```

View File

@@ -20,7 +20,16 @@ type Snmp struct {
Host []Host
Get []Data
Bulk []Data
Table []Table
Subtable []Subtable
SnmptranslateFile string
nameToOid map[string]string
initNode Node
subTableMap map[string]Subtable
// TODO change as unexportable
//OidInstanceMapping map[string]map[string]string
}
type Host struct {
@@ -36,9 +45,54 @@ type Host struct {
Collect []string
// easy get oids
GetOids []string
// Table
Table []HostTable
// Oids
getOids []Data
bulkOids []Data
tables []HostTable
// array of processed oids
// to skip oid duplication
processedOids []string
}
type Table struct {
// name = "iftable"
Name string
// oid = ".1.3.6.1.2.1.31.1.1.1"
Oid string
//if empty get all instances
//mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
MappingTable string
// if empty get all subtables
// sub_tables could be not "real subtables"
//sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
SubTables []string
}
type HostTable struct {
// name = "iftable"
Name string
// Includes only these instances
// include_instances = ["eth0", "eth1"]
IncludeInstances []string
// Excludes only these instances
// exclude_instances = ["eth20", "eth21"]
ExcludeInstances []string
// From Table struct
oid string
mappingTable string
subTables []string
}
// TODO find better names
type Subtable struct {
//name = "bytes_send"
Name string
//oid = ".1.3.6.1.2.1.31.1.1.1.10"
Oid string
//unit = "octets"
Unit string
}
type Data struct {
@@ -63,13 +117,8 @@ type Node struct {
subnodes map[string]Node
}
var initNode = Node{
id: "1",
name: "",
subnodes: make(map[string]Node),
}
var NameToOid = make(map[string]string)
// TODO move this var to snmp struct
var OidInstanceMapping = make(map[string]map[string]string)
var sampleConfig = `
## Use 'oids.txt' file to translate oids to names
@@ -113,7 +162,7 @@ var sampleConfig = `
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = 0
instance = "0"
[[inputs.snmp.get]]
name = "sysuptime"
@@ -129,6 +178,52 @@ var sampleConfig = `
name = "ifoutoctets"
max_repetition = 127
oid = "ifOutOctets"
[[inputs.snmp.host]]
address = "192.168.2.13:161"
#address = "127.0.0.1:161"
community = "public"
version = 2
timeout = 2.0
retries = 2
#collect = ["mybulk", "sysservices", "sysdescr", "systype"]
collect = ["sysuptime" ]
[[inputs.snmp.host.table]]
name = "iftable3"
include_instances = ["enp5s0", "eth1"]
# SNMP TABLEs
# table without mapping neither subtables
[[inputs.snmp.table]]
name = "iftable1"
oid = ".1.3.6.1.2.1.31.1.1.1"
# table without mapping but with subtables
[[inputs.snmp.table]]
name = "iftable2"
oid = ".1.3.6.1.2.1.31.1.1.1"
sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
# table with mapping but without subtables
[[inputs.snmp.table]]
name = "iftable3"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty. get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# table with both mapping and subtables
[[inputs.snmp.table]]
name = "iftable4"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty get all subtables
# sub_tables could be not "real subtables"
sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
`
// SampleConfig returns sample configuration message
@@ -189,8 +284,24 @@ func findnodename(node Node, ids []string) (string, string) {
}
func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// TODO put this in cache on first run
// Create subtables mapping
if len(s.subTableMap) == 0 {
s.subTableMap = make(map[string]Subtable)
for _, sb := range s.Subtable {
s.subTableMap[sb.Name] = sb
}
}
// TODO put this in cache on first run
// Create oid tree
if s.SnmptranslateFile != "" && len(initNode.subnodes) == 0 {
if s.SnmptranslateFile != "" && len(s.initNode.subnodes) == 0 {
s.nameToOid = make(map[string]string)
s.initNode = Node{
id: "1",
name: "",
subnodes: make(map[string]Node),
}
data, err := ioutil.ReadFile(s.SnmptranslateFile)
if err != nil {
log.Printf("Reading SNMPtranslate file error: %s", err)
@@ -202,8 +313,8 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
if oids[2] != "" {
oid_name := oids[1]
oid := oids[2]
fillnode(initNode, oid_name, strings.Split(string(oid), "."))
NameToOid[oid_name] = oid
fillnode(s.initNode, oid_name, strings.Split(string(oid), "."))
s.nameToOid[oid_name] = oid
}
}
}
@@ -227,7 +338,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// Get Easy GET oids
for _, oidstring := range host.GetOids {
oid := Data{}
if val, ok := NameToOid[oidstring]; ok {
if val, ok := s.nameToOid[oidstring]; ok {
// TODO should we add the 0 instance ?
oid.Name = oidstring
oid.Oid = val
@@ -248,7 +359,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// Get GET oids
for _, oid := range s.Get {
if oid.Name == oid_name {
if val, ok := NameToOid[oid.Oid]; ok {
if val, ok := s.nameToOid[oid.Oid]; ok {
// TODO should we add the 0 instance ?
if oid.Instance != "" {
oid.rawOid = "." + val + "." + oid.Instance
@@ -264,7 +375,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// Get GETBULK oids
for _, oid := range s.Bulk {
if oid.Name == oid_name {
if val, ok := NameToOid[oid.Oid]; ok {
if val, ok := s.nameToOid[oid.Oid]; ok {
oid.rawOid = "." + val
} else {
oid.rawOid = oid.Oid
@@ -273,18 +384,219 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
}
}
}
// Table
for _, hostTable := range host.Table {
for _, snmpTable := range s.Table {
if hostTable.Name == snmpTable.Name {
table := hostTable
table.oid = snmpTable.Oid
table.mappingTable = snmpTable.MappingTable
table.subTables = snmpTable.SubTables
host.tables = append(host.tables, table)
}
}
}
// Launch Mapping
// TODO put this in cache on first run
// TODO save mapping and computed oids
// to do it only the first time
// only if len(s.OidInstanceMapping) == 0
if len(OidInstanceMapping) >= 0 {
if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
return err
}
}
// Launch Get requests
if err := host.SNMPGet(acc); err != nil {
if err := host.SNMPGet(acc, s.initNode); err != nil {
return err
}
if err := host.SNMPBulk(acc); err != nil {
if err := host.SNMPBulk(acc, s.initNode); err != nil {
return err
}
}
return nil
}
func (h *Host) SNMPGet(acc telegraf.Accumulator) error {
func (h *Host) SNMPMap(acc telegraf.Accumulator, nameToOid map[string]string, subTableMap map[string]Subtable) error {
// Get snmp client
snmpClient, err := h.GetSNMPClient()
if err != nil {
return err
}
// Disconnect when done
defer snmpClient.Conn.Close()
// Prepare OIDs
for _, table := range h.tables {
// We don't have mapping
if table.mappingTable == "" {
if len(table.subTables) == 0 {
// If we have neither a mapping table
// nor a subtable list,
// this is just a bulk request
oid := Data{}
oid.Oid = table.oid
if val, ok := nameToOid[oid.Oid]; ok {
oid.rawOid = "." + val
} else {
oid.rawOid = oid.Oid
}
h.bulkOids = append(h.bulkOids, oid)
} else {
// If we don't have a mapping table
// but we do have subtables,
// this is a bunch of bulk requests
// For each subtable ...
for _, sb := range table.subTables {
// ... we create a new Data (oid) object
oid := Data{}
// Looking for more information about this subtable
ssb, exists := subTableMap[sb]
if exists {
// We found a subtable section in config files
oid.Oid = ssb.Oid
oid.rawOid = ssb.Oid
oid.Unit = ssb.Unit
} else {
// We did NOT find a subtable section in config files
oid.Oid = sb
oid.rawOid = sb
}
// TODO check oid validity
// Add the new oid to getOids list
h.bulkOids = append(h.bulkOids, oid)
}
}
} else {
// We have a mapping table
// We need to query this table
// To get mapping between instance id
// and instance name
oid_asked := table.mappingTable
oid_next := oid_asked
need_more_requests := true
// Set max repetition
maxRepetition := uint8(32)
// Launch requests
for need_more_requests {
// Launch request
result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition)
if err3 != nil {
return err3
}
lastOid := ""
for _, variable := range result.Variables {
lastOid = variable.Name
if strings.HasPrefix(variable.Name, oid_asked) {
switch variable.Type {
// handle instance names
case gosnmp.OctetString:
// Check if instance is in includes instances
getInstances := true
if len(table.IncludeInstances) > 0 {
getInstances = false
for _, instance := range table.IncludeInstances {
if instance == string(variable.Value.([]byte)) {
getInstances = true
}
}
}
// Check if instance is in excludes instances
if len(table.ExcludeInstances) > 0 {
getInstances = true
for _, instance := range table.ExcludeInstances {
if instance == string(variable.Value.([]byte)) {
getInstances = false
}
}
}
// We don't want this instance
if !getInstances {
continue
}
// remove oid table from the complete oid
// in order to get the current instance id
key := strings.Replace(variable.Name, oid_asked, "", 1)
if len(table.subTables) == 0 {
// We have a mapping table
// but no subtables
// This is just a bulk request
// Building mapping table
mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))}
_, exists := OidInstanceMapping[table.oid]
if exists {
OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte))
} else {
OidInstanceMapping[table.oid] = mapping
}
// Add table oid in bulk oid list
oid := Data{}
oid.Oid = table.oid
if val, ok := nameToOid[oid.Oid]; ok {
oid.rawOid = "." + val
} else {
oid.rawOid = oid.Oid
}
h.bulkOids = append(h.bulkOids, oid)
} else {
// We have a mapping table
// and some subtables
// This is a bunch of get requests
// This is the best case :)
// For each subtable ...
for _, sb := range table.subTables {
// ... we create a new Data (oid) object
oid := Data{}
// Looking for more information about this subtable
ssb, exists := subTableMap[sb]
if exists {
// We found a subtable section in config files
oid.Oid = ssb.Oid + key
oid.rawOid = ssb.Oid + key
oid.Unit = ssb.Unit
oid.Instance = string(variable.Value.([]byte))
} else {
// We did NOT find a subtable section in config files
oid.Oid = sb + key
oid.rawOid = sb + key
oid.Instance = string(variable.Value.([]byte))
}
// TODO check oid validity
// Add the new oid to getOids list
h.getOids = append(h.getOids, oid)
}
}
default:
}
} else {
break
}
}
// Determine if we need more requests
if strings.HasPrefix(lastOid, oid_asked) {
need_more_requests = true
oid_next = lastOid
} else {
need_more_requests = false
}
}
}
}
// Mapping finished
// Create newoids based on mapping
return nil
}
func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error {
// Get snmp client
snmpClient, err := h.GetSNMPClient()
if err != nil {
@@ -317,7 +629,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator) error {
return err3
}
// Handle response
_, err = h.HandleResponse(oidsList, result, acc)
_, err = h.HandleResponse(oidsList, result, acc, initNode)
if err != nil {
return err
}
@@ -325,7 +637,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator) error {
return nil
}
func (h *Host) SNMPBulk(acc telegraf.Accumulator) error {
func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error {
// Get snmp client
snmpClient, err := h.GetSNMPClient()
if err != nil {
@@ -360,7 +672,7 @@ func (h *Host) SNMPBulk(acc telegraf.Accumulator) error {
return err3
}
// Handle response
last_oid, err := h.HandleResponse(oidsList, result, acc)
last_oid, err := h.HandleResponse(oidsList, result, acc, initNode)
if err != nil {
return err
}
@@ -412,12 +724,19 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) {
return snmpClient, nil
}
func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc telegraf.Accumulator) (string, error) {
func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc telegraf.Accumulator, initNode Node) (string, error) {
var lastOid string
for _, variable := range result.Variables {
lastOid = variable.Name
// Remove unwanted oid
nextresult:
// Get only oid wanted
for oid_key, oid := range oids {
// Skip oids already processed
for _, processedOid := range h.processedOids {
if variable.Name == processedOid {
break nextresult
}
}
if strings.HasPrefix(variable.Name, oid_key) {
switch variable.Type {
// handle Metrics
@@ -431,11 +750,27 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a
// Get name and instance
var oid_name string
var instance string
// Get oidname and instannce from translate file
// Get oidname and instance from translate file
oid_name, instance = findnodename(initNode,
strings.Split(string(variable.Name[1:]), "."))
if instance != "" {
// Set instance tag
// From mapping table
mapping, inMappingNoSubTable := OidInstanceMapping[oid_key]
if inMappingNoSubTable {
// filter out instances that are not in
// the OidInstanceMapping map
if instance_name, exists := mapping[instance]; exists {
tags["instance"] = instance_name
} else {
continue
}
} else if oid.Instance != "" {
// From config files
tags["instance"] = oid.Instance
} else if instance != "" {
// Using last id of the current oid, ie:
// with .1.3.6.1.2.1.31.1.1.1.10.3
// instance is 3
tags["instance"] = instance
}
@@ -453,6 +788,7 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a
fields := make(map[string]interface{})
fields[string(field_name)] = variable.Value
h.processedOids = append(h.processedOids, variable.Name)
acc.AddFields(field_name, fields, tags)
case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
// Oid not found

View File

@@ -69,6 +69,9 @@ func TestSNMPErrorBulk(t *testing.T) {
}
func TestSNMPGet1(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
@@ -104,6 +107,9 @@ func TestSNMPGet1(t *testing.T) {
}
func TestSNMPGet2(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Oid: "ifNumber",
@@ -139,6 +145,9 @@ func TestSNMPGet2(t *testing.T) {
}
func TestSNMPGet3(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
@@ -177,6 +186,9 @@ func TestSNMPGet3(t *testing.T) {
}
func TestSNMPEasyGet4(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
@@ -227,6 +239,9 @@ func TestSNMPEasyGet4(t *testing.T) {
}
func TestSNMPEasyGet5(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
@@ -277,6 +292,9 @@ func TestSNMPEasyGet5(t *testing.T) {
}
func TestSNMPEasyGet6(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
@@ -307,6 +325,9 @@ func TestSNMPEasyGet6(t *testing.T) {
}
func TestSNMPBulk1(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
bulk1 := Data{
Name: "oid1",
Unit: "octets",

View File

@@ -17,7 +17,11 @@ import (
"github.com/influxdata/telegraf/plugins/inputs"
)
const UDP_PACKET_SIZE int = 1500
const (
UDP_PACKET_SIZE int = 1500
defaultFieldName = "value"
)
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
"You may want to increase allowed_pending_messages in the config\n"
@@ -113,9 +117,9 @@ type cachedcounter struct {
}
type cachedtimings struct {
name string
stats RunningStats
tags map[string]string
name string
fields map[string]RunningStats
tags map[string]string
}
func (_ *Statsd) Description() string {
@@ -169,16 +173,26 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
now := time.Now()
for _, metric := range s.timings {
// Defining a template to parse field names for timers allows us to split
// out multiple fields per timer. In this case we prefix each stat with the
// field name and store these all in a single measurement.
fields := make(map[string]interface{})
fields["mean"] = metric.stats.Mean()
fields["stddev"] = metric.stats.Stddev()
fields["upper"] = metric.stats.Upper()
fields["lower"] = metric.stats.Lower()
fields["count"] = metric.stats.Count()
for _, percentile := range s.Percentiles {
name := fmt.Sprintf("%v_percentile", percentile)
fields[name] = metric.stats.Percentile(percentile)
for fieldName, stats := range metric.fields {
var prefix string
if fieldName != defaultFieldName {
prefix = fieldName + "_"
}
fields[prefix+"mean"] = stats.Mean()
fields[prefix+"stddev"] = stats.Stddev()
fields[prefix+"upper"] = stats.Upper()
fields[prefix+"lower"] = stats.Lower()
fields[prefix+"count"] = stats.Count()
for _, percentile := range s.Percentiles {
name := fmt.Sprintf("%s%v_percentile", prefix, percentile)
fields[name] = stats.Percentile(percentile)
}
}
acc.AddFields(metric.name, fields, metric.tags, now)
}
if s.DeleteTimings {
@@ -370,11 +384,6 @@ func (s *Statsd) parseStatsdLine(line string) error {
// Parse the name & tags from bucket
m.name, m.field, m.tags = s.parseName(m.bucket)
// fields are not supported for timings, so if specified combine into
// the name
if (m.mtype == "ms" || m.mtype == "h") && m.field != "value" {
m.name += "_" + m.field
}
switch m.mtype {
case "c":
m.tags["metric_type"] = "counter"
@@ -433,7 +442,7 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) {
name = strings.Replace(name, "-", "__", -1)
}
if field == "" {
field = "value"
field = defaultFieldName
}
return name, field, tags
@@ -461,26 +470,32 @@ func parseKeyValue(keyvalue string) (string, string) {
func (s *Statsd) aggregate(m metric) {
switch m.mtype {
case "ms", "h":
// Check if the measurement exists
cached, ok := s.timings[m.hash]
if !ok {
cached = cachedtimings{
name: m.name,
tags: m.tags,
stats: RunningStats{
PercLimit: s.PercentileLimit,
},
name: m.name,
fields: make(map[string]RunningStats),
tags: m.tags,
}
}
// Check if the field exists. If we've not enabled multiple fields per timer
// this will be the default field name, e.g. "value"
field, ok := cached.fields[m.field]
if !ok {
field = RunningStats{
PercLimit: s.PercentileLimit,
}
}
if m.samplerate > 0 {
for i := 0; i < int(1.0/m.samplerate); i++ {
cached.stats.AddValue(m.floatvalue)
field.AddValue(m.floatvalue)
}
s.timings[m.hash] = cached
} else {
cached.stats.AddValue(m.floatvalue)
s.timings[m.hash] = cached
field.AddValue(m.floatvalue)
}
cached.fields[m.field] = field
s.timings[m.hash] = cached
case "c":
// check if the measurement exists
_, ok := s.counters[m.hash]

View File

@@ -561,12 +561,12 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
// A 0 with invalid samplerate will add a single 0,
// plus the last bit of value 1
// which adds up to 12 individual datapoints to be cached
if cachedtiming.stats.n != 12 {
t.Errorf("Expected 11 additions, got %d", cachedtiming.stats.n)
if cachedtiming.fields[defaultFieldName].n != 12 {
t.Errorf("Expected 11 additions, got %d", cachedtiming.fields[defaultFieldName].n)
}
if cachedtiming.stats.upper != 1 {
t.Errorf("Expected max input to be 1, got %f", cachedtiming.stats.upper)
if cachedtiming.fields[defaultFieldName].upper != 1 {
t.Errorf("Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper)
}
}
@@ -842,7 +842,105 @@ func TestParse_Timings(t *testing.T) {
}
acc.AssertContainsFields(t, "test_timing", valid)
}
// Tests low-level functionality of timings when multiple fields is enabled
// and a measurement template has been defined which can parse field names
func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) {
s := NewStatsd()
s.Templates = []string{"measurement.field"}
s.Percentiles = []int{90}
acc := &testutil.Accumulator{}
validLines := []string{
"test_timing.success:1|ms",
"test_timing.success:11|ms",
"test_timing.success:1|ms",
"test_timing.success:1|ms",
"test_timing.success:1|ms",
"test_timing.error:2|ms",
"test_timing.error:22|ms",
"test_timing.error:2|ms",
"test_timing.error:2|ms",
"test_timing.error:2|ms",
}
for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}
s.Gather(acc)
valid := map[string]interface{}{
"success_90_percentile": float64(11),
"success_count": int64(5),
"success_lower": float64(1),
"success_mean": float64(3),
"success_stddev": float64(4),
"success_upper": float64(11),
"error_90_percentile": float64(22),
"error_count": int64(5),
"error_lower": float64(2),
"error_mean": float64(6),
"error_stddev": float64(8),
"error_upper": float64(22),
}
acc.AssertContainsFields(t, "test_timing", valid)
}
// Tests low-level functionality of timings when multiple fields is enabled
// but a measurement template hasn't been defined so we can't parse field names
// In this case the behaviour should be the same as normal behaviour
func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) {
s := NewStatsd()
s.Templates = []string{}
s.Percentiles = []int{90}
acc := &testutil.Accumulator{}
validLines := []string{
"test_timing.success:1|ms",
"test_timing.success:11|ms",
"test_timing.success:1|ms",
"test_timing.success:1|ms",
"test_timing.success:1|ms",
"test_timing.error:2|ms",
"test_timing.error:22|ms",
"test_timing.error:2|ms",
"test_timing.error:2|ms",
"test_timing.error:2|ms",
}
for _, line := range validLines {
err := s.parseStatsdLine(line)
if err != nil {
t.Errorf("Parsing line %s should not have resulted in an error\n", line)
}
}
s.Gather(acc)
expectedSuccess := map[string]interface{}{
"90_percentile": float64(11),
"count": int64(5),
"lower": float64(1),
"mean": float64(3),
"stddev": float64(4),
"upper": float64(11),
}
expectedError := map[string]interface{}{
"90_percentile": float64(22),
"count": int64(5),
"lower": float64(2),
"mean": float64(6),
"stddev": float64(8),
"upper": float64(22),
}
acc.AssertContainsFields(t, "test_timing_success", expectedSuccess)
acc.AssertContainsFields(t, "test_timing_error", expectedError)
}
func TestParse_Timings_Delete(t *testing.T) {

View File

@@ -0,0 +1,64 @@
# Kernel Input Plugin
This plugin is only available on Linux.
The kernel plugin gathers kernel statistics that don't fit into other
plugins; in practice, these are the statistics available in `/proc/stat` that
are not covered elsewhere.
The metrics are documented in `man proc` under the `/proc/stat` section.
```
/proc/stat
kernel/system statistics. Varies with architecture. Common entries include:
page 5741 1808
The number of pages the system paged in and the number that were paged out (from disk).
swap 1 0
The number of swap pages that have been brought in and out.
intr 1462898
This line shows counts of interrupts serviced since boot time, for each of
the possible system interrupts. The first column is the total of all
interrupts serviced; each subsequent column is the total for a particular interrupt.
ctxt 115315
The number of context switches that the system underwent.
btime 769041601
boot time, in seconds since the Epoch, 1970-01-01 00:00:00 +0000 (UTC).
processes 86031
Number of forks since boot.
```
### Configuration:
```toml
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
```
### Measurements & Fields:
- kernel
- boot_time (integer, seconds since epoch, `btime`)
- context_switches (integer, `ctxt`)
- disk_pages_in (integer, `page (0)`)
- disk_pages_out (integer, `page (1)`)
- interrupts (integer, `intr`)
- processes_forked (integer, `processes`)
### Tags:
None
### Example Output:
```
$ telegraf -config ~/ws/telegraf.conf -input-filter kernel -test
* Plugin: kernel, Collection 1
> kernel boot_time=1457505775i,context_switches=2626618i,disk_pages_in=5741i,disk_pages_out=1808i,interrupts=1472736i,processes_forked=10673i 1457613402960879816
```
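A rough sketch of pulling a subset of these fields out of `/proc/stat`, assuming the line formats quoted above (this is not the plugin's implementation, which appears later in this diff):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open("/proc/stat")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	fields := make(map[string]int64)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) < 2 {
			continue
		}
		// For "intr", the first column is the total of all interrupts.
		v, err := strconv.ParseInt(parts[1], 10, 64)
		if err != nil {
			continue
		}
		switch parts[0] {
		case "ctxt":
			fields["context_switches"] = v
		case "btime":
			fields["boot_time"] = v
		case "processes":
			fields["processes_forked"] = v
		case "intr":
			fields["interrupts"] = v
		}
	}
	fmt.Println(fields)
}
```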

View File

@@ -0,0 +1,58 @@
# Processes Input Plugin
This plugin gathers info about the total number of processes and groups
them by status (zombie, sleeping, running, etc.)
On Linux this plugin requires access to procfs (`/proc`); on other OSes
it requires the ability to execute `ps`.
### Configuration:
```toml
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
```
### Measurements & Fields:
- processes
- blocked (aka disk sleep or uninterruptible sleep)
- running
- sleeping
- stopped
- total
- zombie
- wait (freebsd only)
- idle (bsd only)
- paging (linux only)
- total_threads (linux only)
### Process State Mappings
Different OSes use slightly different state codes for their processes; these
state codes are documented in `man ps`. The following table maps the major
OS state codes to telegraf metrics:
```
Linux FreeBSD Darwin meaning
R R R running
S S S sleeping
Z Z Z zombie
T T T stopped
none I I idle (sleeping for longer than about 20 seconds)
D D,L U blocked (waiting in uninterruptible sleep, or locked)
W W none paging (linux kernel < 2.6 only), wait (freebsd)
```
### Tags:
None
### Example Output:
```
$ telegraf -config ~/ws/telegraf.conf -input-filter processes -test
* Plugin: processes, Collection 1
> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,paging=0i,total_threads=687i 1457478636980905042
```
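On Linux, the state code is the single character after the command name in `/proc/<pid>/stat`. A rough sketch of counting processes by state under that assumption (not the plugin's exact code):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	counts := make(map[string]int)

	// Every numeric directory under /proc is a pid.
	paths, _ := filepath.Glob("/proc/[0-9]*/stat")
	for _, path := range paths {
		data, err := os.ReadFile(path)
		if err != nil {
			continue // the process may have exited already
		}
		// The comm field is wrapped in parentheses and may contain
		// spaces, so locate the state just after the last ')'.
		i := strings.LastIndexByte(string(data), ')')
		if i < 0 || i+2 >= len(data) {
			continue
		}
		state := string(data[i+2]) // e.g. "R", "S", "Z", "T", "D"
		counts[state]++
	}
	fmt.Println(counts)
}
```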

View File

@@ -0,0 +1,35 @@
# System Input Plugin
The system plugin gathers general stats on system load, uptime,
and number of users logged in. It is basically equivalent
to the unix `uptime` command.
### Configuration:
```toml
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
```
### Measurements & Fields:
- system
- load1 (float)
- load15 (float)
- load5 (float)
- n_users (integer)
- uptime (integer, seconds)
- uptime_format (string)
### Tags:
None
### Example Output:
```
$ telegraf -config ~/ws/telegraf.conf -input-filter system -test
* Plugin: system, Collection 1
> system load1=2.05,load15=2.38,load5=2.03,n_users=4i,uptime=239043i,uptime_format="2 days, 18:24" 1457546165399253452
```
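The `uptime_format` field is produced by the `format_uptime` helper referenced later in this diff; its body is not shown, so the sketch below is one plausible implementation (the exact formatting is an assumption) that renders `239043` seconds as `2 days, 18:24`, matching the example output above:
```go
package main

import (
	"bytes"
	"fmt"
)

// format_uptime renders an uptime in seconds as e.g. "2 days, 18:24".
// This is a sketch; the real helper's body does not appear in this diff.
func format_uptime(uptime uint64) string {
	buf := new(bytes.Buffer)
	days := uptime / (24 * 60 * 60)
	if days != 0 {
		suffix := ""
		if days > 1 {
			suffix = "s"
		}
		fmt.Fprintf(buf, "%d day%s, ", days, suffix)
	}
	hours := (uptime / 3600) % 24
	minutes := (uptime / 60) % 60
	fmt.Fprintf(buf, "%d:%02d", hours, minutes)
	return buf.String()
}

func main() {
	fmt.Println(format_uptime(239043)) // prints "2 days, 18:24"
}
```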

View File

@@ -33,7 +33,7 @@ var sampleConfig = `
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
drop = ["time_*"]
fielddrop = ["time_*"]
`
func (_ *CPUStats) SampleConfig() string {
@@ -113,6 +113,10 @@ func totalCpuTime(t cpu.CPUTimesStat) float64 {
func init() {
inputs.Add("cpu", func() telegraf.Input {
return &CPUStats{ps: &systemPS{}}
return &CPUStats{
PerCPU: true,
TotalCPU: true,
ps: &systemPS{},
}
})
}

View File

@@ -14,6 +14,7 @@ type DiskStats struct {
Mountpoints []string
MountPoints []string
IgnoreFS []string `toml:"ignore_fs"`
}
func (_ *DiskStats) Description() string {
@@ -24,6 +25,10 @@ var diskSampleConfig = `
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points = ["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
ignore_fs = ["tmpfs", "devtmpfs"]
`
func (_ *DiskStats) SampleConfig() string {
@@ -36,12 +41,16 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error {
s.MountPoints = s.Mountpoints
}
disks, err := s.ps.DiskUsage(s.MountPoints)
disks, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS)
if err != nil {
return fmt.Errorf("error getting disk usage info: %s", err)
}
for _, du := range disks {
if du.Total == 0 {
// Skip dummy filesystem (procfs, cgroupfs, ...)
continue
}
tags := map[string]string{
"path": du.Path,
"fstype": du.Fstype,
@@ -79,11 +88,11 @@ func (_ *DiskIOStats) Description() string {
}
var diskIoSampleConfig = `
# By default, telegraf will gather stats for all devices including
# disk partitions.
# Setting devices will restrict the stats to the specified devices.
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
# Uncomment the following line if you do not need disk serial numbers.
## Uncomment the following line if you do not need disk serial numbers.
# skip_serial_number = true
`

View File

@@ -50,9 +50,9 @@ func TestDiskStats(t *testing.T) {
},
}
mps.On("DiskUsage", []string(nil)).Return(duAll, nil)
mps.On("DiskUsage", []string{"/", "/dev"}).Return(duFiltered, nil)
mps.On("DiskUsage", []string{"/", "/home"}).Return(duAll, nil)
mps.On("DiskUsage", []string(nil), []string(nil)).Return(duAll, nil)
mps.On("DiskUsage", []string{"/", "/dev"}, []string(nil)).Return(duFiltered, nil)
mps.On("DiskUsage", []string{"/", "/home"}, []string(nil)).Return(duAll, nil)
err = (&DiskStats{ps: &mps}).Gather(&acc)
require.NoError(t, err)

View File

@@ -0,0 +1,110 @@
// +build linux
package system
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// /proc/stat file line prefixes to gather stats on:
var (
interrupts = []byte("intr")
context_switches = []byte("ctxt")
processes_forked = []byte("processes")
disk_pages = []byte("page")
boot_time = []byte("btime")
)
type Kernel struct {
statFile string
}
func (k *Kernel) Description() string {
return "Get kernel statistics from /proc/stat"
}
func (k *Kernel) SampleConfig() string { return "" }
func (k *Kernel) Gather(acc telegraf.Accumulator) error {
data, err := k.getProcStat()
if err != nil {
return err
}
fields := make(map[string]interface{})
dataFields := bytes.Fields(data)
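// Note: bytes.Fields splits the whole file into whitespace-separated tokens,
// so each matched key's value sits at the next index (i+1).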
for i, field := range dataFields {
switch {
case bytes.Equal(field, interrupts):
m, err := strconv.Atoi(string(dataFields[i+1]))
if err != nil {
return err
}
fields["interrupts"] = int64(m)
case bytes.Equal(field, context_switches):
m, err := strconv.Atoi(string(dataFields[i+1]))
if err != nil {
return err
}
fields["context_switches"] = int64(m)
case bytes.Equal(field, processes_forked):
m, err := strconv.Atoi(string(dataFields[i+1]))
if err != nil {
return err
}
fields["processes_forked"] = int64(m)
case bytes.Equal(field, boot_time):
m, err := strconv.Atoi(string(dataFields[i+1]))
if err != nil {
return err
}
fields["boot_time"] = int64(m)
case bytes.Equal(field, disk_pages):
in, err := strconv.Atoi(string(dataFields[i+1]))
if err != nil {
return err
}
out, err := strconv.Atoi(string(dataFields[i+2]))
if err != nil {
return err
}
fields["disk_pages_in"] = int64(in)
fields["disk_pages_out"] = int64(out)
}
}
acc.AddFields("kernel", fields, map[string]string{})
return nil
}
func (k *Kernel) getProcStat() ([]byte, error) {
if _, err := os.Stat(k.statFile); os.IsNotExist(err) {
return nil, fmt.Errorf("kernel: %s does not exist!", k.statFile)
} else if err != nil {
return nil, err
}
data, err := ioutil.ReadFile(k.statFile)
if err != nil {
return nil, err
}
return data, nil
}
func init() {
inputs.Add("kernel", func() telegraf.Input {
return &Kernel{
statFile: "/proc/stat",
}
})
}

View File

@@ -0,0 +1,164 @@
// +build linux
package system
import (
"io/ioutil"
"os"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
func TestFullProcFile(t *testing.T) {
tmpfile := makeFakeStatFile([]byte(statFile_Full))
defer os.Remove(tmpfile)
k := Kernel{
statFile: tmpfile,
}
acc := testutil.Accumulator{}
err := k.Gather(&acc)
assert.NoError(t, err)
fields := map[string]interface{}{
"boot_time": int64(1457505775),
"context_switches": int64(2626618),
"disk_pages_in": int64(5741),
"disk_pages_out": int64(1808),
"interrupts": int64(1472736),
"processes_forked": int64(10673),
}
acc.AssertContainsFields(t, "kernel", fields)
}
func TestPartialProcFile(t *testing.T) {
tmpfile := makeFakeStatFile([]byte(statFile_Partial))
defer os.Remove(tmpfile)
k := Kernel{
statFile: tmpfile,
}
acc := testutil.Accumulator{}
err := k.Gather(&acc)
assert.NoError(t, err)
fields := map[string]interface{}{
"boot_time": int64(1457505775),
"context_switches": int64(2626618),
"disk_pages_in": int64(5741),
"disk_pages_out": int64(1808),
"interrupts": int64(1472736),
}
acc.AssertContainsFields(t, "kernel", fields)
}
func TestInvalidProcFile1(t *testing.T) {
tmpfile := makeFakeStatFile([]byte(statFile_Invalid))
defer os.Remove(tmpfile)
k := Kernel{
statFile: tmpfile,
}
acc := testutil.Accumulator{}
err := k.Gather(&acc)
assert.Error(t, err)
}
func TestInvalidProcFile2(t *testing.T) {
tmpfile := makeFakeStatFile([]byte(statFile_Invalid2))
defer os.Remove(tmpfile)
k := Kernel{
statFile: tmpfile,
}
acc := testutil.Accumulator{}
err := k.Gather(&acc)
assert.Error(t, err)
}
func TestNoProcFile(t *testing.T) {
tmpfile := makeFakeStatFile([]byte(statFile_Invalid2))
os.Remove(tmpfile)
k := Kernel{
statFile: tmpfile,
}
acc := testutil.Accumulator{}
err := k.Gather(&acc)
assert.Error(t, err)
assert.Contains(t, err.Error(), "does not exist")
}
const statFile_Full = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
btime 1457505775
processes 10673
procs_running 2
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
page 5741 1808
swap 1 0
`
const statFile_Partial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
btime 1457505775
procs_running 2
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
page 5741 1808
`
// missing btime measurement
const statFile_Invalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
btime
processes 10673
procs_running 2
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
page 5741 1808
swap 1 0
`
// missing second page measurement
const statFile_Invalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0
cpu0 6796 252 5655 10444977 175 0 101 0 0 0
intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 2626618
processes 10673
procs_running 2
page 5741
procs_blocked 0
softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545
`
func makeFakeStatFile(content []byte) string {
tmpfile, err := ioutil.TempFile("", "kerneltest")
if err != nil {
panic(err)
}
if _, err := tmpfile.Write(content); err != nil {
panic(err)
}
if err := tmpfile.Close(); err != nil {
panic(err)
}
return tmpfile.Name()
}

View File

@@ -33,8 +33,8 @@ func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
return r0, r1
}
func (m *MockPS) DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) {
ret := m.Called(mountPointFilter)
func (m *MockPS) DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.DiskUsageStat, error) {
ret := m.Called(mountPointFilter, fstypeExclude)
r0 := ret.Get(0).([]*disk.DiskUsageStat)
r1 := ret.Error(1)

View File

@@ -0,0 +1,216 @@
// +build !windows
package system
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"runtime"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Processes struct {
execPS func() ([]byte, error)
readProcFile func(statFile string) ([]byte, error)
forcePS bool
forceProc bool
}
func (p *Processes) Description() string {
return "Get the number of processes and group them by status"
}
func (p *Processes) SampleConfig() string { return "" }
func (p *Processes) Gather(acc telegraf.Accumulator) error {
// Get an empty map of metric fields
fields := getEmptyFields()
// Decide if we will use 'ps' to get stats (use procfs otherwise)
usePS := true
if runtime.GOOS == "linux" {
usePS = false
}
if p.forcePS {
usePS = true
} else if p.forceProc {
usePS = false
}
// Gather stats from 'ps' or procfs
if usePS {
if err := p.gatherFromPS(fields); err != nil {
return err
}
} else {
if err := p.gatherFromProc(fields); err != nil {
return err
}
}
acc.AddFields("processes", fields, nil)
return nil
}
// Gets empty fields of metrics based on the OS
func getEmptyFields() map[string]interface{} {
fields := map[string]interface{}{
"blocked": int64(0),
"zombies": int64(0),
"stopped": int64(0),
"running": int64(0),
"sleeping": int64(0),
"total": int64(0),
}
switch runtime.GOOS {
case "freebsd":
fields["idle"] = int64(0)
fields["wait"] = int64(0)
case "darwin":
fields["idle"] = int64(0)
case "openbsd":
fields["idle"] = int64(0)
case "linux":
fields["paging"] = int64(0)
fields["total_threads"] = int64(0)
}
return fields
}
// exec `ps` to get all process states
func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
out, err := p.execPS()
if err != nil {
return err
}
for i, status := range bytes.Fields(out) {
if i == 0 && string(status) == "STAT" {
// This is a header, skip it
continue
}
switch status[0] {
case 'W':
fields["wait"] = fields["wait"].(int64) + int64(1)
case 'U', 'D', 'L':
// Also known as uninterruptible sleep or disk sleep
fields["blocked"] = fields["blocked"].(int64) + int64(1)
case 'Z':
fields["zombies"] = fields["zombies"].(int64) + int64(1)
case 'T':
fields["stopped"] = fields["stopped"].(int64) + int64(1)
case 'R':
fields["running"] = fields["running"].(int64) + int64(1)
case 'S':
fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
case 'I':
fields["idle"] = fields["idle"].(int64) + int64(1)
default:
log.Printf("processes: Unknown state [ %s ] from ps",
string(status[0]))
}
fields["total"] = fields["total"].(int64) + int64(1)
}
return nil
}
// get process states from /proc/(pid)/stat files
func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
files, err := ioutil.ReadDir("/proc")
if err != nil {
return err
}
for _, file := range files {
if !file.IsDir() {
continue
}
statFile := path.Join("/proc", file.Name(), "stat")
data, err := p.readProcFile(statFile)
if err != nil {
return err
}
if data == nil {
continue
}
stats := bytes.Fields(data)
if len(stats) < 3 {
return fmt.Errorf("Something is terribly wrong with %s", statFile)
}
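// proc(5): the third field of /proc/<pid>/stat is the single-character
// process state.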
switch stats[2][0] {
case 'R':
fields["running"] = fields["running"].(int64) + int64(1)
case 'S':
fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
case 'D':
fields["blocked"] = fields["blocked"].(int64) + int64(1)
case 'Z':
fields["zombies"] = fields["zombies"].(int64) + int64(1)
case 'T', 't':
fields["stopped"] = fields["stopped"].(int64) + int64(1)
case 'W':
fields["paging"] = fields["paging"].(int64) + int64(1)
default:
log.Printf("processes: Unknown state [ %s ] in file %s",
string(stats[2][0]), statFile)
}
fields["total"] = fields["total"].(int64) + int64(1)
threads, err := strconv.Atoi(string(stats[19]))
if err != nil {
log.Printf("processes: Error parsing thread count: %s", err)
continue
}
fields["total_threads"] = fields["total_threads"].(int64) + int64(threads)
}
return nil
}
func readProcFile(statFile string) ([]byte, error) {
if _, err := os.Stat(statFile); os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
data, err := ioutil.ReadFile(statFile)
if err != nil {
return nil, err
}
return data, nil
}
func execPS() ([]byte, error) {
bin, err := exec.LookPath("ps")
if err != nil {
return nil, err
}
out, err := exec.Command(bin, "axo", "state").Output()
if err != nil {
return nil, err
}
return out, err
}
func init() {
inputs.Add("processes", func() telegraf.Input {
return &Processes{
execPS: execPS,
readProcFile: readProcFile,
}
})
}

View File

@@ -0,0 +1,151 @@
package system
import (
"fmt"
"runtime"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestProcesses(t *testing.T) {
processes := &Processes{
execPS: execPS,
readProcFile: readProcFile,
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
assert.True(t, acc.HasIntField("processes", "running"))
assert.True(t, acc.HasIntField("processes", "sleeping"))
assert.True(t, acc.HasIntField("processes", "stopped"))
assert.True(t, acc.HasIntField("processes", "total"))
total, ok := acc.Get("processes")
require.True(t, ok)
assert.True(t, total.Fields["total"].(int64) > 0)
}
func TestFromPS(t *testing.T) {
processes := &Processes{
execPS: testExecPS,
forcePS: true,
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
fields := getEmptyFields()
fields["blocked"] = int64(4)
fields["zombies"] = int64(1)
fields["running"] = int64(4)
fields["sleeping"] = int64(34)
fields["total"] = int64(43)
acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
}
func TestFromPSError(t *testing.T) {
processes := &Processes{
execPS: testExecPSError,
forcePS: true,
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.Error(t, err)
}
func TestFromProcFiles(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("This test only runs on linux")
}
tester := tester{}
processes := &Processes{
readProcFile: tester.testProcFile,
forceProc: true,
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
fields := getEmptyFields()
fields["sleeping"] = tester.calls
fields["total_threads"] = tester.calls * 2
fields["total"] = tester.calls
acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
}
func testExecPS() ([]byte, error) {
return []byte(testPSOut), nil
}
// struct for counting calls to testProcFile
type tester struct {
calls int64
}
func (t *tester) testProcFile(_ string) ([]byte, error) {
t.calls++
return []byte(fmt.Sprintf(testProcStat, "S", "2")), nil
}
func testExecPSError() ([]byte, error) {
return []byte(testPSOut), fmt.Errorf("ERROR!")
}
const testPSOut = `
STAT
S
S
S
S
R
R
S
S
Ss
Ss
S
SNs
Ss
Ss
S
R+
S
U
S
S
S
S
Ss
S+
Ss
S
S+
S+
Ss
S+
Ss
S
R+
Ss
S
S+
S+
Ss
L
U
Z
D
S+
`
const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
`

View File

@@ -14,7 +14,7 @@ import (
type PS interface {
CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error)
DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error)
DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.DiskUsageStat, error)
NetIO() ([]net.NetIOCountersStat, error)
NetProto() ([]net.NetProtoCountersStat, error)
DiskIO() (map[string]disk.DiskIOCountersStat, error)
@@ -53,6 +53,7 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
func (s *systemPS) DiskUsage(
mountPointFilter []string,
fstypeExclude []string,
) ([]*disk.DiskUsageStat, error) {
parts, err := disk.DiskPartitions(true)
if err != nil {
@@ -60,9 +61,13 @@ func (s *systemPS) DiskUsage(
}
// Make a "set" out of the filter slice
filterSet := make(map[string]bool)
mountPointFilterSet := make(map[string]bool)
for _, filter := range mountPointFilter {
filterSet[filter] = true
mountPointFilterSet[filter] = true
}
fstypeExcludeSet := make(map[string]bool)
for _, filter := range fstypeExclude {
fstypeExcludeSet[filter] = true
}
var usage []*disk.DiskUsageStat
@@ -71,7 +76,7 @@ func (s *systemPS) DiskUsage(
if len(mountPointFilter) > 0 {
// If the mount point is not a member of the filter set,
// don't gather info on it.
_, ok := filterSet[p.Mountpoint]
_, ok := mountPointFilterSet[p.Mountpoint]
if !ok {
continue
}
@@ -81,6 +86,12 @@ func (s *systemPS) DiskUsage(
if err != nil {
return nil, err
}
// If the mount point is a member of the exclude set,
// don't gather info on it.
_, ok := fstypeExcludeSet[p.Fstype]
if ok {
continue
}
du.Fstype = p.Fstype
usage = append(usage, du)
}

View File

@@ -31,11 +31,17 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
return err
}
users, err := host.Users()
if err != nil {
return err
}
fields := map[string]interface{}{
"load1": loadavg.Load1,
"load5": loadavg.Load5,
"load15": loadavg.Load15,
"uptime": hostinfo.Uptime,
"n_users": len(users),
"uptime_format": format_uptime(hostinfo.Uptime),
}
acc.AddFields("system", fields, nil)

View File

@@ -0,0 +1,30 @@
# TCP listener service input plugin
The TCP listener is a service input plugin that listens for messages on a TCP
socket and adds those messages to InfluxDB.
The plugin expects messages in one of the
[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
### Configuration:
This is a sample configuration for the plugin.
```toml
# Generic TCP listener
[[inputs.tcp_listener]]
## Address and port to host TCP listener on
service_address = ":8094"
## Number of TCP messages allowed to queue up. Once filled, the
## TCP listener will start dropping packets.
allowed_pending_messages = 10000
## Maximum number of concurrent TCP connections to allow
max_tcp_connections = 250
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
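As a quick smoke test, a minimal Go client can write one line-protocol metric to the listener (the address and metric below are examples, assuming the listener above is running):
```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Assumes a telegraf tcp_listener on :8094 with data_format = "influx".
	conn, err := net.Dial("tcp", "127.0.0.1:8094")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// One line-protocol metric per line; the listener scans line by line.
	fmt.Fprint(conn, "cpu_load_short,host=server01 value=12.0 1422568543702900257\n")
}
```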

View File

@@ -0,0 +1,264 @@
package tcp_listener
import (
"bufio"
"fmt"
"log"
"net"
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
type TcpListener struct {
ServiceAddress string
AllowedPendingMessages int
MaxTCPConnections int `toml:"max_tcp_connections"`
sync.Mutex
// Lock for preventing a data race during resource cleanup
cleanup sync.Mutex
wg sync.WaitGroup
in chan []byte
done chan struct{}
// accept channel tracks how many active connections there are; if there
// is an available bool in accept, then we are below the maximum and can
// accept the connection
accept chan bool
// track the listener here so we can close it in Stop()
listener *net.TCPListener
// track current connections so we can close them in Stop()
conns map[string]*net.TCPConn
parser parsers.Parser
acc telegraf.Accumulator
}
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
"You may want to increase allowed_pending_messages in the config\n"
const sampleConfig = `
## Address and port to host TCP listener on
service_address = ":8094"
## Number of TCP messages allowed to queue up. Once filled, the
## TCP listener will start dropping packets.
allowed_pending_messages = 10000
## Maximum number of concurrent TCP connections to allow
max_tcp_connections = 250
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
func (t *TcpListener) SampleConfig() string {
return sampleConfig
}
func (t *TcpListener) Description() string {
return "Generic TCP listener"
}
// All the work is done in the Start() function, so this is just a dummy
// function.
func (t *TcpListener) Gather(_ telegraf.Accumulator) error {
return nil
}
func (t *TcpListener) SetParser(parser parsers.Parser) {
t.parser = parser
}
// Start starts the tcp listener service.
func (t *TcpListener) Start(acc telegraf.Accumulator) error {
t.Lock()
defer t.Unlock()
t.acc = acc
t.in = make(chan []byte, t.AllowedPendingMessages)
t.done = make(chan struct{})
t.accept = make(chan bool, t.MaxTCPConnections)
t.conns = make(map[string]*net.TCPConn)
for i := 0; i < t.MaxTCPConnections; i++ {
t.accept <- true
}
// Start listener
var err error
address, _ := net.ResolveTCPAddr("tcp", t.ServiceAddress)
t.listener, err = net.ListenTCP("tcp", address)
if err != nil {
log.Fatalf("ERROR: ListenUDP - %s", err)
return err
}
log.Println("TCP server listening on: ", t.listener.Addr().String())
t.wg.Add(2)
go t.tcpListen()
go t.tcpParser()
log.Printf("Started TCP listener service on %s\n", t.ServiceAddress)
return nil
}
// Stop cleans up all resources
func (t *TcpListener) Stop() {
t.Lock()
defer t.Unlock()
close(t.done)
t.listener.Close()
// Close all open TCP connections
// - get all conns from the t.conns map and put into slice
// - this is so the forget() function doesn't conflict with looping
// over the t.conns map
var conns []*net.TCPConn
t.cleanup.Lock()
for _, conn := range t.conns {
conns = append(conns, conn)
}
t.cleanup.Unlock()
for _, conn := range conns {
conn.Close()
}
t.wg.Wait()
close(t.in)
log.Println("Stopped TCP listener service on ", t.ServiceAddress)
}
// tcpListen listens for incoming TCP connections.
func (t *TcpListener) tcpListen() error {
defer t.wg.Done()
for {
select {
case <-t.done:
return nil
default:
// Accept connection:
conn, err := t.listener.AcceptTCP()
if err != nil {
return err
}
log.Printf("Received TCP Connection from %s", conn.RemoteAddr())
select {
case <-t.accept:
// not over connection limit, handle the connection properly.
t.wg.Add(1)
// generate a random id for this TCPConn
id := internal.RandomString(6)
t.remember(id, conn)
go t.handler(conn, id)
default:
// We are over the connection limit, refuse & close.
t.refuser(conn)
}
}
}
}
// refuser refuses a TCP connection
func (t *TcpListener) refuser(conn *net.TCPConn) {
// Tell the connection why we are closing.
fmt.Fprintf(conn, "Telegraf maximum concurrent TCP connections (%d)"+
" reached, closing.\nYou may want to increase max_tcp_connections in"+
" the Telegraf tcp listener configuration.\n", t.MaxTCPConnections)
conn.Close()
log.Printf("Refused TCP Connection from %s", conn.RemoteAddr())
log.Printf("WARNING: Maximum TCP Connections reached, you may want to" +
" adjust max_tcp_connections")
}
// handler handles a single TCP Connection
func (t *TcpListener) handler(conn *net.TCPConn, id string) {
// connection cleanup function
defer func() {
t.wg.Done()
conn.Close()
log.Printf("Closed TCP Connection from %s", conn.RemoteAddr())
// Add one connection potential back to channel when this one closes
t.accept <- true
t.forget(id)
}()
scanner := bufio.NewScanner(conn)
for {
select {
case <-t.done:
return
default:
if !scanner.Scan() {
return
}
buf := scanner.Bytes()
select {
case t.in <- buf:
default:
log.Printf(dropwarn, string(buf))
}
}
}
}
// tcpParser parses the incoming tcp byte packets
func (t *TcpListener) tcpParser() error {
defer t.wg.Done()
for {
select {
case <-t.done:
return nil
case packet := <-t.in:
if len(packet) == 0 {
continue
}
metrics, err := t.parser.Parse(packet)
if err == nil {
t.storeMetrics(metrics)
} else {
log.Printf("Malformed packet: [%s], Error: %s\n",
string(packet), err)
}
}
}
}
func (t *TcpListener) storeMetrics(metrics []telegraf.Metric) error {
t.Lock()
defer t.Unlock()
for _, m := range metrics {
t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
return nil
}
// forget a TCP connection
func (t *TcpListener) forget(id string) {
t.cleanup.Lock()
defer t.cleanup.Unlock()
delete(t.conns, id)
}
// remember a TCP connection
func (t *TcpListener) remember(id string, conn *net.TCPConn) {
t.cleanup.Lock()
defer t.cleanup.Unlock()
t.conns[id] = conn
}
func init() {
inputs.Add("tcp_listener", func() telegraf.Input {
return &TcpListener{}
})
}
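The `accept` channel above acts as a counting semaphore built from a buffered channel: `Start()` fills it with `MaxTCPConnections` tokens, each accepted connection takes one, and the handler returns it when the connection closes. A minimal standalone illustration of the pattern:
```go
package main

import "fmt"

func main() {
	const max = 2
	accept := make(chan bool, max)
	for i := 0; i < max; i++ {
		accept <- true // one token per allowed connection
	}
	for i := 0; i < 4; i++ {
		select {
		case <-accept:
			// Token available: below the limit, handle the connection.
			// A real handler would send the token back on close: accept <- true
			fmt.Println("accepted", i)
		default:
			// No token available: over the limit, refuse the connection.
			fmt.Println("refused", i)
		}
	}
}
```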

View File

@@ -0,0 +1,259 @@
package tcp_listener
import (
"fmt"
"net"
"testing"
"time"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
testMsgs = `
cpu_load_short,host=server02 value=12.0 1422568543702900257
cpu_load_short,host=server03 value=12.0 1422568543702900257
cpu_load_short,host=server04 value=12.0 1422568543702900257
cpu_load_short,host=server05 value=12.0 1422568543702900257
cpu_load_short,host=server06 value=12.0 1422568543702900257
`
)
func newTestTcpListener() (*TcpListener, chan []byte) {
in := make(chan []byte, 1500)
listener := &TcpListener{
ServiceAddress: ":8194",
AllowedPendingMessages: 10000,
MaxTCPConnections: 250,
in: in,
done: make(chan struct{}),
}
return listener, in
}
func TestConnectTCP(t *testing.T) {
listener := TcpListener{
ServiceAddress: ":8194",
AllowedPendingMessages: 10000,
MaxTCPConnections: 250,
}
listener.parser, _ = parsers.NewInfluxParser()
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
time.Sleep(time.Millisecond * 25)
conn, err := net.Dial("tcp", "127.0.0.1:8194")
require.NoError(t, err)
// send single message to socket
fmt.Fprintf(conn, testMsg)
time.Sleep(time.Millisecond * 15)
acc.AssertContainsTaggedFields(t, "cpu_load_short",
map[string]interface{}{"value": float64(12)},
map[string]string{"host": "server01"},
)
// send multiple messages to socket
fmt.Fprintf(conn, testMsgs)
time.Sleep(time.Millisecond * 15)
hostTags := []string{"server02", "server03",
"server04", "server05", "server06"}
for _, hostTag := range hostTags {
acc.AssertContainsTaggedFields(t, "cpu_load_short",
map[string]interface{}{"value": float64(12)},
map[string]string{"host": hostTag},
)
}
}
// Test that MaxTCPConnections is respected
func TestConcurrentConns(t *testing.T) {
listener := TcpListener{
ServiceAddress: ":8195",
AllowedPendingMessages: 10000,
MaxTCPConnections: 2,
}
listener.parser, _ = parsers.NewInfluxParser()
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
time.Sleep(time.Millisecond * 25)
_, err := net.Dial("tcp", "127.0.0.1:8195")
assert.NoError(t, err)
_, err = net.Dial("tcp", "127.0.0.1:8195")
assert.NoError(t, err)
// Connection over the limit:
conn, err := net.Dial("tcp", "127.0.0.1:8195")
assert.NoError(t, err)
net.Dial("tcp", "127.0.0.1:8195")
buf := make([]byte, 1500)
n, err := conn.Read(buf)
assert.NoError(t, err)
assert.Equal(t,
"Telegraf maximum concurrent TCP connections (2) reached, closing.\n"+
"You may want to increase max_tcp_connections in"+
" the Telegraf tcp listener configuration.\n",
string(buf[:n]))
_, err = conn.Write([]byte(testMsg))
assert.NoError(t, err)
time.Sleep(time.Millisecond * 10)
assert.Zero(t, acc.NFields())
}
// Test that MaxTCPConnections is respected when max==1
func TestConcurrentConns1(t *testing.T) {
listener := TcpListener{
ServiceAddress: ":8196",
AllowedPendingMessages: 10000,
MaxTCPConnections: 1,
}
listener.parser, _ = parsers.NewInfluxParser()
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
time.Sleep(time.Millisecond * 25)
_, err := net.Dial("tcp", "127.0.0.1:8196")
assert.NoError(t, err)
// Connection over the limit:
conn, err := net.Dial("tcp", "127.0.0.1:8196")
assert.NoError(t, err)
net.Dial("tcp", "127.0.0.1:8196")
buf := make([]byte, 1500)
n, err := conn.Read(buf)
assert.NoError(t, err)
assert.Equal(t,
"Telegraf maximum concurrent TCP connections (1) reached, closing.\n"+
"You may want to increase max_tcp_connections in"+
" the Telegraf tcp listener configuration.\n",
string(buf[:n]))
_, err = conn.Write([]byte(testMsg))
assert.NoError(t, err)
time.Sleep(time.Millisecond * 10)
assert.Zero(t, acc.NFields())
}
// Test that open connections are closed when the listener is stopped
func TestCloseConcurrentConns(t *testing.T) {
listener := TcpListener{
ServiceAddress: ":8195",
AllowedPendingMessages: 10000,
MaxTCPConnections: 2,
}
listener.parser, _ = parsers.NewInfluxParser()
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
time.Sleep(time.Millisecond * 25)
_, err := net.Dial("tcp", "127.0.0.1:8195")
assert.NoError(t, err)
_, err = net.Dial("tcp", "127.0.0.1:8195")
assert.NoError(t, err)
listener.Stop()
}
func TestRunParser(t *testing.T) {
var testmsg = []byte(testMsg)
listener, in := newTestTcpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewInfluxParser()
listener.wg.Add(1)
go listener.tcpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
listener.Gather(&acc)
if a := acc.NFields(); a != 1 {
t.Errorf("got %v, expected %v", a, 1)
}
acc.AssertContainsTaggedFields(t, "cpu_load_short",
map[string]interface{}{"value": float64(12)},
map[string]string{"host": "server01"},
)
}
func TestRunParserInvalidMsg(t *testing.T) {
var testmsg = []byte("cpu_load_short")
listener, in := newTestTcpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewInfluxParser()
listener.wg.Add(1)
go listener.tcpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
if a := acc.NFields(); a != 0 {
t.Errorf("got %v, expected %v", a, 0)
}
}
func TestRunParserGraphiteMsg(t *testing.T) {
var testmsg = []byte("cpu.load.graphite 12 1454780029")
listener, in := newTestTcpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
listener.wg.Add(1)
go listener.tcpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
listener.Gather(&acc)
acc.AssertContainsFields(t, "cpu_load_graphite",
map[string]interface{}{"value": float64(12)})
}
func TestRunParserJSONMsg(t *testing.T) {
var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n")
listener, in := newTestTcpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil)
listener.wg.Add(1)
go listener.tcpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
listener.Gather(&acc)
acc.AssertContainsFields(t, "udp_json_test",
map[string]interface{}{
"a": float64(5),
"b_c": float64(6),
})
}

View File

@@ -0,0 +1,91 @@
# UDP listener service input plugin
The UDP listener is a service input plugin that listens for messages on a UDP
socket and adds those messages to InfluxDB.
The plugin expects messages in one of the
[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
### Configuration:
This is a sample configuration for the plugin.
```toml
[[inputs.udp_listener]]
## Address and port to host UDP listener on
service_address = ":8092"
## Number of UDP messages allowed to queue up. Once filled, the
## UDP listener will start dropping packets.
allowed_pending_messages = 10000
## UDP packet size for the server to listen for. This will depend
## on the size of the packets that the client is sending, which is
## usually 1500 bytes.
udp_packet_size = 1500
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
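As a quick smoke test, a minimal Go client can send one line-protocol metric to the listener (the address and metric below are examples, assuming the listener above is running):
```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Assumes a telegraf udp_listener on :8092 with data_format = "influx".
	conn, err := net.Dial("udp", "127.0.0.1:8092")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// One metric per datagram; it must fit within udp_packet_size.
	fmt.Fprint(conn, "cpu_load_short,host=server01 value=12.0 1422568543702900257\n")
}
```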
## A Note on UDP OS Buffer Sizes
Some OSes (most notably, Linux) place very restrictive limits on the performance
of UDP protocols. It is _highly_ recommended that you increase these OS limits to
at least 8MB before trying to run large amounts of UDP traffic to your instance.
8MB is just a recommendation, and can be adjusted higher.
### Linux
Check the current UDP/IP receive buffer limit & default by typing the following
commands:
```
sysctl net.core.rmem_max
sysctl net.core.rmem_default
```
If the values are less than 8388608 bytes you should add the following lines to
the /etc/sysctl.conf file:
```
net.core.rmem_max=8388608
net.core.rmem_default=8388608
```
Changes to /etc/sysctl.conf do not take effect until reboot.
To update the values immediately, type the following commands as root:
```
sysctl -w net.core.rmem_max=8388608
sysctl -w net.core.rmem_default=8388608
```
### BSD/Darwin
On BSD/Darwin systems you need to add about 15% padding to the kernel socket
buffer limit. That is, if you want an 8MB buffer (8388608 bytes) you need to set
the kernel limit to `8388608*1.15 = 9646900`. This is not documented anywhere, but
happens
[in the kernel here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64)
Check the current UDP/IP buffer limit by typing the following command:
```
sysctl kern.ipc.maxsockbuf
```
If the value is less than 9646900 bytes you should add the following lines
to the /etc/sysctl.conf file (create it if necessary):
```
kern.ipc.maxsockbuf=9646900
```
Changes to /etc/sysctl.conf do not take effect until reboot.
To update the values immediately, type the following commands as root:
```
sysctl -w kern.ipc.maxsockbuf=9646900
```

View File

@@ -0,0 +1,154 @@
package udp_listener
import (
"log"
"net"
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
type UdpListener struct {
ServiceAddress string
UDPPacketSize int `toml:"udp_packet_size"`
AllowedPendingMessages int
sync.Mutex
in chan []byte
done chan struct{}
parser parsers.Parser
// Keep the accumulator in this struct
acc telegraf.Accumulator
}
const UDP_PACKET_SIZE int = 1500
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
"You may want to increase allowed_pending_messages in the config\n"
const sampleConfig = `
## Address and port to host UDP listener on
service_address = ":8092"
## Number of UDP messages allowed to queue up. Once filled, the
## UDP listener will start dropping packets.
allowed_pending_messages = 10000
## UDP packet size for the server to listen for. This will depend
## on the size of the packets that the client is sending, which is
## usually 1500 bytes, but can be as large as 65,535 bytes.
udp_packet_size = 1500
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
func (u *UdpListener) SampleConfig() string {
return sampleConfig
}
func (u *UdpListener) Description() string {
return "Generic UDP listener"
}
// All the work is done in the Start() function, so this is just a dummy
// function.
func (u *UdpListener) Gather(_ telegraf.Accumulator) error {
return nil
}
func (u *UdpListener) SetParser(parser parsers.Parser) {
u.parser = parser
}
func (u *UdpListener) Start(acc telegraf.Accumulator) error {
u.Lock()
defer u.Unlock()
u.acc = acc
u.in = make(chan []byte, u.AllowedPendingMessages)
u.done = make(chan struct{})
go u.udpListen()
go u.udpParser()
log.Printf("Started UDP listener service on %s\n", u.ServiceAddress)
return nil
}
func (u *UdpListener) Stop() {
u.Lock()
defer u.Unlock()
close(u.done)
close(u.in)
log.Println("Stopped UDP listener service on ", u.ServiceAddress)
}
func (u *UdpListener) udpListen() error {
address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress)
listener, err := net.ListenUDP("udp", address)
if err != nil {
log.Fatalf("ERROR: ListenUDP - %s", err)
}
defer listener.Close()
log.Println("UDP server listening on: ", listener.LocalAddr().String())
for {
select {
case <-u.done:
return nil
default:
buf := make([]byte, u.UDPPacketSize)
n, _, err := listener.ReadFromUDP(buf)
if err != nil {
log.Printf("ERROR: %s\n", err.Error())
}
select {
case u.in <- buf[:n]:
default:
log.Printf(dropwarn, string(buf[:n]))
}
}
}
}
func (u *UdpListener) udpParser() error {
for {
select {
case <-u.done:
return nil
case packet := <-u.in:
metrics, err := u.parser.Parse(packet)
if err == nil {
u.storeMetrics(metrics)
} else {
log.Printf("Malformed packet: [%s], Error: %s\n", packet, err)
}
}
}
}
func (u *UdpListener) storeMetrics(metrics []telegraf.Metric) error {
u.Lock()
defer u.Unlock()
for _, m := range metrics {
u.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
return nil
}
func init() {
inputs.Add("udp_listener", func() telegraf.Input {
return &UdpListener{
UDPPacketSize: UDP_PACKET_SIZE,
}
})
}

View File

@@ -0,0 +1,112 @@
package udp_listener
import (
"io/ioutil"
"log"
"testing"
"time"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
)
func newTestUdpListener() (*UdpListener, chan []byte) {
in := make(chan []byte, 1500)
listener := &UdpListener{
ServiceAddress: ":8125",
UDPPacketSize: 1500,
AllowedPendingMessages: 10000,
in: in,
done: make(chan struct{}),
}
return listener, in
}
func TestRunParser(t *testing.T) {
log.SetOutput(ioutil.Discard)
var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257")
listener, in := newTestUdpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewInfluxParser()
go listener.udpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
listener.Gather(&acc)
if a := acc.NFields(); a != 1 {
t.Errorf("got %v, expected %v", a, 1)
}
acc.AssertContainsTaggedFields(t, "cpu_load_short",
map[string]interface{}{"value": float64(12)},
map[string]string{"host": "server01"},
)
}
func TestRunParserInvalidMsg(t *testing.T) {
log.SetOutput(ioutil.Discard)
var testmsg = []byte("cpu_load_short")
listener, in := newTestUdpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewInfluxParser()
go listener.udpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
if a := acc.NFields(); a != 0 {
t.Errorf("got %v, expected %v", a, 0)
}
}
func TestRunParserGraphiteMsg(t *testing.T) {
log.SetOutput(ioutil.Discard)
var testmsg = []byte("cpu.load.graphite 12 1454780029")
listener, in := newTestUdpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
go listener.udpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
listener.Gather(&acc)
acc.AssertContainsFields(t, "cpu_load_graphite",
map[string]interface{}{"value": float64(12)})
}
func TestRunParserJSONMsg(t *testing.T) {
log.SetOutput(ioutil.Discard)
var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n")
listener, in := newTestUdpListener()
acc := testutil.Accumulator{}
listener.acc = &acc
defer close(listener.done)
listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil)
go listener.udpParser()
in <- testmsg
time.Sleep(time.Millisecond * 25)
listener.Gather(&acc)
acc.AssertContainsFields(t, "udp_json_test",
map[string]interface{}{
"a": float64(5),
"b_c": float64(6),
})
}

Some files were not shown because too many files have changed in this diff.