Compare commits

...

719 Commits

Author SHA1 Message Date
Cameron Sparr
434c08a357 Release 0.10.2 2016-02-04 11:04:29 -07:00
Cameron Sparr
bd9c5b6995 mqtt output: cleanup, implement TLS
Also normalize TLS config across all output plugins and normalize
comment strings as well.
2016-02-04 10:44:37 -07:00
Cameron Sparr
b941d270ce changelog update 2016-02-03 08:35:03 -07:00
Reginaldo Sousa
9406961125 Fix a bug when setting host header in httpjson
closes #634
2016-02-02 21:59:18 -07:00
Rune Darrud
0d391b66a3 Added support for Windows operating systems pre-Vista. 2016-02-02 21:57:38 -07:00
Cameron Sparr
a11e07e250 Minor change to forgotten config file exit 2016-02-01 17:44:19 -07:00
Cameron Sparr
d266dad1f4 Don't compile ping plugin on windows.
closes #496
2016-02-01 16:39:53 -07:00
Rune Darrud
331b700d1b Corrected an issue that came from earlier code cleanup,
wherein missing performance counters caused an early return from
the loop instead of being ignored in the default configuration mode.

closes #625
2016-01-31 23:17:45 -07:00
Christoph Wegener
2163fde0a4 Fix memory leak: Remove signal.Notify code from plugins/inputs/win_perf_counters.(*Win_PerfCounters).Gather 2016-01-31 23:16:09 -07:00
Cameron Sparr
24a2aaef4b Ansible role in readme 2016-01-30 11:55:48 -07:00
Cameron Sparr
042cf517b2 Mention yum/apt repo in README
Also add `make windows-build` to Makefile

closes #618
2016-01-30 11:35:39 -07:00
Cameron Sparr
b97027ac9a Allow exec plugin to parse line-protocol
closes #613
2016-01-30 11:12:59 -07:00
Christoph Wegener
4ea3f82e50 Replace all single percentage characters with double
percentage characters in sampleConfig string so that fmt.Printf
will interpret them as literal percentage characters when
running 'telegraf.exe -sample-config'

closes #620
2016-01-30 10:10:55 -07:00
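The doubling matters because sample configs are printed through fmt.Printf, which treats a lone % as the start of a formatting verb. A minimal Go illustration:

```go
package main

import "fmt"

func main() {
	// A lone % is parsed as the start of a formatting verb, so the
	// output gets garbled with "%!" error noise:
	fmt.Printf("cpu usage above 90%\n")
	// Doubling it prints a literal percent sign:
	fmt.Printf("cpu usage above 90%%\n") // cpu usage above 90%
}
```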
Cameron Sparr
38c4111e6c Add unit tests for the root telegraf package 2016-01-29 16:01:34 -07:00
Cameron Sparr
338341add8 Put windows dependencies into a separate Godeps file 2016-01-29 11:10:18 -07:00
Cameron Sparr
93bb679f9d Fix possible panic if stat is nil
closes #612
2016-01-29 10:47:30 -07:00
Pavel Yudin
40d859354f Add powerdns input plugin
closes #614
2016-01-29 09:40:04 -07:00
Cameron Sparr
9e7c8df384 statsd: allow template parsing fields. Default to value=
closes #602
2016-01-28 16:56:50 -07:00
Rune Darrud
f088dd7e00 Added plugin to read Windows performance counters
closes #575
2016-01-28 16:35:13 -07:00
Cameron Sparr
10c4e4f63f Fix datadog json marshalling
fixes #607
2016-01-28 16:12:33 -07:00
Cameron Sparr
962325cc40 Warn when metrics are being overwritten
closes #601
2016-01-28 14:00:14 -07:00
root
a9c33abfa5 sql server: update README.md
closes #594
2016-01-28 13:50:26 -07:00
Cameron Sparr
d835c19fce Insert . between measurement and field name in datadog output
fixes #600
2016-01-28 12:04:26 -07:00
Marcin Bunsch
1f1384afc6 Use a single measurement with fields for timings in statsd plugin.
closes #603
2016-01-28 12:03:48 -07:00
Cameron Sparr
9d4b55be19 Include all tag values in graphite output
closes #595
2016-01-28 10:58:35 -07:00
Cameron Sparr
c549ab907a Throughout telegraf, use telegraf.Metric rather than client.Point
closes #599
2016-01-27 23:47:32 -07:00
Cameron Sparr
9c0d14bb60 Create public models for telegraf metrics, accumulator, plugins
This will basically make the root directory a place for storing the
major telegraf interfaces, which will make telegraf's godoc look quite
a bit nicer. And make it easier for contributors to look up the few data
types that they actually care about.

closes #564
2016-01-27 15:42:50 -07:00
Cameron Sparr
a822d942cd 386 -> i386 2016-01-27 13:42:34 -07:00
Cameron Sparr
3a64a01f91 Release 0.10.1 2016-01-27 12:55:06 -07:00
Cameron Sparr
6ebb6bc7ee Fix SNMP unit tests on OSX, improve tag config doc
closes #592
2016-01-27 11:27:10 -07:00
Cameron Sparr
be95dfdd0e Update dependency hashes 2016-01-26 16:27:18 -07:00
Cameron Sparr
88890fa7c2 Update changelog 2016-01-26 16:05:20 -07:00
Andrea Sosso
f8930b9cbc Additional request header parameters for httpjson plugin
closes #471
2016-01-26 16:02:47 -07:00
Cameron Sparr
c10227a766 Update changelog and readme, and small tweaks to github_webhooks 2016-01-26 15:57:06 -07:00
Cameron Sparr
7e7e462de1 Merge branch 'ghWebhooks'
closes #573
2016-01-26 15:29:41 -07:00
root
a93e1ceac8 Add sqlserver input plugin
closes #589
2016-01-26 14:55:27 -07:00
Cameron Sparr
7f8469b66a Fixup some disk usage reporting, make it reflect df
fixes #513
2016-01-26 11:56:28 -07:00
Wu Taizeng
cf568487c8 Fix panics in some inputs that would cause telegraf to exit
closes #585
closes #584
2016-01-26 10:48:24 -07:00
Jack Zampolin
4c74a2dd3a Fix naming issue 2016-01-25 17:34:44 -08:00
Jack Zampolin
a70452219b Remove internal dependency 2016-01-25 17:28:28 -08:00
Ross McDonald
47ea2d5fb4 Added Amazon Linux logic to post-installation script.
closes #579
2016-01-25 17:49:12 -07:00
Ross McDonald
16540e35f1 Backporting fixes from the influxdb build script, along with a few improvements:
- Added iteration to tar/zip output name (in case of pre-releases)
- Switched 32-bit signifier to i386 from 386
- Tweaked upload settings to allow for folder paths in bucket names
2016-01-25 15:13:55 -07:00
Cameron Sparr
3bfb3a9fe2 Insert documentation into sample-config on JSON parsing
closes #521
2016-01-25 13:29:05 -07:00
Lukasz Jagiello
f9517dcf24 RabbitMQ plugin - extra fields:
Extra fields describing size of all message bodies in the queue.

* message_bytes
* message_bytes_ready
* message_bytes_unacknowledged
* message_bytes_ram
* message_bytes_persistent

More information about each field:
https://www.rabbitmq.com/man/rabbitmqctl.1.man.html

closes #577
2016-01-25 13:00:54 -07:00
Jack Zampolin
7878b22b09 Add README.md 2016-01-25 11:42:03 -08:00
Thibault Cohen
e6d7e4e309 Add snmp input plugin
closes #546
closes #40
2016-01-25 12:35:27 -07:00
Tim Raymond
40d0da404e Change configuration package to influxdata/config
We are unifying the way that we handle configuration across the products
into the influxdata/config package. This provides the same API as
naoina/toml that was used previously, but provides some additional
features such as support for documenting generated TOML configs as well
as support for handling default options. This replaces all usage of
naoina/toml with influxdata/config.
2016-01-25 12:02:44 -07:00
Cameron Sparr
8675bd125a add 'gdm restore' to adding a dependency instructions 2016-01-25 10:59:44 -07:00
Jack Zampolin
4e5dfa5d33 Address PR comments and merge conflicts 2016-01-25 09:56:57 -08:00
Jack Zampolin
89f5b77550 Fix merge conflict in all.go 2016-01-22 16:51:54 -08:00
Jack Zampolin
5b15cd9163 Change github.com/influxdata to github.com/influxdb where necessary 2016-01-22 16:48:42 -08:00
Jack Zampolin
dbf1383a38 Change github.com/influxdb to github.com/influxdata 2016-01-22 16:45:31 -08:00
Jack Zampolin
46b367e74b Add tests 2016-01-22 16:43:33 -08:00
Cameron Sparr
3da390682d Kinesis output shouldn't return an error for no reason 2016-01-22 17:32:36 -07:00
Cameron Sparr
5349a3b6d1 Implement a per-output fixed size metric buffer
Also moved some objects out of config.go and put them in their own
package, internal/models

fixes #568
closes #285
2016-01-22 16:29:02 -07:00
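The buffer described here bounds memory when an output is down. A minimal sketch of a fixed-size, drop-oldest buffer, with the metric type reduced to a string for illustration (not the actual internal/models code):

```go
package main

import "fmt"

// Buffer holds at most size metrics, dropping the oldest when full.
type Buffer struct {
	buf  []string
	size int
}

func NewBuffer(size int) *Buffer {
	return &Buffer{size: size}
}

func (b *Buffer) Add(m string) {
	if len(b.buf) == b.size {
		b.buf = b.buf[1:] // full: drop the oldest metric
	}
	b.buf = append(b.buf, m)
}

// Drain returns the buffered metrics for a write attempt and empties the buffer.
func (b *Buffer) Drain() []string {
	out := b.buf
	b.buf = nil
	return out
}

func main() {
	b := NewBuffer(2)
	b.Add("m1")
	b.Add("m2")
	b.Add("m3")            // m1 is dropped
	fmt.Println(b.Drain()) // [m2 m3]
}
```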
Cameron Sparr
f2ab5f61f5 Gather elasticsearch nodes in goroutines, handle errors
fixes #464
2016-01-21 17:00:44 -07:00
Cameron Sparr
e910a03af4 Changelog update 2016-01-21 16:44:35 -07:00
Cameron Sparr
4d0dc8b7c8 Refactor the docker plugin, use go-dockerclient throughout
fixes #503
fixes #463
2016-01-21 16:07:03 -07:00
Stephen Kwong
e0dc1ef5bd Add Cloudwatch output
closes #553
2016-01-21 09:11:52 -07:00
Cameron Sparr
f24f5e98dd Remove go get ./... from the Makefile 2016-01-20 15:01:08 -07:00
Cameron Sparr
6647cfc228 statsd: If parsing a value as int fails, try parsing as float and cast to int
fixes #556
2016-01-20 14:30:57 -07:00
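A sketch of that fallback, assuming a hypothetical parseInt helper (the plugin's real function name may differ):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseInt tries integer parsing first; on failure it parses the value
// as a float and truncates to int64, as the commit above describes.
func parseInt(s string) (int64, error) {
	v, err := strconv.ParseInt(s, 10, 64)
	if err == nil {
		return v, nil
	}
	f, ferr := strconv.ParseFloat(s, 64)
	if ferr != nil {
		return 0, ferr
	}
	return int64(f), nil
}

func main() {
	fmt.Println(parseInt("42"))   // 42 <nil>
	fmt.Println(parseInt("42.7")) // 42 <nil>
}
```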
Jack Zampolin
ddcd99a1ce Push ghwebhooks branch 2016-01-20 12:19:03 -08:00
Cameron Sparr
55c07f23b0 Update contributing document 2016-01-20 12:33:49 -07:00
Cameron Sparr
8192572e23 Update changelog 2016-01-20 12:22:11 -07:00
Jack Zampolin
0cdf1b07e9 Fix issue 524 2016-01-20 10:57:35 -08:00
Jack Zampolin
8653bae6ac Change start implementation 2016-01-20 10:49:42 -08:00
Cameron Sparr
fc1aa7d3b4 Filter mount points before stats are collected
fixes #440
2016-01-20 11:46:59 -07:00
Jack Zampolin
8bdcd6d576 First commit for ghwebhooks service plugin 2016-01-19 23:14:11 -08:00
Cameron Sparr
d3925fe578 Include CPU usage percent with procstat data
closes #484
2016-01-19 22:52:55 -07:00
Cameron Sparr
d3a5cca1bc Collection interval random jittering
closes #460
2016-01-19 13:22:54 -07:00
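Jittering spreads collections out so a fleet of agents doesn't hit its backends in lockstep. A sketch of the idea, not the agent's exact code:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitterSleep waits a random duration up to maxJitter before a collection.
func jitterSleep(maxJitter time.Duration) {
	if maxJitter > 0 {
		d := time.Duration(rand.Int63n(int64(maxJitter)))
		fmt.Println("jittering collection by", d)
		time.Sleep(d)
	}
}

func main() {
	jitterSleep(500 * time.Millisecond)
	fmt.Println("collecting metrics")
}
```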
Cameron Sparr
f3b553712a Changelog update 2016-01-19 11:21:05 -07:00
Cameron Sparr
839651fadb Change default statsd packet size to 1500, make configurable
Also modifying the internal UDP listener/parser code to make it able to
handle higher load. The udp listener will no longer do any parsing or
string conversion. It will simply read UDP packets as bytes and put them
into a channel. The parser thread will now deal with splitting the UDP
metrics into separated strings.

This could probably be made even better by leaving everything as byte
arrays.

fixes #543
2016-01-19 11:08:16 -07:00
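A sketch of the listener/parser split described above: the read loop only copies raw packets into a channel, and a separate goroutine does all the splitting (illustrative, not the plugin's actual code):

```go
package main

import (
	"bytes"
	"fmt"
	"net"
	"time"
)

// listen reads raw UDP packets and hands them to the parser over a
// channel; no string work happens in the hot read loop.
func listen(conn *net.UDPConn, packets chan<- []byte) {
	buf := make([]byte, 1500) // default packet size, configurable
	for {
		n, _, err := conn.ReadFromUDP(buf)
		if err != nil {
			close(packets)
			return
		}
		pkt := make([]byte, n)
		copy(pkt, buf[:n])
		packets <- pkt
	}
}

// parse splits each packet into newline-separated metric lines.
func parse(packets <-chan []byte) {
	for pkt := range packets {
		for _, line := range bytes.Split(pkt, []byte("\n")) {
			if len(line) > 0 {
				fmt.Printf("metric line: %s\n", line)
			}
		}
	}
}

func main() {
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	conn, _ := net.ListenUDP("udp", addr)
	packets := make(chan []byte, 100)
	go listen(conn, packets)
	go parse(packets)

	c, _ := net.Dial("udp", conn.LocalAddr().String())
	c.Write([]byte("gorets:1|c\nglork:320|ms"))
	time.Sleep(100 * time.Millisecond) // let the goroutines drain
}
```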
Thibault Cohen
6a50fceea4 Replace 'plugins' with 'inputs' in some strings
closes #542
2016-01-19 10:09:37 -07:00
Cameron Sparr
7efe108686 Update Godeps file 2016-01-19 09:44:25 -07:00
Hannu Valtonen
c313af1b24 kafka: Add support for using TLS authentication for the kafka output
With the advent of Kafka 0.9.0+ it is possible to set up TLS client
certificate based authentication to limit access to Kafka.

Four new configuration variables are specified for setting up the
authentication. If they're not set the behavior stays the same as
before the change.

closes #541
2016-01-18 11:17:01 -07:00
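A sketch of the TLS client-certificate setup such an output performs, using only the standard library; the file paths are placeholders, not the plugin's four config option names:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

// tlsConfig loads a client certificate/key pair and a CA bundle into a
// *tls.Config suitable for client-certificate authentication.
func tlsConfig(certFile, keyFile, caFile string) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	caPEM, err := os.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("could not parse CA certificate %s", caFile)
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      pool,
	}, nil
}

func main() {
	cfg, err := tlsConfig("client.pem", "client-key.pem", "ca.pem")
	if err != nil {
		fmt.Println("tls setup failed:", err)
		return
	}
	fmt.Println("client certificates loaded:", len(cfg.Certificates))
}
```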
Vinh
1388b1b58b Add phusion Passenger plugin
Gathers metrics by parsing the XML output of the `passenger-status` utility.
More information on this utility:
https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html

closes #522
2016-01-18 11:14:04 -07:00
Thibault Cohen
551db20657 Add SIGHUP support to reload telegraf config
closes #539
2016-01-18 11:07:07 -07:00
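A minimal sketch of SIGHUP-triggered reload: trap the signal and loop back to the config load/start cycle instead of exiting:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	for {
		fmt.Println("loading config and starting collection...")
		<-hup // block until SIGHUP, then loop to reload
		fmt.Println("SIGHUP received, reloading")
	}
}
```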
Cameron Sparr
bc71e956a5 changelog bugfix update 2016-01-18 11:05:14 -07:00
Vinh
5af6974796 phpfpm plugin: enhance socket gathering and config
- If we detect errors when gathering stats via socket, return those errors
  so they can appear in the Telegraf log
- Improve fcgi client, also upgrade it to the current version of Go at
  https://golang.org/src/net/http/fcgi/fcgi.go
- Add tests for unix socket and fcgi
- Allow customization of the fpm status path
- Document that `host` is not used to remotely connect when `unixsocket`
  is set; it only appears as an extra url field
- Document the upgrade for the new data layout

closes #499
closes #502
closes #538
2016-01-18 10:56:45 -07:00
Eugene Dementiev
a712036b56 core: print error on output connect fail
closes #537
2016-01-16 17:32:23 -07:00
Eugene Dementiev
37b96c192b output amqp: Add ssl support
closes #536
2016-01-16 17:31:05 -07:00
Cameron Sparr
8cbdf0f907 Tweak config messages for graphite. Update changelog and readme
closes #494
2016-01-16 17:29:02 -07:00
Thibault Cohen
ef5c630d3a Add Graphite output 2016-01-16 17:19:27 -07:00
Cameron Sparr
6eea89f4c0 Make NSQ plugin compatible with version 0.10.0 2016-01-15 17:04:23 -07:00
Jeff Nickoloff
dbbb2d9877 NSQ Plugin
- Polls a set of NSQD REST endpoints and collects counters for all topics, channels, and clients

Signed-off-by: Jeff Nickoloff <jeff@allingeek.com>

closes #492
2016-01-15 16:09:31 -07:00
Cameron Sparr
c483e16d72 Add option to disable statsd name conversion
closes #467
closes #532
2016-01-15 15:58:09 -07:00
Cameron Sparr
40a5bad968 Update procstat doc 2016-01-15 15:55:52 -07:00
Kostas Botsas
1421bce371 Update README.md 2016-01-15 14:49:53 -08:00
Kostas Botsas
5e7dd6d51b Merge pull request #533 from influxdata/fix-interval-option-v0.10
interval options should have string value
2016-01-15 14:49:11 -08:00
Kostas Botsas
71f4e72b22 interval options should have string value
also mention name_override and name_prefix in addition to name_suffix
2016-01-15 14:48:45 -08:00
Cameron Sparr
b24e71b232 Removing old package script, trim Makefile 2016-01-15 14:03:04 -07:00
Cameron Sparr
f60c090e4c Add a quiet mode to telegraf
closes #514
2016-01-15 13:31:04 -07:00
Cameron Sparr
50334e6bac Only compile the sensors plugin if the 'sensors' tag is set 2016-01-15 13:15:33 -07:00
Cameron Sparr
963a9429dd Tweak changelog for sensors plugin, and add a non-linux build file
closes #519
closes #168
2016-01-15 11:22:33 -07:00
Matt Davis
2eda8d64c7 Added infor to readme and changelog 2016-01-15 11:17:46 -07:00
Matt Davis
9b96c62e46 Change build configuration to linux only 2016-01-15 11:17:46 -07:00
Matt Davis
378b7467a4 Fixed an unused variable 2016-01-15 11:17:46 -07:00
Matt Davis
c0d98ecd4b Added initial support for gosensors module 2016-01-15 11:17:46 -07:00
Thibault Cohen
b44644b6bf Add response time to httpjson plugin
closes #475
2016-01-15 11:13:12 -07:00
Ross McDonald
7bfb42946e Switched to /etc/debian_version for Debian/Ubuntu distribution recognition in post-install.
closes #526
closes #525
2016-01-15 10:54:47 -07:00
Cameron Sparr
e8907acd28 Update Godeps and fix changelog 2014->2016 2016-01-14 22:35:05 -07:00
Kevin Fitzpatrick
d6ef3b1e02 Note on where to look for plugin information 2016-01-14 21:35:10 -07:00
Cameron Sparr
a39a7a7a03 Add an interface:"all" tag to the net protocol counters
fixes #508
2016-01-14 19:55:19 -07:00
Kostas Botsas
923be102b3 Align exec documentation with v0.10 updates 2016-01-14 15:55:53 -08:00
Hannu Valtonen
7531e218c1 build.py: Make build script work on both Python2.x and Python3.x
While at it also add a missing dependency on lsof required by the
netstat plugin.

closes #512
2016-01-14 10:28:50 -08:00
Thibault Cohen
3cc1fecb53 Ping input doesn't return the response time metric on timeout
closes #506
2016-01-14 10:12:10 -08:00
Philip Silva
3c89847489 internal: FlattenJSON, flatten arrays as well
With HTTP JSON or Elasticsearch, one can also process values nested in arrays.
2016-01-14 09:52:58 -08:00
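A sketch of flattening that descends into arrays as well as objects, using the element index as a key segment; joining segments with "_" is an assumption for illustration, not necessarily the internal package's exact rule:

```go
package main

import "fmt"

// flatten walks a decoded JSON document, recursing through both maps and
// arrays, and records every numeric leaf under a flattened key.
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, val := range t {
			flatten(join(prefix, k), val, out)
		}
	case []interface{}:
		for i, val := range t {
			flatten(join(prefix, fmt.Sprint(i)), val, out)
		}
	case float64:
		out[prefix] = t
	}
}

func join(prefix, k string) string {
	if prefix == "" {
		return k
	}
	return prefix + "_" + k
}

func main() {
	doc := map[string]interface{}{
		"shards": []interface{}{float64(3), float64(5)},
	}
	out := map[string]float64{}
	flatten("", doc, out)
	fmt.Println(out) // map[shards_0:3 shards_1:5]
}
```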
Cameron Sparr
fb837ca66d Add 0.10.0 blog post link to README 2016-01-14 09:21:01 -08:00
Cameron Sparr
2ec1ffdc11 Fix Telegraf s3 upload and readme links
fixes #505
2016-01-11 13:35:26 -07:00
Cameron Sparr
56509a61b9 Change 0.3.0 -> 0.10.0 2016-01-08 17:22:23 -07:00
Cameron Sparr
f37f8ac815 Update changelog and readme for package updates 2016-01-08 15:13:58 -07:00
Ross McDonald
231b5feb23 Merge pull request #497 from influxdata/rm-package-updates
Packaging Updates
2016-01-08 15:54:09 -06:00
Ross McDonald
81fa063338 Removed data directory entries, since Telegraf doesn't need them. 2016-01-08 15:34:11 -06:00
Ross McDonald
07b4a4dbca Added a build.py script for compiling and packaging. Added post and pre install scripts to handle installation and upgrades in a cleaner way. Minor fixes to the init script and service unit file. 2016-01-08 15:28:33 -06:00
Cameron Sparr
fd6daaa73b 0.3.0: update README and documentation 2016-01-08 15:25:04 -06:00
Cameron Sparr
6496d185ab add backwards-compatibility for 'plugins', remove [inputs] and [outputs] headers 2016-01-08 12:49:50 -07:00
Cameron Sparr
7499c1f969 0.3.0: update README and documentation 2016-01-08 10:33:25 -07:00
Cameron Sparr
9c5db1057d renaming plugins -> inputs 2016-01-07 15:04:30 -07:00
Cameron Sparr
30d24a3c1c 0.3.0 documentation changes and improvements 2016-01-07 13:02:59 -07:00
Cameron Sparr
ad4af06802 Update Makefile and Godeps and various fixups 2016-01-07 12:33:26 -07:00
Cameron Sparr
64b98a9b61 0.3.0 unit tests: agent and prometheus 2016-01-07 10:52:46 -07:00
Cameron Sparr
4fdcb136bc 0.3.0 unit tests: internal 2016-01-07 10:23:38 -07:00
Cameron Sparr
0e398f5802 0.3.0 unit tests: amon, datadog, librato 2016-01-07 10:09:04 -07:00
Cameron Sparr
b9869eadc3 0.3.0 unit tests: influxdb 2016-01-07 01:11:52 -07:00
Cameron Sparr
936c5a8a7a 0.3.0 unit tests: rethinkdb, twemproxy, zfs 2016-01-06 22:16:04 -07:00
Cameron Sparr
10f19fade1 0.3.0 unit tests: statsd, trig, zookeeper 2016-01-06 18:19:18 -07:00
Cameron Sparr
c01594c2a4 0.3.0 unit tests: rabbitmq, redis 2016-01-06 18:13:00 -07:00
Cameron Sparr
ccbd7bb785 0.3.0 unit tests: procstat, prometheus, puppetagent 2016-01-06 17:56:30 -07:00
Cameron Sparr
6eb49dee5d 0.3.0 unit tests: mysql, nginx, phpfpm, ping, postgres 2016-01-06 17:37:56 -07:00
Cameron Sparr
6a4bf9fcff 0.3.0 unit tests: mailchimp, memcached, mongodb 2016-01-06 17:19:39 -07:00
Cameron Sparr
9ada89d51a 0.3.0 unit tests: jolokia, kafka_consumer, leofs, lustre2 2016-01-06 16:55:28 -07:00
Cameron Sparr
524fddedb4 0.3.0 unit tests: exec, httpjson, and haproxy 2016-01-06 16:11:16 -07:00
Cameron Sparr
c4a7711e02 0.3.0 unit tests: disque and elasticsearch 2016-01-05 23:48:59 -07:00
Cameron Sparr
2e20fc413c 0.3.0 unit tests: aerospike, apache, bcache 2016-01-05 23:48:59 -07:00
Cameron Sparr
498482d0f6 0.3.0 unit tests: system plugins 2016-01-05 23:48:59 -07:00
Cameron Sparr
4bd5b6a4d6 Fix httpjson panic for nil request body 2016-01-05 23:48:59 -07:00
Cameron Sparr
2e764cb22d 0.3.0 Removing internal parallelism: twemproxy and rabbitmq 2016-01-05 23:48:59 -07:00
Cameron Sparr
c8914679b7 0.3.0 Removing internal parallelism: procstat 2016-01-05 23:48:59 -07:00
Cameron Sparr
e25ac0d587 0.3.0 Removing internal parallelism: postgresql 2016-01-05 23:48:59 -07:00
Cameron Sparr
41374aabcb 0.3.0 Removing internal parallelism: httpjson and exec 2016-01-05 23:48:59 -07:00
Cameron Sparr
f60d846eb3 0.3.0 outputs: riemann 2016-01-05 23:48:59 -07:00
Cameron Sparr
96e54ab326 CHANGELOG update 2016-01-05 23:48:59 -07:00
Cameron Sparr
40a3feaad0 0.3.0 outputs: opentsdb 2016-01-05 23:48:59 -07:00
Cameron Sparr
2611931f82 0.3.0 output: librato 2016-01-05 23:48:59 -07:00
Cameron Sparr
ec39d10695 0.3.0 output: datadog and amon 2016-01-05 23:48:59 -07:00
Cameron Sparr
30d8ed411a 0.3.0: mongodb and jolokia 2016-01-05 23:48:59 -07:00
Cameron Sparr
64a832467e 0.3.0: postgresql and phpfpm 2016-01-05 23:48:59 -07:00
Cameron Sparr
9c5321c538 0.3.0 HAProxy rebase 2016-01-05 23:48:59 -07:00
Cameron Sparr
aba123dae0 0.3.0: rethinkdb 2016-01-05 23:48:59 -07:00
Cameron Sparr
5aca58ad2a 0.3.0: zookeeper and zfs 2016-01-05 23:48:59 -07:00
Cameron Sparr
a34418d724 backwards compatibility for io->diskio change 2016-01-05 23:48:59 -07:00
Cameron Sparr
5f4262921a 0.3.0: trig and twemproxy 2016-01-05 23:48:59 -07:00
Cameron Sparr
6fcd05b855 0.3.0 redis & rabbitmq 2016-01-05 23:48:59 -07:00
Cameron Sparr
7746a2b3cd 0.3.0: prometheus & puppetagent 2016-01-05 23:48:59 -07:00
Cameron Sparr
2749dcd128 0.3.0: procstat 2016-01-05 23:48:59 -07:00
Cameron Sparr
92343d91d6 0.3.0: ping, mysql, nginx 2016-01-05 23:48:59 -07:00
Cameron Sparr
ce7b48143a 0.3.0: mailchimp & memcached 2016-01-05 23:48:59 -07:00
Cameron Sparr
e30e98a496 0.3.0: leofs & lustre2 2016-01-05 23:48:59 -07:00
Cameron Sparr
4798bd9d33 0.3.0 httpjson 2016-01-05 23:48:59 -07:00
Cameron Sparr
38d6cb97ad 0.3.0: HAProxy 2016-01-05 23:48:59 -07:00
Cameron Sparr
3be111a160 Breakout JSON flattening into internal package, exec & elasticsearch aggregation 2016-01-05 23:48:59 -07:00
Cameron Sparr
97a66b73cf Updating aerospike & apache plugins for 0.3.0 2016-01-05 23:48:59 -07:00
Cameron Sparr
50fc3ec974 Updating system plugins for 0.3.0 2016-01-05 23:48:59 -07:00
Filippo Vitale
ee8d99b955 fix too restrictive .gitignore
closes #483
2016-01-05 20:30:45 -07:00
Cameron Sparr
5bf7c4d241 Update circleci badge 2016-01-05 14:18:31 -07:00
Aleksei Magusev
c2b5f21832 Fix typo in telegraf.conf
Closes #456.
2016-01-04 01:30:49 +01:00
Cameron Sparr
bdac9b7241 Update 0.3.0 beta links in readme 2015-12-21 13:53:16 -08:00
Cameron Sparr
ec6eae9537 Links for the 0.3.0 beta version 2015-12-19 23:50:05 -07:00
Cameron Sparr
f607074899 remove Name from influxdb unit test 2015-12-18 16:39:23 -07:00
Cameron Sparr
0571eecb0c Remove 'Name' argument from influxdb plugin for 0.3.0 compatibility
closes #449
2015-12-18 16:26:15 -07:00
Mark Rushakoff
4f3d6ddf17 Add influxdb plugin
This was primarily intended to consume InfluxDB-style expvars,
particularly InfluxDB's `/debug/vars` endpoint.

That endpoint follows a structure like

```json
{
  "httpd::8086": {
    "name": "httpd",
    "tags": {
      "bind": ":8086"
    },
    "values": {
      "pointsWrittenOK": 33756,
      "queryReq": 19,
      "queryRespBytes": 26973,
      "req": 428,
      "writeReq": 205,
      "writeReqBytes": 3939161
    }
  }
}
```

There are an arbitrary number of top-level keys in the JSON response at
the configured URLs, and this plugin will iterate through all of their
values looking for objects with keys "name", "tags", and "values"
indicating a metric to be consumed by telegraf.

Running this on current master of InfluxDB, I am able to record nearly
the same information that is normally stored in the `_internal`
database; the only measurement missing from `_internal` is `runtime`,
which is present under the "memstats" key but does not follow the format
and so is not consumed in this plugin.

```
$ influx -database=telegraf -execute 'SHOW FIELD KEYS FROM /influxdb/'

name: influxdb_influxdb_engine
----------------------------
fieldKey
blksWrite
blksWriteBytes
blksWriteBytesC
pointsWrite
pointsWriteDedupe

name: influxdb_influxdb_httpd
---------------------------
fieldKey
pingReq
pointsWrittenOK
queryReq
queryRespBytes
req
writeReq
writeReqBytes

name: influxdb_influxdb_shard
---------------------------
fieldKey
fieldsCreate
seriesCreate
writePointsOk
writeReq

name: influxdb_influxdb_subscriber
--------------------------------
fieldKey
pointsWritten

name: influxdb_influxdb_wal
-------------------------
fieldKey
autoFlush
flushDuration
idleFlush
memSize
metaFlush
pointsFlush
pointsWrite
pointsWriteReq
seriesFlush

name: influxdb_influxdb_write
---------------------------
fieldKey
pointReq
pointReqLocal
req
subWriteOk
writeOk
```
2015-12-18 15:41:16 -07:00
chrispeterson
34f0c593ad add additional stats that were already being collected
and rearrange the order to match the index order from the CSV endpoint

add test coverage. add back wretr.  remove check_status from recently added column

closes #445
2015-12-17 15:00:17 -07:00
jipperinbham
97ebcc2af1 close r.Body, remove network metrics, updated other sections as needed
closes #430
closes #452
2015-12-17 14:43:14 -07:00
Cameron Sparr
4852b5c11e Do not rely on external server for amon unit tests 2015-12-11 13:09:09 -07:00
Cameron Sparr
16ce06f621 Use gdm for dependency management 2015-12-11 12:22:16 -07:00
Cameron Sparr
811a54af6c Remove Godeps/ directory 2015-12-11 11:45:20 -07:00
Cameron Sparr
e02973b6f4 Go fmt kinesis output test file 2015-12-11 11:45:07 -07:00
James Lamb
b91eab6737 add amazon kinesis as an output plugin
closes #428
2015-12-11 11:29:03 -07:00
Allen Petersen
c89ef84df7 Separate pool tag and stat collection.
closes #427
2015-12-11 10:59:11 -07:00
Allen Petersen
e3c8a1131a Fix single dataset test.
The "two pool, one metic" test was only passing because of previous calls to Gather() had already populated the values.
2015-12-08 05:53:11 -08:00
Allen Petersen
eb78b9268f Add zfs pool stats collection. 2015-12-08 05:11:41 -08:00
Cameron Sparr
d62e63c448 Telegraf 0.2.4 version bump 2015-12-07 17:01:12 -07:00
Cameron Sparr
03e66d5b87 Implement Glob matching for pass/drop filters 2015-12-07 16:58:31 -07:00
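A sketch of glob-based pass filtering using the standard library's filepath.Match; the actual filter code may use a different glob implementation:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// shouldPass reports whether a metric name matches any pass pattern.
func shouldPass(name string, pass []string) bool {
	for _, pattern := range pass {
		if ok, _ := filepath.Match(pattern, name); ok {
			return true
		}
	}
	return false
}

func main() {
	pass := []string{"cpu*", "mem*"}
	fmt.Println(shouldPass("cpu_usage_idle", pass)) // true
	fmt.Println(shouldPass("disk_free", pass))      // false
}
```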
Tait Clarridge
22afc99f1e Add support for pass/drop/tagpass/tagdrop for outputs
Reuses same logic as the plugins for filtering points, should be only
a marginal performance decrease to check all the points before writing
to the output.

Added examples to the README as well (for generic pass/drop as well as
output pass/drop/tagpass/tagdrop).

X-Github-Closes #398

closes #398
closes #401
2015-12-04 15:51:53 -07:00
Cameron Sparr
c83f220fc4 Resolve gopsutil & unit test issues with net proto stats 2015-12-04 15:12:18 -07:00
Nathaniel Cook
0d0a8e9b68 Add network protocol stats to the network plugin 2015-12-04 14:06:18 -07:00
Cameron Sparr
bcafadb68a Convert uptime to float64 for backwards compatibility.
Fixes #390
2015-12-04 13:47:24 -07:00
Cameron Sparr
9999b2e3c6 Remove from test and test-short in Makefile 2015-12-04 12:27:56 -07:00
Cameron Sparr
6c23fb3173 Mailchimp report plugin 2015-12-04 12:25:16 -07:00
Cameron Sparr
e6517d4140 Update gopsutil godep dependency. Dont use godep go build anymore
godep seems to have a problem when dependencies have `internal`
packages. So removing `godep go build` and `godep go test` from the
build process in favor of just checking out the correct revisions using
`godep restore` into the regular GOPATH.

This basically means that we are not actually using anything within the
Godeps directory except Godeps.json. I should probably make a separate
go dependency management system that does this.
2015-12-04 12:22:16 -07:00
Cameron Sparr
00a6dbbe97 cpu plugin: update LastStats before returning
fixes #388
2015-12-03 16:23:49 -07:00
Cameron Sparr
4cf47dcd0f memcached plugin: Break out metric parsing into its own func
And unit test it using a sample response string. This will make it
easier to see what other metrics are available to the plugin for adding
future metrics.
2015-12-03 13:53:37 -07:00
Cameron Sparr
03863bd84d memcached plugin: support unix sockets
closes #415
2015-12-03 13:25:43 -07:00
Cameron Sparr
7a2eeb7439 Add optional auth credentials to Jolokia plugin
closes #414
2015-12-03 11:48:20 -07:00
Cameron Sparr
6fb7d2883d io plugin, add an 'unknown' tag when the serial number can't be found
closes #405
2015-12-02 13:20:59 -07:00
Carlos J. Torres
a844c1ac74 redis_test.go with instantaneous input/output 2015-12-02 13:17:49 -07:00
Carlos J. Torres
a7b77d9658 add instantaneous input/output to redis plugin. 2015-12-02 13:17:49 -07:00
Mischa Gresser
3509713a23 Adding all memcached stats that return a single value
as described at
https://docs.oracle.com/cd/E17952_01/refman-5.0-en/ha-memcached-stats-general.html

closes #412
2015-12-02 13:14:12 -07:00
Regan Kuchan
4b3b41fea5 Create trig plugin
closes #404
2015-12-01 18:11:03 -07:00
Cameron Sparr
2be7fc072f Don't use panic-happy prometheus client With() function
Deals with part of #405, although it doesn't deal with the underlying
label error that is occurring.
2015-12-01 10:21:36 -07:00
Cameron Sparr
ca222a14de Make Prometheus output tests skipped in short mode.
Because they do some networking.

Fixes #385
2015-12-01 09:45:25 -07:00
Cameron Sparr
4aa94ee290 Update CHANGELOG and README for 0.2.3 2015-11-30 19:08:49 -07:00
Daniel Malon
5c051eb801 Parse statsd lines with multiple metric bits
closes #354
2015-11-30 15:25:35 -07:00
Cameron Sparr
3761f00062 Update etc/telegraf.conf file 2015-11-30 14:28:09 -07:00
Tait Clarridge
b705608b04 Change aerospike plugin server tag to aerospike_host
This is to avoid a conflict with the standard "host" tag that is
used everywhere.

closes #399
2015-11-30 10:43:28 -07:00
Cameron Sparr
a5f2d5ff21 Put Agent Config into the config package 2015-11-30 10:31:31 -07:00
Cameron Sparr
979e5f193a Overhaul config <-> agent coupling. Put config in its own package. 2015-11-25 19:07:04 -07:00
Cameron Sparr
8dde60e869 Revert much of the newer config file parsing, fix tagdrop/tagpass 2015-11-25 19:06:36 -07:00
Cameron Sparr
224a570a08 Eliminate merging directory structures 2015-11-25 19:06:36 -07:00
Cameron Sparr
78f2ea89f8 Change plugin config to be specified as a list
This makes plugin configuration more similar to output configuration,
where we can specify multiple plugins as a list. The idea behind this is
that the Telegraf agent can handle the multi-processing and error
handling better than each plugin handling that internally. This will
also allow for having different plugin configurations for different
instances of the same type of plugin.
2015-11-25 19:06:36 -07:00
Tero Marttila
13ccf420d7 cmd/telegraf: -configdirectory only includes files ending in .conf
Closes #392
2015-11-25 19:05:51 -07:00
Eduard Carreras
d47740bd8d Add a comment indicating pattern uses pgrep -f 2015-11-25 19:05:22 -07:00
Eduard Carreras
e2aa0e8a35 Use pgrep with a pattern 2015-11-25 19:05:22 -07:00
Tero Marttila
d505be1fd4 cmd/telegraf: -configdirectory only includes files ending in .conf 2015-11-25 18:45:11 -07:00
gotyaoi
40fd33d1b0 GOPATH can have multiple : separated paths in it.
This means that simply adding /bin to the end is not enough. Instead of
setting GOBIN, this version prepends things to the PATH. If GOBIN is
already set, simply prepends GOBIN to PATH.  If not, appends /bin to
each component of GOPATH, then prepends that to PATH.

closes #386
2015-11-25 18:42:28 -07:00
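The same PATH construction, sketched in Go for illustration (the original change was Makefile logic):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	var bins []string
	if gobin := os.Getenv("GOBIN"); gobin != "" {
		// GOBIN set: just prepend it.
		bins = []string{gobin}
	} else {
		// Otherwise append /bin to every GOPATH component; GOPATH is a
		// list separated by the OS path-list separator (":" on Unix).
		for _, p := range strings.Split(os.Getenv("GOPATH"), string(os.PathListSeparator)) {
			if p != "" {
				bins = append(bins, filepath.Join(p, "bin"))
			}
		}
	}
	path := strings.Join(append(bins, os.Getenv("PATH")), string(os.PathListSeparator))
	fmt.Println("PATH=" + path)
}
```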
Cameron Sparr
317a352a65 Skip measurements with NaN fields
fixes #389
2015-11-23 16:03:11 -07:00
Cameron Sparr
970bfce997 Fix kafka plugin and rename to kafka_consumer
fixes #371
2015-11-19 13:41:58 -07:00
Cameron Sparr
a3feddd8ed Riemann output: remove some of the object referencing/dereferencing
closes #378
closes #379
2015-11-18 15:34:05 -07:00
Cameron Sparr
a8294c2c34 Godep: Add raidman riemann client 2015-11-18 14:27:20 -07:00
Jeffrey Allen
0823eed546 Add riemann output
Closes #34
2015-11-18 13:56:22 -07:00
Cameron Sparr
f85bc6e7f7 Update README for 0.2.2 2015-11-18 12:09:09 -07:00
Cameron Sparr
21c4e70f33 Dont append to slices in mergeStruct 2015-11-18 11:48:46 -07:00
gunnaraasen
03a6f28d55 Use 'CREATE DATABASE IF NOT EXISTS' syntax
closes #376
2015-11-18 00:41:25 -07:00
Cameron Sparr
19e5d975ca Updating CHANGELOG and README for version 0.2.1 2015-11-16 16:23:50 -07:00
Cameron Sparr
5664625f67 Update README, CHANGELOG, and unit tests with list output 2015-11-16 10:43:03 -07:00
Daniel Malon
375045953f FreeBSD compatibility
- Use gopsutil instead of gosigar
- Bump go-dockerclient

closes #372
2015-11-16 10:32:58 -07:00
Cameron Sparr
b10b186cc8 Allow users to specify outputs as lists
This will provide the ability to specify multiple outputs for a single
type of output.

In essence, allowing this:

[outputs]

[[outputs.influxdb]]
  urls = ["udp://localhost:8089"]
  database = "udp-telegraf"

[[outputs.influxdb]]
  urls = ["http://myhost:8086"]
  database = "telegraf"

[[outputs.kafka]]
  brokers = ["192.168.99.100:9092"]
  topic = "telegraf"

closes #335
2015-11-16 10:01:28 -07:00
Cameron Sparr
bf8e0f4cae CHANGELOG update 2015-11-13 14:42:21 -07:00
Cameron Sparr
a6ae597dfc MQTT output unit tests w/ docker container 2015-11-13 13:42:06 -07:00
Cameron Sparr
b975419bc7 Apache plugin unit tests and README 2015-11-13 13:01:00 -07:00
Cameron Sparr
0f036d6bec InfluxDB output: add tests and a README 2015-11-13 10:42:35 -07:00
Codeb Fan
20fbfc7006 Twemproxy go fmt and bug fixups, CHANGELOG, README
closes #365
2015-11-13 09:43:48 -07:00
Codeb Fan
e167b72b16 Add plugin for Twemproxy
This plugin collects data from Twemproxy's stats interface
2015-11-13 09:40:29 -07:00
Cameron Sparr
68ef07bff6 Update CHANGELOG with UDP output 2015-11-12 16:02:46 -07:00
Cameron Sparr
10a20e208a Godep update and dependency resolution 2015-11-12 15:20:01 -07:00
Cameron Sparr
e10394ba3b Use the UDP client for writing to InfluxDB 2015-11-12 14:52:35 -07:00
鲁晓敏
019585f0db phpfpm: add socket fcgi support 2015-11-12 10:44:51 -07:00
鲁晓敏
e619845ffe measurement name should have prefix before ShouldPass check 2015-11-12 10:43:50 -07:00
Cameron Sparr
3012928452 Fix config file tab indentation 2015-11-12 09:52:35 -07:00
Cameron Sparr
352ccde52b Fix new error return of client.NewPoint 2015-11-11 15:38:22 -07:00
Cameron Sparr
92fb51026a Godep update: gopsutil 2015-11-11 15:38:22 -07:00
Cameron Sparr
acf9c1141a Change duration -> internal and implement private gopsutil methods 2015-11-11 15:38:22 -07:00
Cameron Sparr
a8bcc51071 Godep update: influxdb 2015-11-11 15:38:22 -07:00
Cameron Sparr
dcd1c6766c Godep save: gopsutil 2015-11-11 15:38:22 -07:00
Cameron Sparr
00ee2529bc Revert "redis: support IPv6 addresses with no port"
This reverts commit 2af97cdbcb.
2015-11-11 15:33:58 -07:00
Nicholas Katsaros
2af97cdbcb redis: support IPv6 addresses with no port
closes #356
2015-11-10 10:02:42 -07:00
martinrusev
1accab02ed Amon output
closes #350
2015-11-09 10:44:28 -07:00
Roman Statsevich
1a05899be0 removed "panic" from zfs plugin
also added zfs plugin to README.md

closes #341
2015-11-09 10:38:33 -07:00
Roman Statsevich
d54f6be639 add ZFS plugin 2015-11-09 10:37:36 -07:00
Subhachandra Chandra
00614026b3 Added parameters "Devices" and "SkipSerialNumber" to DiskIO plugin.
"Devices" can be used to specify storage devices on which stats
should be reported. "SkipSerialNumber" can be used to omit
the device serial number.

Added tests to verify the new parameters.

closes #344
2015-11-06 17:11:57 -07:00
saiello
acf1da4d30 Added jolokia README.md
closes #337
2015-11-06 14:08:07 -07:00
saiello
921ffb7bdb Test for jolokia plugin 2015-11-06 14:07:02 -07:00
saiello
b2e22cbc59 Add fields value test methods 2015-11-06 14:07:02 -07:00
saiello
55c598f9ff Create a JolokiaClient, allowing a stub implementation to be injected 2015-11-06 14:07:02 -07:00
saiello
eabc0875de Fixed sampleconfig 2015-11-06 14:07:02 -07:00
saiello
62270a3697 go fmt run over jolokia.go 2015-11-06 14:07:02 -07:00
saiello
40d8aeecb0 Use url.Parse to validate configuration params 2015-11-06 14:07:02 -07:00
saiello
2daa9ff260 Added Tags as toml field 2015-11-06 14:07:02 -07:00
Simone Aiello
25fd4297a8 Jolokia plugin first commit 2015-11-06 14:07:02 -07:00
cornerot
f05d89ed72 removed "panic" from bcache plugin
closes #343
2015-11-06 14:05:09 -07:00
Sean Beckett
4c2501be95 updating Golang crypto 2015-11-04 15:30:01 -08:00
Cameron Sparr
e2854232d0 Change HAProxy plugin tag from host to server
fixes #342
2015-11-03 11:21:58 -07:00
Cameron Sparr
6794fd06eb Suggest running as telegraf user in test mode in README
Fixes #330
2015-11-03 11:18:57 -07:00
Cameron Sparr
befc906167 Improve the HTTP JSON plugin README with more examples. 2015-11-03 10:16:59 -07:00
Cameron Sparr
422d240afb Mongodb should output 2 plugins in test mode
closes #336
2015-11-02 17:23:40 -07:00
Cameron Sparr
2b966b40f2 Completely tab-indent the Makefile 2015-11-02 14:32:06 -07:00
Sean Reifschneider
a992e16f7d On a package upgrade, restart telegraf.
closes #338
2015-11-02 13:01:58 -07:00
Cameron Sparr
0398dc1226 Dont overwrite 'host' tag in redis plugin
fixes #331
2015-11-02 11:30:49 -07:00
Eugene Dementiev
5592738603 [rabbitmq plugin] Add support for per-queue metrics
Also metrics now are gathered concurrently across servers. Fixes #185

fixes #185
closes #334
2015-11-02 11:13:24 -07:00
Eugene Dementiev
688ffd024b [amqp output] Add ability to specify influxdb database
and retention policy, as well as precision as amqp headers

closes #333
2015-11-02 11:12:09 -07:00
JP
4ac1c819e0 add elasticsearch README
closes #327
2015-11-02 11:04:43 -07:00
JP
a6e0ae2896 add ValidateTaggedFields func to testutil accumulator 2015-11-02 11:03:41 -07:00
JP
cb8499c264 optionally gather cluster and index health stats 2015-11-02 11:03:41 -07:00
Cameron Sparr
d2fb065d0d Prometheus client test refactor
closes #318
2015-10-28 16:25:15 -06:00
Tait Clarridge
4449f7f2fb Add prometheus_client service output module, update prometheus client
- Adds a client implementation using the prometheus go_client library
  that exposes metrics.

- Adds a new type of output, "ServiceOutput", which follows in line with
  the "ServicePlugin", adding Start and Stop methods for the service

This change also requires the newer prometheus/client_golang code, so
the prometheus plugin needed to be changed.

Added the following to Godep:
    - bitbucket.org/ww/goautoneg (in github.com/common/expfmt/encode.go)
    - prometheus/common/expfmt (in plugins/prometheus.go)
    - github.com/prometheus/common/model (in plugins/prometheus.go)
    - github.com/prometheus/procfs (in github.com/client_golang/prometheus)
    - github.com/beorn7/perks/quantile (in github.com/client_golang/prometheus)

X-Github-Meta: closes #306
2015-10-28 15:28:39 -06:00
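A sketch of the interface relationship described above, with the metric types reduced to strings for brevity (the real telegraf interfaces carry points):

```go
package main

import "fmt"

// Output is the basic output contract, reduced here to string batches.
type Output interface {
	Connect() error
	Close() error
	Write(metrics []string) error
}

// ServiceOutput adds lifecycle hooks for outputs that run their own
// long-lived listener, like the prometheus_client HTTP endpoint.
type ServiceOutput interface {
	Output
	Start() error
	Stop()
}

func main() {
	fmt.Println("a ServiceOutput is an Output plus Start/Stop")
}
```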
JP
7cc60dfb8f update mongostat from github.com/mongodb/mongo-tools
closes #323
2015-10-28 15:26:04 -06:00
Cameron Sparr
028bae8f04 Run make in circle, don't build arm and 32-bit 2015-10-28 12:30:58 -06:00
Cameron Sparr
fa9555c430 Execute "long" unit tests using docker containers
fixes #293
2015-10-28 11:45:04 -06:00
Cameron Sparr
48d11f0a5c Mongostat diff bug: change less-equal to less 2015-10-28 10:44:09 -06:00
Cameron Sparr
09a0c3b40f Update README & CHANGELOG with docker and NSQ changes 2015-10-27 15:47:27 -06:00
Jonathan Cross
e622bd5e7f fixing test for NoError
closes #325
2015-10-27 15:44:22 -06:00
Jonathan Cross
0d31f40e16 use index 0 of server array for nsq test 2015-10-27 15:44:22 -06:00
Jonathan Cross
e13500fc4f updated for new output Write function
removed HTTP listener port in docker compose. Not being used by plugin.
2015-10-27 15:44:22 -06:00
Jonathan Cross
2a76942a74 NSQ Output plugin
NSQ output plugin: following the NSQ methodology, the output is a producer
to one instance of NSQD. The Go library does not accept array values by
default for a Producer. Additionally, service discovery is generally
done as a consumer.

Follows same methodology as Kafka Output without the tag reference.
2015-10-27 15:44:22 -06:00
Cameron Sparr
c73c28de7e Update CHANGELOG with version 0.2.0 2015-10-27 15:43:47 -06:00
Ellison Marks
9e0ec0927c Making sure telegraf.d directory is created by packages. 2015-10-27 11:32:00 -07:00
Ellison Marks
23e6715a02 Making the field name matching when merging respect the toml struct tag.
If the field has a toml struct tag, don't try fuzzy matching, thanks to
@ekini.
2015-10-27 11:31:43 -07:00
Cameron Sparr
f7eae86cdb Update README to version 0.2.0 2015-10-26 22:05:27 -06:00
Cameron Sparr
889c0a50a4 Fixup random interval jittering 2015-10-26 13:34:31 -06:00
JP
7d15061984 add librato output plugin, update datadog plugin to skip non-number metrics
closes #322
2015-10-26 13:29:53 -06:00
Tait Clarridge
ccbfb038ee Change aerospike default config to localhost
The default config was in a non-runnable state if one were to
attempt to use it with the docker-machine setup. Changed to localhost.

closes #321
2015-10-26 10:57:10 -06:00
palkan
cb951ebd28 Add httpjson readme
closes #275
2015-10-23 18:34:27 -06:00
palkan
d35c78e933 Rename Tags to TagKeys 2015-10-23 18:33:04 -06:00
palkan
e9356c893b [Fix #190] Add httpjson tags support 2015-10-23 18:33:04 -06:00
JP
869483617b add host to metric, replace '_' with '.'
closes #312
2015-10-23 18:25:26 -06:00
palkan
df96958fb8 Use specific mysql version with docker
closes #315
2015-10-23 17:35:49 -06:00
palkan
de7ad9dfbc Replace opentsdb docker image with the official one
closes #314
2015-10-23 17:34:12 -06:00
palkan
bf1cf4557e Update kafka readme; improve integration tests
closes #313
2015-10-23 17:33:23 -06:00
Cameron Sparr
86d20496ea Fix MySQL DSN -> tags parsing
Closes #297
2015-10-22 17:16:19 -06:00
Cameron Sparr
ae7ad2230f Support printing output with usage flag too 2015-10-22 14:24:51 -06:00
Ellison Marks
2007064c47 Fix for tags in the config not being applied to the agent.
fixes #302
closes #308
2015-10-22 13:58:59 -06:00
Cameron Sparr
c8852339c9 Do not fail Connect() in influxdb output when db creation fails
Fixes #304
2015-10-22 11:14:10 -06:00
Cameron Sparr
eb0a19062e When MongoDB freezes or restarts, do not report negative diffs
Fixes #253
2015-10-22 10:55:26 -06:00
Cameron Sparr
2f08577967 Fix output panic for -test flag 2015-10-21 18:32:43 -06:00
Cameron Sparr
891f3af504 Update CHANGELOG & README with aerospike plugin 2015-10-21 18:08:43 -06:00
Tait Clarridge
c5f200917a Add aerospike plugin support
- Does not use the aerospike client, but sends the stats command
  using the aerospike required format
- Queries available namespaces and gets stats for all of them

closes #300
2015-10-21 18:04:45 -06:00
Cameron Sparr
21622a1a17 Update CHANGELOG with new flushing options 2015-10-21 17:37:15 -06:00
Cameron Sparr
a1067fa4ae Normalize collection interval to nearest interval
closes #301
2015-10-21 17:31:27 -06:00
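A sketch of normalizing to the interval boundary: truncate the current time to the interval and sleep until the next multiple, so collections land on round timestamps (e.g. :00, :10, :20 for a 10s interval) rather than whenever the agent happened to start:

```go
package main

import (
	"fmt"
	"time"
)

// sleepUntilBoundary waits until the next multiple of interval.
func sleepUntilBoundary(interval time.Duration) {
	now := time.Now()
	next := now.Truncate(interval).Add(interval)
	fmt.Println("sleeping", next.Sub(now), "until", next)
	time.Sleep(next.Sub(now))
}

func main() {
	sleepUntilBoundary(10 * time.Second)
	fmt.Println("collecting on the boundary")
}
```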
Ellison Marks
4395a46190 Tests for LoadDirectory.
closes #295
2015-10-21 14:07:09 -06:00
gotyaoi
c938523cd5 Implementing LoadDirectory. 2015-10-21 12:00:22 -07:00
gotyaoi
ae10fc7fb4 Fixing old tests and adding new ones for new code. 2015-10-21 12:00:21 -07:00
gotyaoi
0299a17da1 Moving the Duration wrapper to its own package to break import loops. 2015-10-21 12:00:21 -07:00
gotyaoi
d77cfd6ecc Adding testify/suite to godep. 2015-10-21 11:59:20 -07:00
gotyaoi
03d79996de Moving away from passing around *ast.Tables.
Config in the config directory will need to be merged into the main
config, which is difficult to do using the *ast.Tables. Get the config
into structs as soon as possible and then merge the structs.
2015-10-21 11:59:19 -07:00
Eugene Dementiev
553208a960 Combine BatchPoints with the same RoutingTag to one message in amqp output
closes #287
2015-10-21 11:53:08 -06:00
Cameron Sparr
dfc59866e8 Add support for retrying output writes, using independent threads
Fixes #285
2015-10-21 11:17:01 -06:00
Cameron Sparr
ac685d19f8 Clean up logging messages and add flusher startup delay
Fixes #294
2015-10-20 16:45:31 -06:00
Joseph Dykstra
dd2e9e08df Add periods to the end of sentences
Closes #288
2015-10-20 14:20:30 -06:00
Roman Statsevich
499b5befd6 add bcache plugin
Closes #286
2015-10-20 14:17:09 -06:00
Cameron Sparr
c26ce9c4fe Utilizing new client and overhauling Accumulator interface
Fixes #280
Fixes #281
Fixes #289
2015-10-20 13:53:58 -06:00
Cameron Sparr
6263bc2d1b Godep update: influxdb 2015-10-20 10:17:34 -06:00
Cameron Sparr
f7504fb5eb InfluxDB does not accept uint64, so cast them down to int64
Fixes #290
2015-10-19 18:53:40 -06:00
Tyler Nisonoff
6869362f43 added keyspace hitrate measurement
Closes #283
2015-10-18 18:00:34 -06:00
Tyler Nisonoff
7600cc87d8 added connections measurement with user tag
Closes #284
2015-10-18 17:43:36 -06:00
Jonathan Cross
3192c78d96 fixed test to check actual value
Closes #273

caught a typo :D using it
2015-10-18 17:39:53 -06:00
Jonathan Cross
c3dad00c1b PuppetAgent Plugin
Added a PuppetAgent plugin that reads the last_run_summary file
2015-10-18 17:37:11 -06:00
Cameron Sparr
a1bad378d2 Turn off GOGC for faster build time in CI 2015-10-18 15:56:47 -06:00
Cameron Sparr
73f1ed4f25 Use Unix() int64 time for comparing timestamps in kafka consumer 2015-10-16 16:58:52 -06:00
Cameron Sparr
b28b4bd71e Fix ApplyTemplate change in graphite parser 2015-10-16 16:43:31 -06:00
Cameron Sparr
b15928c95e godep update: influxdb 2015-10-16 16:26:58 -06:00
Cameron Sparr
97d4f9e0ff Run go fmt in CI 2015-10-16 13:08:32 -06:00
Cameron Sparr
0986caf0ad Fix Go vet issue, test accumulator should be passed by reference with lock
Closes #276
2015-10-16 11:21:44 -06:00
palkan
9cccf8f88a Add locking to test accumulator 2015-10-16 10:58:46 -06:00
Joseph Dykstra
3ae5b4b280 Fix typos
Closes #270
2015-10-16 10:54:51 -06:00
Oskar Risberg
62b0e25b84 Add phpfpm to readme
Closes #274
2015-10-16 10:53:36 -06:00
Cameron Sparr
4e5ed9d3b9 Change config file indentation to 2 spaces 2015-10-15 15:53:29 -06:00
Sean Reifschneider
555436a222 Fix for init script for other procs with "telegraf"
The init script fails if another process has the word "telegraf" in
it, for example if you are running "vi /etc/opt/telegraf/telegraf.conf"
or "tail -f /var/log/telegraf/telegraf.log".  This is because
the "-f" flag to "pgrep" will show processes with the search
string anywhere in the command-line.

This patch turns it around and gets the "ps" output for the process
in the pidfile, and if that line has "telegraf" in it, it considers
it to be running.

Closes #266
Closes #267
2015-10-15 15:06:05 -06:00
Cameron Sparr
6977119f1e Statsd plugin, tags and timings
Closes #237
Closes #39
2015-10-15 12:07:36 -06:00
Cameron Sparr
52be516fa3 wget and install go1.5.1 on machine 2015-10-14 17:54:00 -06:00
Cameron Sparr
2dd3eee58e Use graphite parser for templating, godep update to head 2015-10-14 17:54:00 -06:00
Cameron Sparr
d40351286a Refactoring gauges to support floats, unit tests 2015-10-14 17:54:00 -06:00
Cameron Sparr
d84a258b0a Statsd: unit tests for gauges, sets, counters 2015-10-14 17:54:00 -06:00
Cameron Sparr
eb2a4dc724 Statsd listener plugin
implement gauges, sets, counters
2015-10-14 17:54:00 -06:00
Cameron Sparr
316fa1cc01 Add recently-added plugins to list 2015-10-14 17:53:47 -06:00
Sean Reifschneider
04e2db1f41 Issue #264: Fixes for logrotate config file.
This adds copytruncate and dateext to match the Influxdb logrotate file,
and removes nocreate (again, to match influxdb).

Closes #264
Closes #265
2015-10-14 17:51:37 -06:00
Jonathan Cross
2f7d781635 remove zookeeper declaration
since spotify/kafka docker image already exposes zookeeper

Closes #262
2015-10-14 17:49:23 -06:00
Jonathan Cross
88ff269370 added measurement prefix 2015-10-14 17:48:21 -06:00
Jonathan Cross
7121e1a3b0 fixes based on comments 2015-10-14 17:48:21 -06:00
Jonathan Cross
8fd06b96d7 Zookeeper plugin
Created a zookeeper plugin that fetches from the ‘mntr’ command and
outputs measurements that are int and string based
2015-10-14 17:48:21 -06:00
Cameron Sparr
181c3cdc28 Update CHANGELOG with recent bugfixes
Closes #261
2015-10-13 17:58:17 -06:00
Eugene Dementiev
ccfa913186 Fix crash if login/password is incorrect in rabbitmq plugin. Closes #260
Closes #260
2015-10-13 17:54:29 -06:00
Eugene Dementiev
2a9f31bfea Add sample for exec plugin. Fixes #245
Closes #258
2015-10-13 17:53:18 -06:00
Vinh
0bc76f094a Add PHPFPM stat
- HTTP status or Socket status
- Collect those metric:
    accepted conn:
    listen queue:
    max listen queue:
    listen queue len:
    idle processes:
    active processes:
    total processes:
    max active processes:
    max children reached:
    slow requests:
- Tag metric with: `host` and `pool` name

Closes #255
2015-10-12 15:40:42 -06:00
Shirou WAKAYAMA
d394003739 add UDP socket counts and rename to 'netstat'.
Closes #244
2015-10-12 00:08:35 -06:00
Shirou WAKAYAMA
17dd058308 add README about TCP Connection plugin. 2015-10-12 00:05:10 -06:00
Shirou WAKAYAMA
99b1a3071d add NetConnections to the mockPS. 2015-10-12 00:05:10 -06:00
Shirou WAKAYAMA
dc38e448bd add tcp connections stat plugin. 2015-10-12 00:05:10 -06:00
Michael Bushey
1d1180ec0c telegraf-agent.toml: Fix example port and use complete examples for mysql plugin 2015-10-09 15:52:46 -07:00
Cameron Sparr
81539c4ed6 Merge pull request #252 from aristanetworks/master
Added Mountpoints option to system/disk plugin to report stats for selected mountpoints
2015-10-09 14:23:18 -06:00
subhachandrachandra
cf1dcfe37c Dropped SkipInodeUsage option as "drop" achieves the same results.
Fixed a bug in restricting Disk reporting to specific mountpoints
Added tests for the Disk.Mountpoints option
Fixed minor bug in usage of assert for the cpu tests where expected and actual values were swapped.
2015-10-08 14:17:04 -07:00
Cameron Sparr
7293376973 Race condition fix: copy BatchPoints into goroutine
Fixes #250
2015-10-08 14:27:22 -06:00
Cameron Sparr
d9f1a60a64 godep update: gopsutil 2015-10-07 16:12:55 -06:00
subhachandrachandra
4f6526e1a5 Merge remote-tracking branch 'upstream/master' 2015-10-07 14:49:47 -07:00
subhachandrachandra
e6ea09f482 Added Mountpoints and SkipInodeUsage options to the Disk plugin to control
which mountpoint stats get reported for and to skip inode stats.
2015-10-07 14:42:11 -07:00
Cameron Sparr
d620651ef6 procstat plugin, consolidate PID-getting 2015-10-07 14:13:33 -06:00
Cameron Sparr
9221f93be9 Allow procstat plugin to handle multiple PIDs from pgrep
Closes #248
2015-10-07 13:48:55 -06:00
Cameron Sparr
795ea49093 Add pid tag to procstat plugin, dont exit on error, only log 2015-10-07 11:42:50 -06:00
Ranjib Dey
6827459b9f fix typo in sample config and README
Closes #240
2015-10-07 11:19:55 -06:00
Ranjib Dey
e424d47ce6 fix plugin registration name 2015-10-07 11:11:47 -06:00
Ranjib Dey
ca0e732331 fix toml struct string 2015-10-07 11:11:47 -06:00
Ranjib Dey
8e52905ea9 add readme for procstat plugin 2015-10-07 11:11:12 -06:00
Cameron Sparr
5cc26bb640 godep update for procstat 2015-10-07 11:11:12 -06:00
Ranjib Dey
fdf00c1be6 Monitor process by pidfile or exe name 2015-10-07 11:11:12 -06:00
Cameron Sparr
47258a7093 Godep update: gopsutil 2015-10-07 10:41:44 -06:00
cornerot
5112d077d5 add tabs in the apache sampleConfig var
Closes #246
2015-10-06 10:34:03 -06:00
Cameron Sparr
b4e8a23da4 godep update: gopsutil 2015-10-05 11:10:24 -06:00
Shirou WAKAYAMA
63e9a4ae68 Fix godeps for MQTT output and remove hostname setting
Closes #241
2015-10-05 10:56:43 -06:00
Shirou WAKAYAMA
7e96a9afda Change MQTT output topic format to split plugin name. 2015-10-05 10:43:46 +09:00
Shirou WAKAYAMA
f5a225f1e0 update Godep.json 2015-10-04 23:57:40 +09:00
Shirou WAKAYAMA
6f4a3816a5 Add MQTT output. 2015-10-04 22:52:29 +09:00
subhachandrachandra
29363794c1 Merge remote-tracking branch 'upstream/master' 2015-09-30 17:02:58 -07:00
Cameron Sparr
64a3a718e6 CHANGELOG feature updates 2015-09-29 14:15:23 -07:00
Cameron Sparr
b01c28ebc6 Clean up additional logging and always print basic agent config 2015-09-29 14:06:49 -07:00
Cameron Sparr
f5d1aaf7d9 Memory plugin: re-add cached and buffered to memory plugin 2015-09-28 17:05:42 -07:00
Cameron Sparr
f6f45881da Add more logging to telegraf 2015-09-28 16:57:03 -07:00
Nick Jones
cd7468f3be Fix conditional test against useradd so it's compatible with Dash
The test to see which version of `useradd` is installed uses 'bashisms'
that fail on Ubuntu due to the fact that `/bin/sh` is symlinked to Dash,
causing the telegraf account to be created without the `--system` option
ever being passed.

This change amends the syntax so that it's POSIX-compatible and more
portable as a result.
2015-09-28 14:04:46 +01:00
subhachandrachandra
cd93b9ae0b Merge remote-tracking branch 'upstream/master'
Conflicts:
	Godeps/_workspace/src/github.com/shirou/gopsutil/mem/mem_darwin.go
	Godeps/_workspace/src/github.com/shirou/gopsutil/mem/mem_linux.go
2015-09-25 15:19:11 -07:00
Eugene Dementiev
0ffaafd788 Fix package provides: the new version of the package now replaces the old one 2015-09-24 13:29:37 -07:00
Eugene Dementiev
c6283d1b5a AMQP auto reconnect feature
Closes #207
2015-09-24 10:30:00 -07:00
Josh Palay
24527859e6 Fix printf format issue
Closes #227
2015-09-23 15:44:25 -07:00
Josh Palay
0c6c5718fe Adds command intervals to exec plugin 2015-09-23 15:42:48 -07:00
Ruslan Islamgaliev
c4bbc18cb6 Make nginx_test check port in nginx module tags
Closes #223
2015-09-23 15:41:46 -07:00
Ruslan Islamgaliev
6e76759225 Add port tag to nginx plugin 2015-09-23 15:40:43 -07:00
Cameron Sparr
87ed2d4a21 Update CHANGELOG with ekini's changes and docker plugin
Closes #226
2015-09-23 15:37:21 -07:00
Eugene Dementiev
74b3309225 Add timestamps to points in Kafka/AMQP outputs 2015-09-23 15:31:37 -07:00
Cameron Sparr
1d741cbfc5 Update godep of go-dockerclient for Label access 2015-09-23 14:25:26 -07:00
Cameron Sparr
12420db4b9 docker plugin: Add docker labels as tags
Closes #90
2015-09-23 14:20:15 -07:00
Cameron Sparr
aad6a7e262 Only run the cpu plugin twice when using -test
This can be easily extended to other plugins that need it by tacking
their name onto the switch statement. Also eliminating the unused
TestAllPlugins code and cleaning up some stray Printlns
2015-09-23 13:56:14 -07:00
Cameron Sparr
b12b804f0a Make redis password config more clear.
Also make certain that the 'host' tag does not include the password for
security reasons

Closes #225
2015-09-23 13:28:36 -07:00
Cameron Sparr
64d38ed17e Remove duplicate opentsdb docker images 2015-09-22 21:10:21 -06:00
Cameron Sparr
f8d64a7378 Redis: include per-db keyspace info
Closes #205
2015-09-22 19:46:50 -07:00
Cameron Sparr
b92a0d5126 Redis plugin, add key metrics and simplify parsing 2015-09-22 16:27:22 -07:00
Cameron Sparr
e0372358df Update changelog with info about filtering 2015-09-22 16:09:24 -07:00
Cameron Sparr
1bce6e3faf Updating README and CHANGELOG for 0.1.9 2015-09-22 13:43:37 -07:00
Cameron Sparr
81dd281789 Remove gvm from packaging script 2015-09-22 11:46:51 -07:00
Cameron Sparr
f7b38dc270 Update deb/rpm package config, package script 2015-09-22 10:56:01 -07:00
Cameron Sparr
ec9819071a Add -outputfilter flag, and refactor the filter flag to work for -sample-config
Closes #211
Issue #199
2015-09-22 10:56:01 -07:00
Ruslan Islamgaliev
72edc3c4fe Select default apache port depending on url scheme 2015-09-22 10:53:53 -07:00
Ruslan Islamgaliev
5657e8d1da Add port tag to apache plugin 2015-09-22 10:52:45 -07:00
Cameron Sparr
0700e0cf94 Update gopsutil godep dependency
Closes #219
2015-09-22 09:28:37 -07:00
Cameron Sparr
1cd2db9f8c Memory plugin: use 'available' instead of 'actual_'
Closes #214
2015-09-21 17:39:39 -07:00
Cameron Sparr
10d411c4f7 Update new memory unit tests, documentation 2015-09-21 17:22:24 -07:00
Cameron Sparr
167b8b8eb8 Godep update gopsutil to get darwin mem fix 2015-09-21 17:22:24 -07:00
Cameron Sparr
74da03d9fa Refactor memory stats, remove some, add 'actual_' stats 2015-09-21 17:22:23 -07:00
Cameron Sparr
b8a58dad65 Fix CPU unit tests for time_ prefix 2015-09-21 17:08:50 -07:00
Cameron Sparr
b012713cf2 Adding time_ prefix to all CPU time measurements 2015-09-21 10:23:46 -07:00
Cameron Sparr
82d914149e Adding a retry to the initial telegraf database connection
Fixes #187
2015-09-18 18:03:47 -07:00
Eugene Dementiev
450f5e03a5 Add shebang to postinstall script (fixes installation on Debian family)
Closes #212
2015-09-18 15:07:11 -07:00
Eugene Dementiev
b04706b875 Fix makefile warning for go1.5 2015-09-18 21:16:08 +03:00
Cameron Sparr
10b0438201 Remove cpu_usage_busy, this is simply 100-cpu_usage_idle 2015-09-17 17:46:35 -07:00
Cameron Sparr
0270ace3d4 Add a CPU collection plugin README 2015-09-17 17:46:34 -07:00
Cameron Sparr
bbb27fa484 Update gopsutil dependency to enable 32-bit builds 2015-09-17 17:46:34 -07:00
Cameron Sparr
df15e7b379 Remove non-existent 'stolen' cpu stat, fix measurement names 2015-09-17 17:46:34 -07:00
Cameron Sparr
df651ab98e Properly vendor the gopsutil dependency 2015-09-17 17:46:34 -07:00
Cameron Sparr
dd7a3b37b0 Delete 'vendored' gopsutil directory 2015-09-17 17:46:34 -07:00
Tim Allen
94a623c00e Check if file exists before running disk usage on it. Not all mounts are normal files.
Closes #208
2015-09-17 17:45:58 -07:00
Cameron Sparr
6cb0f2d392 Revert godep updates, needs a fix in influxdb repo 2015-09-17 00:57:39 -07:00
Cameron Sparr
17e165382f Add amqp/rabbitmq to output list in readme 2015-09-16 17:30:10 -07:00
Cameron Sparr
733ba07312 Changing AddValues to AddFields and temporarily disabling adding with time
Currently adding with time is broken, because InfluxDB does not support
using precision for timestamp truncation both with and without
timestamps. This will be re-enabled once we fix InfluxDB to use the
precision argument for truncation in all cases, and a "unit" argument
in the line-protocol for adding points with non-nanosecond stamps

Fixes #175
2015-09-16 16:59:48 -07:00
Cameron Sparr
46cd9ff9f5 Update influxdb godeps for line-protocol precision fix 2015-09-16 16:59:48 -07:00
Cameron Sparr
66ed4f7328 mysql plugin: don't emit blank tags
closes #201
2015-09-16 14:24:38 -07:00
Cameron Sparr
3be6d84675 Catching up on some CHANGELOG updates 2015-09-16 14:23:57 -07:00
Eugene Dementiev
406e980fae install and init script for el5
Fixes #186
Closes #203
2015-09-16 14:19:57 -07:00
Oliver Buschjost
211065565f Add HTTP 5xx stats to HAProxy plugin. Closes #194 2015-09-16 14:10:09 -07:00
Cameron Sparr
d979ee5573 AMQP routing tag doc & add routing tag for Kafka
closes #200
2015-09-16 12:10:26 -07:00
Roman Plessl
c843b53c30 added docker image unit test with OpenTSDB 2015-09-16 11:01:17 -07:00
Eugene Dementiev
5d280e4d25 AMQP output plugin typo fixes and added README and RoutingTag 2015-09-16 10:59:29 -07:00
Eugene Dementiev
f00d43aa09 Added amqp output 2015-09-16 10:58:38 -07:00
Cameron Sparr
2e68d3cb3c Merge pull request #198 from mced/fix_mem_used_perc
[fix] mem_used_perc returns percentage of used mem
2015-09-15 15:24:48 -07:00
Cédric Menassa
4d6f11b61f [fix] mem_used_perc returns percentage of used mem 2015-09-15 12:58:51 +02:00
Kevin Bouwkamp
aac9ba6c1e add bugfix in CHANGELOG and some notes in pg README
Closes #192
2015-09-14 18:48:01 -07:00
Kevin Bouwkamp
d926a3b5da no longer duplicate ignored columns here 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
fa5753c579 Makes the test also work across pg versions 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
3fa3b2d836 add some comments 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
76041e84e8 fix some more indentation... 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
19c6572926 Add a few notes about the connection strings 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
2217fb8c58 uncomment to skip test in short mode 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
50fcb3914d Generating metric information dynamically. Makes compatible with postgresql versions < 9.2 2015-09-14 18:47:06 -07:00
Roman Plessl
9a0c0886ce added more unit test cases to cover all parts of the code
added a debug statement for debugging OpenTSDB communication

Closes #182
2015-09-14 18:43:56 -07:00
Roman Plessl
fc41cc9878 added prefix settings of the module and rearrange go test code 2015-09-14 18:41:43 -07:00
Roman Plessl
08b220a1fb added docker image unit test with OpenTSDB 2015-09-14 18:41:43 -07:00
Roman Plessl
7e3beaf822 fix spaces with gofmt 2015-09-14 18:41:43 -07:00
Roman Plessl
d2150efc19 added readme as suggested / wished in #177 2015-09-14 18:41:43 -07:00
Roman Plessl
380146b75b added opentsdb as sink 2015-09-14 18:41:43 -07:00
Roman Plessl
2bf096cfc7 adds opentsdb telnet output plugin 2015-09-14 18:41:42 -07:00
Roman Plessl
cb887dee81 change/fix expected test result 2015-09-14 18:41:42 -07:00
Roman Plessl
2ee7d5eeb6 code improvements after running tests / compile step 2015-09-14 18:41:42 -07:00
mced
6d6158ff08 [fix] mem_used_perc returns percentage of used mem
Closes #189
2015-09-14 12:18:31 -07:00
Cameron Sparr
11126cf4ae Add a server name tag to the RabbitMQ server list
Fixes #183
2015-09-11 16:46:49 -07:00
Ruslan Islamgaliev
bd00f46d8b Fix docker stats to make it work on CentOS 7.
issue #58
issue #84
2015-09-11 16:26:08 -07:00
Cameron Sparr
d8482cc286 darwin net plugin fix, really need to godep vendor gopsutil 2015-09-10 13:57:57 -06:00
Cameron Sparr
f7a4317990 Fix multiple redis server bug, do not cache the TCP connections
Fixes #178
2015-09-10 11:51:15 -06:00
Vye Wilson
a55f6498c8 Makefile will now honor GOBIN, if set
Closes #181
2015-09-10 11:50:11 -06:00
Cameron Sparr
81f4aa9a5d Fix bug in setting the precision before gathering metrics
Closes #175
2015-09-09 21:29:55 -06:00
Cameron Sparr
3c7c8926fb Support InfluxDB clusters
Closes #143
2015-09-09 17:57:17 -06:00
Cameron Sparr
a7ed46160a Re-arrange repo files for root dir cleanup 2015-09-09 12:38:51 -06:00
Cameron Sparr
a9b97c7a2b Bump go version number to 1.5 2015-09-09 12:07:58 -06:00
Cameron Sparr
0780ad4ad9 README updates for systemd and deb/rpm install 2015-09-09 12:04:59 -06:00
Cameron Sparr
bf9992b613 Update telegraf.service and packaging script for systemd
Deals with most of #170
2015-09-08 18:23:18 -06:00
Cameron Sparr
8c5e1ff0a0 Update README plugins list 2015-09-04 17:05:50 -06:00
Cameron Sparr
b3044a6e2b Put all ARCH binaries on the README 2015-09-04 16:37:07 -06:00
Cameron Sparr
6260dd1018 Makefile rule for building all linux binaries, and upload all ARCHs 2015-09-04 14:12:50 -06:00
Cameron Sparr
e47801074e package.sh script fixes for uploading binaries 2015-09-04 13:19:13 -06:00
Cameron Sparr
6d42973d7c Update package script and readme for 0.1.8 2015-09-04 12:53:29 -06:00
Cameron Sparr
68e41f130c Ping plugin
Closes #167
2015-09-04 11:20:49 -06:00
Cameron Sparr
65b33a848e Fix default installed config for consistency 2015-09-02 14:25:40 -06:00
Cameron Sparr
5bfb6df0e0 Write data in UTC by default and use 's' precision
Closes #159
Closes #162
2015-09-02 14:19:36 -06:00
Cameron Sparr
13061d1ec7 package.sh: upload raw binaries to S3
Closes #166
2015-09-02 12:05:29 -06:00
nickscript0
0143a4227e add additional metrics to mysql plugin tests
Closes #165
2015-09-02 11:49:16 -06:00
nickscript0
3f63bcde12 add additional MySQL metrics 2015-09-02 11:48:38 -06:00
Michael Wood
b86c6bba4e README: Say where tagpass/tagdrop are valid.
closes #163
2015-09-02 09:33:05 -06:00
Cameron Sparr
4d19fc0860 Fixup for g->r change, io.reader was already using 'r' 2015-08-31 16:15:38 -06:00
Cameron Sparr
9c57c30e57 Redis plugin internal names consistency fix, g -> r 2015-08-31 15:57:52 -06:00
Cameron Sparr
9969c4e810 Add system uptime metric, string formatted AND in float64
closes #150
2015-08-31 14:43:34 -06:00
Alexander Oleinik
e2bc5d80c9 Apache Plugin
Closes #158
Fixes #132
2015-08-31 10:17:18 -06:00
Michael Desa
ab191e2b58 Rename DEPENDENCY_LICENSES LICENSE_OF_DEPENDENCIES
Closes #155
Closes #154
2015-08-28 19:37:23 -06:00
Michael Desa
d418a6e872 Add list of dependency licenses 2015-08-28 16:17:46 -07:00
Cameron Sparr
bdfd1aef62 Update README with 0.1.7 and make separate CONTRIBUTING file 2015-08-28 10:21:22 -06:00
Cameron Sparr
ff2de0c715 Only build the docker plugin on linux 2015-08-27 17:09:18 -06:00
Cameron Sparr
5b78b1e548 Clean up agent error handling and logging of outputs/plugins
Closes #145
2015-08-27 13:41:19 -06:00
Cameron Sparr
d1f965ae30 Kafka output producer, send telegraf metrics to Kafka brokers
Closes #38
2015-08-26 17:03:58 -06:00
Cameron Sparr
434267898b Indent the toml config for readability 2015-08-26 09:22:03 -06:00
Cameron Sparr
a00510a73c Outputs enhancement to require Description and SampleConfig functions
Closes #142
2015-08-26 07:34:26 -06:00
Cameron Sparr
846fd31121 Improve build from source instructions
Closes #141
2015-08-25 18:18:56 -06:00
Cameron Sparr
ab4344a781 Merge problem, re-enable non-standard DB names 2015-08-25 16:52:16 -06:00
Cameron Sparr
ac97fefb91 makefile: ADVERTISED_HOST needs only be set during docker-compose target 2015-08-25 16:34:30 -06:00
subhachandrachandra
8d034f544c Fixed memory reporting for Linux systems
/proc/meminfo reports memory in KiloBytes and so needs a multiplier of 1024 instead of 1000.
The kernel reports in terms of pages and the proc filesystem is left shifting by 2 for 4KB pages to get KB. Since this is a binary shift, Bytes will need to shift by 10 and so get multiplied by 1024.

From the kernel code. PAGE_SHIFT = 12 for 4KB pages
"MemTotal:       %8lu kB\n", K(i.totalram)

Closes #131
2015-08-25 14:18:14 -06:00
subhachandrachandra
ca1d2c7000 Fixed total memory reporting for Darwin systems. hw.memsize is reported as bytes instead of pages. 2015-08-25 14:16:18 -06:00
Bruno Bigras
0acf15c025 Typo: prec -> perc
Closes #140
2015-08-25 14:15:12 -06:00
Cameron Sparr
94eed9b43c Add MySQL server address tag to all measurements
Closes #138
2015-08-25 13:58:55 -06:00
Bruno Bigras
8a6665c03f memcached: fix when a value contains a space
Fixes #137
Closes #139
2015-08-25 13:14:40 -06:00
Cameron Sparr
85ae6fffbb Vagrantfile: do a one-way rsync so that binaries don't get shared between VMs and host 2015-08-25 11:54:12 -06:00
Cameron Sparr
bd85a36cb1 Fixes #130, document mysql plugin better, README 2015-08-24 15:08:16 -06:00
Cameron Sparr
a449e4b47c Add #136 to CHANGELOG 2015-08-24 14:56:50 -06:00
Cameron Sparr
42602a3f35 Provide a -usage flag for printing the usage of a single plugin
Closes #136
2015-08-24 14:52:46 -06:00
Cameron Sparr
50f902cb02 Fixes #128, add system load and swap back to default Telegraf config 2015-08-24 13:26:21 -06:00
nickscript0
b014ac12ee Update CHANGELOG.md 2015-08-24 13:09:23 -06:00
nickscript0
610f24e0cd Update CHANGELOG.md 2015-08-24 13:09:23 -06:00
nsvarich
f45f7e56fd add plugin.name to error message 2015-08-24 13:09:23 -06:00
nickscript0
afe366d6b7 go fmt remove whitespace 2015-08-24 13:09:23 -06:00
nickscript0
1daa059ef9 Log plugin errors in crankParallel and crankSeparate cases. Previously errors weren't logged in these cases. 2015-08-24 13:09:23 -06:00
Cameron Sparr
9777aa6165 Update README to point to url without 'v' prepended to version 2015-08-24 10:48:21 -06:00
Cameron Sparr
143ec1a019 Filter out the 'v' from the version tag, issue #134 2015-08-24 10:39:15 -06:00
subhachandrachandra
13ee9ff37b Fixed memory reporting for Linux systems
/proc/meminfo reports memory in KiloBytes and so needs a multiplier of 1024 instead of 1000.
The kernel reports in terms of pages and the proc filesystem is left shifting by 2 for 4KB pages to get KB. Since this is a binary shift, Bytes will need to shift by 10 and so get multiplied by 1024.

From the kernel code. PAGE_SHIFT = 12 for 4KB pages
"MemTotal:       %8lu kB\n", K(i.totalram)
2015-08-21 16:08:54 -07:00
subhachandrachandra
a3c846b73e Fixed total memory reporting for Darwin systems. hw.memsize is reported as bytes instead of pages. 2015-08-21 15:15:19 -07:00
Cameron Sparr
3d05575e9d Fix for #129 README typo in the 0.1.6 package name url 2015-08-21 10:43:19 -06:00
Cameron Sparr
9d00b5e165 Version= doesn't work on go1.4.2
fixing makefile & vagrantfile & build script to reflect that
2015-08-20 16:43:25 -06:00
Cameron Sparr
a29b39e17a README typo fix 2015-08-20 15:18:45 -06:00
Cameron Sparr
8273679634 0.1.6, update changelog, readme, plugins list 2015-08-20 14:45:04 -06:00
Cameron Sparr
f8c1e953d4 godep update influxdb to 0.9.3-rc1 2015-08-20 14:26:44 -06:00
Cameron Sparr
532d953b5a fix for #126, nginx plugin not catching net.SplitHostPort error 2015-08-20 11:26:49 -06:00
Cameron Sparr
ecfdafab06 Add a simple integration test at the end of circle-test.sh similar to homebrew test 2015-08-20 10:07:17 -06:00
Cameron Sparr
9bc39987f1 Change -X main.Version <n> to -X main.Version=<n> for go1.5 2015-08-20 08:46:08 -06:00
Simon Fraser
601b444a60 fix segv on error 2015-08-20 07:49:46 -06:00
Cameron Sparr
4b0671205d packaging script fix, make_dir_tree is req'd 2015-08-19 15:02:55 -06:00
Cameron Sparr
db634f4c0b Fix for issue #121, update etc/config.sample.toml 2015-08-19 14:40:35 -06:00
Josh Palay
7d9efd7cff Modifications to httpjson plugin 2015-08-19 13:25:21 -06:00
Alvaro Morales
06ef2a72c5 Add httpjson plugin 2015-08-19 13:24:07 -06:00
Cameron Sparr
5e8b6dd164 Update CHANGELOG with some recent additions 2015-08-18 15:58:51 -06:00
Cameron Sparr
03c7d564d9 Merge pull request #118 from srfraser/diskusage_windows_fix
Get disk usage stats working on windows
2015-08-18 15:58:13 -06:00
Cameron Sparr
c3ec3f4bc8 Fix issue #119, remove the _workspace/pkg directory from git tracking 2015-08-18 15:51:40 -06:00
Simon Fraser
7273e2e6f2 Get disk usage stats working on windows 2015-08-18 21:31:33 +01:00
Cameron Sparr
af770e042a Update README to reflect new release of 0.1.4 & 0.1.5 2015-08-18 12:50:07 -06:00
Cameron Sparr
07a1bffc60 Updating the packaging script to assume tag has already been set 2015-08-18 12:50:07 -06:00
Cameron Sparr
183e79398d Fix build, testify got removed from godeps somehow 2015-08-18 12:49:25 -06:00
Cameron Sparr
d98bedd6e1 Telegraf 0.1.5, update InfluxDB client to HEAD 2015-08-18 12:12:09 -06:00
Cameron Sparr
461245c83d Telegraf 0.1.4, update godep to point to InfluxDB client 0.9.2 2015-08-18 12:09:52 -06:00
Cameron Sparr
6fcbb7bdb0 Update Makefile with new build requirements 2015-08-18 10:36:13 -06:00
Cameron Sparr
2304d03b40 Add build function to circle-test.sh, and remove release.sh 2015-08-18 09:24:49 -06:00
Cameron Sparr
4e3213f3bd godep: vendor all dependencies & add circle-test.sh
Vendor dependencies and use circle-test.sh to run CI process, because
the CircleCI autobuild operations are not compatible with using godep.
2015-08-17 17:38:44 -06:00
Josh Palay
55fb249f6b exec plugin doesn't crash when given null JSON values 2015-08-14 17:06:14 -06:00
Cameron Sparr
4d614b3088 README update to address issue #113 2015-08-14 15:40:29 -06:00
Cameron Sparr
cad0a762a0 Merge branch 'jipperinbham-datadog-output' 2015-08-14 09:45:12 -06:00
JP
0ae5075cc9 fix tests, remove debug prints 2015-08-13 20:42:57 -05:00
JP
3145a732f2 fix merge conflicts, update import paths 2015-08-13 18:57:05 -05:00
JP
ceaf6fd67a add datadog output 2015-08-13 18:54:09 -05:00
Cameron Sparr
c26fa33094 Release 0.1.5, updating CHANGELOG and README 2015-08-13 15:46:17 -06:00
Cameron Sparr
b199d7a9fe Put quotes around potentially empty bash variables 2015-08-13 15:38:05 -06:00
Cameron Sparr
0e65d8e64e Rebase and fixups for PR #111, fixes issue #33 2015-08-13 14:47:51 -06:00
Josh Palay
1e742aec04 Adds cpu busy time and percentages 2015-08-13 14:30:32 -06:00
Cameron Sparr
ba1e4917d1 Removing DefaultConfig function because there's really no point 2015-08-13 14:26:02 -06:00
Cameron Sparr
4ce61875a4 README updates for readability and ease of use 2015-08-13 14:01:08 -06:00
Cameron Sparr
04963f12a3 Allow a PerCPU configuration variable, issue #108 2015-08-13 13:30:11 -06:00
Cameron Sparr
5d4b6c41a8 circle.yml: verify that golint violations == 0 for some dirs 2015-08-12 15:51:43 -06:00
Cameron Sparr
5cb3a096c1 Fix influx.toml and ListTags string printing 2015-08-12 14:59:48 -06:00
JP
ddf438dac0 add missing import and Tag marshalling 2015-08-12 15:17:50 -05:00
Cameron Sparr
ed13924c5a Merge pull request #109 from influxdb/pr-107
Merge of PR #107, Allow Telegraf to output data to multiple locations beyond InfluxDB, such as Riemann or Kafka
2015-08-12 11:21:35 -06:00
Cameron Sparr
5cc6f88ade Update changelog with PR #107, thanks @jipperinbham 2015-08-12 11:08:45 -06:00
Cameron Sparr
32124a7913 Adding a Close() function to the Output interface and to the agent 2015-08-12 11:04:25 -06:00
Cameron Sparr
08042089f9 Followup to issue #77, create configured database name from toml file 2015-08-12 10:54:13 -06:00
JP
53969ae054 move tags to influxdb struct, update all sample configs 2015-08-12 10:23:00 -06:00
Cameron Sparr
16c424de2a Print version number on startup, issue #104 2015-08-11 14:23:16 -06:00
Cameron Sparr
9e2f8f664b Followup to issue #77, create configured database name from toml file 2015-08-11 14:02:04 -06:00
Cameron Sparr
343d8f87b4 Update CHANGELOG with fix for issue #101
I really need to remember to do this with the initial commit.
2015-08-11 11:28:36 -06:00
Cameron Sparr
374a0af084 Fix for issue #101, switch back from master branch if building locally 2015-08-11 11:07:39 -06:00
Cameron Sparr
9f2e6d6172 Update CHANGELOG with PR #106 2015-08-11 11:04:50 -06:00
Cameron Sparr
af647990ab Merge pull request #106 from zepouet/master
New option to filter the plugins to run at startup
2015-08-11 11:02:26 -06:00
nicolas
9b2b1df7e2 Go FMT missing
Merge branch 'master' of https://github.com/zepouet/telegraf
2015-08-11 19:01:51 +02:00
nicolas
abdef7c326 Go FMT missing... 2015-08-11 19:01:37 +02:00
Cameron Sparr
b312e48d31 Revert "PR #59, implementation of multiple outputs"
This reverts commit 48a075529a, reversing
changes made to 924700f381.
2015-08-11 10:34:00 -06:00
Cameron Sparr
48a075529a PR #59, implementation of multiple outputs 2015-08-11 10:21:00 -06:00
Cameron Sparr
7f22211e4b Update changelog with PR #103 2015-08-11 10:19:29 -06:00
Simon Fraser
a63c3c8e0b Ensure tests pass now that we're passing fstype around
go fmt checks

Rework the example configuration snippets
2015-08-11 10:19:29 -06:00
Simon Fraser
bba162c55b to filter by filesystem type, we need to pass that up the chain 2015-08-11 10:19:29 -06:00
Simon Fraser
540ba6d6ae tag filtering description added 2015-08-11 10:19:29 -06:00
Simon Fraser
29e8ce68e4 Modify ShouldPass so that it checks the tags of a metric, if configured.
A plugin can have 'tagpass' and 'tagdrop' subsections:

[disk.tagpass]

And tagname = array lists of things to filter by:

fstype = [ "ext4", "xfs" ]
path = [ "/", /opt", "/home" ]

[disk.tagdrop]
path = [ "/" ]
2015-08-11 10:19:29 -06:00
Nicolas
5691253acd Update README with the new filter option and add a usage chapter with --help 2015-08-11 18:18:52 +02:00
Simon Fraser
cd5c85a245 ShouldPass needs to know the tags being used 2015-08-11 10:13:55 -06:00
Cameron Sparr
7e1d1c19e6 Fix for issue #77, create telegraf database if not exists 2015-08-11 10:13:55 -06:00
Cameron Sparr
46cdb40800 Automate circleci package process 2015-08-11 10:09:26 -06:00
Cameron Sparr
e3c6101b93 Back to regular circle.yml, make and artifact linux binaries
Remove the circle-test.sh script because that environment was having
problems building all of gopsutil.
2015-08-11 10:09:26 -06:00
Simon Fraser
448aeb9c55 fix filename for logrotate config 2015-08-11 10:09:26 -06:00
Simon Fraser
5e55104aa6 Log rotation configuration file, and package.sh modifications to add it to deb and rpm 2015-08-11 10:09:26 -06:00
Cameron Sparr
03cd83dc82 Massive retro-active changelog update 2015-08-11 10:09:26 -06:00
Cameron Sparr
0cebae8e23 README long-line fixing and a couple typos 2015-08-11 10:09:26 -06:00
Cameron Sparr
38bbe7567a Fail and exit telegraf if no loaded plugins are found, issue #26 2015-08-11 10:09:26 -06:00
mocchira
fc95e8401a Add LeoFS plugin 2015-08-11 10:09:26 -06:00
Cameron Sparr
b70f821a10 Revert "Add log rotation to /etc/logrotate.d for deb and rpm packages" 2015-08-11 10:09:25 -06:00
Cameron Sparr
95bb21f3f5 Using gvm & shell test file to manage circleci go environment 2015-08-11 10:09:25 -06:00
Alvaro Morales
94741d52ed Remove simplejson dependency in exec plugin 2015-08-11 10:09:25 -06:00
Cameron Sparr
1ac6da4a8b Fix for issue #93, just use github path instead of gopkg.in 2015-08-11 10:09:25 -06:00
Alvaro Morales
090c0a60fa Add exec plugin 2015-08-11 10:09:25 -06:00
nicolas
e7ca9113bc Add filtering options to select plugin at startup 2015-08-11 17:50:36 +02:00
Cameron Sparr
924700f381 Update changelog with PR #103 2015-08-10 19:22:39 -06:00
Simon Fraser
d280b968d7 Ensure tests pass now that we're passing fstype around
go fmt checks

Rework the example configuration snippets
2015-08-10 19:20:49 -06:00
Simon Fraser
1d8c7a74d6 to filter by filesystem type, we need to pass that up the chain 2015-08-10 19:19:36 -06:00
Simon Fraser
c1dc77c69c tag filtering description added 2015-08-10 19:19:35 -06:00
Simon Fraser
3ecb5a20a5 Modify ShouldPass so that it checks the tags of a metric, if configured.
A plugin can have 'tagpass' and 'tagdrop' subsections:

[disk.tagpass]

And tagname = array lists of things to filter by:

fstype = [ "ext4", "xfs" ]
path = [ "/", /opt", "/home" ]

[disk.tagdrop]
path = [ "/" ]
2015-08-10 19:19:35 -06:00
Simon Fraser
0c1460062d ShouldPass needs to know the tags being used 2015-08-10 19:19:35 -06:00
Cameron Sparr
c0cef8ca43 Fix for issue #77, create telegraf database if not exists 2015-08-10 16:33:18 -06:00
Cameron Sparr
a3e20ab2d6 Automate circleci package process 2015-08-10 13:41:05 -06:00
Cameron Sparr
7a23eb69eb Back to regular circle.yml, make and artifact linux binaries
Remove the circle-test.sh script because that environment was having
problems building all of gopsutil.
2015-08-10 10:47:32 -06:00
Simon Fraser
7da12dc324 fix filename for logrotate config 2015-08-08 23:04:42 +01:00
Simon Fraser
ed9b43e2cc Log rotation configuration file, and package.sh modifications to add it to deb and rpm 2015-08-08 22:10:32 +01:00
Cameron Sparr
2cd56e43a8 Massive retro-active changelog update 2015-08-07 15:41:54 -06:00
JP
91f6c4b740 move tags to influxdb struct, update all sample configs 2015-08-07 15:31:25 -05:00
Cameron Sparr
c0249caef9 README long-line fixing and a couple typos 2015-08-07 09:19:45 -06:00
Cameron Sparr
e0d0bc0966 Fail and exit telegraf if no loaded plugins are found, issue #26 2015-08-07 09:00:40 -06:00
mocchira
24eb7d6bc9 Add LeoFS plugin 2015-08-07 08:58:24 +00:00
JP
48c10f9454 update config sample, marshal tags from toml 2015-08-06 21:03:27 -05:00
Cameron Sparr
d9b208260e Merge pull request #96 from influxdb/revert-87-logrotation
Revert "Add log rotation to /etc/logrotate.d for deb and rpm packages"
2015-08-06 14:11:56 -06:00
Cameron Sparr
5dd16399b3 Revert "Add log rotation to /etc/logrotate.d for deb and rpm packages" 2015-08-06 14:10:20 -06:00
Cameron Sparr
96014f8e94 Merge pull request #92 from Asana/exec
Add exec plugin
2015-08-06 13:21:12 -06:00
Cameron Sparr
5dd14f2ee2 Using gvm & shell test file to manage circleci go environment 2015-08-06 13:03:41 -06:00
Alvaro Morales
ad2e0bc4e3 Remove simplejson dependency in exec plugin 2015-08-06 12:01:42 -07:00
Cameron Sparr
85c61fb684 Fix for issue #93, just use github path instead of gopkg.in 2015-08-06 11:59:07 -06:00
JP
2601a09a83 resolve remaining build errors 2015-08-06 12:00:03 -05:00
JP
d318ef6df7 resolve go vet issues 2015-08-06 11:52:46 -05:00
JP
7ed19de44e fix issue with var rename 2015-08-06 11:49:02 -05:00
JP
72652ff16e resolve merge conflicts 2015-08-05 21:37:18 -05:00
JP
4a12471918 convert influxdb output to multiple outputs 2015-08-05 21:25:14 -05:00
Alvaro Morales
32cbbdbf73 Add exec plugin 2015-08-05 17:51:44 -07:00
Cameron Sparr
ab28707d71 Marking disque tests 'short', circleci container doesn't appear to support tcp? 2015-08-05 17:00:04 -06:00
Cameron Sparr
42a7203b1e Skip per-cpu unit test when in a circle ci container 2015-08-05 16:49:40 -06:00
Cameron Sparr
5259c50612 Mark more unit tests as 'integration' tests when they rely on external services/docker 2015-08-05 16:49:40 -06:00
Cameron Sparr
06a84def5f Merge pull request #71 from kureikain/haproxy_plugin
HAProxy plugin
2015-08-05 15:47:59 -06:00
Codeb Fan
d7bda01ccb Add Nginx plugin (ngx_http_stub_status_module)
Add plugin to collect Nginx basic status information (ngx_http_stub_status_module).
http://nginx.org/en/docs/http/ngx_http_stub_status_module.html
2015-08-05 15:33:28 -06:00
Cameron Sparr
890b2453f8 Adding Disque, Lustre, and memcached to the list of supported plugins 2015-08-05 15:19:58 -06:00
Cameron Sparr
df9e1669cf Merge pull request #76 from kotopes/redis-port-tag
add tag "port" to every redis metric
2015-08-05 15:01:36 -06:00
Cameron Sparr
8b491a46f3 Merge branch 'gfloyd-disque-plugin' 2015-08-05 14:47:26 -06:00
Cameron Sparr
c698dc9784 Build & unit test fixup 2015-08-05 14:47:12 -06:00
Cameron Sparr
77dd1e3d45 Adding Kafka docker container and utilizing it in unit tests 2015-08-05 14:46:31 -06:00
Cameron Sparr
b3cb8d0f53 Verify proper go formatting in circleci job 2015-08-05 14:46:31 -06:00
Cameron Sparr
260fc43281 go fmt fixes 2015-08-05 14:46:31 -06:00
Cameron Sparr
b4ef7bb3ed Adding circleci build badge 2015-08-05 14:46:30 -06:00
Simon Fraser
816313de30 Fix 'go vet' error, +build comment must be followed by a blank line 2015-08-05 14:46:30 -06:00
Cameron Sparr
bb7bdffada Creating circleci job to just lint and vet code 2015-08-05 14:46:30 -06:00
Simon Fraser
0647666c65 Add default log rotation 2015-08-05 14:46:30 -06:00
Simon Fraser
8255945ea7 Tests for the lustre plugin, initial commit 2015-08-05 14:46:30 -06:00
Simon Fraser
2364595697 Require validation for uint64 as well as int64 2015-08-05 14:46:30 -06:00
Simon Fraser
e442d754d0 Lustre filesystem plugin (http://lustre.org/)
The configuration allows users to override the /proc/ files
scanned for data, since that has been known to change with lustre
versions.
2015-08-05 14:46:30 -06:00
Simon Fraser
6b510652ed Add Lustre 2 plugin 2015-08-05 14:46:30 -06:00
Cameron Sparr
9ea5a88f84 Fix GetLocalHost testutil function for mac users (boot2docker) 2015-08-05 14:46:30 -06:00
Cameron Sparr
aa0adc98f9 Build & unit test fixup 2015-08-04 16:48:19 -06:00
Cameron Sparr
fdd2401f7b Adding Kafka docker container and utilizing it in unit tests 2015-08-04 16:30:05 -06:00
Cameron Sparr
6b820d91ae Verify proper go formatting in circleci job 2015-08-04 16:14:17 -06:00
Cameron Sparr
611ad26d1b go fmt fixes 2015-08-04 16:09:59 -06:00
Cameron Sparr
0911b5b2e8 Adding circleci build badge 2015-08-04 15:04:34 -06:00
Cameron Sparr
c660ff80bf Merge pull request #86 from srfraser/lustre2-plugin
Lustre2 plugin
2015-08-04 15:00:16 -06:00
Simon Fraser
ef098923d6 Fix 'go vet' error, +build comment must be followed by a blank line 2015-08-04 21:44:15 +01:00
Simon Fraser
a4f7ffea3f Merge branch 'master' of https://github.com/influxdb/telegraf into lustre2-plugin
Conflicts:
	testutil/accumulator.go
2015-08-04 21:39:17 +01:00
Cameron Sparr
c5deb9d557 Merge pull request #87 from srfraser/logrotation
Add log rotation to /etc/logrotate.d for deb and rpm packages
2015-08-04 14:14:17 -06:00
Cameron Sparr
3ff2ea8d4e Creating circleci job to just lint and vet code 2015-08-04 11:22:26 -06:00
Simon Fraser
85ecee3525 Add default log rotation 2015-08-04 15:30:27 +01:00
Simon Fraser
d09e5f37ab Tests for the lustre plugin, initial commit 2015-08-04 14:54:50 +01:00
Simon Fraser
7edcd7aaf5 Require validation for uint64 as well as int64 2015-08-04 14:53:45 +01:00
Simon Fraser
7def1364b5 Lustre filesystem plugin (http://lustre.org/)
The configuration allows users to override the /proc/ files
scanned for data, since that has been known to change with lustre
versions.
2015-08-04 13:48:09 +01:00
Simon Fraser
3b588f0502 Add Lustre 2 plugin 2015-08-04 13:47:50 +01:00
Cameron Sparr
03c520798e Fix GetLocalHost testutil function for mac users (boot2docker) 2015-08-03 21:01:52 -06:00
Graham Floyd
c0fa6af51b Add disque plugin 2015-07-31 14:46:46 -05:00
Todd Persen
a4d0c47fc6 Merge pull request #49 from marcosnils/container_services
Container services
2015-07-30 16:29:44 -07:00
Vinh
5bf00e87cc Add haproxy plugin 2015-07-22 17:14:31 -07:00
Evgeny Kulev
014ddd76f4 add tag "port" to every redis metric
see issue https://github.com/influxdb/telegraf/issues/74
2015-07-23 00:55:03 +03:00
Evan Phoenix
6eb4bdcf0e Merge pull request #53 from alvaromorales/rethinkdb-fix
Add rethinkdb plugin to all.go
2015-07-21 13:37:55 -07:00
Evan Phoenix
b4e032d9c9 Merge pull request #54 from jipperinbham/mongodb-plugin
add MongoDB plugin
2015-07-21 13:37:44 -07:00
Evan Phoenix
4ca39dfd1e Merge pull request #55 from brocaar/elasticsearch_plugin
Implement Elasticsearch plugin
2015-07-21 13:34:31 -07:00
Evan Phoenix
15ef62747a Merge pull request #60 from brocaar/connection_timeout
Add connection timeout configuration for InfluxDB.
2015-07-21 13:32:26 -07:00
Evan Phoenix
ad6dcb478d Merge pull request #63 from bewiwi/master
Fix redis : change ending call with "\r\n"
2015-07-21 13:31:51 -07:00
Evan Phoenix
0b7aa65dbf Merge pull request #64 from vic3lord/systemd_support
systemd unit support
2015-07-21 13:31:27 -07:00
Evan Phoenix
e484d4bbf4 Merge pull request #72 from vadimtk/master
Add TokuDB metrics to MySQL plugin
2015-07-21 13:27:13 -07:00
Evan Phoenix
e6ff9c6cd5 Merge pull request #73 from ianunruh/plugin/rabbitmq
Add simple RabbitMQ plugin
2015-07-21 13:26:59 -07:00
Ian Unruh
fad63b28d1 Add simple RabbitMQ plugin 2015-07-21 11:48:49 -07:00
Vadim Tkachenko
7a075e091d Add TokuDB metrics to MySQL plugin 2015-07-19 13:01:45 -07:00
Or Elimelech
0df4708267 systemd unit support 2015-07-14 21:42:33 +03:00
Loïc
d5b4e4ba60 Fix redis : change ending call with "\r\n" 2015-07-13 18:21:16 +02:00
Orne Brocaar
b717dc0742 Use string for InfluxDB timeout duration config. 2015-07-12 18:05:44 +02:00
Orne Brocaar
6ad37267e4 Add connection timeout configuration for InfluxDB. 2015-07-10 20:17:26 +02:00
Orne Brocaar
22d4d1fb42 Fix typo (tranport > transport). 2015-07-10 09:00:28 +02:00
JP
98b0543b26 fix merge conflicts 2015-07-09 15:09:43 -05:00
JP
c0512e720c add SSL support, change tag to hostname 2015-07-09 15:06:18 -05:00
Orne Brocaar
0f6664b260 Remove note that it only reads indices stats. 2015-07-09 21:02:19 +02:00
Orne Brocaar
f76f99e789 Merge remote-tracking branch 'upstream/master' into elasticsearch_plugin 2015-07-09 21:01:06 +02:00
Orne Brocaar
e2d48f42cc Cleanup repeated logic. 2015-07-09 20:58:54 +02:00
Orne Brocaar
ec138cae62 Remove indices filter. 2015-07-09 20:53:54 +02:00
Orne Brocaar
986b89f5ed Cleanup tests. 2015-07-09 20:46:42 +02:00
Orne Brocaar
d799011039 Implement breakers stats. 2015-07-09 20:43:52 +02:00
Orne Brocaar
0faa1c886a Implement http stats. 2015-07-09 20:38:51 +02:00
Orne Brocaar
cb839d0fe8 Implement transport stats. 2015-07-09 20:36:22 +02:00
Orne Brocaar
ec4079733e Implement fs stats. 2015-07-09 20:32:56 +02:00
Orne Brocaar
4743c9ab16 Implement network stats. 2015-07-09 20:23:04 +02:00
Todd Persen
c4e5e743c4 Update README.md 2015-07-09 12:22:10 -06:00
Todd Persen
1d23681efe Update README.md 2015-07-09 12:20:23 -06:00
Todd Persen
56d49f2f4f Update CHANGELOG.md 2015-07-09 12:19:49 -06:00
Orne Brocaar
ac54b7cdd1 Implement thread-pool stats. 2015-07-09 20:18:24 +02:00
Todd Persen
efe2771a34 Merge pull request #56 from EmilS/plugins/kafka-consumer-readme
Adds README for Kafka consumer plugin
2015-07-09 12:13:10 -06:00
Orne Brocaar
10c4ec74cc Implement JVM stats. 2015-07-09 20:11:46 +02:00
Orne Brocaar
d90026646f Implement process stats. 2015-07-09 20:06:30 +02:00
Orne Brocaar
9cd1344740 Implement os stats. 2015-07-09 20:01:59 +02:00
Orne Brocaar
c6a9335bf2 Refactor parsing "indices" stats. 2015-07-09 19:51:51 +02:00
Orne Brocaar
6c87148cd4 Add node-id and node attributes to tags. 2015-07-09 18:41:16 +02:00
Orne Brocaar
3f6c46e1ec Add node_name to tags. 2015-07-08 23:07:10 +02:00
Emil Stolarsky
b3c13b7aef Adds README for Kafka consumer plugin 2015-07-08 15:45:02 -04:00
Orne Brocaar
55cfd5c904 Check that API response is 200. 2015-07-08 21:28:25 +02:00
Orne Brocaar
d38f2223a5 Implement Elasticsearch plugin (indices stats). 2015-07-08 21:14:51 +02:00
JP
86145d5eb5 add MongoDB plugin 2015-07-07 11:25:34 -05:00
Marcos Lilljedahl
ef335d9fd7 Add missing files 2015-07-06 22:23:24 -03:00
Marcos Lilljedahl
d2810ddc95 Add DOCKER_HOST support for tests
This allows tests to be run in environments where DOCKER_HOST is used. This
is extremely helpful when using boot2docker to run docker
2015-07-06 22:18:31 -03:00
Alvaro Morales
037c43cd25 Add rethinkdb plugin to all.go. 2015-07-06 17:27:09 -07:00
Marcos Lilljedahl
aa86c16838 Add --no-recreate option to prepare target 2015-07-06 21:17:44 -03:00
Todd Persen
ed16a84e0d Merge pull request #50 from jseriff/master
update init.sh to use telegraf directories
2015-07-06 16:12:36 -07:00
Todd Persen
530a60a52d Merge pull request #52 from benfb/master
use influxdb/telegraf instead of influxdb/influxdb in changelog
2015-07-06 16:11:21 -07:00
Ben Bailey
48463681fb use influxdb/telegraf instead of influxdb/influxdb in changelog 2015-07-06 17:26:31 -05:00
jseriff
f5a8739b7c update init.sh to use telegraf directories
init.sh should use telegraf directories that are established in the package.sh as of 120218f
2015-07-06 11:07:06 -05:00
Marcos Lilljedahl
4471e2bdbb Use postgres default configuration 2015-07-06 03:46:53 -03:00
Marcos Lilljedahl
63552282d7 Remove circle ci implementation due to Golang bug.
I've tried to set up circleci but I came across a Golang issue that was
preventing CPU tests from passing. Instead of ignoring those tests I've
submitted an issue to Go (https://github.com/golang/go/issues/11609)
hoping that this gets fixed soon.
2015-07-06 03:38:08 -03:00
Marcos Lilljedahl
ae385b336d Remove unnecessary circleci configuration as we're using default
provided services

Update test users to use circleci default services
2015-07-06 02:20:25 -03:00
Marcos Lilljedahl
0db55007ab Add circleci script 2015-07-06 01:51:13 -03:00
Marcos Lilljedahl
d545b197ea Add docker containers to test services.
This commit initializes the needed services which are not mocked
so tests can be executed in any environment with docker.

Some default modifications (i.e., connection strings) were also made to
current tests to accommodate them for this setup.

A docker-compose.yml file is provided with all the necessary parameters
for these services to be initialized. Future services can be added
easily by extending this configuration file.

In addition a makefile has been introduced to simplify command execution
2015-07-06 01:46:43 -03:00
Todd Persen
bbc6fa57fa Update README.md for v0.1.3 2015-07-05 19:16:08 -07:00
346 changed files with 41507 additions and 13752 deletions

.gitignore

@@ -1,3 +1,6 @@
pkg/
tivan
.vagrant
/telegraf
.idea
*~
*#

CHANGELOG.md

@@ -1,27 +1,434 @@
## v0.10.3 [unreleased]
### Release Notes
### Features
### Bugfixes
## v0.10.2 [2016-02-04]
### Release Notes
- Statsd timing measurements are now aggregated into a single measurement with
fields.
- Graphite output now inserts tags into the bucket in alphabetical order.
- Normalized TLS/SSL support for output plugins: MQTT, AMQP, Kafka
- `verify_ssl` config option was removed from Kafka because it was actually
doing the opposite of what it claimed to do (yikes). It has been replaced by
`insecure_skip_verify` (see the sketch after these notes).
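As a sketch under assumptions: the `insecure_skip_verify` option name is confirmed above, but the `[[outputs.kafka]]` table name and the value shown here are illustrative:
```
[[outputs.kafka]]
  # replaces the removed verify_ssl option, whose behavior was inverted;
  # leaving this false keeps TLS certificate verification enabled
  insecure_skip_verify = false
```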
### Features
- [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse!
- [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type.
- [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch!
- [#601](https://github.com/influxdata/telegraf/issues/601): Warn when overwriting cached metrics.
- [#614](https://github.com/influxdata/telegraf/pull/614): PowerDNS input plugin. Thanks @Kasen!
- [#617](https://github.com/influxdata/telegraf/pull/617): exec plugin: parse influx line protocol in addition to JSON.
- [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-Vista support
### Bugfixes
- [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements.
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
- [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong.
- [#602](https://github.com/influxdata/telegraf/issues/602): Fix statsd field name templating.
- [#612](https://github.com/influxdata/telegraf/pull/612): Docker input panic fix if stats received are nil.
- [#634](https://github.com/influxdata/telegraf/pull/634): Properly set host headers in httpjson. Thanks @reginaldosousa!
## v0.10.1 [2016-01-27]
### Release Notes
- Telegraf now keeps a fixed-length buffer of metrics per-output. This buffer
defaults to 10,000 metrics, and is adjustable (see the sketch after these
notes). The buffer is cleared when a successful write to that output occurs.
- The docker plugin has been significantly overhauled to add more metrics
and allow for docker-machine (incl OSX) support.
[See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md)
for the latest measurements, fields, and tags. There is also now support for
specifying a docker endpoint to get metrics from.
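A minimal sketch of tuning that buffer, assuming the `metric_buffer_limit` agent option documented in CONFIGURATION.md below:
```
[agent]
  # per-output metric buffer, cleared on each successful write
  metric_buffer_limit = 10000
```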
### Features
- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
- AMQP SSL support. Thanks @ekini!
- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. Thanks @skwong2!
- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
- [#589](https://github.com/influxdata/telegraf/pull/589): Microsoft SQL Server input plugin. Thanks @zensqlmonitor!
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!
### Bugfixes
- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric on timeout. Thanks @titilambert!
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated.
- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux
- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
- [#585](https://github.com/influxdata/telegraf/pull/585): Log stack trace and continue on Telegraf panic. Thanks @wutaizeng!
## v0.10.0 [2016-01-12]
### Release Notes
- Linux packages have been taken out of `opt`, the binary is now in `/usr/bin`
and configuration files are in `/etc/telegraf`
- **breaking change** `plugins` have been renamed to `inputs` (see the sketch
after these notes). This was done because `plugins` is too generic, as there
are now also "output plugins", and there will likely be "aggregator plugins"
and "filter plugins" in the future. Additionally, `inputs/` and `outputs/`
directories have been placed in the root-level `plugins/` directory.
- **breaking change** the `io` plugin has been renamed `diskio`
- **breaking change** plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
for configuration.
- **breaking change** `twemproxy` plugin: `prefix` option removed.
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_`
instead of only `cpu_`
- **breaking change** some command-line flags have been renamed to separate words.
`-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`,
`-outputfilter` -> `-output-filter`
- The prometheus plugin schema has not been changed (measurements have not been
aggregated).
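As a sketch of the rename (the old `[[plugins.cpu]]` spelling is inferred from the note above; the new form matches the 0.2.3 example further down):
```
# pre-0.10.0 configuration:
# [[plugins.cpu]]
#   percpu = true

# 0.10.0 and later:
[[inputs.cpu]]
  percpu = true
```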
### Packaging change note:
RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their
configurations overwritten by the upgrade. There is a backup stored at
/etc/telegraf/telegraf.conf.$(date +%s).backup.
### Features
- Plugin measurements aggregated into a single measurement.
- Added ability to specify per-plugin tags
- Added ability to specify per-plugin measurement suffix and prefix.
(`name_prefix` and `name_suffix`)
- Added ability to override base plugin measurement name. (`name_override`)
### Bugfixes
## v0.2.5 [unreleased]
### Features
- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
### Bugfixes
- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
## v0.2.4 [2015-12-08]
### Features
- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters
- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets
- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
### Bugfixes
- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.
## v0.2.3 [2015-11-30]
### Release Notes
- **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`,
and most of the config option names have changed.
This only affects the kafka consumer _plugin_ (not the
output). There were a number of problems with the kafka plugin that led to it
only collecting data once at startup, so the kafka plugin was basically
non-functional.
- Plugins can now be specified as a list, and multiple plugin instances of the
same type can be specified, like this:
```
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
```
- Riemann output added
- Aerospike plugin: tag changed from `host` -> `aerospike_host`
### Features
- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
### Bugfixes
- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic
## v0.2.2 [2015-11-18]
### Release Notes
- 0.2.1 has a bug where all lists within plugins get duplicated, including
lists of servers/URLs. 0.2.2 is being released solely to fix that bug.
### Bugfixes
- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.
## v0.2.1 [2015-11-16]
### Release Notes
- Telegraf will no longer use docker-compose for "long" unit test, it has been
changed to just run docker commands in the Makefile. See `make docker-run` and
`make docker-kill`. `make test` will still run all unit tests with docker.
- Long unit tests are now run in CircleCI, with docker & race detector
- Redis plugin tag has changed from `host` to `server`
- HAProxy plugin tag has changed from `host` to `server`
- UDP output now supported
- Telegraf will now compile on FreeBSD
- Users can now specify outputs as lists, specifying multiple outputs of the
same type (see the sketch after these notes).
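A hedged sketch of such an output list (the `urls` option name comes from the v0.1.9 notes further down; the table name and server addresses are placeholders):
```
[[outputs.influxdb]]
  urls = ["http://influx-primary:8086"]

[[outputs.influxdb]]
  urls = ["http://influx-backup:8086"]
```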
### Features
- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
### Bugfixes
- [#331](https://github.com/influxdata/telegraf/pull/331): Dont overwrite host tag in redis plugin.
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
## v0.2.0 [2015-10-27]
### Release Notes
- The -test flag will now only output 2 collections for plugins that need it
- There is a new agent configuration option: `flush_interval`. This option tells
Telegraf how often to flush data to InfluxDB and other output sinks. For example,
users can set `interval = "2s"` and `flush_interval = "60s"` for Telegraf to
collect data every 2 seconds, and flush every 60 seconds (see the sketch after
these notes).
- `precision` and `utc` are no longer valid agent config values. `precision` has
moved to the `influxdb` output config, where it will continue to default to "s"
- debug and test output will now print the raw line-protocol string
- Telegraf will now, by default, round the collection interval to the nearest
even interval. This means that `interval="10s"` will collect every :00, :10, etc.
To ease scale concerns, flushing will be "jittered" by a random amount so that
all Telegraf instances do not flush at the same time. Both of these options can
be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice
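Taken together, a minimal `[agent]` sketch using the options named in these notes (the `round_interval` and `flush_jitter` values are illustrative):
```
[agent]
  # collect every 2 seconds, flush to outputs every 60 seconds
  interval = "2s"
  flush_interval = "60s"
  # round collection to even intervals (:00, :10, :20, ...)
  round_interval = true
  # stagger flushes by a random delay of up to 5s
  flush_jitter = "5s"
```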
### Features
- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
of metrics collected and from how many inputs.
- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!
### Bugfixes
- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
## v0.1.9 [2015-09-22]
### Release Notes
- InfluxDB output config change: `url` is now `urls`, and is a list. Config files
will still be backwards compatible if only `url` is specified.
- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
allow filtering of output sinks on the command-line using the `-outputfilter`
flag, much like how the `-filter` flag works for inputs.
- Support for filtering on config-file creation -- Telegraf now supports
filtering to -sample-config command. You can now run
`telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config
file with only the cpu plugin defined, and the influxdb output defined.
- **Breaking Change**: The CPU collection plugin has been refactored to fix some
bugs and outdated dependency issues. At the same time, I also decided to fix
a naming consistency issue, so cpu_percentageIdle will become cpu_usage_idle.
Also, all CPU time measurements now have it indicated in their name, so cpu_idle
will become cpu_time_idle. Additionally, cpu_time measurements are going to be
dropped in the default config.
- **Breaking Change**: The memory plugin has been refactored and some measurements
have been renamed for consistency. Some measurements have also been removed
from the output. They are still being collected by gopsutil, and could easily
be re-added in a "verbose" mode if there is demand for it.
### Features
- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.
### Bugfixes
- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
## v0.1.8 [2015-09-04]
### Release Notes
- Telegraf will now write data in UTC at second precision by default
- Now using Go 1.5 to build telegraf
### Features
- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin
### Bugfixes
## v0.1.7 [2015-08-28]
### Features
- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability
### Bugfixes
- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
## v0.1.6 [2015-08-20]
### Features
- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
### Bugfixes
- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
## v0.1.5 [2015-08-13]
### Features
- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
### Bugfixes
- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format
## v0.1.4 [2015-07-09]
### Features
- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
### Bugfixes
- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
## v0.1.3 [2015-07-05]
### Features
- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
### Bugfixes
- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
## v0.1.2 [2015-07-01]
### Features
- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
### Bugfixes
- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
## v0.1.1 [2015-06-19]

CONFIGURATION.md Normal file

@@ -0,0 +1,218 @@
# Telegraf Configuration
## Generating a Configuration File
A default Telegraf config file can be generated using the -sample-config flag:
`telegraf -sample-config > telegraf.conf`
To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`
## `[tags]` Configuration
Global tags can be specified in the `[tags]` section of the config file in
key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
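For example, a minimal `[tags]` section (the key names here are illustrative):
```toml
[tags]
dc = "us-east-1"
rack = "1a"
```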
## `[agent]` Configuration
Telegraf has a few options you can configure under the `agent` section of the
config.
* **interval**: Default data collection interval for all inputs
* **round_interval**: Rounds collection interval to 'interval',
i.e., if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
for each output, and will flush this buffer on a successful write.
* **collection_jitter**: Collection jitter is used to jitter
the collection by a random amount.
Each plugin will sleep for a random time within jitter before collecting.
This can be used to avoid many plugins querying things like sysfs at the
same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs.
You should not set this below interval.
Maximum flush_interval will be flush_interval + flush_jitter.
* **flush_jitter**: Jitter the flush interval by a random amount.
This is primarily to avoid large write spikes for users running a large
number of telegraf instances.
i.e., a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **hostname**: Override default hostname, if empty use os.Hostname().
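Putting several of these options together, an `[agent]` section might look
like the following sketch (the values shown are illustrative, not shipped defaults):
```toml
[agent]
interval = "10s"
round_interval = true
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "5s" # flushes land every 10-15s
debug = false
quiet = false
hostname = "" # empty means use os.Hostname()
```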
## `[inputs.xxx]` Configuration
There are some configuration options that are configurable per input:
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.
#### Input Filters
There are also filters that can be configured per input:
* **pass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted.
* **drop**: The inverse of pass, if a field name matches, it is not emitted.
* **tagpass**: tag names paired with arrays of strings that are used to filter
measurements from the current input. Each string in the array is tested as a glob
match against the value of the tag, and if it matches, the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
#### Input Configuration Examples
This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.
```toml
[tags]
dc = "denver-1"
[agent]
interval = "10s"
# OUTPUTS
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
precision = "s"
# INPUTS
[[inputs.cpu]]
percpu = true
totalcpu = false
# filter all fields beginning with 'time_'
drop = ["time_*"]
```
#### Input Config: tagpass and tagdrop
```toml
[[inputs.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
# Don't collect CPU data for cpu6 & cpu7
[inputs.cpu.tagdrop]
cpu = [ "cpu6", "cpu7" ]
[[inputs.disk]]
[inputs.disk.tagpass]
# tagpass conditions are OR, not AND.
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
# then the metric passes
fstype = [ "ext4", "xfs" ]
# Globs can also be used on the tag values
path = [ "/opt", "/home*" ]
```
#### Input Config: pass and drop
```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
percpu = false
totalcpu = true
drop = ["usage_guest", "usage_steal"]
# Only store inode related metrics for disks
[[inputs.disk]]
pass = ["inodes*"]
```
#### Input config: prefix, suffix, and override
This plugin will emit measurements with the name `cpu_total`
```toml
[[inputs.cpu]]
name_suffix = "_total"
percpu = false
totalcpu = true
```
This will emit measurements with the name `foobar`
```toml
[[inputs.cpu]]
name_override = "foobar"
percpu = false
totalcpu = true
```
#### Input config: tags
This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`
```toml
[[inputs.cpu]]
percpu = false
totalcpu = true
[inputs.cpu.tags]
tag1 = "foo"
tag2 = "bar"
```
#### Multiple inputs of the same type
Additional inputs (or outputs) of the same type can be specified by defining
more instances in the config file. It is highly recommended that
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
to avoid measurement collisions:
```toml
[[inputs.cpu]]
percpu = false
totalcpu = true
[[inputs.cpu]]
percpu = true
totalcpu = false
name_override = "percpu_usage"
drop = ["cpu_time*"]
```
## `[outputs.xxx]` Configuration
Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.
Outputs also support the same configurable options as inputs
(pass, drop, tagpass, tagdrop)
```toml
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
drop = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
pass = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
precision = "s"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
```

CONTRIBUTING.md Normal file

@@ -0,0 +1,299 @@
## Steps for Contributing:
1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin. If it's an input plugin, it should be structured
like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
Output plugin READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
## Sign the CLA
Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)
## Adding a dependency
Assuming you can already build the project, run these in the telegraf directory:
1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`
## Input Plugins
This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.
Plugin authorship is kept as simple as possible to encourage people to develop
and submit new inputs.
### Input Plugin Guidelines
* A plugin must conform to the `telegraf.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
### Input interface
```go
type Input interface {
SampleConfig() string
Description() string
Gather(Accumulator) error
}
type Accumulator interface {
Add(measurement string,
value interface{},
tags map[string]string,
timestamp ...time.Time)
AddFields(measurement string,
fields map[string]interface{},
tags map[string]string,
timestamp ...time.Time)
}
```
### Accumulator
The way that a plugin emits metrics is by interacting with the Accumulator.
The `Add` function takes three required arguments (and an optional timestamp):
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
* **value**: A value for the metric. This accepts 5 different types of value:
* **int**: The most common type. All int types are accepted but favor using `int64`
Useful for counters, etc.
* **float**: Favor `float64`, useful for gauges, percentages, etc.
* **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
* **string**: Typically used to indicate a message, or some kind of freeform
information.
* **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
* **tags**: This is a map of strings to strings that describes where the metric
came from or what it belongs to. For instance, the `net` plugin adds a tag named
`"interface"` set to the name of the network interface, like `"eth0"`.
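The `AddFields` variant in the interface above attaches several fields to a
single point. A minimal sketch, with hypothetical measurement, field, and tag
names (none of these come from the Telegraf source):
```go
package example

import "github.com/influxdata/telegraf"

// gatherProcess emits all of the values describing one process as a
// single point via AddFields, rather than one Add call per value.
func gatherProcess(acc telegraf.Accumulator) {
	tags := map[string]string{"pid": "42"} // hypothetical tag
	acc.AddFields("process",
		map[string]interface{}{
			"cpu_time":     3.14,
			"memory_bytes": int64(1 << 20),
		},
		tags)
}
```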
Let's say you've written a plugin that emits metrics about processes on the current host.
### Input Plugin Example
```go
package simple
// simple.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Simple struct {
Ok bool
}
func (s *Simple) Description() string {
return "a demo plugin"
}
func (s *Simple) SampleConfig() string {
return "ok = true # indicate if everything is fine"
}
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.Add("state", "pretty good", nil)
} else {
acc.Add("state", "not great", nil)
}
return nil
}
func init() {
inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```
## Service Input Plugins
This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.
Service Input Plugins are substantially more complicated than regular plugins, as they
must manage their own goroutines and locks to ensure data integrity. Service Input Plugins should
be avoided unless there is no way to achieve their behavior with a regular plugin.
Their interface is quite similar to a regular plugin, with the addition of `Start()`
and `Stop()` methods.
### Service Plugin Guidelines
* Same as the input plugin guidelines, except that they must conform to the
`telegraf.ServiceInput` interface.
### Service Plugin interface
```go
type ServiceInput interface {
SampleConfig() string
Description() string
Gather(Accumulator) error
Start() error
Stop()
}
```
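For a concrete shape, here is a minimal, hypothetical service input sketch (the
`echo` name and its behavior are invented for illustration, not taken from the
Telegraf source): `Start` launches the background work, `Stop` tears it down,
and `Gather` can be a no-op because a service input pushes its metrics asynchronously.
```go
package echo

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Echo is a hypothetical service input: a background goroutine runs
// between Start and Stop and would push metrics as they arrive.
type Echo struct {
	done chan struct{}
}

func (e *Echo) Description() string  { return "a demo service input" }
func (e *Echo) SampleConfig() string { return "" }

func (e *Echo) Start() error {
	e.done = make(chan struct{})
	go func() {
		// accept connections/packets and emit metrics here
		<-e.done
	}()
	return nil
}

func (e *Echo) Stop() { close(e.done) }

func (e *Echo) Gather(acc telegraf.Accumulator) error {
	// nothing to do here: the background service produces the metrics
	return nil
}

func init() {
	inputs.Add("echo", func() telegraf.Input { return &Echo{} })
}
```
As with regular inputs in this guide, registration goes through `inputs.Add`.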
## Output Plugins
This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.
### Output Plugin Guidelines
* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
### Output interface
```go
type Output interface {
Connect() error
Close() error
Description() string
SampleConfig() string
Write(metrics []telegraf.Metric) error
}
```
### Output Example
```go
package simpleoutput
// simpleoutput.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
)
type Simple struct {
Ok bool
}
func (s *Simple) Description() string {
return "a demo output"
}
func (s *Simple) SampleConfig() string {
return "url = localhost"
}
func (s *Simple) Connect() error {
// Make a connection to the URL here
return nil
}
func (s *Simple) Close() error {
// Close connection to the URL here
return nil
}
func (s *Simple) Write(metrics []telegraf.Metric) error {
for _, m := range metrics {
// write `m` to the output sink here
}
return nil
}
func init() {
outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```
## Service Output Plugins
This section is for developers who want to create new "service" output. A
service output differs from a regular output in that it operates a background service
while Telegraf is running. One example would be the `prometheus_client` output,
which operates an HTTP server.
Their interface is quite similar to a regular output, with the addition of `Start()`
and `Stop()` methods.
### Service Output Guidelines
* Same as the `Output` guidelines, except that they must conform to the
`telegraf.ServiceOutput` interface.
### Service Output interface
```go
type ServiceOutput interface {
Connect() error
Close() error
Description() string
SampleConfig() string
Write(metrics []telegraf.Metric) error
Start() error
Stop()
}
```
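As a hedged sketch (the plugin name and listen address are invented for
illustration, not from the Telegraf source), a service output keeps the normal
`Write` path and adds `Start`/`Stop` to manage its background server:
```go
package simpleserviceoutput

import (
	"net"
	"net/http"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs"
)

// Simple is a hypothetical service output that exposes collected
// metrics over HTTP while Telegraf runs.
type Simple struct {
	ln net.Listener
}

func (s *Simple) Description() string  { return "a demo service output" }
func (s *Simple) SampleConfig() string { return `listen = ":9126"` }
func (s *Simple) Connect() error       { return nil }
func (s *Simple) Close() error         { return nil }

func (s *Simple) Start() error {
	ln, err := net.Listen("tcp", ":9126") // hypothetical listen address
	if err != nil {
		return err
	}
	s.ln = ln
	go http.Serve(ln, nil) // serve until the listener is closed
	return nil
}

func (s *Simple) Stop() {
	s.ln.Close()
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
	for _, m := range metrics {
		_ = m // cache `m` for the HTTP handler to expose
	}
	return nil
}

func init() {
	outputs.Add("simpleserviceoutput",
		func() telegraf.Output { return &Simple{} })
}
```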
## Unit Tests
### Execute short tests
Execute `make test-short`.
### Execute long tests
As Telegraf collects metrics from several third-party services, it becomes
difficult to mock each service, as some of them have complicated protocols
which would take time to replicate.
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(e.g., https://github.com/influxdata/telegraf/blob/master/plugins/redis/redis_test.go)
a simple mock will suffice.
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- Execute `make test`
**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running the tests.

Godeps Normal file

@@ -0,0 +1,59 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/boltdb/bolt ee4a0888a9abe7eefe5a0992ca4cb06864839873
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/gogo/protobuf e8904f58e872a473a5b91bc9bf3377d223555263
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
github.com/hashicorp/raft 057b893fd996696719e98b6c44649ea14968c811
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb 697f48b4e62e514e701ffec39978b864a3c666e6
github.com/influxdb/influxdb 697f48b4e62e514e701ffec39978b864a3c666e6
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 85bf0974ed06e4e668595ae2b4de02e772a2819b
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4

Godeps_windows Normal file

@@ -0,0 +1,63 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
github.com/Shopify/sarama b1da1753dedcf77d053613b7eae907b98a2ddad5
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757
github.com/aws/aws-sdk-go 2a34ea8812f32aae75b43400f9424a0559840659
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/boltdb/bolt ee4a0888a9abe7eefe5a0992ca4cb06864839873
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 02a8beb401b20e112cff3ea740545960b667eab1
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/gogo/protobuf e8904f58e872a473a5b91bc9bf3377d223555263
github.com/golang/protobuf 45bba206dd5270d96bac4942dcfe515726613249
github.com/golang/snappy 1963d058044b19e16595f80d5050fa54e2070438
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
github.com/hashicorp/raft 057b893fd996696719e98b6c44649ea14968c811
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb 60df13fb566d07ff2cdd07aa23a4796a02b0df3c
github.com/influxdb/influxdb 60df13fb566d07ff2cdd07aa23a4796a02b0df3c
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 9d8191d6a6e17dcf43b10a20084a11e8c1aa92e6
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6fc2e00a0d64b1f7fc1212dae5b0c939cf6d9ac4
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4

LICENSE_OF_DEPENDENCIES.md Normal file

@@ -0,0 +1,33 @@
# List
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/stretchr/objx [MIT LICENSE](github.com/stretchr/objx)
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)

Makefile Normal file

@@ -0,0 +1,100 @@
UNAME := $(shell sh -c 'uname')
VERSION := $(shell sh -c 'git describe --always --tags')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif
# Standard Telegraf build
default: prepare build
# Windows build
windows: prepare-windows build-windows
# Only run the build (no dependency grabbing)
build:
go build -o telegraf -ldflags \
"-X main.Version=$(VERSION)" \
./cmd/telegraf/telegraf.go
build-windows:
go build -o telegraf.exe -ldflags \
"-X main.Version=$(VERSION)" \
./cmd/telegraf/telegraf.go
# Build with race detector
dev: prepare
go build -race -o telegraf -ldflags \
"-X main.Version=$(VERSION)" \
./cmd/telegraf/telegraf.go
# Get dependencies and use gdm to checkout changesets
prepare:
go get github.com/sparrc/gdm
gdm restore
# Use the windows godeps file to prepare dependencies
prepare-windows:
go get github.com/sparrc/gdm
gdm restore -f Godeps_windows
# Run all docker containers necessary for unit tests
docker-run:
ifeq ($(UNAME), Darwin)
docker run --name kafka \
-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
endif
ifeq ($(UNAME), Linux)
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
endif
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
docker run --name memcached -p "11211:11211" -d memcached
docker run --name postgres -p "5432:5432" -d postgres
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
docker run --name redis -p "6379:6379" -d redis
docker run --name aerospike -p "3000:3000" -d aerospike
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
docker run --name aerospike -p "3000:3000" -d aerospike
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
# Kill all docker containers, ignore errors
docker-kill:
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
# Run full unit tests using docker containers (includes setup and teardown)
test: docker-kill docker-run
# Sleeping for kafka leadership election, TSDB setup, etc.
sleep 60
# SUCCESS, running tests
go test -race ./...
# Run "short" unit tests
test-short:
go test -short ./...
.PHONY: test

README.md

@@ -1,22 +1,84 @@
# Telegraf - A native agent for InfluxDB
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf)
Telegraf is an agent written in Go for collecting metrics from the system it's running on or from other services and writing them into InfluxDB.
Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
Design goals are to have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics from well known services (like Hadoop, or Postgres, or Redis) and third party APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
We'll eagerly accept pull requests for new plugins and will manage the set of plugins that Telegraf supports. See the bottom of this doc for instructions on writing new plugins.
New input and output plugins are designed to be easy to contribute,
we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.
## Quickstart
## Installation:
* Build from source or download telegraf:
NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions
of telegraf, both in the database layout and the configuration file. 0.2.x
will continue to be supported; see below for download links.
### Linux packages for Debian/Ubuntu and RHEL/CentOS:
For more details on the differences between Telegraf 0.2.x and 0.10.x, see
the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/).
### Linux deb and rpm Packages:
Latest:
* http://get.influxdb.org/telegraf/telegraf_0.10.2-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1.x86_64.rpm
0.2.x:
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
##### Package Instructions:
* Telegraf binary is installed in `/usr/bin/telegraf`
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
* On sysv systems, the telegraf daemon can be controlled via
`service telegraf [action]`
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
controlled via `systemctl [action] telegraf`
### yum/apt Repositories:
There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/v0.9/introduction/installation/#installation)
for instructions, replacing the `influxdb` package name with `telegraf`.
### Linux tarballs:
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_arm.tar.gz
0.2.x:
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
##### tarball Instructions:
To install the full directory structure with config file, run:
```
http://get.influxdb.org/telegraf/telegraf_0.1.2_amd64.deb
http://get.influxdb.org/telegraf/telegraf-0.1.2-1.x86_64.rpm
sudo tar -C / -xvf ./telegraf-v0.10.2-1_linux_amd64.tar.gz
```
To extract only the binary, run:
```
tar -zxvf telegraf-v0.10.2-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```
### Ansible Role:
Ansible role: https://github.com/rossmcdonald/telegraf
### OSX via Homebrew:
```
@@ -24,154 +86,146 @@ brew update
brew install telegraf
```
### How to use it:
### From Source:
* Run `telegraf -sample-config > telegraf.toml` to create an initial configuration
* Edit the configuration to match your needs
* Run `telegraf -config telegraf.toml -test` to output one full measurement sample to STDOUT
* Run `telegraf -config telegraf.toml` to gather and send metrics to InfluxDB
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.5+.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
## Telegraf Options
## How to use it:
Telegraf has a few options you can configure under the `agent` section of the config. If you don't see an `agent` section, run `telegraf -sample-config > telegraf.toml` to create a valid initial configuration:
```console
$ telegraf -help
Telegraf, The plugin-driven server agent for collecting and reporting metrics.
* **hostname**: The hostname is passed as a tag. By default this will be the value returned by `hostname` on the machine running Telegraf. You can override that value here.
* **interval**: How often to gather metrics. Uses a simple number + unit parser, i.e., "10s" for 10 seconds or "5m" for 5 minutes.
* **debug**: Set to true to gather and send metrics to STDOUT as well as InfluxDB.
Usage:
## Supported Plugins
telegraf <flags>
Telegraf currently has support for collecting metrics from:
The flags are:
* System (memory, CPU, network, etc.)
* Docker
* MySQL
* Prometheus (client libraries and exporters)
* PostgreSQL
* Redis
-config <file> configuration file to load
-test gather metrics once, print them to stdout, and exit
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
-version print the version to stdout
We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API.
Examples:
## Plugin Options
# generate a telegraf config file:
telegraf -sample-config > telegraf.conf
There are 3 configuration options that are configurable per plugin:
# generate config with only cpu input & influxdb output plugins defined
telegraf -sample-config -input-filter cpu -output-filter influxdb
* **pass**: An array of strings that is used to filter metrics generated by the current plugin. Each string in the array is tested as a prefix against metrics and if it matches, the metric is emitted.
* **drop**: The inverse of pass, if a metric matches, it is not emitted.
* **interval**: How often to gather this metric. Normal plugins use a single global interval, but if one particular plugin should be run less or more often, you can configure that here.
# run a single telegraf collection, outputting metrics to stdout
telegraf -config telegraf.conf -test
## Plugins
# run telegraf with all plugins defined in config file
telegraf -config telegraf.conf
This section is for developers that want to create new collection plugins. Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.
Plugin authorship is kept as simple as possible to promote people to develop
and submit new plugins.
## Guidelines
* A plugin must conform to the `plugins.Plugin` interface.
* Telegraf promises to run each plugin's Gather function serially. This means
developers don't have to worry about thread safety within these functions.
* Each generated metric automatically has the name of the plugin that generated
it prepended. This is to keep plugins honest.
* Plugins should call `plugins.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdb/telegraf/plugins/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
### Plugin interface
```go
type Plugin interface {
SampleConfig() string
Description() string
Gather(Accumulator) error
}
type Accumulator interface {
Add(measurement string, value interface{}, tags map[string]string)
AddValuesWithTime(measurement string, values map[string]interface{}, tags map[string]string, timestamp time.Time)
}
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```
### Accumulator
## Configuration
The way that a plugin emits metrics is by interacting with the Accumulator.
See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced
configuration options.
The `Add` function takes 3 arguments:
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
* **value**: A value for the metric. This accepts 5 different types of value:
* **int**: The most common type. All int types are accepted but favor using `int64`
Useful for counters, etc.
* **float**: Favor `float64`, useful for gauges, percentages, etc.
* **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
* **string**: Typically used to indicate a message, or some kind of freeform information.
* **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
* **tags**: This is a map of strings to strings to describe the where or who about the metric. For instance, the `net` plugin adds a tag named `"interface"` set to the name of the network interface, like `"eth0"`.
## Supported Input Plugins
The `AddValuesWithTime` allows multiple values for a point to be passed. The values
used are the same type profile as **value** above. The **timestamp** argument
allows a point to be registered as having occurred at an arbitrary time.
Telegraf currently has support for collecting metrics from many sources. For
more information on each, please look at the directory of the same name in
`plugins/inputs`.
Let's say you've written a plugin that emits metrics about processes on the current host.
Currently implemented sources:
```go
* aerospike
* apache
* bcache
* disque
* docker
* elasticsearch
* exec (generic JSON-emitting executable plugin)
* haproxy
* httpjson (generic JSON-emitting http service plugin)
* influxdb
* jolokia
* leofs
* lustre2
* mailchimp
* memcached
* mongodb
* mysql
* nginx
* nsq
* phpfpm
* phusion passenger
* ping
* postgresql
* powerdns
* procstat
* prometheus
* puppetagent
* rabbitmq
* redis
* rethinkdb
* sql server (microsoft)
* twemproxy
* zfs
* zookeeper
* sensors
* snmp
* win_perf_counters (windows performance counters)
* system
* cpu
* mem
* net
* netstat
* disk
* diskio
* swap
type Process struct {
CPUTime float64
MemoryBytes int64
PID int
}
Telegraf can also collect metrics via the following service plugins:
func Gather(acc plugins.Accumulator) error {
for _, process := range system.Processes() {
tags := map[string]string {
"pid": fmt.Sprintf("%d", process.Pid),
}
* statsd
* kafka_consumer
* github_webhooks
acc.Add("cpu", process.CPUTime, tags)
acc.Add("memory", process.MemoryBytes, tags)
}
}
```
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
### Example
## Supported Output Plugins
```go
package simple
* influxdb
* amon
* amqp
* aws kinesis
* aws cloudwatch
* datadog
* graphite
* kafka
* librato
* mqtt
* nsq
* opentsdb
* prometheus
* riemann
// simple.go
import "github.com/influxdb/telegraf/plugins"
type Simple struct {
Ok bool
}
func (s *Simple) Description() string {
return "a demo plugin"
}
func (s *Simple) SampleConfig() string {
return "ok = true # indicate if everything is fine"
}
func (s *Simple) Gather(acc plugins.Accumulator) error {
if s.Ok {
acc.Add("state", "pretty good", nil)
} else {
acc.Add("state", "not great", nil)
}
return nil
}
func init() {
plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
}
```
## Contributing
Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.

Vagrantfile vendored

@@ -1,122 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# All Vagrant configuration is done here. The most common configuration
# options are documented and commented below. For a complete reference,
# please see the online documentation at vagrantup.com.
# Every Vagrant virtual environment requires a box to build off of.
config.vm.box = "ubuntu/trusty64"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
# config.vm.network "forwarded_port", guest: 80, host: 8080
# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"
# Create a public network, which generally matched to bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"
# If true, then any SSH connections made will enable agent forwarding.
# Default value: false
# config.ssh.forward_agent = true
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
config.vm.synced_folder "~/go", "/home/vagrant/go"
# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
config.vm.provider "virtualbox" do |vb|
# # Don't boot with headless mode
# vb.gui = true
#
# # Use VBoxManage to customize the VM. For example to change memory:
vb.customize ["modifyvm", :id, "--memory", "1024"]
end
#
# View the documentation for the provider you're using for more
# information on available options.
# Enable provisioning with CFEngine. CFEngine Community packages are
# automatically installed. For example, configure the host as a
# policy server and optionally a policy file to run:
#
# config.vm.provision "cfengine" do |cf|
# cf.am_policy_hub = true
# # cf.run_file = "motd.cf"
# end
#
# You can also configure and bootstrap a client to an existing
# policy server:
#
# config.vm.provision "cfengine" do |cf|
# cf.policy_server_address = "10.0.2.15"
# end
# Enable provisioning with Puppet stand alone. Puppet manifests
# are contained in a directory path relative to this Vagrantfile.
# You will need to create the manifests directory and a manifest in
# the file default.pp in the manifests_path directory.
#
# config.vm.provision "puppet" do |puppet|
# puppet.manifests_path = "manifests"
# puppet.manifest_file = "site.pp"
# end
# Enable provisioning with chef solo, specifying a cookbooks path, roles
# path, and data_bags path (all relative to this Vagrantfile), and adding
# some recipes and/or roles.
#
# config.vm.provision "chef_solo" do |chef|
# chef.cookbooks_path = "../my-recipes/cookbooks"
# chef.roles_path = "../my-recipes/roles"
# chef.data_bags_path = "../my-recipes/data_bags"
# chef.add_recipe "mysql"
# chef.add_role "web"
#
# # You may also specify custom JSON attributes:
# chef.json = { mysql_password: "foo" }
# end
# Enable provisioning with chef server, specifying the chef server URL,
# and the path to the validation key (relative to this Vagrantfile).
#
# The Opscode Platform uses HTTPS. Substitute your organization for
# ORGNAME in the URL and validation key.
#
# If you have your own Chef Server, use the appropriate URL, which may be
# HTTP instead of HTTPS depending on your configuration. Also change the
# validation key to validation.pem.
#
# config.vm.provision "chef_client" do |chef|
# chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
# chef.validation_key_path = "ORGNAME-validator.pem"
# end
#
# If you're using the Opscode platform, your validator client is
# ORGNAME-validator, replacing ORGNAME with your organization name.
#
# If you have your own Chef Server, the default validation client name is
# chef-validator, unless you changed the configuration.
#
# chef.validation_client_name = "ORGNAME-validator"
end

accumulator.go

@@ -1,100 +1,21 @@
package telegraf
import (
"fmt"
"sort"
"strings"
"sync"
"time"
import "time"
"github.com/influxdb/influxdb/client"
)
type Accumulator interface {
// Create a point with a value, decorating it with tags
// NOTE: tags is expected to be owned by the caller, don't mutate
// it after passing to Add.
Add(measurement string,
value interface{},
tags map[string]string,
t ...time.Time)
type BatchPoints struct {
mu sync.Mutex
AddFields(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
client.BatchPoints
Debug bool
Prefix string
Config *ConfiguredPlugin
}
func (bp *BatchPoints) Add(measurement string, val interface{}, tags map[string]string) {
bp.mu.Lock()
defer bp.mu.Unlock()
measurement = bp.Prefix + measurement
if bp.Config != nil {
if !bp.Config.ShouldPass(measurement) {
return
}
}
if bp.Debug {
var tg []string
for k, v := range tags {
tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
}
sort.Strings(tg)
fmt.Printf("> [%s] %s value=%v\n", strings.Join(tg, " "), measurement, val)
}
bp.Points = append(bp.Points, client.Point{
Measurement: measurement,
Tags: tags,
Fields: map[string]interface{}{
"value": val,
},
})
}
func (bp *BatchPoints) AddValuesWithTime(
measurement string,
values map[string]interface{},
tags map[string]string,
timestamp time.Time,
) {
bp.mu.Lock()
defer bp.mu.Unlock()
measurement = bp.Prefix + measurement
if bp.Config != nil {
if !bp.Config.ShouldPass(measurement) {
return
}
}
if bp.Debug {
var tg []string
for k, v := range tags {
tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
}
var vals []string
for k, v := range values {
vals = append(vals, fmt.Sprintf("%s=%v", k, v))
}
sort.Strings(tg)
sort.Strings(vals)
fmt.Printf("> [%s] %s %s\n", strings.Join(tg, " "), measurement, strings.Join(vals, " "))
}
bp.Points = append(bp.Points, client.Point{
Measurement: measurement,
Tags: tags,
Fields: values,
Time: timestamp,
})
Debug() bool
SetDebug(enabled bool)
}

agent.go

@@ -1,292 +0,0 @@
package telegraf
import (
"fmt"
"log"
"net/url"
"os"
"sort"
"sync"
"time"
"github.com/influxdb/influxdb/client"
"github.com/influxdb/telegraf/plugins"
)
type runningPlugin struct {
name string
plugin plugins.Plugin
config *ConfiguredPlugin
}
type Agent struct {
Interval Duration
Debug bool
Hostname string
Config *Config
plugins []*runningPlugin
conn *client.Client
}
func NewAgent(config *Config) (*Agent, error) {
agent := &Agent{Config: config, Interval: Duration{10 * time.Second}}
err := config.ApplyAgent(agent)
if err != nil {
return nil, err
}
if agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
agent.Hostname = hostname
}
if config.Tags == nil {
config.Tags = map[string]string{}
}
config.Tags["host"] = agent.Hostname
return agent, nil
}
func (agent *Agent) Connect() error {
config := agent.Config
u, err := url.Parse(config.URL)
if err != nil {
return err
}
c, err := client.NewClient(client.Config{
URL: *u,
Username: config.Username,
Password: config.Password,
UserAgent: config.UserAgent,
})
if err != nil {
return err
}
agent.conn = c
return nil
}
func (a *Agent) LoadPlugins() ([]string, error) {
var names []string
for _, name := range a.Config.PluginsDeclared() {
creator, ok := plugins.Plugins[name]
if !ok {
return nil, fmt.Errorf("Undefined but requested plugin: %s", name)
}
plugin := creator()
config, err := a.Config.ApplyPlugin(name, plugin)
if err != nil {
return nil, err
}
a.plugins = append(a.plugins, &runningPlugin{name, plugin, config})
names = append(names, name)
}
sort.Strings(names)
return names, nil
}
func (a *Agent) crankParallel() error {
points := make(chan *BatchPoints, len(a.plugins))
var wg sync.WaitGroup
for _, plugin := range a.plugins {
if plugin.config.Interval != 0 {
continue
}
wg.Add(1)
go func(plugin *runningPlugin) {
defer wg.Done()
var acc BatchPoints
acc.Debug = a.Debug
acc.Prefix = plugin.name + "_"
acc.Config = plugin.config
plugin.plugin.Gather(&acc)
points <- &acc
}(plugin)
}
wg.Wait()
close(points)
var acc BatchPoints
acc.Tags = a.Config.Tags
acc.Time = time.Now()
acc.Database = a.Config.Database
for sub := range points {
acc.Points = append(acc.Points, sub.Points...)
}
_, err := a.conn.Write(acc.BatchPoints)
return err
}
func (a *Agent) crank() error {
var acc BatchPoints
acc.Debug = a.Debug
for _, plugin := range a.plugins {
acc.Prefix = plugin.name + "_"
acc.Config = plugin.config
err := plugin.plugin.Gather(&acc)
if err != nil {
return err
}
}
acc.Tags = a.Config.Tags
acc.Time = time.Now()
acc.Database = a.Config.Database
_, err := a.conn.Write(acc.BatchPoints)
return err
}
func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) error {
ticker := time.NewTicker(plugin.config.Interval)
for {
var acc BatchPoints
acc.Debug = a.Debug
acc.Prefix = plugin.name + "_"
acc.Config = plugin.config
err := plugin.plugin.Gather(&acc)
if err != nil {
return err
}
acc.Tags = a.Config.Tags
acc.Time = time.Now()
acc.Database = a.Config.Database
a.conn.Write(acc.BatchPoints)
select {
case <-shutdown:
return nil
case <-ticker.C:
continue
}
}
}
func (a *Agent) TestAllPlugins() error {
var names []string
for name, _ := range plugins.Plugins {
names = append(names, name)
}
sort.Strings(names)
var acc BatchPoints
acc.Debug = true
fmt.Printf("* Testing all plugins with default configuration\n")
for _, name := range names {
plugin := plugins.Plugins[name]()
fmt.Printf("* Plugin: %s\n", name)
acc.Prefix = name + "_"
err := plugin.Gather(&acc)
if err != nil {
return err
}
}
return nil
}
func (a *Agent) Test() error {
var acc BatchPoints
acc.Debug = true
for _, plugin := range a.plugins {
acc.Prefix = plugin.name + "_"
acc.Config = plugin.config
fmt.Printf("* Plugin: %s\n", plugin.name)
if plugin.config.Interval != 0 {
fmt.Printf("* Internal: %s\n", plugin.config.Interval)
}
err := plugin.plugin.Gather(&acc)
if err != nil {
return err
}
}
return nil
}
func (a *Agent) Run(shutdown chan struct{}) error {
if a.conn == nil {
err := a.Connect()
if err != nil {
return err
}
}
var wg sync.WaitGroup
for _, plugin := range a.plugins {
if plugin.config.Interval != 0 {
wg.Add(1)
go func(plugin *runningPlugin) {
defer wg.Done()
a.crankSeparate(shutdown, plugin)
}(plugin)
}
}
defer wg.Wait()
ticker := time.NewTicker(a.Interval.Duration)
for {
err := a.crankParallel()
if err != nil {
log.Printf("Error in plugins: %s", err)
}
select {
case <-shutdown:
return nil
case <-ticker.C:
continue
}
}
}

agent/accumulator.go Normal file

@@ -0,0 +1,163 @@
package agent
import (
"fmt"
"log"
"math"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/models"
)
func NewAccumulator(
inputConfig *internal_models.InputConfig,
metrics chan telegraf.Metric,
) *accumulator {
acc := accumulator{}
acc.metrics = metrics
acc.inputConfig = inputConfig
return &acc
}
type accumulator struct {
sync.Mutex
metrics chan telegraf.Metric
defaultTags map[string]string
debug bool
inputConfig *internal_models.InputConfig
prefix string
}
func (ac *accumulator) Add(
measurement string,
value interface{},
tags map[string]string,
t ...time.Time,
) {
fields := make(map[string]interface{})
fields["value"] = value
ac.AddFields(measurement, fields, tags, t...)
}
func (ac *accumulator) AddFields(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if len(fields) == 0 || len(measurement) == 0 {
return
}
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
return
}
// Override measurement name if set
if len(ac.inputConfig.NameOverride) != 0 {
measurement = ac.inputConfig.NameOverride
}
// Apply measurement prefix and suffix if set
if len(ac.inputConfig.MeasurementPrefix) != 0 {
measurement = ac.inputConfig.MeasurementPrefix + measurement
}
if len(ac.inputConfig.MeasurementSuffix) != 0 {
measurement = measurement + ac.inputConfig.MeasurementSuffix
}
if tags == nil {
tags = make(map[string]string)
}
// Apply plugin-wide tags if set
for k, v := range ac.inputConfig.Tags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply daemon-wide tags if set
for k, v := range ac.defaultTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
result := make(map[string]interface{})
for k, v := range fields {
// Filter out any filtered fields
if ac.inputConfig != nil {
if !ac.inputConfig.Filter.ShouldPass(k) {
continue
}
}
result[k] = v
// Validate uint64 and float64 fields
switch val := v.(type) {
case uint64:
// InfluxDB does not support writing uint64
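// Values below 2^63 convert to int64 losslessly; anything larger is capped at math.MaxInt64.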
if val < uint64(9223372036854775808) {
result[k] = int64(val)
} else {
result[k] = int64(9223372036854775807)
}
case float64:
// NaNs are invalid values in influxdb, skip measurement
if math.IsNaN(val) || math.IsInf(val, 0) {
if ac.debug {
log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
}
continue
}
}
}
fields = nil
if len(result) == 0 {
return
}
var timestamp time.Time
if len(t) > 0 {
timestamp = t[0]
} else {
timestamp = time.Now()
}
if ac.prefix != "" {
measurement = ac.prefix + measurement
}
m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return
}
if ac.debug {
fmt.Println("> " + m.String())
}
ac.metrics <- m
}
func (ac *accumulator) Debug() bool {
return ac.debug
}
func (ac *accumulator) SetDebug(debug bool) {
ac.debug = debug
}
func (ac *accumulator) setDefaultTags(tags map[string]string) {
ac.defaultTags = tags
}
func (ac *accumulator) addDefaultTag(key, value string) {
ac.defaultTags[key] = value
}

agent/agent.go (new file, 380 lines)

@@ -0,0 +1,380 @@
package agent
import (
cryptorand "crypto/rand"
"fmt"
"log"
"math/big"
"math/rand"
"os"
"runtime"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/internal/models"
)
// Agent runs telegraf and collects data based on the given config
type Agent struct {
Config *config.Config
}
// NewAgent returns an Agent struct based off the given Config
func NewAgent(config *config.Config) (*Agent, error) {
a := &Agent{
Config: config,
}
if a.Config.Agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
a.Config.Agent.Hostname = hostname
}
config.Tags["host"] = a.Config.Agent.Hostname
return a, nil
}
// Connect connects to all configured outputs
func (a *Agent) Connect() error {
for _, o := range a.Config.Outputs {
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
if err := ot.Start(); err != nil {
log.Printf("Service for output %s failed to start, exiting\n%s\n",
o.Name, err.Error())
return err
}
}
if a.Config.Agent.Debug {
log.Printf("Attempting connection to output: %s\n", o.Name)
}
err := o.Output.Connect()
if err != nil {
log.Printf("Failed to connect to output %s, retrying in 15s, error was '%s' \n", o.Name, err)
time.Sleep(15 * time.Second)
err = o.Output.Connect()
if err != nil {
return err
}
}
if a.Config.Agent.Debug {
log.Printf("Successfully connected to output: %s\n", o.Name)
}
}
return nil
}
// Close closes the connection to all configured outputs
func (a *Agent) Close() error {
var err error
for _, o := range a.Config.Outputs {
err = o.Output.Close()
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
ot.Stop()
}
}
return err
}
func panicRecover(input *internal_models.RunningInput) {
if err := recover(); err != nil {
trace := make([]byte, 2048)
runtime.Stack(trace, true)
log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
input.Name, err, trace)
log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
"stack trace, configuration, and OS information: " +
"https://github.com/influxdata/telegraf/issues/new")
}
}
// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
var wg sync.WaitGroup
start := time.Now()
counter := 0
jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
for _, input := range a.Config.Inputs {
if input.Config.Interval != 0 {
continue
}
wg.Add(1)
counter++
go func(input *internal_models.RunningInput) {
defer panicRecover(input)
defer wg.Done()
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug)
acc.setDefaultTags(a.Config.Tags)
if jitter != 0 {
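// Sleep a random duration in [0, CollectionJitter) so inputs do not all gather at the same instant.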
nanoSleep := rand.Int63n(jitter)
d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
if err != nil {
log.Printf("Jittering collection interval failed for plugin %s",
input.Name)
} else {
time.Sleep(d)
}
}
if err := input.Input.Gather(acc); err != nil {
log.Printf("Error in input [%s]: %s", input.Name, err)
}
}(input)
}
if counter == 0 {
return nil
}
wg.Wait()
elapsed := time.Since(start)
if !a.Config.Agent.Quiet {
log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
a.Config.Agent.Interval.Duration, counter, elapsed)
}
return nil
}
// gatherSeparate runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
shutdown chan struct{},
input *internal_models.RunningInput,
metricC chan telegraf.Metric,
) error {
defer panicRecover(input)
ticker := time.NewTicker(input.Config.Interval)
for {
var outerr error
start := time.Now()
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug)
acc.setDefaultTags(a.Config.Tags)
if err := input.Input.Gather(acc); err != nil {
log.Printf("Error in input [%s]: %s", input.Name, err)
}
elapsed := time.Since(start)
if !a.Config.Agent.Quiet {
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
input.Config.Interval, input.Name, elapsed)
}
if outerr != nil {
return outerr
}
select {
case <-shutdown:
return nil
case <-ticker.C:
continue
}
}
}
// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
shutdown := make(chan struct{})
defer close(shutdown)
metricC := make(chan telegraf.Metric)
// dummy receiver for the point channel
go func() {
for {
select {
case <-metricC:
// do nothing
case <-shutdown:
return
}
}
}()
for _, input := range a.Config.Inputs {
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(true)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
if input.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", input.Config.Interval)
}
if err := input.Input.Gather(acc); err != nil {
return err
}
// Special instructions for some inputs. cpu, for example, needs to be
// run twice in order to return cpu usage percentages.
switch input.Name {
case "cpu", "mongodb", "procstat":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
if err := input.Input.Gather(acc); err != nil {
return err
}
}
}
return nil
}
// flush writes a list of points to all configured outputs
func (a *Agent) flush() {
var wg sync.WaitGroup
wg.Add(len(a.Config.Outputs))
for _, o := range a.Config.Outputs {
go func(output *internal_models.RunningOutput) {
defer wg.Done()
err := output.Write()
if err != nil {
log.Printf("Error writing to output [%s]: %s\n",
output.Name, err.Error())
}
}(o)
}
wg.Wait()
}
// flusher monitors the points input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 200)
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
for {
select {
case <-shutdown:
log.Println("Hang on, flushing any cached points before shutdown")
a.flush()
return nil
case <-ticker.C:
a.flush()
case m := <-metricC:
for _, o := range a.Config.Outputs {
o.AddPoint(m)
}
}
}
}
// jitterInterval applies the interval jitter to the flush interval using the
// crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
var jitter int64
outinterval := ininterval
if injitter.Nanoseconds() != 0 {
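// Draw a uniform random jitter in [0, injitter), so the result lands in [ininterval, ininterval+injitter).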
maxjitter := big.NewInt(injitter.Nanoseconds())
if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
jitter = j.Int64()
}
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
}
if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
outinterval = time.Duration(500 * time.Millisecond)
}
return outinterval
}
// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
var wg sync.WaitGroup
a.Config.Agent.FlushInterval.Duration = jitterInterval(
a.Config.Agent.FlushInterval.Duration,
a.Config.Agent.FlushJitter.Duration)
log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
"Flush Interval:%s \n",
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all input threads for accumulating points
metricC := make(chan telegraf.Metric, 1000)
// Round collection to nearest interval by sleeping
if a.Config.Agent.RoundInterval {
i := int64(a.Config.Agent.Interval.Duration)
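// Sleep until the next multiple of the interval; e.g. with a 10s interval, starting at :07 sleeps 3s so collection begins at :10.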
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
}
ticker := time.NewTicker(a.Config.Agent.Interval.Duration)
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, metricC); err != nil {
log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
}()
for _, input := range a.Config.Inputs {
// Start service of any ServicePlugins
switch p := input.Input.(type) {
case telegraf.ServiceInput:
if err := p.Start(); err != nil {
log.Printf("Service for input %s failed to start, exiting\n%s\n",
input.Name, err.Error())
return err
}
defer p.Stop()
}
// Special handling for inputs that have their own collection interval
// configured. Default intervals are handled below with gatherParallel
if input.Config.Interval != 0 {
wg.Add(1)
go func(input *internal_models.RunningInput) {
defer wg.Done()
if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
log.Printf("%s", err.Error())
}
}(input)
}
}
defer wg.Wait()
for {
if err := a.gatherParallel(metricC); err != nil {
log.Printf("%s", err.Error())
}
select {
case <-shutdown:
return nil
case <-ticker.C:
continue
}
}
}

agent/agent_test.go (new file, 175 lines)

@@ -0,0 +1,175 @@
package agent
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/influxdata/telegraf/internal/config"
// needing to load the plugins
_ "github.com/influxdata/telegraf/plugins/inputs/all"
// needing to load the outputs
_ "github.com/influxdata/telegraf/plugins/outputs/all"
)
func TestAgent_LoadPlugin(t *testing.T) {
c := config.NewConfig()
c.InputFilters = []string{"mysql"}
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ := NewAgent(c)
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"foo"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 0, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "foo"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "redis"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Inputs))
c = config.NewConfig()
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Inputs))
}
func TestAgent_LoadOutput(t *testing.T) {
c := config.NewConfig()
c.OutputFilters = []string{"influxdb"}
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ := NewAgent(c)
assert.Equal(t, 2, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"kafka"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 1, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"foo"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 0, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "foo"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "kafka"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
assert.Equal(t, 3, len(c.Outputs))
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
c = config.NewConfig()
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
}
func TestAgent_ZeroJitter(t *testing.T) {
flushinterval := jitterInterval(time.Duration(10*time.Second),
time.Duration(0*time.Second))
actual := flushinterval.Nanoseconds()
exp := time.Duration(10 * time.Second).Nanoseconds()
if actual != exp {
t.Errorf("Actual %v, expected %v", actual, exp)
}
}
func TestAgent_ZeroInterval(t *testing.T) {
min := time.Duration(500 * time.Millisecond).Nanoseconds()
max := time.Duration(5 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
flushinterval := jitterInterval(time.Duration(0*time.Second),
time.Duration(5*time.Second))
actual := flushinterval.Nanoseconds()
if actual > max {
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
break
}
if actual < min {
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
break
}
}
}
func TestAgent_ZeroBoth(t *testing.T) {
flushinterval := jitterInterval(time.Duration(0*time.Second),
time.Duration(0*time.Second))
actual := flushinterval
exp := time.Duration(500 * time.Millisecond)
if actual != exp {
t.Errorf("Actual %v, expected %v", actual, exp)
}
}
func TestAgent_JitterMax(t *testing.T) {
max := time.Duration(32 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
flushinterval := jitterInterval(time.Duration(30*time.Second),
time.Duration(2*time.Second))
actual := flushinterval.Nanoseconds()
if actual > max {
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
break
}
}
}
func TestAgent_JitterMin(t *testing.T) {
min := time.Duration(30 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
flushinterval := jitterInterval(time.Duration(30*time.Second),
time.Duration(2*time.Second))
actual := flushinterval.Nanoseconds()
if actual < min {
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
break
}
}
}


@@ -1,61 +0,0 @@
package telegraf
/*
func TestAgent_DrivesMetrics(t *testing.T) {
var (
plugin plugins.MockPlugin
)
defer plugin.AssertExpectations(t)
defer metrics.AssertExpectations(t)
a := &Agent{
plugins: []plugins.Plugin{&plugin},
Config: &Config{},
}
plugin.On("Add", "foo", 1.2, nil).Return(nil)
plugin.On("Add", "bar", 888, nil).Return(nil)
err := a.crank()
require.NoError(t, err)
}
func TestAgent_AppliesTags(t *testing.T) {
var (
plugin plugins.MockPlugin
metrics MockMetrics
)
defer plugin.AssertExpectations(t)
defer metrics.AssertExpectations(t)
a := &Agent{
plugins: []plugins.Plugin{&plugin},
metrics: &metrics,
Config: &Config{
Tags: map[string]string{
"dc": "us-west-1",
},
},
}
m1 := cypress.Metric()
m1.Add("name", "foo")
m1.Add("value", 1.2)
msgs := []*cypress.Message{m1}
m2 := cypress.Metric()
m2.Timestamp = m1.Timestamp
m2.Add("name", "foo")
m2.Add("value", 1.2)
m2.AddTag("dc", "us-west-1")
plugin.On("Read").Return(msgs, nil)
metrics.On("Receive", m2).Return(nil)
err := a.crank()
require.NoError(t, err)
}
*/

build.py (new executable file, 713 lines)

@@ -0,0 +1,713 @@
#!/usr/bin/env python
#
# This is the Telegraf build script.
#
# Current caveats:
# - Does not check out the correct commit/branch (for now, you will need to do so manually)
# - Has external dependencies for packaging (fpm) and uploading (boto)
#
import sys
import os
import subprocess
import time
import datetime
import shutil
import tempfile
import hashlib
import re
debug = False
# PACKAGING VARIABLES
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/telegraf"
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
CONFIG_DIR = "/etc/telegraf"
LOGROTATE_DIR = "/etc/logrotate.d"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/telegraf.service"
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
DEFAULT_CONFIG = "etc/telegraf.conf"
POSTINST_SCRIPT = "scripts/post-install.sh"
PREINST_SCRIPT = "scripts/pre-install.sh"
# META-PACKAGE VARIABLES
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/telegraf"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
# SCRIPT START
prereqs = [ 'git', 'go' ]
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--license {} \
--maintainer {} \
--config-files {} \
--config-files {} \
--after-install {} \
--before-install {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
PACKAGE_LICENSE,
MAINTAINER,
CONFIG_DIR + '/telegraf.conf',
LOGROTATE_DIR + '/telegraf',
POSTINST_SCRIPT,
PREINST_SCRIPT,
DESCRIPTION)
targets = {
'telegraf' : './cmd/telegraf/telegraf.go',
}
supported_builds = {
'darwin': [ "amd64", "i386" ],
'windows': [ "amd64", "i386", "arm" ],
'linux': [ "amd64", "i386", "arm" ]
}
supported_packages = {
"darwin": [ "tar", "zip" ],
"linux": [ "deb", "rpm", "tar", "zip" ],
"windows": [ "tar", "zip" ],
}
def run(command, allow_failure=False, shell=False):
out = None
if debug:
print("[DEBUG] {}".format(command))
try:
if shell:
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
else:
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
out = out.decode("utf8")
except subprocess.CalledProcessError as e:
print("")
print("")
print("Executed command failed!")
print("-- Command run was: {}".format(command))
print("-- Failure was: {}".format(e.output))
if allow_failure:
print("Continuing...")
return None
else:
print("")
print("Stopping.")
sys.exit(1)
except OSError as e:
print("")
print("")
print("Invalid command!")
print("-- Command run was: {}".format(command))
print("-- Failure was: {}".format(e))
if allow_failure:
print("Continuing...")
return out
else:
print("")
print("Stopping.")
sys.exit(1)
else:
return out
def create_temp_dir(prefix=None):
if prefix is None:
return tempfile.mkdtemp(prefix="telegraf-build.")
else:
return tempfile.mkdtemp(prefix=prefix)
def get_current_version():
command = "git describe --always --tags --abbrev=0"
out = run(command)
return out.strip()
def get_current_commit(short=False):
command = None
if short:
command = "git log --pretty=format:'%h' -n 1"
else:
command = "git rev-parse HEAD"
out = run(command)
return out.strip('\'\n\r ')
def get_current_branch():
command = "git rev-parse --abbrev-ref HEAD"
out = run(command)
return out.strip()
def get_system_arch():
arch = os.uname()[4]
if arch == "x86_64":
arch = "amd64"
return arch
def get_system_platform():
if sys.platform.startswith("linux"):
return "linux"
else:
return sys.platform
def get_go_version():
out = run("go version")
matches = re.search(r'go version go(\S+)', out)
if matches is not None:
return matches.groups()[0].strip()
return None
def check_path_for(b):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
full_path = os.path.join(path, b)
if is_exe(full_path):
return full_path
def check_environ(build_dir = None):
print("\nChecking environment:")
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
print("\t- {} -> {}".format(v, os.environ.get(v)))
cwd = os.getcwd()
if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
print("\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.")
def check_prereqs():
print("\nChecking for dependencies:")
for req in prereqs:
path = check_path_for(req)
if path is None:
path = '?'
print("\t- {} -> {}".format(req, path))
for req in optional_prereqs:
path = check_path_for(req)
if path is None:
path = '?'
print("\t- {} (optional) -> {}".format(req, path))
print("")
def upload_packages(packages, bucket_name=None, nightly=False):
if debug:
print("[DEBUG] upload_packags: {}".format(packages))
try:
import boto
from boto.s3.key import Key
except ImportError:
print "!! Cannot upload packages without the 'boto' python library."
return 1
print("Uploading packages to S3...")
print("")
c = boto.connect_s3()
if bucket_name is None:
bucket_name = 'get.influxdb.org/telegraf'
bucket = c.get_bucket(bucket_name.split('/')[0])
print("\t - Using bucket: {}".format(bucket_name))
for p in packages:
if '/' in bucket_name:
# Allow for nested paths within the bucket name (ex:
# bucket/telegraf). Assuming forward-slashes as path
# delimiter.
name = os.path.join('/'.join(bucket_name.split('/')[1:]),
os.path.basename(p))
else:
name = os.path.basename(p)
if bucket.get_key(name) is None or nightly:
print("\t - Uploading {} to {}...".format(name, bucket_name))
k = Key(bucket)
k.key = name
if nightly:
n = k.set_contents_from_filename(p, replace=True)
else:
n = k.set_contents_from_filename(p, replace=False)
k.make_public()
else:
print("\t - Not uploading {}, already exists.".format(p))
print("")
def run_tests(race, parallel, timeout, no_vet):
get_command = "go get -d -t ./..."
print("Retrieving Go dependencies...")
sys.stdout.flush()
run(get_command)
print("Running tests:")
print("\tRace: ", race)
if parallel is not None:
print("\tParallel:", parallel)
if timeout is not None:
print("\tTimeout:", timeout)
sys.stdout.flush()
p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if len(out) > 0 or len(err) > 0:
print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
print(out)
print(err)
return False
if not no_vet:
p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if len(out) > 0 or len(err) > 0:
print("Go vet failed. Please run 'go vet ./...' and fix any errors.")
print(out)
print(err)
return False
else:
print("Skipping go vet ...")
sys.stdout.flush()
test_command = "go test -v"
if race:
test_command += " -race"
if parallel is not None:
test_command += " -parallel {}".format(parallel)
if timeout is not None:
test_command += " -timeout {}".format(timeout)
test_command += " ./..."
code = os.system(test_command)
if code != 0:
print("Tests Failed")
return False
else:
print("Tests Passed")
return True
def build(version=None,
branch=None,
commit=None,
platform=None,
arch=None,
nightly=False,
rc=None,
race=False,
clean=False,
outdir=".",
goarm_version="6"):
print("-------------------------")
print("")
print("Build plan:")
print("\t- version: {}".format(version))
if rc:
print("\t- release candidate: {}".format(rc))
print("\t- commit: {}".format(commit))
print("\t- branch: {}".format(branch))
print("\t- platform: {}".format(platform))
print("\t- arch: {}".format(arch))
if arch == 'arm' and goarm_version:
print("\t- ARM version: {}".format(goarm_version))
print("\t- nightly? {}".format(str(nightly).lower()))
print("\t- race enabled? {}".format(str(race).lower()))
print("")
if not os.path.exists(outdir):
os.makedirs(outdir)
elif clean and outdir != '/':
print("Cleaning build directory...")
shutil.rmtree(outdir)
os.makedirs(outdir)
if rc:
# If a release candidate, update the version information accordingly
version = "{}rc{}".format(version, rc)
# Set architecture to something that Go expects
if arch == 'i386':
arch = '386'
elif arch == 'x86_64':
arch = 'amd64'
print("Starting build...")
for b, c in targets.items():
print("\t- Building '{}'...".format(os.path.join(outdir, b)))
build_command = ""
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
if arch == "arm" and goarm_version:
if goarm_version not in ["5", "6", "7", "arm64"]:
print("!! Invalid ARM build version: {}".format(goarm_version))
build_command += "GOARM={} ".format(goarm_version)
build_command += "go build -o {} ".format(os.path.join(outdir, b))
if race:
build_command += "-race "
go_version = get_go_version()
if "1.4" in go_version:
build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat())
build_command += "-X main.Version {} ".format(version)
build_command += "-X main.Branch {} ".format(get_current_branch())
build_command += "-X main.Commit {}\" ".format(get_current_commit())
else:
build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat())
build_command += "-X main.Version={} ".format(version)
build_command += "-X main.Branch={} ".format(get_current_branch())
build_command += "-X main.Commit={}\" ".format(get_current_commit())
build_command += c
run(build_command, shell=True)
print("")
def create_dir(path):
try:
os.makedirs(path)
except OSError as e:
print(e)
def rename_file(fr, to):
try:
os.rename(fr, to)
except OSError as e:
print(e)
# Return the original filename
return fr
else:
# Return the new filename
return to
def copy_file(fr, to):
try:
shutil.copy(fr, to)
except OSError as e:
print(e)
def create_package_fs(build_root):
print("\t- Creating a filesystem hierarchy from directory: {}".format(build_root))
# Using [1:] for the path names due to them being absolute
# (will overwrite previous paths, per 'os.path.join' documentation)
dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
for d in dirs:
create_dir(os.path.join(build_root, d))
os.chmod(os.path.join(build_root, d), 0o755)
def package_scripts(build_root):
print("\t- Copying scripts and sample configuration to build directory")
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
def go_get(update=False):
get_command = None
if update:
get_command = "go get -u -f -d ./..."
else:
get_command = "go get -d ./..."
print("Retrieving Go dependencies...")
run(get_command)
def generate_md5_from_file(path):
m = hashlib.md5()
with open(path, 'rb') as f:
while True:
data = f.read(4096)
if not data:
break
m.update(data)
return m.hexdigest()
def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
outfiles = []
tmp_build_dir = create_temp_dir()
if debug:
print("[DEBUG] build_output = {}".format(build_output))
try:
print("-------------------------")
print("")
print("Packaging...")
for p in build_output:
# Create top-level folder displaying which platform (linux, etc)
create_dir(os.path.join(tmp_build_dir, p))
for a in build_output[p]:
current_location = build_output[p][a]
# Create second-level directory displaying the architecture (amd64, etc)
build_root = os.path.join(tmp_build_dir, p, a)
# Create directory tree to mimic file system of package
create_dir(build_root)
create_package_fs(build_root)
# Copy in packaging and miscellaneous scripts
package_scripts(build_root)
# Copy newly-built binaries to packaging directory
for b in targets:
if p == 'windows':
b = b + '.exe'
fr = os.path.join(current_location, b)
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b)
print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to))
copy_file(fr, to)
# Package the directory structure
for package_type in supported_packages[p]:
print("\t- Packaging directory '{}' as '{}'...".format(build_root, package_type))
name = "telegraf"
# Reset version, iteration, and current location on each run
# since they may be modified below.
package_version = version
package_iteration = iteration
current_location = build_output[p][a]
if package_type in ['zip', 'tar']:
if nightly:
name = '{}-nightly_{}_{}'.format(name, p, a)
else:
name = '{}-{}-{}_{}_{}'.format(name, package_version, package_iteration, p, a)
if package_type == 'tar':
# Add `tar.gz` to path to reduce package size
current_location = os.path.join(current_location, name + '.tar.gz')
if rc is not None:
package_iteration = "0.rc{}".format(rc)
if a == '386':
a = 'i386'
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
fpm_common_args,
name,
a,
package_type,
package_version,
package_iteration,
build_root,
current_location)
if package_type == "rpm":
fpm_command += "--depends coreutils "
fpm_command += "--depends lsof"
out = run(fpm_command, shell=True)
matches = re.search(':path=>"(.*)"', out)
outfile = None
if matches is not None:
outfile = matches.groups()[0]
if outfile is None:
print("[ COULD NOT DETERMINE OUTPUT ]")
else:
# Strip nightly version (the unix epoch) from filename
if nightly and package_type in ['deb', 'rpm']:
outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
outfiles.append(os.path.join(os.getcwd(), outfile))
# Display MD5 hash for generated package
print("\t\tMD5 = {}".format(generate_md5_from_file(outfile)))
print("")
if debug:
print("[DEBUG] package outfiles: {}".format(outfiles))
return outfiles
finally:
# Cleanup
shutil.rmtree(tmp_build_dir)
def print_usage():
print("Usage: ./build.py [options]")
print("")
print("Options:")
print("\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build.")
print("\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all")
print("\t --goarm=<arm version> \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6")
print("\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all")
print("\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag.")
print("\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP).")
print("\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP).")
print("\t --rc=<rc number> \n\t\t- Whether or not the build is a release candidate (affects version information).")
print("\t --iteration=<iteration number> \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise).")
print("\t --race \n\t\t- Whether the produced build should have race detection enabled.")
print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).")
print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).")
print("\t --update \n\t\t- Whether dependencies should be updated prior to building.")
print("\t --test \n\t\t- Run Go tests. Will not produce a build.")
print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.")
print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.")
print("\t --clean \n\t\t- Clean the build output directory prior to creating build.")
print("\t --no-get \n\t\t- Do not run `go get` before building.")
print("\t --bucket=<S3 bucket>\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).")
print("\t --debug \n\t\t- Displays debug output.")
print("")
def print_package_summary(packages):
print(packages)
def main():
# Command-line arguments
outdir = "build"
commit = None
target_platform = None
target_arch = None
nightly = False
race = False
branch = None
version = get_current_version()
rc = None
package = False
update = False
clean = False
upload = False
test = False
parallel = None
timeout = None
iteration = 1
no_vet = False
goarm_version = "6"
run_get = True
upload_bucket = None
global debug
for arg in sys.argv[1:]:
if '--outdir' in arg:
# Output directory. If none is specified, builds are placed in ./build.
outdir = arg.split("=")[1]
elif '--commit' in arg:
# Commit to build from. If none is specified, then it will build from the most recent commit.
commit = arg.split("=")[1]
elif '--branch' in arg:
# Branch to build from. If none is specified, then it will build from the current branch.
branch = arg.split("=")[1]
elif '--arch' in arg:
# Target architecture. If none is specified, then it will build for the current arch.
target_arch = arg.split("=")[1]
elif '--platform' in arg:
# Target platform. If none is specified, then it will build for the current platform.
target_platform = arg.split("=")[1]
elif '--version' in arg:
# Version to assign to this build (0.9.5, etc)
version = arg.split("=")[1]
elif '--rc' in arg:
# Signifies that this is a release candidate build.
rc = arg.split("=")[1]
elif '--race' in arg:
# Signifies that race detection should be enabled.
race = True
elif '--package' in arg:
# Signifies that packages should be built.
package = True
elif '--nightly' in arg:
# Signifies that this is a nightly build.
nightly = True
elif '--update' in arg:
# Signifies that dependencies should be updated.
update = True
elif '--upload' in arg:
# Signifies that the resulting packages should be uploaded to S3
upload = True
elif '--test' in arg:
# Run tests and exit
test = True
elif '--parallel' in arg:
# Set parallel for tests.
parallel = int(arg.split("=")[1])
elif '--timeout' in arg:
# Set timeout for tests.
timeout = arg.split("=")[1]
elif '--clean' in arg:
# Signifies that the outdir should be deleted before building
clean = True
elif '--iteration' in arg:
iteration = arg.split("=")[1]
elif '--no-vet' in arg:
no_vet = True
elif '--goarm' in arg:
# Signifies GOARM flag to pass to build command when compiling for ARM
goarm_version = arg.split("=")[1]
elif '--bucket' in arg:
# The bucket to upload the packages to, relies on boto
upload_bucket = arg.split("=")[1]
elif '--no-get' in arg:
run_get = False
elif '--debug' in arg:
print "[DEBUG] Using debug output"
debug = True
elif '--help' in arg:
print_usage()
return 0
else:
print("!! Unknown argument: {}".format(arg))
print_usage()
return 1
if nightly:
if rc:
print("!! Cannot be both nightly and a release candidate! Stopping.")
return 1
# In order to support nightly builds on the repository, we are adding the epoch timestamp
# to the version so that version numbers are always greater than the previous nightly.
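# e.g. a repo tag of 0.10.2 built at unix time 1454630400 yields version "0.10.2.n1454630400"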
version = "{}.n{}".format(version, int(time.time()))
# Pre-build checks
check_environ()
check_prereqs()
if not commit:
commit = get_current_commit(short=True)
if not branch:
branch = get_current_branch()
if not target_arch:
if 'arm' in get_system_arch():
# Prevent uname from reporting ARM arch (eg 'armv7l')
target_arch = "arm"
else:
target_arch = get_system_arch()
if not target_platform:
target_platform = get_system_platform()
if rc or nightly:
# If a release candidate or nightly, set iteration to 0 (instead of 1)
iteration = 0
if target_arch == '386':
target_arch = 'i386'
elif target_arch == 'x86_64':
target_arch = 'amd64'
build_output = {}
if test:
if not run_tests(race, parallel, timeout, no_vet):
return 1
return 0
if run_get:
go_get(update=update)
platforms = []
single_build = True
if target_platform == 'all':
platforms = list(supported_builds.keys())
single_build = False
else:
platforms = [target_platform]
for platform in platforms:
build_output.update( { platform : {} } )
archs = []
if target_arch == "all":
single_build = False
archs = supported_builds.get(platform)
else:
archs = [target_arch]
for arch in archs:
od = outdir
if not single_build:
od = os.path.join(outdir, platform, arch)
build(version=version,
branch=branch,
commit=commit,
platform=platform,
arch=arch,
nightly=nightly,
rc=rc,
race=race,
clean=clean,
outdir=od,
goarm_version=goarm_version)
build_output.get(platform).update( { arch : od } )
# Build packages
if package:
if not check_path_for("fpm"):
print("!! Cannot package without command 'fpm'. Stopping.")
return 1
packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration)
# Optionally upload to S3
if upload:
upload_packages(packages, bucket_name=upload_bucket, nightly=nightly)
return 0
if __name__ == '__main__':
sys.exit(main())

circle.yml (new file, 18 lines)

@@ -0,0 +1,18 @@
machine:
services:
- docker
post:
- sudo service zookeeper stop
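# ensure Go 1.5.2: wipe any preinstalled toolchain that does not match, then install 1.5.2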
- go version
- go version | grep 1.5.2 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz
- go version
dependencies:
override:
- docker info
test:
override:
- bash scripts/circle-test.sh


@@ -7,115 +7,234 @@ import (
"os"
"os/signal"
"strings"
"syscall"
"github.com/influxdb/telegraf"
_ "github.com/influxdb/telegraf/plugins/all"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
)
var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout")
var fDebug = flag.Bool("debug", false,
"show metrics as they're generated to stdout")
var fQuiet = flag.Bool("quiet", false,
"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("config-directory", "",
"directory containing additional *.conf files")
var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration")
var fSampleConfig = flag.Bool("sample-config", false,
"print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fInputFilters = flag.String("input-filter", "",
"filter the inputs to enable, separator is :")
var fOutputFilters = flag.String("output-filter", "",
"filter the outputs to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var Version = "unreleased"
var Commit = ""
var fInputFiltersLegacy = flag.String("filter", "",
"filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
"filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
"directory containing additional *.conf files")
// Telegraf version
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf <flags>
The flags are:
-config <file> configuration file to load
-test gather metrics once, print them to stdout, and exit
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
-version print the version to stdout
Examples:
# generate a telegraf config file:
telegraf -sample-config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf -sample-config -input-filter cpu -output-filter influxdb
# run a single telegraf collection, outputting metrics to stdout
telegraf -config telegraf.conf -test
# run telegraf with all plugins defined in config file
telegraf -config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
`
func main() {
flag.Parse()
reload := make(chan bool, 1)
reload <- true
for <-reload {
reload <- false
flag.Usage = func() { usageExit(0) }
flag.Parse()
if *fVersion {
fmt.Printf("InfluxDB Telegraf agent - Version %s\n", Version)
return
}
if *fSampleConfig {
telegraf.PrintSampleConfig()
return
}
var (
config *telegraf.Config
err error
)
if *fConfig != "" {
config, err = telegraf.LoadConfig(*fConfig)
if err != nil {
log.Fatal(err)
if flag.NFlag() == 0 {
usageExit(0)
}
} else {
config = telegraf.DefaultConfig()
}
ag, err := telegraf.NewAgent(config)
if err != nil {
log.Fatal(err)
}
var inputFilters []string
if *fInputFiltersLegacy != "" {
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
inputFilters = strings.Split(":"+inputFilter+":", ":")
}
if *fInputFilters != "" {
inputFilter := strings.TrimSpace(*fInputFilters)
inputFilters = strings.Split(":"+inputFilter+":", ":")
}
if *fDebug {
ag.Debug = true
}
var outputFilters []string
if *fOutputFiltersLegacy != "" {
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
if *fOutputFilters != "" {
outputFilter := strings.TrimSpace(*fOutputFilters)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
plugins, err := ag.LoadPlugins()
if err != nil {
log.Fatal(err)
}
if *fVersion {
v := fmt.Sprintf("Telegraf - Version %s", Version)
fmt.Println(v)
return
}
if *fSampleConfig {
config.PrintSampleConfig(inputFilters, outputFilters)
return
}
if *fUsage != "" {
if err := config.PrintInputConfig(*fUsage); err != nil {
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
log.Fatalf("%s and %s", err, err2)
}
}
return
}
var (
c *config.Config
err error
)
if *fTest {
if *fConfig != "" {
err = ag.Test()
c = config.NewConfig()
c.OutputFilters = outputFilters
c.InputFilters = inputFilters
err = c.LoadConfig(*fConfig)
if err != nil {
log.Fatal(err)
}
} else {
err = ag.TestAllPlugins()
fmt.Println("You must specify a config file. See telegraf --help")
os.Exit(1)
}
if *fConfigDirectoryLegacy != "" {
err = c.LoadDirectory(*fConfigDirectoryLegacy)
if err != nil {
log.Fatal(err)
}
}
if *fConfigDirectory != "" {
err = c.LoadDirectory(*fConfigDirectory)
if err != nil {
log.Fatal(err)
}
}
if len(c.Outputs) == 0 {
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
}
if len(c.Inputs) == 0 {
log.Fatalf("Error: no inputs found, did you provide a valid config file?")
}
ag, err := agent.NewAgent(c)
if err != nil {
log.Fatal(err)
}
return
}
err = ag.Connect()
if err != nil {
log.Fatal(err)
}
shutdown := make(chan struct{})
signals := make(chan os.Signal)
signal.Notify(signals, os.Interrupt)
go func() {
<-signals
close(shutdown)
}()
log.Print("InfluxDB Agent running")
log.Printf("Loaded plugins: %s", strings.Join(plugins, " "))
if ag.Debug {
log.Printf("Debug: enabled")
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v\n",
ag.Interval, ag.Debug, ag.Hostname)
}
if config.URL != "" {
log.Printf("Sending metrics to: %s", config.URL)
log.Printf("Tags enabled: %v", config.ListTags())
}
if *fPidfile != "" {
f, err := os.Create(*fPidfile)
if err != nil {
log.Fatalf("Unable to create pidfile: %s", err)
if *fDebug {
ag.Config.Agent.Debug = true
}
fmt.Fprintf(f, "%d\n", os.Getpid())
if *fQuiet {
ag.Config.Agent.Quiet = true
}
f.Close()
if *fTest {
err = ag.Test()
if err != nil {
log.Fatal(err)
}
return
}
err = ag.Connect()
if err != nil {
log.Fatal(err)
}
shutdown := make(chan struct{})
signals := make(chan os.Signal)
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
go func() {
sig := <-signals
if sig == os.Interrupt {
close(shutdown)
}
if sig == syscall.SIGHUP {
log.Printf("Reloading Telegraf config\n")
<-reload
reload <- true
close(shutdown)
}
}()
log.Printf("Starting Telegraf (version %s)\n", Version)
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("Tags enabled: %s", c.ListTags())
if *fPidfile != "" {
f, err := os.Create(*fPidfile)
if err != nil {
log.Fatalf("Unable to create pidfile: %s", err)
}
fmt.Fprintf(f, "%d\n", os.Getpid())
f.Close()
}
ag.Run(shutdown)
}
ag.Run(shutdown)
}
func usageExit(rc int) {
fmt.Println(usage)
os.Exit(rc)
}

config.go (deleted, 302 lines)

@@ -1,302 +0,0 @@
package telegraf
import (
"errors"
"fmt"
"io/ioutil"
"sort"
"strings"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/naoina/toml"
"github.com/naoina/toml/ast"
)
type Duration struct {
time.Duration
}
func (d *Duration) UnmarshalTOML(b []byte) error {
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
if err != nil {
return err
}
d.Duration = dur
return nil
}
type Config struct {
URL string
Username string
Password string
Database string
UserAgent string
Tags map[string]string
agent *ast.Table
plugins map[string]*ast.Table
}
func (c *Config) Plugins() map[string]*ast.Table {
return c.plugins
}
type ConfiguredPlugin struct {
Name string
Drop []string
Pass []string
Interval time.Duration
}
func (cp *ConfiguredPlugin) ShouldPass(measurement string) bool {
if cp.Pass != nil {
for _, pat := range cp.Pass {
if strings.HasPrefix(measurement, pat) {
return true
}
}
return false
}
if cp.Drop != nil {
for _, pat := range cp.Drop {
if strings.HasPrefix(measurement, pat) {
return false
}
}
return true
}
return true
}
func (c *Config) ApplyAgent(v interface{}) error {
if c.agent != nil {
return toml.UnmarshalTable(c.agent, v)
}
return nil
}
func (c *Config) ApplyPlugin(name string, v interface{}) (*ConfiguredPlugin, error) {
cp := &ConfiguredPlugin{Name: name}
if tbl, ok := c.plugins[name]; ok {
if node, ok := tbl.Fields["pass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
cp.Pass = append(cp.Pass, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["drop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
cp.Drop = append(cp.Drop, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
cp.Interval = dur
}
}
}
delete(tbl.Fields, "drop")
delete(tbl.Fields, "pass")
delete(tbl.Fields, "interval")
return cp, toml.UnmarshalTable(tbl, v)
}
return cp, nil
}
func (c *Config) PluginsDeclared() []string {
var plugins []string
for name := range c.plugins {
plugins = append(plugins, name)
}
sort.Strings(plugins)
return plugins
}
func DefaultConfig() *Config {
return &Config{}
}
var ErrInvalidConfig = errors.New("invalid configuration")
func LoadConfig(path string) (*Config, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
tbl, err := toml.Parse(data)
if err != nil {
return nil, err
}
c := &Config{
plugins: make(map[string]*ast.Table),
}
for name, val := range tbl.Fields {
subtbl, ok := val.(*ast.Table)
if !ok {
return nil, ErrInvalidConfig
}
switch name {
case "influxdb":
err := toml.UnmarshalTable(subtbl, c)
if err != nil {
return nil, err
}
case "agent":
c.agent = subtbl
default:
c.plugins[name] = subtbl
}
}
return c, nil
}
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
type hasConfig interface {
BasicConfig() string
}
type hasDescr interface {
Description() string
}
var header = `# Telegraf configuration
# If this file is missing an [agent] section, you must first generate a
# valid config with 'telegraf -sample-config > telegraf.toml'
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Configuration for influxdb server to send metrics to
[influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
url = "http://localhost:8086" # required.
# The target database for metrics. This database must already exist
database = "telegraf" # required.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# tags = { "dc": "us-east-1" }
# Tags can also be specified via a normal map, but only one form at a time:
# [influxdb.tags]
# dc = "us-east-1"
# Configuration for telegraf itself
# [agent]
# interval = "10s"
# debug = false
# hostname = "prod3241"
# PLUGINS
`
func PrintSampleConfig() {
fmt.Printf(header)
var names []string
for name := range plugins.Plugins {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
creator := plugins.Plugins[name]
plugin := creator()
fmt.Printf("# %s\n[%s]\n", plugin.Description(), name)
var config string
config = strings.TrimSpace(plugin.SampleConfig())
if config == "" {
fmt.Printf(" # no configuration\n\n")
} else {
fmt.Printf("\n")
lines := strings.Split(config, "\n")
for _, line := range lines {
fmt.Printf("%s\n", line)
}
fmt.Printf("\n")
}
}
}


@@ -1,134 +0,0 @@
# Telegraf configuration
# If this file is missing an [agent] section, you must first generate a
# valid config with 'telegraf -sample-config > telegraf.toml'
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Configuration for influxdb server to send metrics to
[influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
url = "http://localhost:8086" # required.
# The target database for metrics. This database must already exist
database = "telegraf" # required.
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# tags = { "dc": "us-east-1" }
# Tags can also be specified via a normal map, but only one form at a time:
# [influxdb.tags]
# dc = "us-east-1"
# Configuration for telegraf itself
# [agent]
# interval = "10s"
# debug = false
# hostname = "prod3241"
# PLUGINS
# Read metrics about cpu usage
[cpu]
# no configuration
# Read metrics about disk usage by mount point
[disk]
# no configuration
# Read metrics about docker containers
[docker]
# no configuration
# Read metrics about disk IO by device
[io]
# no configuration
# Read metrics about memory usage
[mem]
# no configuration
# Read metrics from one or many mysql servers
[mysql]
# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g. root:root@http://10.0.0.18/?tls=false
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics about network interface usage
[net]
# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]
# Read metrics from one or many postgresql servers
[postgresql]
# specify servers via an array of tables
[[postgresql.servers]]
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=...
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#
address = "sslmode=disable"
# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.
# databases = ["app_production", "blah_testing"]
# [[postgresql.servers]]
# address = "influx@remoteserver"
# Read metrics from one or many redis servers
[redis]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.10.3.33:18832, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics about swap memory usage
[swap]
# no configuration
# Read metrics about system load
[system]
# no configuration

etc/logrotate.d/telegraf (new file, 11 lines)

@@ -0,0 +1,11 @@
/var/log/telegraf/telegraf.log
{
rotate 6
daily
missingok
dateext
copytruncate
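# copytruncate rotates by copying, so telegraf can keep its log file descriptor open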
notifempty
compress
}

etc/telegraf.conf (new file, 110 lines)

@@ -0,0 +1,110 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
# Jitter the flush interval by a random amount. This is primarily to avoid
# large write spikes for users running a large number of telegraf instances.
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
# Run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster; this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are n, u, ms, s, m, and h
# note: using second precision greatly helps InfluxDB compression
precision = "s"
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# Read metrics about cpu usage
[[inputs.cpu]]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["time_*"]
# Read metrics about disk usage by mount point
[[inputs.disk]]
# By default, telegraf gathers stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points=["/"]
# Read metrics about disk IO by device
[[inputs.diskio]]
# By default, telegraf will gather stats for all devices including
# disk partitions.
# Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
# Uncomment the following line if you do not need disk serial numbers.
# skip_serial_number = true
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
###############################################################################
# SERVICE INPUTS #
###############################################################################

31
input.go Normal file

@@ -0,0 +1,31 @@
package telegraf
type Input interface {
// SampleConfig returns the default configuration of the Input
SampleConfig() string
// Description returns a one-sentence description of the Input
Description() string
// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
Gather(Accumulator) error
}
type ServiceInput interface {
// SampleConfig returns the default configuration of the Input
SampleConfig() string
// Description returns a one-sentence description of the Input
Description() string
// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
Gather(Accumulator) error
// Start starts the ServiceInput's service, whatever that may be
Start() error
// Stop stops the services and closes any necessary channels and connections
Stop()
}
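For illustration, here is a minimal sketch of a plugin satisfying the Input interface. The `random` plugin is hypothetical; the sketch assumes the inputs.Add registry helper and the Accumulator.AddFields method used by the bundled plugins.

package random

import (
    "math/rand"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Random is a hypothetical input that emits one random gauge per interval.
type Random struct{}

func (r *Random) Description() string { return "Generate a random gauge" }

func (r *Random) SampleConfig() string { return "  # no configuration\n" }

// Gather is called every "interval" and adds metrics to the accumulator.
func (r *Random) Gather(acc telegraf.Accumulator) error {
    fields := map[string]interface{}{"value": rand.Float64()}
    tags := map[string]string{"source": "random"}
    acc.AddFields("random", fields, tags)
    return nil
}

func init() {
    inputs.Add("random", func() telegraf.Input { return &Random{} })
}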

595
internal/config/config.go Normal file

@@ -0,0 +1,595 @@
package config
import (
"errors"
"fmt"
"io/ioutil"
"log"
"path/filepath"
"sort"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/config"
"github.com/naoina/toml/ast"
)
// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
Tags map[string]string
InputFilters []string
OutputFilters []string
Agent *AgentConfig
Inputs []*internal_models.RunningInput
Outputs []*internal_models.RunningOutput
}
func NewConfig() *Config {
c := &Config{
// Agent defaults:
Agent: &AgentConfig{
Interval: internal.Duration{Duration: 10 * time.Second},
RoundInterval: true,
FlushInterval: internal.Duration{Duration: 10 * time.Second},
FlushJitter: internal.Duration{Duration: 5 * time.Second},
},
Tags: make(map[string]string),
Inputs: make([]*internal_models.RunningInput, 0),
Outputs: make([]*internal_models.RunningOutput, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
return c
}
type AgentConfig struct {
// Interval at which to gather information
Interval internal.Duration
// RoundInterval rounds collection interval to 'interval'.
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// CollectionJitter is used to jitter the collection by a random amount.
// Each plugin will sleep for a random time within jitter before collecting.
// This can be used to avoid many plugins querying things like sysfs at the
// same time, which can have a measurable effect on the system.
CollectionJitter internal.Duration
// Interval at which to flush data
FlushInterval internal.Duration
// FlushJitter Jitters the flush interval by a random amount.
// This is primarily to avoid large write spikes for users running a large
// number of telegraf instances.
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
FlushJitter internal.Duration
// MetricBufferLimit is the max number of metrics that each output plugin
// will cache. The buffer is cleared when a successful write occurs. When
// full, the oldest metrics will be overwritten.
MetricBufferLimit int
// TODO(cam): Remove UTC and Precision parameters, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
UTC bool `toml:"utc"`
Precision string
// Debug is the option for running in debug mode
Debug bool
// Quiet is the option for running in quiet mode
Quiet bool
Hostname string
}
// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Name)
}
return name
}
// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
name = append(name, output.Name)
}
return name
}
// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
var header = `# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all inputs
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
# Telegraf will cache metric_buffer_limit metrics for each output, and will
# flush this buffer on a successful write.
metric_buffer_limit = 10000
# Collection jitter is used to jitter the collection by a random amount.
# Each plugin will sleep for a random time within jitter before collecting.
# This can be used to avoid many plugins querying things like sysfs at the
# same time, which can have a measurable effect on the system.
collection_jitter = "0s"
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
# Jitter the flush interval by a random amount. This is primarily to avoid
# large write spikes for users running a large number of telegraf instances.
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
# Run telegraf in debug mode
debug = false
# Run telegraf in quiet mode
quiet = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
`
var pluginHeader = `
###############################################################################
# INPUTS #
###############################################################################
`
var serviceInputHeader = `
###############################################################################
# SERVICE INPUTS #
###############################################################################
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
fmt.Printf(header)
// Filter outputs
var onames []string
for oname := range outputs.Outputs {
if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
onames = append(onames, oname)
}
}
sort.Strings(onames)
// Print Outputs
for _, oname := range onames {
creator := outputs.Outputs[oname]
output := creator()
printConfig(oname, output, "outputs")
}
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Inputs
fmt.Printf(pluginHeader)
servInputs := make(map[string]telegraf.ServiceInput)
for _, pname := range pnames {
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
continue
}
printConfig(pname, input, "inputs")
}
// Print Service Inputs
fmt.Printf(serviceInputHeader)
for name, input := range servInputs {
printConfig(name, input, "inputs")
}
}
type printer interface {
Description() string
SampleConfig() string
}
func printConfig(name string, p printer, op string) {
fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name)
config := p.SampleConfig()
if config == "" {
fmt.Printf("\n # no configuration\n")
} else {
fmt.Printf(config)
}
}
func sliceContains(name string, list []string) bool {
for _, b := range list {
if b == name {
return true
}
}
return false
}
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs")
} else {
return errors.New(fmt.Sprintf("Input %s not found", name))
}
return nil
}
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs")
} else {
return errors.New(fmt.Sprintf("Output %s not found", name))
}
return nil
}
func (c *Config) LoadDirectory(path string) error {
directoryEntries, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, entry := range directoryEntries {
if entry.IsDir() {
continue
}
name := entry.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" {
continue
}
err := c.LoadConfig(filepath.Join(path, name))
if err != nil {
return err
}
}
return nil
}
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
tbl, err := config.ParseFile(path)
if err != nil {
return err
}
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
return errors.New("invalid configuration")
}
switch name {
case "agent":
if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("Could not parse [agent] config\n")
return err
}
case "tags":
if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("Could not parse [tags] config\n")
return err
}
case "outputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return err
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil {
return err
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
}
case "inputs", "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return err
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
return err
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
}
// Assume it's an input for legacy config file support if no other
// identifiers are present
default:
if err = c.addInput(name, subTable); err != nil {
return err
}
}
}
return nil
}
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
}
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
output := creator()
outputConfig, err := buildOutput(name, table)
if err != nil {
return err
}
if err := config.UnmarshalTable(table, output); err != nil {
return err
}
ro := internal_models.NewRunningOutput(name, output, outputConfig)
if c.Agent.MetricBufferLimit > 0 {
ro.PointBufferLimit = c.Agent.MetricBufferLimit
}
ro.Quiet = c.Agent.Quiet
c.Outputs = append(c.Outputs, ro)
return nil
}
func (c *Config) addInput(name string, table *ast.Table) error {
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
return nil
}
// Legacy support renaming io input to diskio
if name == "io" {
name = "diskio"
}
creator, ok := inputs.Inputs[name]
if !ok {
return fmt.Errorf("Undefined but requested input: %s", name)
}
input := creator()
pluginConfig, err := buildInput(name, table)
if err != nil {
return err
}
if err := config.UnmarshalTable(table, input); err != nil {
return err
}
rp := &internal_models.RunningInput{
Name: name,
Input: input,
Config: pluginConfig,
}
c.Inputs = append(c.Inputs, rp)
return nil
}
// buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig to be used for prefix
// filtering on tags and measurements
func buildFilter(tbl *ast.Table) internal_models.Filter {
f := internal_models.Filter{}
if node, ok := tbl.Fields["pass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.Pass = append(f.Pass, str.Value)
f.IsActive = true
}
}
}
}
}
if node, ok := tbl.Fields["drop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.Drop = append(f.Drop, str.Value)
f.IsActive = true
}
}
}
}
}
if node, ok := tbl.Fields["tagpass"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &internal_models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagPass = append(f.TagPass, *tagfilter)
f.IsActive = true
}
}
}
}
if node, ok := tbl.Fields["tagdrop"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &internal_models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagDrop = append(f.TagDrop, *tagfilter)
f.IsActive = true
}
}
}
}
delete(tbl.Fields, "drop")
delete(tbl.Fields, "pass")
delete(tbl.Fields, "tagdrop")
delete(tbl.Fields, "tagpass")
return f
}
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// internal_models.InputConfig to be inserted into internal_models.RunningInput
func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) {
cp := &internal_models.InputConfig{Name: name}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
cp.Interval = dur
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.NameOverride = str.Value
}
}
}
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name)
}
}
}
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "interval")
delete(tbl.Fields, "tags")
cp.Filter = buildFilter(tbl)
return cp, nil
}
// buildOutput parses output specific items from the ast.Table, builds the filter and returns an
// internal_models.OutputConfig to be inserted into internal_models.RunningOutput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
oc := &internal_models.OutputConfig{
Name: name,
Filter: buildFilter(tbl),
}
return oc, nil
}


@@ -0,0 +1,121 @@
package config
import (
"testing"
"time"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/exec"
"github.com/influxdata/telegraf/plugins/inputs/memcached"
"github.com/influxdata/telegraf/plugins/inputs/procstat"
"github.com/stretchr/testify/assert"
)
func TestConfig_LoadSingleInput(t *testing.T) {
c := NewConfig()
c.LoadConfig("./testdata/single_plugin.toml")
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
mConfig := &internal_models.InputConfig{
Name: "memcached",
Filter: internal_models.Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
IsActive: true,
},
Interval: 5 * time.Second,
}
mConfig.Tags = make(map[string]string)
assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
}
func TestConfig_LoadDirectory(t *testing.T) {
c := NewConfig()
err := c.LoadConfig("./testdata/single_plugin.toml")
if err != nil {
t.Error(err)
}
err = c.LoadDirectory("./testdata/subconfig")
if err != nil {
t.Error(err)
}
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
mConfig := &internal_models.InputConfig{
Name: "memcached",
Filter: internal_models.Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
IsActive: true,
},
Interval: 5 * time.Second,
}
mConfig.Tags = make(map[string]string)
assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
ex := inputs.Inputs["exec"]().(*exec.Exec)
ex.Command = "/usr/bin/myothercollector --foo=bar"
eConfig := &internal_models.InputConfig{
Name: "exec",
MeasurementSuffix: "_myothercollector",
}
eConfig.Tags = make(map[string]string)
assert.Equal(t, ex, c.Inputs[1].Input,
"Merged Testdata did not produce a correct exec struct.")
assert.Equal(t, eConfig, c.Inputs[1].Config,
"Merged Testdata did not produce correct exec metadata.")
memcached.Servers = []string{"192.168.1.1"}
assert.Equal(t, memcached, c.Inputs[2].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Inputs[2].Config,
"Testdata did not produce correct memcached metadata.")
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
pstat.PidFile = "/var/run/grafana-server.pid"
pConfig := &internal_models.InputConfig{Name: "procstat"}
pConfig.Tags = make(map[string]string)
assert.Equal(t, pstat, c.Inputs[3].Input,
"Merged Testdata did not produce a correct procstat struct.")
assert.Equal(t, pConfig, c.Inputs[3].Config,
"Merged Testdata did not produce correct procstat metadata.")
}


@@ -0,0 +1,9 @@
[[inputs.memcached]]
servers = ["localhost"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]


@@ -0,0 +1,4 @@
[[inputs.exec]]
# the command to run
command = "/usr/bin/myothercollector --foo=bar"
name_suffix = "_myothercollector"


@@ -0,0 +1,9 @@
[[inputs.memcached]]
servers = ["192.168.1.1"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]


@@ -0,0 +1,2 @@
[[inputs.procstat]]
pid_file = "/var/run/grafana-server.pid"


@@ -0,0 +1,301 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is that wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate that the plugin should use its own builtin configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Tags can also be specified via a normal map, but only one form at a time:
[tags]
dc = "us-east-1"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP endpoint URL for your InfluxDB instance
# Multiple urls can be specified for InfluxDB cluster support. The server to
# write to will be randomly chosen each interval.
urls = ["http://localhost:8086"] # required.
# The target database for metrics. This database must already exist
database = "telegraf" # required.
[[outputs.influxdb]]
urls = ["udp://localhost:8089"]
database = "udp-telegraf"
# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
# URLs of kafka brokers
brokers = ["localhost:9092"]
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
###############################################################################
# PLUGINS #
###############################################################################
# Read Apache status information (mod_status)
[[inputs.apache]]
# An array of Apache status URIs to gather stats from.
urls = ["http://localhost/server-status?auto"]
# Read metrics about cpu usage
[[inputs.cpu]]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time"]
# Read metrics about disk usage by mount point
[[inputs.disk]]
# no configuration
# Read metrics from one or many disque servers
[[inputs.disque]]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read stats from one or more Elasticsearch servers or clusters
[[inputs.elasticsearch]]
# specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
# set local to false when you want to read the indices stats from all nodes
# within the cluster
local = true
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"
name_suffix = "_mycollector"
# Read metrics of haproxy, via socket or csv stats page
[[inputs.haproxy]]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
# If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# Or you can also use a local socket (not working yet)
# servers = ["socket:/run/haproxy/admin.sock"]
# Read flattened metrics from one or more JSON HTTP endpoints
[[inputs.httpjson]]
# a name for the service being polled
name = "webserver_stats"
# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
method = "GET"
# HTTP parameters (all values must be strings)
[httpjson.parameters]
event_type = "cpu_spike"
threshold = "0.75"
# Read metrics about disk IO by device
[[inputs.diskio]]
# no configuration
# read metrics from a Kafka topic
[[inputs.kafka_consumer]]
# topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
# the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
# Maximum number of points to buffer between collection intervals
point_buffer = 100000
# Offset (must be either "oldest" or "newest")
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
# An array of URIs to gather stats about LeoFS.
# Specify an ip or hostname with port. ie 127.0.0.1:4020
#
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
servers = ["127.0.0.1:4021"]
# Read metrics from local Lustre service on OST, MDS
[[inputs.lustre2]]
# An array of /proc globs to search for Lustre stats
# If not specified, the default will work on Lustre 2.5.x
#
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Read metrics from one or many memcached servers
[[inputs.memcached]]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.0.0.1:11211, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics from one or many MongoDB servers
[[inputs.mongodb]]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
servers = ["127.0.0.1:27017"]
# Read metrics from one or many mysql servers
[[inputs.mysql]]
# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g.
# servers = ["root:root@http://10.0.0.18/?tls=false"]
# servers = ["root:passwd@tcp(127.0.0.1:3306)/"]
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics about network interface usage
[[inputs.net]]
# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
# An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
# Ping given url(s) and return statistics
[[inputs.ping]]
# urls to ping
urls = ["www.google.com"] # required
# number of pings to send (ping -c <COUNT>)
count = 1 # required
# interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
ping_interval = 0.0
# ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
timeout = 0.0
# interface to send ping from (ping -I <INTERFACE>)
interface = ""
# Read metrics from one or many postgresql servers
[[inputs.postgresql]]
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
address = "sslmode=disable"
# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.
# databases = ["app_production", "blah_testing"]
# [[postgresql.servers]]
# address = "influx@remoteserver"
# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
# An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
# Read metrics from one or many RabbitMQ servers via the management API
[[inputs.rabbitmq]]
# Specify servers via an array of tables
# name = "rmq-server-1" # optional tag
# url = "http://localhost:15672"
# username = "guest"
# password = "guest"
# A list of nodes to pull metrics about. If not specified, metrics for
# all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
# Read metrics from one or many redis servers
[[inputs.redis]]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie redis://localhost, redis://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics from one or many RethinkDB servers
[[inputs.rethinkdb]]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
servers = ["127.0.0.1:28015"]
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration

219
internal/internal.go Normal file

@@ -0,0 +1,219 @@
package internal
import (
"bufio"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"time"
)
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
// Duration just wraps time.Duration
type Duration struct {
Duration time.Duration
}
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
if err != nil {
return err
}
d.Duration = dur
return nil
}
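A quick sketch of what UnmarshalTOML accepts: the raw bytes still carry their surrounding quotes, which are stripped before the value reaches time.ParseDuration.

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal"
)

func main() {
    var d internal.Duration
    // The TOML parser hands over `"10s"` with the quotes included.
    if err := d.UnmarshalTOML([]byte(`"10s"`)); err != nil {
        panic(err)
    }
    fmt.Println(d.Duration) // 10s
}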
var NotImplementedError = errors.New("not implemented yet")
type JSONFlattener struct {
Fields map[string]interface{}
}
// FlattenJSON flattens nested maps/interfaces into a fields map
func (f *JSONFlattener) FlattenJSON(
fieldname string,
v interface{},
) error {
if f.Fields == nil {
f.Fields = make(map[string]interface{})
}
fieldname = strings.Trim(fieldname, "_")
switch t := v.(type) {
case map[string]interface{}:
for k, v := range t {
err := f.FlattenJSON(fieldname+"_"+k+"_", v)
if err != nil {
return err
}
}
case []interface{}:
for i, v := range t {
k := strconv.Itoa(i)
err := f.FlattenJSON(fieldname+"_"+k+"_", v)
if err != nil {
return err
}
}
case float64:
f.Fields[fieldname] = t
case bool, string, nil:
// ignored types
return nil
default:
return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
t, t, fieldname)
}
return nil
}
// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
return ReadLinesOffsetN(filename, 0, -1)
}
// ReadLinesOffsetN reads contents from file and splits them by new line.
// The offset tells at which line number to start.
// The count determines the number of lines to read (starting from offset):
// n >= 0: at most n lines
// n < 0: whole file
func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
f, err := os.Open(filename)
if err != nil {
return []string{""}, err
}
defer f.Close()
var ret []string
r := bufio.NewReader(f)
for i := 0; i < n+int(offset) || n < 0; i++ {
line, err := r.ReadString('\n')
if err != nil {
break
}
if i < int(offset) {
continue
}
ret = append(ret, strings.Trim(line, "\n"))
}
return ret, nil
}
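A short usage sketch; /proc/meminfo is just a convenient Linux-only example file. offset=1 skips the first line, and n=2 caps the result at two lines.

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal"
)

func main() {
    lines, err := internal.ReadLinesOffsetN("/proc/meminfo", 1, 2)
    if err != nil {
        fmt.Println(err)
        return
    }
    for _, line := range lines {
        fmt.Println(line)
    }
}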
// RandomString returns a random string of alpha-numeric characters
func RandomString(n int) string {
var bytes = make([]byte, n)
rand.Read(bytes)
for i, b := range bytes {
bytes[i] = alphanum[b%byte(len(alphanum))]
}
return string(bytes)
}
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// you must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
func GetTLSConfig(
SSLCert, SSLKey, SSLCA string,
InsecureSkipVerify bool,
) (*tls.Config, error) {
t := &tls.Config{}
if SSLCert != "" && SSLKey != "" && SSLCA != "" {
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
if err != nil {
return nil, fmt.Errorf("Could not load TLS client key/certificate: %s", err)
}
caCert, err := ioutil.ReadFile(SSLCA)
if err != nil {
return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
err))
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
t = &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caCertPool,
InsecureSkipVerify: InsecureSkipVerify,
}
} else {
if InsecureSkipVerify {
t.InsecureSkipVerify = true
} else {
return nil, nil
}
}
// will be nil by default if nothing is provided
return t, nil
}
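A hedged usage sketch for an HTTPS-based plugin; the certificate paths are placeholders. A nil config returned with a nil error simply means "use Go's default TLS behavior".

package main

import (
    "log"
    "net/http"

    "github.com/influxdata/telegraf/internal"
)

func main() {
    tlsCfg, err := internal.GetTLSConfig(
        "/etc/telegraf/cert.pem", // SSLCert (placeholder)
        "/etc/telegraf/key.pem",  // SSLKey (placeholder)
        "/etc/telegraf/ca.pem",   // SSLCA (placeholder)
        false,                    // InsecureSkipVerify
    )
    if err != nil {
        log.Fatal(err)
    }
    // A nil tlsCfg leaves the transport on Go's default TLS settings.
    client := &http.Client{
        Transport: &http.Transport{TLSClientConfig: tlsCfg},
    }
    _ = client
}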
// Glob will test a string pattern, potentially containing globs, against a
// subject string. The result is a simple true/false, determining whether or
// not the glob pattern matched the subject text.
//
// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
// thanks Ryan Uber!
func Glob(pattern, measurement string) bool {
// Empty pattern can only match empty subject
if pattern == "" {
return measurement == pattern
}
// If the pattern _is_ a glob, it matches everything
if pattern == "*" {
return true
}
parts := strings.Split(pattern, "*")
if len(parts) == 1 {
// No globs in pattern, so test for match
return pattern == measurement
}
leadingGlob := strings.HasPrefix(pattern, "*")
trailingGlob := strings.HasSuffix(pattern, "*")
end := len(parts) - 1
for i, part := range parts {
switch i {
case 0:
if leadingGlob {
continue
}
if !strings.HasPrefix(measurement, part) {
return false
}
case end:
if len(measurement) > 0 {
return trailingGlob || strings.HasSuffix(measurement, part)
}
default:
if !strings.Contains(measurement, part) {
return false
}
}
// Trim evaluated text from measurement as we loop over the pattern.
idx := strings.Index(measurement, part) + len(part)
measurement = measurement[idx:]
}
// All parts of the pattern matched
return true
}
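A few illustrative matches against these rules:

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal"
)

func main() {
    fmt.Println(internal.Glob("cpu_*", "cpu_usage_idle"))   // true: trailing glob
    fmt.Println(internal.Glob("*_idle", "cpu_usage_idle"))  // true: leading glob
    fmt.Println(internal.Glob("*usage*", "cpu_usage_idle")) // true: glob on both sides
    fmt.Println(internal.Glob("cpu", "cpu_usage_idle"))     // false: no glob, exact match only
}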

44
internal/internal_test.go Normal file

@@ -0,0 +1,44 @@
package internal
import "testing"
func testGlobMatch(t *testing.T, pattern, subj string) {
if !Glob(pattern, subj) {
t.Errorf("%s should match %s", pattern, subj)
}
}
func testGlobNoMatch(t *testing.T, pattern, subj string) {
if Glob(pattern, subj) {
t.Errorf("%s should not match %s", pattern, subj)
}
}
func TestEmptyPattern(t *testing.T) {
testGlobMatch(t, "", "")
testGlobNoMatch(t, "", "test")
}
func TestPatternWithoutGlobs(t *testing.T) {
testGlobMatch(t, "test", "test")
}
func TestGlob(t *testing.T) {
for _, pattern := range []string{
"*test", // Leading glob
"this*", // Trailing glob
"*is*a*", // Lots of globs
"**test**", // Double glob characters
"**is**a***test*", // Varying number of globs
} {
testGlobMatch(t, pattern, "this_is_a_test")
}
for _, pattern := range []string{
"test*", // Implicit substring match should fail
"*is", // Partial match should fail
"*no*", // Globs without a match between them should fail
} {
testGlobNoMatch(t, pattern, "this_is_a_test")
}
}

92
internal/models/filter.go Normal file

@@ -0,0 +1,92 @@
package internal_models
import (
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
)
// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
Name string
Filter []string
}
// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
Drop []string
Pass []string
TagDrop []TagFilter
TagPass []TagFilter
IsActive bool
}
func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
if f.ShouldPass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
return true
}
return false
}
// ShouldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f Filter) ShouldPass(key string) bool {
if f.Pass != nil {
for _, pat := range f.Pass {
// TODO remove HasPrefix check, leaving it for now for legacy support.
// Cam, 2015-12-07
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
return true
}
}
return false
}
if f.Drop != nil {
for _, pat := range f.Drop {
// TODO remove HasPrefix check, leaving it for now for legacy support.
// Cam, 2015-12-07
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
return false
}
}
return true
}
return true
}
// ShouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
if f.TagPass != nil {
for _, pat := range f.TagPass {
if tagval, ok := tags[pat.Name]; ok {
for _, filter := range pat.Filter {
if internal.Glob(filter, tagval) {
return true
}
}
}
}
return false
}
if f.TagDrop != nil {
for _, pat := range f.TagDrop {
if tagval, ok := tags[pat.Name]; ok {
for _, filter := range pat.Filter {
if internal.Glob(filter, tagval) {
return false
}
}
}
}
return true
}
return true
}
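Putting the two checks together, a small usage sketch (the measurement and tag values are arbitrary):

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal/models"
)

func main() {
    f := internal_models.Filter{
        Pass: []string{"cpu_*"},
        TagPass: []internal_models.TagFilter{
            {Name: "cpu", Filter: []string{"cpu-*"}},
        },
        IsActive: true,
    }
    fmt.Println(f.ShouldPass("cpu_usage_idle"))                      // true
    fmt.Println(f.ShouldPass("mem_free"))                            // false
    fmt.Println(f.ShouldTagsPass(map[string]string{"cpu": "cpu-0"})) // true
    fmt.Println(f.ShouldTagsPass(map[string]string{"cpu": "cpu0"}))  // false
}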


@@ -0,0 +1,177 @@
package internal_models
import (
"testing"
)
func TestFilter_Empty(t *testing.T) {
f := Filter{}
measurements := []string{
"foo",
"bar",
"barfoo",
"foo_bar",
"foo.bar",
"foo-bar",
"supercalifradjulisticexpialidocious",
}
for _, measurement := range measurements {
if !f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
}
func TestFilter_Pass(t *testing.T) {
f := Filter{
Pass: []string{"foo*", "cpu_usage_idle"},
}
passes := []string{
"foo",
"foo_bar",
"foo.bar",
"foo-bar",
"cpu_usage_idle",
}
drops := []string{
"bar",
"barfoo",
"bar_foo",
"cpu_usage_busy",
}
for _, measurement := range passes {
if !f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
}
func TestFilter_Drop(t *testing.T) {
f := Filter{
Drop: []string{"foo*", "cpu_usage_idle"},
}
drops := []string{
"foo",
"foo_bar",
"foo.bar",
"foo-bar",
"cpu_usage_idle",
}
passes := []string{
"bar",
"barfoo",
"bar_foo",
"cpu_usage_busy",
}
for _, measurement := range passes {
if !f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
}
func TestFilter_TagPass(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
TagFilter{
Name: "mem",
Filter: []string{"mem_free"},
}}
f := Filter{
TagPass: filters,
}
passes := []map[string]string{
{"cpu": "cpu-total"},
{"cpu": "cpu-0"},
{"cpu": "cpu-1"},
{"cpu": "cpu-2"},
{"mem": "mem_free"},
}
drops := []map[string]string{
{"cpu": "cputotal"},
{"cpu": "cpu0"},
{"cpu": "cpu1"},
{"cpu": "cpu2"},
{"mem": "mem_used"},
}
for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}
for _, tags := range drops {
if f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
}
func TestFilter_TagDrop(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
TagFilter{
Name: "mem",
Filter: []string{"mem_free"},
}}
f := Filter{
TagDrop: filters,
}
drops := []map[string]string{
{"cpu": "cpu-total"},
{"cpu": "cpu-0"},
{"cpu": "cpu-1"},
{"cpu": "cpu-2"},
{"mem": "mem_free"},
}
passes := []map[string]string{
{"cpu": "cputotal"},
{"cpu": "cpu0"},
{"cpu": "cpu1"},
{"cpu": "cpu2"},
{"mem": "mem_used"},
}
for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}
for _, tags := range drops {
if f.ShouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
}


@@ -0,0 +1,24 @@
package internal_models
import (
"time"
"github.com/influxdata/telegraf"
)
type RunningInput struct {
Name string
Input telegraf.Input
Config *InputConfig
}
// InputConfig containing a name, interval, and filter
type InputConfig struct {
Name string
NameOverride string
MeasurementPrefix string
MeasurementSuffix string
Tags map[string]string
Filter Filter
Interval time.Duration
}


@@ -0,0 +1,78 @@
package internal_models
import (
"log"
"time"
"github.com/influxdata/telegraf"
)
const DEFAULT_POINT_BUFFER_LIMIT = 10000
type RunningOutput struct {
Name string
Output telegraf.Output
Config *OutputConfig
Quiet bool
PointBufferLimit int
metrics []telegraf.Metric
overwriteCounter int
}
func NewRunningOutput(
name string,
output telegraf.Output,
conf *OutputConfig,
) *RunningOutput {
ro := &RunningOutput{
Name: name,
metrics: make([]telegraf.Metric, 0),
Output: output,
Config: conf,
PointBufferLimit: DEFAULT_POINT_BUFFER_LIMIT,
}
return ro
}
func (ro *RunningOutput) AddPoint(point telegraf.Metric) {
if ro.Config.Filter.IsActive {
if !ro.Config.Filter.ShouldMetricPass(point) {
return
}
}
if len(ro.metrics) < ro.PointBufferLimit {
ro.metrics = append(ro.metrics, point)
} else {
log.Printf("WARNING: overwriting cached metrics, you may want to " +
"increase the metric_buffer_limit setting in your [agent] config " +
"if you do not wish to overwrite metrics.\n")
if ro.overwriteCounter == len(ro.metrics) {
ro.overwriteCounter = 0
}
ro.metrics[ro.overwriteCounter] = point
ro.overwriteCounter++
}
}
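The else-branch above acts as a wrap-around cursor over a full buffer. A standalone sketch of the same arithmetic, with a limit of 3 instead of 10000:

package main

import "fmt"

func main() {
    limit := 3
    buf := make([]int, 0, limit)
    cursor := 0 // mirrors ro.overwriteCounter
    for point := 1; point <= 7; point++ {
        if len(buf) < limit {
            buf = append(buf, point)
            continue
        }
        if cursor == len(buf) {
            cursor = 0 // wrap around once the end is reached
        }
        buf[cursor] = point // the oldest cached entry is overwritten
        cursor++
    }
    fmt.Println(buf) // [7 5 6]: points 1 through 4 were overwritten
}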
func (ro *RunningOutput) Write() error {
start := time.Now()
err := ro.Output.Write(ro.metrics)
elapsed := time.Since(start)
if err == nil {
if !ro.Quiet {
log.Printf("Wrote %d metrics to output %s in %s\n",
len(ro.metrics), ro.Name, elapsed)
}
ro.metrics = make([]telegraf.Metric, 0)
ro.overwriteCounter = 0
}
return err
}
// OutputConfig containing name and filter
type OutputConfig struct {
Name string
Filter Filter
}

115
metric.go Normal file

@@ -0,0 +1,115 @@
package telegraf
import (
"bytes"
"time"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/models"
)
type Metric interface {
// Name returns the measurement name of the metric
Name() string
// Tags returns the tags associated with the metric
Tags() map[string]string
// Time returns the timestamp for the metric
Time() time.Time
// UnixNano returns the unix nano time of the metric
UnixNano() int64
// Fields returns the fields for the metric
Fields() map[string]interface{}
// String returns a line-protocol string of the metric
String() string
// PrecisionString returns a line-protocol string of the metric, at the given precision
PrecisionString(precision string) string
// Point returns a influxdb client.Point object
Point() *client.Point
}
// metric is a wrapper of the influxdb client.Point struct
type metric struct {
pt *client.Point
}
// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t ...time.Time,
) (Metric, error) {
var T time.Time
if len(t) > 0 {
T = t[0]
}
pt, err := client.NewPoint(name, tags, fields, T)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
}, nil
}
// ParseMetrics returns a slice of Metrics from a text representation of a
// metric (in line-protocol format)
// with each metric separated by newlines. If any metrics fail to parse,
// a non-nil error will be returned in addition to the metrics that parsed
// successfully.
func ParseMetrics(buf []byte) ([]Metric, error) {
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
points, err := models.ParsePoints(buf)
metrics := make([]Metric, len(points))
for i, point := range points {
// Ignore error here because it's impossible that a models.Point
// wouldn't parse into client.Point properly
metrics[i], _ = NewMetric(point.Name(), point.Tags(),
point.Fields(), point.Time())
}
return metrics, err
}
func (m *metric) Name() string {
return m.pt.Name()
}
func (m *metric) Tags() map[string]string {
return m.pt.Tags()
}
func (m *metric) Time() time.Time {
return m.pt.Time()
}
func (m *metric) UnixNano() int64 {
return m.pt.UnixNano()
}
func (m *metric) Fields() map[string]interface{} {
return m.pt.Fields()
}
func (m *metric) String() string {
return m.pt.String()
}
func (m *metric) PrecisionString(precision string) string {
return m.pt.PrecisionString(precision)
}
func (m *metric) Point() *client.Point {
return m.pt
}
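A round-trip sketch of the wrapper: build a metric, render it as line protocol, then parse it back.

package main

import (
    "fmt"
    "time"

    "github.com/influxdata/telegraf"
)

func main() {
    m, err := telegraf.NewMetric(
        "cpu",
        map[string]string{"host": "foo"},
        map[string]interface{}{"usage_idle": float64(99)},
        time.Unix(0, 1454105876344540456),
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(m.String())
    // cpu,host=foo usage_idle=99 1454105876344540456

    parsed, err := telegraf.ParseMetrics([]byte(m.String() + "\n"))
    fmt.Println(len(parsed), err) // 1 <nil>
}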

135
metric_test.go Normal file

@@ -0,0 +1,135 @@
package telegraf
import (
"fmt"
"math"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const validMs = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 1454105876344540456
`
const invalidMs = `
cpu, cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo usage_idle
cpu,host usage_idle=99
cpu,host=foo usage_idle=99 very bad metric
`
const validInvalidMs = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=51,usage_busy=49
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=60,usage_busy=40
cpu,host usage_idle=99
`
func TestParseValidMetrics(t *testing.T) {
metrics, err := ParseMetrics([]byte(validMs))
assert.NoError(t, err)
assert.Len(t, metrics, 1)
m := metrics[0]
tags := map[string]string{
"host": "foo",
"datacenter": "us-east",
"cpu": "cpu0",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, int64(1454105876344540456), m.UnixNano())
}
func TestParseInvalidMetrics(t *testing.T) {
metrics, err := ParseMetrics([]byte(invalidMs))
assert.Error(t, err)
assert.Len(t, metrics, 0)
}
func TestParseValidAndInvalidMetrics(t *testing.T) {
metrics, err := ParseMetrics([]byte(validInvalidMs))
assert.Error(t, err)
assert.Len(t, metrics, 3)
}
func TestNewMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
now.UnixNano())
assert.Equal(t, lineProto, m.String())
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
now.Unix())
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}
func TestNewMetricStringNoTime(t *testing.T) {
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := NewMetric("cpu", tags, fields)
assert.NoError(t, err)
lineProto := "cpu,host=localhost usage_idle=99"
assert.Equal(t, lineProto, m.String())
lineProtoPrecision := "cpu,host=localhost usage_idle=99"
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}
func TestNewMetricFailNaN(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": math.NaN(),
}
_, err := NewMetric("cpu", tags, fields, now)
assert.Error(t, err)
}

31
output.go Normal file

@@ -0,0 +1,31 @@
package telegraf
type Output interface {
// Connect to the Output
Connect() error
// Close any connections to the Output
Close() error
// Description returns a one-sentence description of the Output
Description() string
// SampleConfig returns the default configuration of the Output
SampleConfig() string
// Write takes in a group of points to be written to the Output
Write(metrics []Metric) error
}
type ServiceOutput interface {
// Connect to the Output
Connect() error
// Close any connections to the Output
Close() error
// Description returns a one-sentence description of the Output
Description() string
// SampleConfig returns the default configuration of the Output
SampleConfig() string
// Write takes in a group of points to be written to the Output
Write(metrics []Metric) error
// Start the "service" that will provide an Output
Start() error
// Stop the "service" that will provide an Output
Stop()
}
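And a matching minimal sketch on the output side. The `discard` plugin is hypothetical; registration via outputs.Add mirrors how the bundled outputs register themselves.

package discard

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/outputs"
)

// Discard is a hypothetical output that drops every metric it is given.
type Discard struct{}

func (d *Discard) Connect() error       { return nil }
func (d *Discard) Close() error         { return nil }
func (d *Discard) Description() string  { return "Discard all metrics" }
func (d *Discard) SampleConfig() string { return "  # no configuration\n" }

// Write receives a batch of metrics; a real output would serialize and
// send them here, returning an error to keep the batch buffered for retry.
func (d *Discard) Write(metrics []telegraf.Metric) error {
    return nil
}

func init() {
    outputs.Add("discard", func() telegraf.Output { return &Discard{} })
}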


@@ -1,349 +0,0 @@
#!/usr/bin/env bash
###########################################################################
# Packaging script which creates debian and RPM packages. It optionally
# tags the repo with the given version.
#
# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS
# CLI tools must also be installed.
#
# https://github.com/jordansissel/fpm
# http://aws.amazon.com/cli/
#
# Packaging process: to package a build, simply execute:
#
# package.sh <version>
#
# where <version> is the desired version. If generation of a debian and RPM
# package is successful, the script will offer to tag the repo using the
# supplied version string.
#
# AWS upload: the script will also offer to upload the packages to S3. If
# this option is selected, the credentials should be present in the file
# ~/aws.conf. The contents should be of the form:
#
# [default]
# aws_access_key_id=<access ID>
# aws_secret_access_key=<secret key>
# region = us-east-1
#
# Trim the leading spaces when creating the file. The script will exit if
# S3 upload is requested, but this file does not exist.
AWS_FILE=~/aws.conf
INSTALL_ROOT_DIR=/opt/telegraf
TELEGRAF_LOG_DIR=/var/log/telegraf
CONFIG_ROOT_DIR=/etc/opt/telegraf
SAMPLE_CONFIGURATION=etc/config.sample.toml
INITD_SCRIPT=scripts/init.sh
TMP_WORK_DIR=`mktemp -d`
POST_INSTALL_PATH=`mktemp`
ARCH=`uname -i`
LICENSE=MIT
URL=influxdb.com
MAINTAINER=support@influxdb.com
VENDOR=InfluxDB
DESCRIPTION="InfluxDB Telegraf agent"
PKG_DEPS=(coreutils)
GO_VERSION="go1.4.2"
GOPATH_INSTALL=
BINS=(
telegraf
)
###########################################################################
# Helper functions.
# usage prints simple usage information.
usage() {
echo -e "$0 [<version>] [-h]\n"
cleanup_exit $1
}
# cleanup_exit removes all resources created during the process and exits with
# the supplied returned code.
cleanup_exit() {
rm -r $TMP_WORK_DIR
rm $POST_INSTALL_PATH
exit $1
}
# check_gopath sanity checks the value of the GOPATH env variable, and determines
# the path where build artifacts are installed. GOPATH may be a colon-delimited
# list of directories.
check_gopath() {
[ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
[ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1
echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation."
}
check_gvm() {
source $HOME/.gvm/scripts/gvm
which gvm
if [ $? -ne 0 ]; then
echo "gvm not found -- aborting."
cleanup_exit $1
fi
gvm use $GO_VERSION
if [ $? -ne 0 ]; then
echo "gvm cannot find Go version $GO_VERSION -- aborting."
cleanup_exit $1
fi
}
# check_clean_tree ensures that no source file is locally modified.
check_clean_tree() {
modified=$(git ls-files --modified | wc -l)
if [ $modified -ne 0 ]; then
echo "The source tree is not clean -- aborting."
cleanup_exit 1
fi
echo "Git tree is clean."
}
# update_tree ensures the tree is in-sync with the repo.
update_tree() {
git pull origin master
if [ $? -ne 0 ]; then
echo "Failed to pull latest code -- aborting."
cleanup_exit 1
fi
git fetch --tags
if [ $? -ne 0 ]; then
echo "Failed to fetch tags -- aborting."
cleanup_exit 1
fi
echo "Git tree updated successfully."
}
# check_tag_exists checks if the existing release already exists in the tags.
check_tag_exists () {
version=$1
git tag | grep -q "^v$version$"
if [ $? -eq 0 ]; then
echo "Proposed version $version already exists as a tag -- aborting."
cleanup_exit 1
fi
}
# make_dir_tree creates the directory structure within the packages.
make_dir_tree() {
work_dir=$1
version=$2
mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts
if [ $? -ne 0 ]; then
echo "Failed to create installation directory -- aborting."
cleanup_exit 1
fi
mkdir -p $work_dir/$CONFIG_ROOT_DIR
if [ $? -ne 0 ]; then
echo "Failed to create configuration directory -- aborting."
cleanup_exit 1
fi
}
# do_build builds the code. The version and commit must be passed in.
do_build() {
version=$1
commit=`git rev-parse HEAD`
if [ $? -ne 0 ]; then
echo "Unable to retrieve current commit -- aborting"
cleanup_exit 1
fi
for b in ${BINS[*]}; do
rm -f $GOPATH_INSTALL/bin/$b
done
go get -u -f ./...
if [ $? -ne 0 ]; then
echo "WARNING: failed to 'go get' packages."
fi
go install -a -ldflags="-X main.Version $version -X main.Commit $commit" ./...
if [ $? -ne 0 ]; then
echo "Build failed, unable to create package -- aborting"
cleanup_exit 1
fi
echo "Build completed successfully."
}
# generate_postinstall_script creates the post-install script for the
# package. It must be passed the version.
generate_postinstall_script() {
version=$1
cat <<EOF >$POST_INSTALL_PATH
rm -f $INSTALL_ROOT_DIR/telegraf
rm -f $INSTALL_ROOT_DIR/init.sh
ln -s $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf
ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh
rm -f /etc/init.d/telegraf
ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf
chmod +x /etc/init.d/telegraf
if which update-rc.d > /dev/null 2>&1 ; then
update-rc.d -f telegraf remove
update-rc.d telegraf defaults
else
chkconfig --add telegraf
fi
if ! id telegraf >/dev/null 2>&1; then
useradd --system -U -M telegraf
fi
chown -R -L telegraf:telegraf $INSTALL_ROOT_DIR
chmod -R a+rX $INSTALL_ROOT_DIR
mkdir -p $TELEGRAF_LOG_DIR
chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR
EOF
echo "Post-install script created successfully at $POST_INSTALL_PATH"
}
###########################################################################
# Start the packaging process.
if [ $# -ne 1 ]; then
usage 1
elif [ $1 == "-h" ]; then
usage 0
else
VERSION=$1
fi
echo -e "\nStarting package process...\n"
check_gvm
check_gopath
check_clean_tree
update_tree
check_tag_exists $VERSION
do_build $VERSION
make_dir_tree $TMP_WORK_DIR $VERSION
###########################################################################
# Copy the assets to the installation directories.
for b in ${BINS[*]}; do
cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION
if [ $? -ne 0 ]; then
echo "Failed to copy binaries to packaging directory -- aborting."
cleanup_exit 1
fi
done
echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION"
cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts
if [ $? -ne 0 ]; then
echo "Failed to copy init.d script to packaging directory -- aborting."
cleanup_exit 1
fi
echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"
cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/telegraf.conf
if [ $? -ne 0 ]; then
echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting."
cleanup_exit 1
fi
generate_postinstall_script $VERSION
###########################################################################
# Create the actual packages.
echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xn" ]; then
echo "Packaging aborted."
cleanup_exit 1
fi
if [ $ARCH == "i386" ]; then
rpm_package=telegraf-$VERSION-1.i686.rpm
debian_package=telegraf_${VERSION}_i686.deb
deb_args="-a i686"
rpm_args="setarch i686"
elif [ $ARCH == "arm" ]; then
rpm_package=telegraf-$VERSION-1.armel.rpm
debian_package=telegraf_${VERSION}_armel.deb
else
rpm_package=telegraf-$VERSION-1.x86_64.rpm
debian_package=telegraf_${VERSION}_amd64.deb
fi
COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ."
$rpm_args fpm -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
echo "Failed to create RPM package -- aborting."
cleanup_exit 1
fi
echo "RPM package created successfully."
fpm -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
echo "Failed to create Debian package -- aborting."
cleanup_exit 1
fi
echo "Debian package created successfully."
###########################################################################
# Offer to tag the repo.
echo -n "Tag source tree with v$VERSION and push to repo? [y/N] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xy" ]; then
echo "Creating tag v$VERSION and pushing to repo"
git tag v$VERSION
if [ $? -ne 0 ]; then
echo "Failed to create tag v$VERSION -- aborting"
cleanup_exit 1
fi
git push origin v$VERSION
if [ $? -ne 0 ]; then
echo "Failed to push tag v$VERSION to repo -- aborting"
cleanup_exit 1
fi
else
echo "Not creating tag v$VERSION."
fi
###########################################################################
# Offer to publish the packages.
echo -n "Publish packages to S3? [y/N] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xy" ]; then
echo "Publishing packages to S3."
if [ ! -e "$AWS_FILE" ]; then
echo "$AWS_FILE does not exist -- aborting."
cleanup_exit 1
fi
for filepath in `ls *.{deb,rpm}`; do
echo "Uploading $filepath to S3"
filename=`basename $filepath`
echo "Uploading $filename to s3://get.influxdb.org/telegraf/$filename"
AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://get.influxdb.org/telegraf/$filename --acl public-read --region us-east-1
if [ $? -ne 0 ]; then
echo "Upload failed -- aborting".
cleanup_exit 1
fi
done
else
echo "Not publishing packages to S3."
fi
###########################################################################
# All done.
echo -e "\nPackaging process complete."
cleanup_exit 0

@@ -1,11 +0,0 @@
package all
import (
_ "github.com/influxdb/telegraf/plugins/kafka_consumer"
_ "github.com/influxdb/telegraf/plugins/memcached"
_ "github.com/influxdb/telegraf/plugins/mysql"
_ "github.com/influxdb/telegraf/plugins/postgresql"
_ "github.com/influxdb/telegraf/plugins/prometheus"
_ "github.com/influxdb/telegraf/plugins/redis"
_ "github.com/influxdb/telegraf/plugins/system"
)

@@ -0,0 +1,39 @@
# Example Input Plugin
The example plugin gathers metrics about example things
### Configuration:
```
# Description
[[inputs.example]]
# SampleConfig
```
### Measurements & Fields:
<optional description>
- measurement1
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- field3 (integer, bytes)
### Tags:
- All measurements have the following tags:
- tag1 (optional description)
- tag2
- measurement2 has the following tags:
- tag3
### Example Output:
Give an example `-test` output here
```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```

@@ -0,0 +1,265 @@
## Telegraf Plugin: Aerospike
#### Plugin arguments:
- **servers** string array: List of aerospike servers to query (def: 127.0.0.1:3000)
#### Description
The aerospike plugin queries aerospike server(s) and gets node statistics. It also collects stats for
all the configured namespaces.
For what the measurements mean, please consult the [Aerospike Metrics Reference Docs](http://www.aerospike.com/docs/reference/metrics).
To keep queries simple, the plugin replaces all `-` characters in metric names with `_`, since Aerospike reports metrics in both forms.
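For illustration, a minimal sketch of that renaming (the plugin applies the same `strings.Replace` call to every numeric stat it reads):
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Aerospike may report "stat-write-errs"; the plugin stores it as
	// "stat_write_errs" so dashes never need escaping in queries.
	key := "stat-write-errs"
	fmt.Println(strings.Replace(key, "-", "_", -1)) // stat_write_errs
}
```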
# Measurements:
#### Aerospike Statistics [values]:
Meta:
- units: Integer
Measurement names:
- batch_index_queue
- batch_index_unused_buffers
- batch_queue
- batch_tree_count
- client_connections
- data_used_bytes_memory
- index_used_bytes_memory
- info_queue
- migrate_progress_recv
- migrate_progress_send
- migrate_rx_objs
- migrate_tx_objs
- objects
- ongoing_write_reqs
- partition_absent
- partition_actual
- partition_desync
- partition_object_count
- partition_ref_count
- partition_replica
- proxy_in_progress
- query_agg_avg_rec_count
- query_avg_rec_count
- query_lookup_avg_rec_count
- queue
- record_locks
- record_refs
- sindex_used_bytes_memory
- sindex_gc_garbage_cleaned
- system_free_mem_pct
- total_bytes_disk
- total_bytes_memory
- tree_count
- scans_active
- uptime
- used_bytes_disk
- used_bytes_memory
- cluster_size
- waiting_transactions
#### Aerospike Statistics [cumulative]:
Meta:
- units: Integer
Measurement names:
- batch_errors
- batch_index_complete
- batch_index_errors
- batch_index_initiate
- batch_index_timeout
- batch_initiate
- batch_timeout
- err_duplicate_proxy_request
- err_out_of_space
- err_replica_non_null_node
- err_replica_null_node
- err_rw_cant_put_unique
- err_rw_pending_limit
- err_rw_request_not_found
- err_storage_queue_full
- err_sync_copy_null_master
- err_sync_copy_null_node
- err_tsvc_requests
- err_write_fail_bin_exists
- err_write_fail_generation
- err_write_fail_generation_xdr
- err_write_fail_incompatible_type
- err_write_fail_key_exists
- err_write_fail_key_mismatch
- err_write_fail_not_found
- err_write_fail_noxdr
- err_write_fail_parameter
- err_write_fail_prole_delete
- err_write_fail_prole_generation
- err_write_fail_prole_unknown
- err_write_fail_unknown
- fabric_msgs_rcvd
- fabric_msgs_sent
- heartbeat_received_foreign
- heartbeat_received_self
- migrate_msgs_recv
- migrate_msgs_sent
- migrate_num_incoming_accepted
- migrate_num_incoming_refused
- proxy_action
- proxy_initiate
- proxy_retry
- proxy_retry_new_dest
- proxy_retry_q_full
- proxy_retry_same_dest
- proxy_unproxy
- query_abort
- query_agg
- query_agg_abort
- query_agg_err
- query_agg_success
- query_bad_records
- query_fail
- query_long_queue_full
- query_long_running
- query_lookup_abort
- query_lookup_err
- query_lookups
- query_lookup_success
- query_reqs
- query_short_queue_full
- query_short_running
- query_success
- query_tracked
- read_dup_prole
- reaped_fds
- rw_err_ack_badnode
- rw_err_ack_internal
- rw_err_ack_nomatch
- rw_err_dup_cluster_key
- rw_err_dup_internal
- rw_err_dup_send
- rw_err_write_cluster_key
- rw_err_write_internal
- rw_err_write_send
- sindex_ucgarbage_found
- sindex_gc_locktimedout
- sindex_gc_inactivity_dur
- sindex_gc_activity_dur
- sindex_gc_list_creation_time
- sindex_gc_list_deletion_time
- sindex_gc_objects_validated
- sindex_gc_garbage_found
- stat_cluster_key_err_ack_dup_trans_reenqueue
- stat_cluster_key_err_ack_rw_trans_reenqueue
- stat_cluster_key_prole_retry
- stat_cluster_key_regular_processed
- stat_cluster_key_trans_to_proxy_retry
- stat_deleted_set_object
- stat_delete_success
- stat_duplicate_operation
- stat_evicted_objects
- stat_evicted_objects_time
- stat_evicted_set_objects
- stat_expired_objects
- stat_nsup_deletes_not_shipped
- stat_proxy_errs
- stat_proxy_reqs
- stat_proxy_reqs_xdr
- stat_proxy_success
- stat_read_errs_notfound
- stat_read_errs_other
- stat_read_reqs
- stat_read_reqs_xdr
- stat_read_success
- stat_rw_timeout
- stat_slow_trans_queue_batch_pop
- stat_slow_trans_queue_pop
- stat_slow_trans_queue_push
- stat_write_errs
- stat_write_errs_notfound
- stat_write_errs_other
- stat_write_reqs
- stat_write_reqs_xdr
- stat_write_success
- stat_xdr_pipe_miss
- stat_xdr_pipe_writes
- stat_zero_bin_records
- storage_defrag_corrupt_record
- storage_defrag_wait
- transactions
- basic_scans_succeeded
- basic_scans_failed
- aggr_scans_succeeded
- aggr_scans_failed
- udf_bg_scans_succeeded
- udf_bg_scans_failed
- udf_delete_err_others
- udf_delete_reqs
- udf_delete_success
- udf_lua_errs
- udf_query_rec_reqs
- udf_read_errs_other
- udf_read_reqs
- udf_read_success
- udf_replica_writes
- udf_scan_rec_reqs
- udf_write_err_others
- udf_write_reqs
- udf_write_success
- write_master
- write_prole
#### Aerospike Statistics [percentage]:
Meta:
- units: percent (out of 100)
Measurement names:
- free_pct_disk
- free_pct_memory
# Measurements:
#### Aerospike Namespace Statistics [values]:
Meta:
- units: Integer
- tags: `namespace=<namespace>`
Measurement names:
- available_bin_names
- available_pct
- current_time
- data_used_bytes_memory
- index_used_bytes_memory
- master_objects
- max_evicted_ttl
- max_void_time
- non_expirable_objects
- objects
- prole_objects
- sindex_used_bytes_memory
- total_bytes_disk
- total_bytes_memory
- used_bytes_disk
- used_bytes_memory
#### Aerospike Namespace Statistics [cumulative]:
Meta:
- units: Integer
- tags: `namespace=<namespace>`
Measurement names:
- evicted_objects
- expired_objects
- set_deleted_objects
- set_evicted_objects
#### Aerospike Namespace Statistics [percentage]:
Meta:
- units: percent (out of 100)
- tags: `namespace=<namespace>`
Measurement names:
- free_pct_disk
- free_pct_memory

@@ -0,0 +1,343 @@
package aerospike
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"net"
"strconv"
"strings"
"sync"
)
const (
MSG_HEADER_SIZE = 8
MSG_TYPE = 1 // Info is 1
MSG_VERSION = 2
)
var (
STATISTICS_COMMAND = []byte("statistics\n")
NAMESPACES_COMMAND = []byte("namespaces\n")
)
type aerospikeMessageHeader struct {
Version uint8
Type uint8
DataLen [6]byte
}
type aerospikeMessage struct {
aerospikeMessageHeader
Data []byte
}
// Taken from aerospike-client-go/types/message.go
func (msg *aerospikeMessage) Serialize() []byte {
msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
buf := bytes.NewBuffer([]byte{})
binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
binary.Write(buf, binary.BigEndian, msg.Data[:])
return buf.Bytes()
}
type aerospikeInfoCommand struct {
msg *aerospikeMessage
}
// Taken from aerospike-client-go/info.go
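// The info payload is a sequence of "name\tvalue\n" pairs; a name followed
// directly by '\n' carries no value.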
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
responses := make(map[string]string)
offset := int64(0)
begin := int64(0)
dataLen := int64(len(nfo.msg.Data))
// Walk the response data, splitting it into tab-separated name/value pairs.
for offset < dataLen {
b := nfo.msg.Data[offset]
if b == '\t' {
name := nfo.msg.Data[begin:offset]
offset++
begin = offset
// Parse field value.
for offset < dataLen {
if nfo.msg.Data[offset] == '\n' {
break
}
offset++
}
if offset > begin {
value := nfo.msg.Data[begin:offset]
responses[string(name)] = string(value)
} else {
responses[string(name)] = ""
}
offset++
begin = offset
} else if b == '\n' {
if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
offset++
begin = offset
} else {
offset++
}
}
if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
return responses, nil
}
type Aerospike struct {
Servers []string
}
var sampleConfig = `
# Aerospike servers to connect to (with port)
# Default: servers = ["localhost:3000"]
#
# This plugin will query all namespaces the aerospike
# server has configured and get stats for them.
servers = ["localhost:3000"]
`
func (a *Aerospike) SampleConfig() string {
return sampleConfig
}
func (a *Aerospike) Description() string {
return "Read stats from an aerospike server"
}
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
if len(a.Servers) == 0 {
return a.gatherServer("127.0.0.1:3000", acc)
}
var wg sync.WaitGroup
var outerr error
for _, server := range a.Servers {
wg.Add(1)
go func(server string) {
defer wg.Done()
outerr = a.gatherServer(server, acc)
}(server)
}
wg.Wait()
return outerr
}
func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
if err != nil {
return fmt.Errorf("Aerospike info failed: %s", err)
}
readAerospikeStats(aerospikeInfo, acc, host, "")
namespaces, err := getList(NAMESPACES_COMMAND, host)
if err != nil {
return fmt.Errorf("Aerospike namespace list failed: %s", err)
}
for ix := range namespaces {
nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
if err != nil {
return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
}
readAerospikeStats(nsInfo, acc, host, namespaces[ix])
}
return nil
}
func getMap(key []byte, host string) (map[string]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
}
parsed, err := unmarshalMapInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}
return parsed, nil
}
func getList(key []byte, host string) ([]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
}
parsed, err := unmarshalListInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}
return parsed, nil
}
func get(key []byte, host string) (map[string]string, error) {
var err error
var data map[string]string
asInfo := &aerospikeInfoCommand{
msg: &aerospikeMessage{
aerospikeMessageHeader: aerospikeMessageHeader{
Version: uint8(MSG_VERSION),
Type: uint8(MSG_TYPE),
DataLen: msgLenToBytes(int64(len(key))),
},
Data: key,
},
}
cmd := asInfo.msg.Serialize()
addr, err := net.ResolveTCPAddr("tcp", host)
if err != nil {
return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
}
conn, err := net.DialTCP("tcp", nil, addr)
if err != nil {
return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
}
defer conn.Close()
_, err = conn.Write(cmd)
if err != nil {
return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
}
msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
if err != nil {
return data, fmt.Errorf("Failed to read header: %s", err)
}
err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
if err != nil {
return data, fmt.Errorf("Failed to unmarshal header: %s", err)
}
msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)
if int64(len(asInfo.msg.Data)) != msgLen {
asInfo.msg.Data = make([]byte, msgLen)
}
_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
if err != nil {
return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
}
data, err = asInfo.parseMultiResponse()
if err != nil {
return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
}
return data, err
}
func readAerospikeStats(
stats map[string]string,
acc telegraf.Accumulator,
host string,
namespace string,
) {
fields := make(map[string]interface{})
tags := map[string]string{
"aerospike_host": host,
"namespace": "_service",
}
if namespace != "" {
tags["namespace"] = namespace
}
for key, value := range stats {
// We are going to ignore all string based keys
val, err := strconv.ParseInt(value, 10, 64)
if err == nil {
if strings.Contains(key, "-") {
key = strings.Replace(key, "-", "_", -1)
}
fields[key] = val
}
}
acc.AddFields("aerospike", fields, tags)
}
func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
key = strings.TrimSuffix(key, "\n")
res := map[string]string{}
v, exists := infoMap[key]
if !exists {
return res, fmt.Errorf("Key '%s' missing from info", key)
}
values := strings.Split(v, ";")
for i := range values {
kv := strings.Split(values[i], "=")
if len(kv) > 1 {
res[kv[0]] = kv[1]
}
}
return res, nil
}
func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
key = strings.TrimSuffix(key, "\n")
v, exists := infoMap[key]
if !exists {
return []string{}, fmt.Errorf("Key '%s' missing from info", key)
}
values := strings.Split(v, ";")
return values, nil
}
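// readLenFromConn reads from c until exactly `length` bytes of buffer have
// been filled, or a read error occurs.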
func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
var r int
for total < length {
r, err = c.Read(buffer[total:length])
total += r
if err != nil {
break
}
}
return
}
// Taken from aerospike-client-go/types/message.go
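// The message header stores the payload length as a 48-bit (6-byte) big-endian integer.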
func msgLenToBytes(DataLen int64) [6]byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(DataLen))
res := [6]byte{}
copy(res[:], b[2:])
return res
}
// Taken from aerospike-client-go/types/message.go
func msgLenFromBytes(buf [6]byte) int64 {
nbytes := append([]byte{0, 0}, buf[:]...)
DataLen := binary.BigEndian.Uint64(nbytes)
return int64(DataLen)
}
func init() {
inputs.Add("aerospike", func() telegraf.Input {
return &Aerospike{}
})
}

@@ -0,0 +1,118 @@
package aerospike
import (
"reflect"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAerospikeStatistics(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
a := &Aerospike{
Servers: []string{testutil.GetLocalHost() + ":3000"},
}
var acc testutil.Accumulator
err := a.Gather(&acc)
require.NoError(t, err)
// Only use a few of the metrics
asMetrics := []string{
"transactions",
"stat_write_errs",
"stat_read_reqs",
"stat_write_reqs",
}
for _, metric := range asMetrics {
assert.True(t, acc.HasIntField("aerospike", metric), metric)
}
}
func TestAerospikeMsgLenFromToBytes(t *testing.T) {
var i int64 = 8
assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
}
func TestReadAerospikeStatsNoNamespace(t *testing.T) {
// Also test for re-writing
var acc testutil.Accumulator
stats := map[string]string{
"stat-write-errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "")
fields := map[string]interface{}{
"stat_write_errs": int64(12345),
"stat_read_reqs": int64(12345),
}
tags := map[string]string{
"aerospike_host": "host1",
"namespace": "_service",
}
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}
func TestReadAerospikeStatsNamespace(t *testing.T) {
var acc testutil.Accumulator
stats := map[string]string{
"stat_write_errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "test")
fields := map[string]interface{}{
"stat_write_errs": int64(12345),
"stat_read_reqs": int64(12345),
}
tags := map[string]string{
"aerospike_host": "host1",
"namespace": "test",
}
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}
func TestAerospikeUnmarshalList(t *testing.T) {
i := map[string]string{
"test": "one;two;three",
}
expected := []string{"one", "two", "three"}
list, err := unmarshalListInfo(i, "test2")
assert.True(t, err != nil)
list, err = unmarshalListInfo(i, "test")
assert.True(t, err == nil)
equal := true
for ix := range expected {
if list[ix] != expected[ix] {
equal = false
break
}
}
assert.True(t, equal)
}
func TestAerospikeUnmarshalMap(t *testing.T) {
i := map[string]string{
"test": "key1=value1;key2=value2",
}
expected := map[string]string{
"key1": "value1",
"key2": "value2",
}
m, err := unmarshalMapInfo(i, "test")
assert.True(t, err == nil)
assert.True(t, reflect.DeepEqual(m, expected))
}

plugins/inputs/all/all.go

@@ -0,0 +1,46 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
_ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
_ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)

@@ -0,0 +1,45 @@
# Telegraf plugin: Apache
#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from.
#### Description
The Apache plugin collects metrics from the /server-status?auto endpoint. See
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for example
output, and the [mod_status documentation](http://httpd.apache.org/docs/2.2/mod/mod_status.html)
for details on each value.
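As an illustration, a minimal sketch of how one `Key: value` line from the `?auto` output becomes a field; the `parseStatusLine` helper is hypothetical, but it mirrors the splitting and space-stripping the plugin performs:
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseStatusLine turns a "Key: value" status line into a field name
// (spaces removed, matching the measurement names below) and a float value.
func parseStatusLine(line string) (string, float64, bool) {
	parts := strings.SplitN(line, ":", 2)
	if len(parts) != 2 {
		return "", 0, false
	}
	key := strings.Replace(parts[0], " ", "", -1)
	value, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 64)
	if err != nil {
		return "", 0, false
	}
	return key, value, true
}

func main() {
	key, value, ok := parseStatusLine("Total Accesses: 129811861")
	fmt.Println(key, value, ok) // TotalAccesses 1.29811861e+08 true
}
```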
# Measurements:
Meta:
- tags: `port=<port>`, `server=url`
- apache_TotalAccesses
- apache_TotalkBytes
- apache_CPULoad
- apache_Uptime
- apache_ReqPerSec
- apache_BytesPerSec
- apache_BytesPerReq
- apache_BusyWorkers
- apache_IdleWorkers
- apache_ConnsTotal
- apache_ConnsAsyncWriting
- apache_ConnsAsyncKeepAlive
- apache_ConnsAsyncClosing
### Scoreboard measurements
- apache_scboard_waiting
- apache_scboard_starting
- apache_scboard_reading
- apache_scboard_sending
- apache_scboard_keepalive
- apache_scboard_dnslookup
- apache_scboard_closing
- apache_scboard_logging
- apache_scboard_finishing
- apache_scboard_idle_cleanup
- apache_scboard_open

@@ -0,0 +1,171 @@
package apache
import (
"bufio"
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Apache struct {
Urls []string
}
var sampleConfig = `
# An array of Apache status URIs to gather stats from.
urls = ["http://localhost/server-status?auto"]
`
func (n *Apache) SampleConfig() string {
return sampleConfig
}
func (n *Apache) Description() string {
return "Read Apache status information (mod_status)"
}
func (n *Apache) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
var outerr error
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
}
wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
outerr = n.gatherUrl(addr, acc)
}(addr)
}
wg.Wait()
return outerr
}
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{Transport: tr}
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
resp, err := client.Get(addr.String())
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
}
tags := getTags(addr)
sc := bufio.NewScanner(resp.Body)
fields := make(map[string]interface{})
for sc.Scan() {
line := sc.Text()
if strings.Contains(line, ":") {
parts := strings.SplitN(line, ":", 2)
key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])
switch key {
case "Scoreboard":
for field, value := range n.gatherScores(part) {
fields[field] = value
}
default:
value, err := strconv.ParseFloat(part, 64)
if err != nil {
continue
}
fields[key] = value
}
}
}
acc.AddFields("apache", fields, tags)
return nil
}
func (n *Apache) gatherScores(data string) map[string]interface{} {
var waiting, open int = 0, 0
var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0
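// Each character in the scoreboard string encodes one worker slot: "_" is
// waiting for connection, "." is an open slot, and the letters below map to
// the remaining mod_status worker states.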
for _, s := range strings.Split(data, "") {
switch s {
case "_":
waiting++
case "S":
S++
case "R":
R++
case "W":
W++
case "K":
K++
case "D":
D++
case "C":
C++
case "L":
L++
case "G":
G++
case "I":
I++
case ".":
open++
}
}
fields := map[string]interface{}{
"scboard_waiting": float64(waiting),
"scboard_starting": float64(S),
"scboard_reading": float64(R),
"scboard_sending": float64(W),
"scboard_keepalive": float64(K),
"scboard_dnslookup": float64(D),
"scboard_closing": float64(C),
"scboard_logging": float64(L),
"scboard_finishing": float64(G),
"scboard_idle_cleanup": float64(I),
"scboard_open": float64(open),
}
return fields
}
// Get tag(s) for the apache plugin
func getTags(addr *url.URL) map[string]string {
h := addr.Host
host, port, err := net.SplitHostPort(h)
if err != nil {
host = addr.Host
if addr.Scheme == "http" {
port = "80"
} else if addr.Scheme == "https" {
port = "443"
} else {
port = ""
}
}
return map[string]string{"server": host, "port": port}
}
func init() {
inputs.Add("apache", func() telegraf.Input {
return &Apache{}
})
}

@@ -0,0 +1,73 @@
package apache
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
var apacheStatus = `
Total Accesses: 129811861
Total kBytes: 5213701865
CPULoad: 6.51929
Uptime: 941553
ReqPerSec: 137.87
BytesPerSec: 5670240
BytesPerReq: 41127.4
BusyWorkers: 270
IdleWorkers: 630
ConnsTotal: 1451
ConnsAsyncWriting: 32
ConnsAsyncKeepAlive: 945
ConnsAsyncClosing: 205
Scoreboard: WW_____W_RW_R_W__RRR____WR_W___WW________W_WW_W_____R__R_WR__WRWR_RRRW___R_RWW__WWWRW__R_RW___RR_RW_R__W__WR_WWW______WWR__R___R_WR_W___RW______RR________________W______R__RR______W________________R____R__________________________RW_W____R_____W_R_________________R____RR__W___R_R____RW______R____W______W_W_R_R______R__R_R__________R____W_______WW____W____RR__W_____W_R_______W__________W___W____________W_______WRR_R_W____W_____R____W_WW_R____RRW__W............................................................................................................................................................................................................................................................................................................WRRWR____WR__RR_R___RWR_________W_R____RWRRR____R_R__RW_R___WWW_RW__WR_RRR____W___R____WW_R__R___RR_W_W_RRRRWR__RRWR__RRW_W_RRRW_R_RR_W__RR_RWRR_R__R___RR_RR______R__RR____R_____W_R_R_R__R__R__________W____WW_R___R_R___R_________RR__RR____RWWWW___W_R________R_R____R_W___W___R___W_WRRWW_______R__W_RW_______R________RR__R________W_______________________W_W______________RW_________WR__R___R__R_______________WR_R_________W___RW_____R____________W____.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
...............................................................................................................................................................................................................
`
func TestHTTPApache(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, apacheStatus)
}))
defer ts.Close()
a := Apache{
Urls: []string{ts.URL},
}
var acc testutil.Accumulator
err := a.Gather(&acc)
require.NoError(t, err)
fields := map[string]interface{}{
"TotalAccesses": float64(1.29811861e+08),
"TotalkBytes": float64(5.213701865e+09),
"CPULoad": float64(6.51929),
"Uptime": float64(941553),
"ReqPerSec": float64(137.87),
"BytesPerSec": float64(5.67024e+06),
"BytesPerReq": float64(41127.4),
"BusyWorkers": float64(270),
"IdleWorkers": float64(630),
"ConnsTotal": float64(1451),
"ConnsAsyncWriting": float64(32),
"ConnsAsyncKeepAlive": float64(945),
"ConnsAsyncClosing": float64(205),
"scboard_waiting": float64(630),
"scboard_starting": float64(0),
"scboard_reading": float64(157),
"scboard_sending": float64(113),
"scboard_keepalive": float64(0),
"scboard_dnslookup": float64(0),
"scboard_closing": float64(0),
"scboard_logging": float64(0),
"scboard_finishing": float64(0),
"scboard_idle_cleanup": float64(0),
"scboard_open": float64(2850),
}
acc.AssertContainsFields(t, "apache", fields)
}

@@ -0,0 +1,89 @@
# Telegraf plugin: bcache
Gets bcache stats from the `stats_total` directory and the `dirty_data` file.
# Measurements
Meta:
- tags: `backing_dev=dev bcache_dev=dev`
Measurement names:
- dirty_data
- bypassed
- cache_bypass_hits
- cache_bypass_misses
- cache_hit_ratio
- cache_hits
- cache_miss_collisions
- cache_misses
- cache_readaheads
### Description
```
dirty_data
Amount of dirty data for this backing device in the cache. Continuously
updated unlike the cache set's version, but may be slightly off.
bypassed
Amount of IO (both reads and writes) that has bypassed the cache
cache_bypass_hits
cache_bypass_misses
Hits and misses for IO that is intended to skip the cache are still counted,
but broken out here.
cache_hits
cache_misses
cache_hit_ratio
Hits and misses are counted per individual IO as bcache sees them; a
partial hit is counted as a miss.
cache_miss_collisions
Counts instances where data was going to be inserted into the cache from a
cache miss, but raced with a write and data was already present (usually 0
since the synchronization for cache misses was rewritten)
cache_readaheads
Count of times readahead occurred.
```
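The kernel reports `dirty_data` (and `bypassed`) in human-readable units such as `1.5G`, and the plugin converts these to bytes before reporting. A minimal standalone sketch of that conversion, modeled on the plugin's `prettyToBytes` helper with 1024-based prefixes:
```go
package main

import (
	"fmt"
	"strconv"
)

// prettyToBytes converts a sysfs value such as "1.5G" to bytes. It assumes a
// non-empty string with an optional one-letter binary prefix.
func prettyToBytes(v string) uint64 {
	factors := map[byte]uint64{
		'k': 1 << 10, 'M': 1 << 20, 'G': 1 << 30,
		'T': 1 << 40, 'P': 1 << 50, 'E': 1 << 60,
	}
	factor := uint64(1)
	if f, ok := factors[v[len(v)-1]]; ok {
		factor = f
		v = v[:len(v)-1]
	}
	n, _ := strconv.ParseFloat(v, 64)
	return uint64(n * float64(factor))
}

func main() {
	fmt.Println(prettyToBytes("1.5G")) // 1610612736
}
```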
# Example output
Using this configuration:
```
[bcache]
# Bcache sets path
# If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
# By default, telegraf gathers stats for all bcache devices
# Setting devices will restrict the stats to the specified
# bcache devices.
# bcacheDevs = ["bcache0", ...]
```
When run with:
```
./telegraf -config telegraf.conf -input-filter bcache -test
```
It produces:
```
* Plugin: bcache, Collection 1
> [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194
> [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_hits value=146270986
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_misses value=0
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hit_ratio value=90
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hits value=511941651
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_miss_collisions value=157678
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_misses value=50647396
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_readaheads value=0
```

@@ -0,0 +1,142 @@
package bcache
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Bcache struct {
BcachePath string
BcacheDevs []string
}
var sampleConfig = `
# Bcache sets path
# If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
# By default, telegraf gathers stats for all bcache devices
# Setting devices will restrict the stats to the specified
# bcache devices.
# bcacheDevs = ["bcache0", ...]
`
func (b *Bcache) SampleConfig() string {
return sampleConfig
}
func (b *Bcache) Description() string {
return "Read metrics of bcache from stats_total and dirty_data"
}
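// getTags resolves the bdev symlinks under the bcache sysfs tree to derive the
// backing device name (e.g. "md10") and the bcache device name (e.g. "bcache0").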
func getTags(bdev string) map[string]string {
backingDevFile, _ := os.Readlink(bdev)
backingDevPath := strings.Split(backingDevFile, "/")
backingDev := backingDevPath[len(backingDevPath)-2]
bcacheDevFile, _ := os.Readlink(bdev + "/dev")
bcacheDevPath := strings.Split(bcacheDevFile, "/")
bcacheDev := bcacheDevPath[len(bcacheDevPath)-1]
return map[string]string{"backing_dev": backingDev, "bcache_dev": bcacheDev}
}
func prettyToBytes(v string) uint64 {
var factors = map[string]uint64{
"k": 1 << 10,
"M": 1 << 20,
"G": 1 << 30,
"T": 1 << 40,
"P": 1 << 50,
"E": 1 << 60,
}
var factor uint64
factor = 1
prefix := v[len(v)-1 : len(v)]
if factors[prefix] != 0 {
v = v[:len(v)-1]
factor = factors[prefix]
}
result, _ := strconv.ParseFloat(v, 32)
result = result * float64(factor)
return uint64(result)
}
func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
tags := getTags(bdev)
metrics, err := filepath.Glob(bdev + "/stats_total/*")
if err != nil || len(metrics) == 0 {
return errors.New("Can't read any stats file")
}
file, err := ioutil.ReadFile(bdev + "/dirty_data")
if err != nil {
return err
}
rawValue := strings.TrimSpace(string(file))
value := prettyToBytes(rawValue)
fields := make(map[string]interface{})
fields["dirty_data"] = value
for _, path := range metrics {
key := filepath.Base(path)
file, err := ioutil.ReadFile(path)
if err != nil {
return err
}
rawValue := strings.TrimSpace(string(file))
if key == "bypassed" {
value := prettyToBytes(rawValue)
fields[key] = value
} else {
value, _ := strconv.ParseUint(rawValue, 10, 64)
fields[key] = value
}
}
acc.AddFields("bcache", fields, tags)
return nil
}
func (b *Bcache) Gather(acc telegraf.Accumulator) error {
bcacheDevsChecked := make(map[string]bool)
var restrictDevs bool
if len(b.BcacheDevs) != 0 {
restrictDevs = true
for _, bcacheDev := range b.BcacheDevs {
bcacheDevsChecked[bcacheDev] = true
}
}
bcachePath := b.BcachePath
if len(bcachePath) == 0 {
bcachePath = "/sys/fs/bcache"
}
bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
if len(bdevs) < 1 {
return errors.New("Can't find any bcache device")
}
for _, bdev := range bdevs {
if restrictDevs {
bcacheDev := getTags(bdev)["bcache_dev"]
if !bcacheDevsChecked[bcacheDev] {
continue
}
}
b.gatherBcache(bdev, acc)
}
return nil
}
func init() {
inputs.Add("bcache", func() telegraf.Input {
return &Bcache{}
})
}

@@ -0,0 +1,121 @@
package bcache
import (
"io/ioutil"
"os"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
const (
dirty_data = "1.5G"
bypassed = "4.7T"
cache_bypass_hits = "146155333"
cache_bypass_misses = "0"
cache_hit_ratio = "90"
cache_hits = "511469583"
cache_miss_collisions = "157567"
cache_misses = "50616331"
cache_readaheads = "2"
)
var (
testBcachePath = os.TempDir() + "/telegraf/sys/fs/bcache"
testBcacheUuidPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411"
testBcacheDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0"
testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
)
func TestBcacheGeneratesMetrics(t *testing.T) {
err := os.MkdirAll(testBcacheUuidPath, 0755)
require.NoError(t, err)
err = os.MkdirAll(testBcacheDevPath, 0755)
require.NoError(t, err)
err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755)
require.NoError(t, err)
err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0")
require.NoError(t, err)
err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev")
require.NoError(t, err)
err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
[]byte(dirty_data), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
[]byte(bypassed), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
[]byte(cache_bypass_hits), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
[]byte(cache_bypass_misses), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
[]byte(cache_hit_ratio), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
[]byte(cache_hits), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
[]byte(cache_miss_collisions), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
[]byte(cache_misses), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
[]byte(cache_readaheads), 0644)
require.NoError(t, err)
fields := map[string]interface{}{
"dirty_data": uint64(1610612736),
"bypassed": uint64(5167704440832),
"cache_bypass_hits": uint64(146155333),
"cache_bypass_misses": uint64(0),
"cache_hit_ratio": uint64(90),
"cache_hits": uint64(511469583),
"cache_miss_collisions": uint64(157567),
"cache_misses": uint64(50616331),
"cache_readaheads": uint64(2),
}
tags := map[string]string{
"backing_dev": "md10",
"bcache_dev": "bcache0",
}
var acc testutil.Accumulator
// all devs
b := &Bcache{BcachePath: testBcachePath}
err = b.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t, "bcache", fields, tags)
// one exist dev
b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}}
err = b.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t, "bcache", fields, tags)
err = os.RemoveAll(os.TempDir() + "/telegraf")
require.NoError(t, err)
}

@@ -0,0 +1,205 @@
package disque
import (
"bufio"
"errors"
"fmt"
"net"
"net/url"
"strconv"
"strings"
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Disque struct {
Servers []string
c net.Conn
buf []byte
}
var sampleConfig = `
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
`
func (r *Disque) SampleConfig() string {
return sampleConfig
}
func (r *Disque) Description() string {
return "Read metrics from one or many disque servers"
}
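// Tracking maps field names in the disque "info" response to the metric names
// reported by this plugin.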
var Tracking = map[string]string{
"uptime_in_seconds": "uptime",
"connected_clients": "clients",
"blocked_clients": "blocked_clients",
"used_memory": "used_memory",
"used_memory_rss": "used_memory_rss",
"used_memory_peak": "used_memory_peak",
"total_connections_received": "total_connections_received",
"total_commands_processed": "total_commands_processed",
"instantaneous_ops_per_sec": "instantaneous_ops_per_sec",
"latest_fork_usec": "latest_fork_usec",
"mem_fragmentation_ratio": "mem_fragmentation_ratio",
"used_cpu_sys": "used_cpu_sys",
"used_cpu_user": "used_cpu_user",
"used_cpu_sys_children": "used_cpu_sys_children",
"used_cpu_user_children": "used_cpu_user_children",
"registered_jobs": "registered_jobs",
"registered_queues": "registered_queues",
}
var ErrProtocolError = errors.New("disque protocol error")
// Reads stats from all configured servers and accumulates them.
// Returns one of the errors encountered while gathering stats (if any).
func (g *Disque) Gather(acc telegraf.Accumulator) error {
if len(g.Servers) == 0 {
url := &url.URL{
Host: ":7711",
}
return g.gatherServer(url, acc)
}
var wg sync.WaitGroup
var outerr error
for _, serv := range g.Servers {
u, err := url.Parse(serv)
if err != nil {
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
} else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Scheme = "tcp"
u.Host = serv
u.Path = ""
}
wg.Add(1)
go func(serv string) {
defer wg.Done()
outerr = g.gatherServer(u, acc)
}(serv)
}
wg.Wait()
return outerr
}
const defaultPort = "7711"
func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
if g.c == nil {
_, _, err := net.SplitHostPort(addr.Host)
if err != nil {
addr.Host = addr.Host + ":" + defaultPort
}
c, err := net.Dial("tcp", addr.Host)
if err != nil {
return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err)
}
if addr.User != nil {
pwd, set := addr.User.Password()
if set && pwd != "" {
c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd)))
r := bufio.NewReader(c)
line, err := r.ReadString('\n')
if err != nil {
return err
}
if line[0] != '+' {
return fmt.Errorf("%s", strings.TrimSpace(line)[1:])
}
}
}
g.c = c
}
g.c.Write([]byte("info\r\n"))
r := bufio.NewReader(g.c)
line, err := r.ReadString('\n')
if err != nil {
return err
}
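// The reply to "info" is a bulk string in the Redis protocol: "$<length>\r\n"
// followed by <length> bytes of payload.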
if line[0] != '$' {
return fmt.Errorf("bad line start: %s", ErrProtocolError)
}
line = strings.TrimSpace(line)
szStr := line[1:]
sz, err := strconv.Atoi(szStr)
if err != nil {
return fmt.Errorf("bad size string <<%s>>: %s", szStr, ErrProtocolError)
}
var read int
fields := make(map[string]interface{})
tags := map[string]string{"host": addr.String()}
for read < sz {
line, err := r.ReadString('\n')
if err != nil {
return err
}
read += len(line)
if len(line) == 1 || line[0] == '#' {
continue
}
parts := strings.SplitN(line, ":", 2)
name := string(parts[0])
metric, ok := Tracking[name]
if !ok {
continue
}
val := strings.TrimSpace(parts[1])
ival, err := strconv.ParseUint(val, 10, 64)
if err == nil {
fields[metric] = ival
continue
}
fval, err := strconv.ParseFloat(val, 64)
if err != nil {
return err
}
fields[metric] = fval
}
acc.AddFields("disque", fields, tags)
return nil
}
func init() {
inputs.Add("disque", func() telegraf.Input {
return &Disque{}
})
}

@@ -0,0 +1,217 @@
package disque
import (
"bufio"
"fmt"
"net"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestDisqueGeneratesMetrics(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
l, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)
defer l.Close()
go func() {
c, err := l.Accept()
if err != nil {
return
}
buf := bufio.NewReader(c)
for {
line, err := buf.ReadString('\n')
if err != nil {
return
}
if line != "info\r\n" {
return
}
fmt.Fprintf(c, "$%d\n", len(testOutput))
c.Write([]byte(testOutput))
}
}()
addr := fmt.Sprintf("disque://%s", l.Addr().String())
r := &Disque{
Servers: []string{addr},
}
var acc testutil.Accumulator
err = r.Gather(&acc)
require.NoError(t, err)
fields := map[string]interface{}{
"uptime": uint64(1452705),
"clients": uint64(31),
"blocked_clients": uint64(13),
"used_memory": uint64(1840104),
"used_memory_rss": uint64(3227648),
"used_memory_peak": uint64(89603656),
"total_connections_received": uint64(5062777),
"total_commands_processed": uint64(12308396),
"instantaneous_ops_per_sec": uint64(18),
"latest_fork_usec": uint64(1644),
"registered_jobs": uint64(360),
"registered_queues": uint64(12),
"mem_fragmentation_ratio": float64(1.75),
"used_cpu_sys": float64(19585.73),
"used_cpu_user": float64(11255.96),
"used_cpu_sys_children": float64(1.75),
"used_cpu_user_children": float64(1.91),
}
acc.AssertContainsFields(t, "disque", fields)
}
func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
l, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)
defer l.Close()
go func() {
c, err := l.Accept()
if err != nil {
return
}
buf := bufio.NewReader(c)
for {
line, err := buf.ReadString('\n')
if err != nil {
return
}
if line != "info\r\n" {
return
}
fmt.Fprintf(c, "$%d\n", len(testOutput))
c.Write([]byte(testOutput))
}
}()
addr := fmt.Sprintf("disque://%s", l.Addr().String())
r := &Disque{
Servers: []string{addr},
}
var acc testutil.Accumulator
err = r.Gather(&acc)
require.NoError(t, err)
fields := map[string]interface{}{
"uptime": uint64(1452705),
"clients": uint64(31),
"blocked_clients": uint64(13),
"used_memory": uint64(1840104),
"used_memory_rss": uint64(3227648),
"used_memory_peak": uint64(89603656),
"total_connections_received": uint64(5062777),
"total_commands_processed": uint64(12308396),
"instantaneous_ops_per_sec": uint64(18),
"latest_fork_usec": uint64(1644),
"registered_jobs": uint64(360),
"registered_queues": uint64(12),
"mem_fragmentation_ratio": float64(1.75),
"used_cpu_sys": float64(19585.73),
"used_cpu_user": float64(11255.96),
"used_cpu_sys_children": float64(1.75),
"used_cpu_user_children": float64(1.91),
}
acc.AssertContainsFields(t, "disque", fields)
}
const testOutput = `# Server
disque_version:0.0.1
disque_git_sha1:b5247598
disque_git_dirty:0
disque_build_id:379fda78983a60c6
os:Linux 3.13.0-44-generic x86_64
arch_bits:64
multiplexing_api:epoll
gcc_version:4.8.2
process_id:32420
run_id:1cfdfa4c6bc3f285182db5427522a8a4c16e42e4
tcp_port:7711
uptime_in_seconds:1452705
uptime_in_days:16
hz:10
config_file:/usr/local/etc/disque/disque.conf
# Clients
connected_clients:31
client_longest_output_list:0
client_biggest_input_buf:0
blocked_clients:13
# Memory
used_memory:1840104
used_memory_human:1.75M
used_memory_rss:3227648
used_memory_peak:89603656
used_memory_peak_human:85.45M
mem_fragmentation_ratio:1.75
mem_allocator:jemalloc-3.6.0
# Jobs
registered_jobs:360
# Queues
registered_queues:12
# Persistence
loading:0
aof_enabled:1
aof_state:on
aof_rewrite_in_progress:0
aof_rewrite_scheduled:0
aof_last_rewrite_time_sec:0
aof_current_rewrite_time_sec:-1
aof_last_bgrewrite_status:ok
aof_last_write_status:ok
aof_current_size:41952430
aof_base_size:9808
aof_pending_rewrite:0
aof_buffer_length:0
aof_rewrite_buffer_length:0
aof_pending_bio_fsync:0
aof_delayed_fsync:1
# Stats
total_connections_received:5062777
total_commands_processed:12308396
instantaneous_ops_per_sec:18
total_net_input_bytes:1346996528
total_net_output_bytes:1967551763
instantaneous_input_kbps:1.38
instantaneous_output_kbps:1.78
rejected_connections:0
latest_fork_usec:1644
# CPU
used_cpu_sys:19585.73
used_cpu_user:11255.96
used_cpu_sys_children:1.75
used_cpu_user_children:1.91
`

@@ -0,0 +1,148 @@
# Docker Input Plugin
The docker plugin uses the docker remote API to gather metrics on running
docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage).
The docker plugin uses the excellent
[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to
gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation
for the stat structure can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats).
### Configuration:
```
# Read metrics about docker containers
[[inputs.docker]]
# Docker Endpoint
# To use TCP, set endpoint = "tcp://[ip]:[port]"
# To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
# Only collect metrics for these containers, collect all if empty
container_names = []
```
### Measurements & Fields:
Every effort was made to preserve the names based on the JSON response from the
docker API.
Note that the docker_cpu metric may appear multiple times per collection, based
on the availability of per-cpu stats on your system.
- docker_mem
- total_pgmafault
- cache
- mapped_file
- total_inactive_file
- pgpgout
- rss
- total_mapped_file
- writeback
- unevictable
- pgpgin
- total_unevictable
- pgmajfault
- total_rss
- total_rss_huge
- total_writeback
- total_inactive_anon
- rss_huge
- hierarchical_memory_limit
- total_pgfault
- total_active_file
- active_anon
- total_active_anon
- total_pgpgout
- total_cache
- inactive_anon
- active_file
- pgfault
- inactive_file
- total_pgpgin
- max_usage
- usage
- failcnt
- limit
- docker_cpu
- throttling_periods
- throttling_throttled_periods
- throttling_throttled_time
- usage_in_kernelmode
- usage_in_usermode
- usage_system
- usage_total
- docker_net
- rx_dropped
- rx_bytes
- rx_errors
- tx_packets
- tx_dropped
- rx_packets
- tx_errors
- tx_bytes
- docker_blkio
- io_service_bytes_recursive_async
- io_service_bytes_recursive_read
- io_service_bytes_recursive_sync
- io_service_bytes_recursive_total
- io_service_bytes_recursive_write
- io_serviced_recursive_async
- io_serviced_recursive_read
- io_serviced_recursive_sync
- io_serviced_recursive_total
- io_serviced_recursive_write
### Tags:
- All stats have the following tags:
- cont_id (container ID)
- cont_image (container image)
- cont_name (container name)
- docker_cpu specific:
- cpu
- docker_net specific:
- network
- docker_blkio specific:
- device
### Example Output:
```
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
* Plugin: docker, Collection 1
> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \
throttling_periods=0i,throttling_throttled_periods=0i,\
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
usage_total=6628208865i 1453409536840126713
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \
usage_total=6628208865i 1453409536840126713
> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,network=eth0 \
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,device=8:0 \
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
```

@@ -0,0 +1,318 @@
package system
import (
"fmt"
"log"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/fsouza/go-dockerclient"
)
type Docker struct {
Endpoint string
ContainerNames []string
client *docker.Client
}
var sampleConfig = `
# Docker Endpoint
# To use TCP, set endpoint = "tcp://[ip]:[port]"
# To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
# Only collect metrics for these containers, collect all if empty
container_names = []
`
func (d *Docker) Description() string {
return "Read metrics about docker containers"
}
func (d *Docker) SampleConfig() string { return sampleConfig }
func (d *Docker) Gather(acc telegraf.Accumulator) error {
if d.client == nil {
var c *docker.Client
var err error
if d.Endpoint == "ENV" {
c, err = docker.NewClientFromEnv()
if err != nil {
return err
}
} else if d.Endpoint == "" {
c, err = docker.NewClient("unix:///var/run/docker.sock")
if err != nil {
return err
}
} else {
c, err = docker.NewClient(d.Endpoint)
if err != nil {
return err
}
}
d.client = c
}
opts := docker.ListContainersOptions{}
containers, err := d.client.ListContainers(opts)
if err != nil {
return err
}
var wg sync.WaitGroup
wg.Add(len(containers))
for _, container := range containers {
go func(c docker.APIContainers) {
defer wg.Done()
err := d.gatherContainer(c, acc)
if err != nil {
fmt.Println(err.Error())
}
}(container)
}
wg.Wait()
return nil
}
func (d *Docker) gatherContainer(
container docker.APIContainers,
acc telegraf.Accumulator,
) error {
// Parse container name
cname := "unknown"
if len(container.Names) > 0 {
// Not sure what to do with other names, just take the first.
cname = strings.TrimPrefix(container.Names[0], "/")
}
tags := map[string]string{
"cont_id": container.ID,
"cont_name": cname,
"cont_image": container.Image,
}
if len(d.ContainerNames) > 0 {
if !sliceContains(cname, d.ContainerNames) {
return nil
}
}
statChan := make(chan *docker.Stats)
done := make(chan bool)
statOpts := docker.StatsOptions{
Stream: false,
ID: container.ID,
Stats: statChan,
Done: done,
Timeout: 5 * time.Second,
}
go func() {
err := d.client.Stats(statOpts)
if err != nil {
log.Printf("Error getting docker stats: %s\n", err.Error())
}
}()
stat := <-statChan
close(done)
if stat == nil {
return nil
}
// Add labels to tags
for k, v := range container.Labels {
tags[k] = v
}
gatherContainerStats(stat, acc, tags)
return nil
}
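// gatherContainerStats flattens a docker.Stats payload into the docker_mem,
// docker_cpu, docker_net, and docker_blkio measurements.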
func gatherContainerStats(
stat *docker.Stats,
acc telegraf.Accumulator,
tags map[string]string,
) {
now := stat.Read
memfields := map[string]interface{}{
"max_usage": stat.MemoryStats.MaxUsage,
"usage": stat.MemoryStats.Usage,
"fail_count": stat.MemoryStats.Failcnt,
"limit": stat.MemoryStats.Limit,
"total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault,
"cache": stat.MemoryStats.Stats.Cache,
"mapped_file": stat.MemoryStats.Stats.MappedFile,
"total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile,
"pgpgout": stat.MemoryStats.Stats.Pgpgout,
"rss": stat.MemoryStats.Stats.Rss,
"total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile,
"writeback": stat.MemoryStats.Stats.Writeback,
"unevictable": stat.MemoryStats.Stats.Unevictable,
"pgpgin": stat.MemoryStats.Stats.Pgpgin,
"total_unevictable": stat.MemoryStats.Stats.TotalUnevictable,
"pgmajfault": stat.MemoryStats.Stats.Pgmajfault,
"total_rss": stat.MemoryStats.Stats.TotalRss,
"total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge,
"total_writeback": stat.MemoryStats.Stats.TotalWriteback,
"total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon,
"rss_huge": stat.MemoryStats.Stats.RssHuge,
"hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit,
"total_pgfault": stat.MemoryStats.Stats.TotalPgfault,
"total_active_file": stat.MemoryStats.Stats.TotalActiveFile,
"active_anon": stat.MemoryStats.Stats.ActiveAnon,
"total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon,
"total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout,
"total_cache": stat.MemoryStats.Stats.TotalCache,
"inactive_anon": stat.MemoryStats.Stats.InactiveAnon,
"active_file": stat.MemoryStats.Stats.ActiveFile,
"pgfault": stat.MemoryStats.Stats.Pgfault,
"inactive_file": stat.MemoryStats.Stats.InactiveFile,
"total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin,
}
acc.AddFields("docker_mem", memfields, tags, now)
cpufields := map[string]interface{}{
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
"usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode,
"usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode,
"usage_system": stat.CPUStats.SystemCPUUsage,
"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
}
cputags := copyTags(tags)
cputags["cpu"] = "cpu-total"
acc.AddFields("docker_cpu", cpufields, cputags, now)
for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
percputags := copyTags(tags)
percputags["cpu"] = fmt.Sprintf("cpu%d", i)
acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
}
for network, netstats := range stat.Networks {
netfields := map[string]interface{}{
"rx_dropped": netstats.RxDropped,
"rx_bytes": netstats.RxBytes,
"rx_errors": netstats.RxErrors,
"tx_packets": netstats.TxPackets,
"tx_dropped": netstats.TxDropped,
"rx_packets": netstats.RxPackets,
"tx_errors": netstats.TxErrors,
"tx_bytes": netstats.TxBytes,
}
// Create a new network tag dictionary for the "network" tag
nettags := copyTags(tags)
nettags["network"] = network
acc.AddFields("docker_net", netfields, nettags, now)
}
gatherBlockIOMetrics(stat, acc, tags, now)
}
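// gatherBlockIOMetrics groups the blkio counters by major:minor device number
// so that each device produces a single docker_blkio point.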
func gatherBlockIOMetrics(
stat *docker.Stats,
acc telegraf.Accumulator,
tags map[string]string,
now time.Time,
) {
blkioStats := stat.BlkioStats
// Make a map of devices to their block io stats
deviceStatMap := make(map[string]map[string]interface{})
for _, metric := range blkioStats.IOServiceBytesRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
_, ok := deviceStatMap[device]
if !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for _, metric := range blkioStats.IOServicedRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
_, ok := deviceStatMap[device]
if !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for _, metric := range blkioStats.IOQueueRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
if _, ok := deviceStatMap[device]; !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for _, metric := range blkioStats.IOServiceTimeRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
if _, ok := deviceStatMap[device]; !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for _, metric := range blkioStats.IOWaitTimeRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
if _, ok := deviceStatMap[device]; !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for _, metric := range blkioStats.IOMergedRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
if _, ok := deviceStatMap[device]; !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for _, metric := range blkioStats.IOTimeRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
if _, ok := deviceStatMap[device]; !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for _, metric := range blkioStats.SectorsRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
if _, ok := deviceStatMap[device]; !ok {
deviceStatMap[device] = make(map[string]interface{})
}
field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value
}
for device, fields := range deviceStatMap {
iotags := copyTags(tags)
iotags["device"] = device
acc.AddFields("docker_blkio", fields, iotags, now)
}
}
func copyTags(in map[string]string) map[string]string {
out := make(map[string]string)
for k, v := range in {
out[k] = v
}
return out
}
func sliceContains(in string, sl []string) bool {
for _, str := range sl {
if str == in {
return true
}
}
return false
}
func init() {
inputs.Add("docker", func() telegraf.Input {
return &Docker{}
})
}


@@ -0,0 +1,190 @@
package system
import (
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/fsouza/go-dockerclient"
)
func TestDockerGatherContainerStats(t *testing.T) {
var acc testutil.Accumulator
stats := testStats()
tags := map[string]string{
"cont_id": "foobarbaz",
"cont_name": "redis",
"cont_image": "redis/image",
}
gatherContainerStats(stats, &acc, tags)
// test docker_net measurement
netfields := map[string]interface{}{
"rx_dropped": uint64(1),
"rx_bytes": uint64(2),
"rx_errors": uint64(3),
"tx_packets": uint64(4),
"tx_dropped": uint64(1),
"rx_packets": uint64(2),
"tx_errors": uint64(3),
"tx_bytes": uint64(4),
}
nettags := copyTags(tags)
nettags["network"] = "eth0"
acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags)
// test docker_blkio measurement
blkiotags := copyTags(tags)
blkiotags["device"] = "6:0"
blkiofields := map[string]interface{}{
"io_service_bytes_recursive_read": uint64(100),
"io_serviced_recursive_write": uint64(101),
}
acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags)
// test docker_mem measurement
memfields := map[string]interface{}{
"max_usage": uint64(1001),
"usage": uint64(1111),
"fail_count": uint64(1),
"limit": uint64(20),
"total_pgmafault": uint64(0),
"cache": uint64(0),
"mapped_file": uint64(0),
"total_inactive_file": uint64(0),
"pgpgout": uint64(0),
"rss": uint64(0),
"total_mapped_file": uint64(0),
"writeback": uint64(0),
"unevictable": uint64(0),
"pgpgin": uint64(0),
"total_unevictable": uint64(0),
"pgmajfault": uint64(0),
"total_rss": uint64(44),
"total_rss_huge": uint64(444),
"total_writeback": uint64(55),
"total_inactive_anon": uint64(0),
"rss_huge": uint64(0),
"hierarchical_memory_limit": uint64(0),
"total_pgfault": uint64(0),
"total_active_file": uint64(0),
"active_anon": uint64(0),
"total_active_anon": uint64(0),
"total_pgpgout": uint64(0),
"total_cache": uint64(0),
"inactive_anon": uint64(0),
"active_file": uint64(1),
"pgfault": uint64(2),
"inactive_file": uint64(3),
"total_pgpgin": uint64(4),
}
acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)
// test docker_cpu measurement
cputags := copyTags(tags)
cputags["cpu"] = "cpu-total"
cpufields := map[string]interface{}{
"usage_total": uint64(500),
"usage_in_usermode": uint64(100),
"usage_in_kernelmode": uint64(200),
"usage_system": uint64(100),
"throttling_periods": uint64(1),
"throttling_throttled_periods": uint64(0),
"throttling_throttled_time": uint64(0),
}
acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)
cputags["cpu"] = "cpu0"
cpu0fields := map[string]interface{}{
"usage_total": uint64(1),
}
acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags)
cputags["cpu"] = "cpu1"
cpu1fields := map[string]interface{}{
"usage_total": uint64(1002),
}
acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
}
func testStats() *docker.Stats {
stats := &docker.Stats{
Read: time.Now(),
Networks: make(map[string]docker.NetworkStats),
}
stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
stats.CPUStats.CPUUsage.UsageInUsermode = 100
stats.CPUStats.CPUUsage.TotalUsage = 500
stats.CPUStats.CPUUsage.UsageInKernelmode = 200
stats.CPUStats.SystemCPUUsage = 100
stats.CPUStats.ThrottlingData.Periods = 1
stats.MemoryStats.Stats.TotalPgmafault = 0
stats.MemoryStats.Stats.Cache = 0
stats.MemoryStats.Stats.MappedFile = 0
stats.MemoryStats.Stats.TotalInactiveFile = 0
stats.MemoryStats.Stats.Pgpgout = 0
stats.MemoryStats.Stats.Rss = 0
stats.MemoryStats.Stats.TotalMappedFile = 0
stats.MemoryStats.Stats.Writeback = 0
stats.MemoryStats.Stats.Unevictable = 0
stats.MemoryStats.Stats.Pgpgin = 0
stats.MemoryStats.Stats.TotalUnevictable = 0
stats.MemoryStats.Stats.Pgmajfault = 0
stats.MemoryStats.Stats.TotalRss = 44
stats.MemoryStats.Stats.TotalRssHuge = 444
stats.MemoryStats.Stats.TotalWriteback = 55
stats.MemoryStats.Stats.TotalInactiveAnon = 0
stats.MemoryStats.Stats.RssHuge = 0
stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0
stats.MemoryStats.Stats.TotalPgfault = 0
stats.MemoryStats.Stats.TotalActiveFile = 0
stats.MemoryStats.Stats.ActiveAnon = 0
stats.MemoryStats.Stats.TotalActiveAnon = 0
stats.MemoryStats.Stats.TotalPgpgout = 0
stats.MemoryStats.Stats.TotalCache = 0
stats.MemoryStats.Stats.InactiveAnon = 0
stats.MemoryStats.Stats.ActiveFile = 1
stats.MemoryStats.Stats.Pgfault = 2
stats.MemoryStats.Stats.InactiveFile = 3
stats.MemoryStats.Stats.TotalPgpgin = 4
stats.MemoryStats.MaxUsage = 1001
stats.MemoryStats.Usage = 1111
stats.MemoryStats.Failcnt = 1
stats.MemoryStats.Limit = 20
stats.Networks["eth0"] = docker.NetworkStats{
RxDropped: 1,
RxBytes: 2,
RxErrors: 3,
TxPackets: 4,
TxDropped: 1,
RxPackets: 2,
TxErrors: 3,
TxBytes: 4,
}
sbr := docker.BlkioStatsEntry{
Major: 6,
Minor: 0,
Op: "read",
Value: 100,
}
sr := docker.BlkioStatsEntry{
Major: 6,
Minor: 0,
Op: "write",
Value: 101,
}
stats.BlkioStats.IOServiceBytesRecursive = append(
stats.BlkioStats.IOServiceBytesRecursive, sbr)
stats.BlkioStats.IOServicedRecursive = append(
stats.BlkioStats.IOServicedRecursive, sr)
return stats
}


@@ -0,0 +1,320 @@
# Elasticsearch plugin
#### Plugin arguments:
- **servers** []string: list of one or more Elasticsearch servers
- **local** boolean: if true, read stats only from the connected node; if false, read the indices stats from all nodes
- **cluster_health** boolean: If true, it will also obtain cluster level stats
#### Description
The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.
Example:
```
[elasticsearch]
servers = ["http://localhost:9200"]
local = true
cluster_health = true
```
# Measurements
#### cluster measurements (utilizes fields instead of single values):
- elasticsearch_cluster_health
  contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`,
  `active_primary_shards`, `active_shards`, `relocating_shards`,
  `initializing_shards`, `unassigned_shards` fields
- elasticsearch_indices
  contains `status`, `number_of_shards`, `number_of_replicas`,
  `active_primary_shards`, `active_shards`, `relocating_shards`,
  `initializing_shards`, `unassigned_shards` fields
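For example, with `cluster_health = true` a single gather might emit a point
like the following (hypothetical values, shown in line protocol):
```
elasticsearch_cluster_health,name=es-testcluster status="green",timed_out=false,number_of_nodes=3i,number_of_data_nodes=3i,active_primary_shards=5i,active_shards=15i,relocating_shards=0i,initializing_shards=0i,unassigned_shards=0i 1452815002357578567
```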
#### node measurements:
field data circuit breaker measurement names:
- elasticsearch_breakers_fielddata_estimated_size_in_bytes value=0
- elasticsearch_breakers_fielddata_overhead value=1.03
- elasticsearch_breakers_fielddata_tripped value=0
- elasticsearch_breakers_fielddata_limit_size_in_bytes value=623326003
- elasticsearch_breakers_request_estimated_size_in_bytes value=0
- elasticsearch_breakers_request_overhead value=1.0
- elasticsearch_breakers_request_tripped value=0
- elasticsearch_breakers_request_limit_size_in_bytes value=415550668
- elasticsearch_breakers_parent_overhead value=1.0
- elasticsearch_breakers_parent_tripped value=0
- elasticsearch_breakers_parent_limit_size_in_bytes value=727213670
- elasticsearch_breakers_parent_estimated_size_in_bytes value=0
File system information, data path, free disk space, read/write measurement names:
- elasticsearch_fs_timestamp value=1436460392946
- elasticsearch_fs_total_free_in_bytes value=16909316096
- elasticsearch_fs_total_available_in_bytes value=15894814720
- elasticsearch_fs_total_total_in_bytes value=19507089408
indices size, document count, indexing and deletion times, search times,
field cache size, merges and flushes measurement names:
- elasticsearch_indices_id_cache_memory_size_in_bytes value=0
- elasticsearch_indices_completion_size_in_bytes value=0
- elasticsearch_indices_suggest_total value=0
- elasticsearch_indices_suggest_time_in_millis value=0
- elasticsearch_indices_suggest_current value=0
- elasticsearch_indices_query_cache_memory_size_in_bytes value=0
- elasticsearch_indices_query_cache_evictions value=0
- elasticsearch_indices_query_cache_hit_count value=0
- elasticsearch_indices_query_cache_miss_count value=0
- elasticsearch_indices_store_size_in_bytes value=37715234
- elasticsearch_indices_store_throttle_time_in_millis value=215
- elasticsearch_indices_merges_current_docs value=0
- elasticsearch_indices_merges_current_size_in_bytes value=0
- elasticsearch_indices_merges_total value=133
- elasticsearch_indices_merges_total_time_in_millis value=21060
- elasticsearch_indices_merges_total_docs value=203672
- elasticsearch_indices_merges_total_size_in_bytes value=142900226
- elasticsearch_indices_merges_current value=0
- elasticsearch_indices_filter_cache_memory_size_in_bytes value=7384
- elasticsearch_indices_filter_cache_evictions value=0
- elasticsearch_indices_indexing_index_total value=84790
- elasticsearch_indices_indexing_index_time_in_millis value=29680
- elasticsearch_indices_indexing_index_current value=0
- elasticsearch_indices_indexing_noop_update_total value=0
- elasticsearch_indices_indexing_throttle_time_in_millis value=0
- elasticsearch_indices_indexing_delete_total value=13879
- elasticsearch_indices_indexing_delete_time_in_millis value=1139
- elasticsearch_indices_indexing_delete_current value=0
- elasticsearch_indices_get_exists_time_in_millis value=0
- elasticsearch_indices_get_missing_total value=1
- elasticsearch_indices_get_missing_time_in_millis value=2
- elasticsearch_indices_get_current value=0
- elasticsearch_indices_get_total value=1
- elasticsearch_indices_get_time_in_millis value=2
- elasticsearch_indices_get_exists_total value=0
- elasticsearch_indices_refresh_total value=1076
- elasticsearch_indices_refresh_total_time_in_millis value=20078
- elasticsearch_indices_percolate_current value=0
- elasticsearch_indices_percolate_memory_size_in_bytes value=-1
- elasticsearch_indices_percolate_queries value=0
- elasticsearch_indices_percolate_total value=0
- elasticsearch_indices_percolate_time_in_millis value=0
- elasticsearch_indices_translog_operations value=17702
- elasticsearch_indices_translog_size_in_bytes value=17
- elasticsearch_indices_recovery_current_as_source value=0
- elasticsearch_indices_recovery_current_as_target value=0
- elasticsearch_indices_recovery_throttle_time_in_millis value=0
- elasticsearch_indices_docs_count value=29652
- elasticsearch_indices_docs_deleted value=5229
- elasticsearch_indices_flush_total_time_in_millis value=2401
- elasticsearch_indices_flush_total value=115
- elasticsearch_indices_fielddata_memory_size_in_bytes value=12996
- elasticsearch_indices_fielddata_evictions value=0
- elasticsearch_indices_search_fetch_current value=0
- elasticsearch_indices_search_open_contexts value=0
- elasticsearch_indices_search_query_total value=1452
- elasticsearch_indices_search_query_time_in_millis value=5695
- elasticsearch_indices_search_query_current value=0
- elasticsearch_indices_search_fetch_total value=414
- elasticsearch_indices_search_fetch_time_in_millis value=146
- elasticsearch_indices_warmer_current value=0
- elasticsearch_indices_warmer_total value=2319
- elasticsearch_indices_warmer_total_time_in_millis value=448
- elasticsearch_indices_segments_count value=134
- elasticsearch_indices_segments_memory_in_bytes value=1285212
- elasticsearch_indices_segments_index_writer_memory_in_bytes value=0
- elasticsearch_indices_segments_index_writer_max_memory_in_bytes value=172368955
- elasticsearch_indices_segments_version_map_memory_in_bytes value=611844
- elasticsearch_indices_segments_fixed_bit_set_memory_in_bytes value=0
HTTP connection measurement names:
- elasticsearch_http_current_open value=3
- elasticsearch_http_total_opened value=3
JVM stats, memory pool information, garbage collection, buffer pools measurement names:
- elasticsearch_jvm_timestamp value=1436460392945
- elasticsearch_jvm_uptime_in_millis value=202245
- elasticsearch_jvm_mem_non_heap_used_in_bytes value=39634576
- elasticsearch_jvm_mem_non_heap_committed_in_bytes value=40841216
- elasticsearch_jvm_mem_pools_young_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_peak_used_in_bytes value=71630848
- elasticsearch_jvm_mem_pools_young_peak_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_used_in_bytes value=32685760
- elasticsearch_jvm_mem_pools_survivor_peak_used_in_bytes value=8912888
- elasticsearch_jvm_mem_pools_survivor_peak_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_survivor_used_in_bytes value=8912880
- elasticsearch_jvm_mem_pools_survivor_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_old_peak_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_used_in_bytes value=11110928
- elasticsearch_jvm_mem_pools_old_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_peak_used_in_bytes value=14354608
- elasticsearch_jvm_mem_heap_used_in_bytes value=52709568
- elasticsearch_jvm_mem_heap_used_percent value=5
- elasticsearch_jvm_mem_heap_committed_in_bytes value=259522560
- elasticsearch_jvm_mem_heap_max_in_bytes value=1038876672
- elasticsearch_jvm_threads_peak_count value=45
- elasticsearch_jvm_threads_count value=44
- elasticsearch_jvm_gc_collectors_young_collection_count value=2
- elasticsearch_jvm_gc_collectors_young_collection_time_in_millis value=98
- elasticsearch_jvm_gc_collectors_old_collection_count value=1
- elasticsearch_jvm_gc_collectors_old_collection_time_in_millis value=24
- elasticsearch_jvm_buffer_pools_direct_count value=40
- elasticsearch_jvm_buffer_pools_direct_used_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_direct_total_capacity_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_mapped_count value=0
- elasticsearch_jvm_buffer_pools_mapped_used_in_bytes value=0
- elasticsearch_jvm_buffer_pools_mapped_total_capacity_in_bytes value=0
TCP information measurement names:
- elasticsearch_network_tcp_in_errs value=0
- elasticsearch_network_tcp_passive_opens value=16
- elasticsearch_network_tcp_curr_estab value=29
- elasticsearch_network_tcp_in_segs value=113
- elasticsearch_network_tcp_out_segs value=97
- elasticsearch_network_tcp_retrans_segs value=0
- elasticsearch_network_tcp_attempt_fails value=0
- elasticsearch_network_tcp_active_opens value=13
- elasticsearch_network_tcp_estab_resets value=0
- elasticsearch_network_tcp_out_rsts value=0
Operating system stats, load average, cpu, mem, swap measurement names:
- elasticsearch_os_swap_used_in_bytes value=0
- elasticsearch_os_swap_free_in_bytes value=487997440
- elasticsearch_os_timestamp value=1436460392944
- elasticsearch_os_uptime_in_millis value=25092
- elasticsearch_os_cpu_sys value=0
- elasticsearch_os_cpu_user value=0
- elasticsearch_os_cpu_idle value=99
- elasticsearch_os_cpu_usage value=0
- elasticsearch_os_cpu_stolen value=0
- elasticsearch_os_mem_free_percent value=74
- elasticsearch_os_mem_used_percent value=25
- elasticsearch_os_mem_actual_free_in_bytes value=1565470720
- elasticsearch_os_mem_actual_used_in_bytes value=534159360
- elasticsearch_os_mem_free_in_bytes value=477761536
- elasticsearch_os_mem_used_in_bytes value=1621868544
Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
- elasticsearch_process_mem_resident_in_bytes value=246382592
- elasticsearch_process_mem_share_in_bytes value=18747392
- elasticsearch_process_mem_total_virtual_in_bytes value=4747890688
- elasticsearch_process_timestamp value=1436460392945
- elasticsearch_process_open_file_descriptors value=160
- elasticsearch_process_cpu_total_in_millis value=15480
- elasticsearch_process_cpu_percent value=2
- elasticsearch_process_cpu_sys_in_millis value=1870
- elasticsearch_process_cpu_user_in_millis value=13610
Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
- elasticsearch_thread_pool_merge_threads value=6
- elasticsearch_thread_pool_merge_queue value=4
- elasticsearch_thread_pool_merge_active value=5
- elasticsearch_thread_pool_merge_rejected value=2
- elasticsearch_thread_pool_merge_largest value=5
- elasticsearch_thread_pool_merge_completed value=1
- elasticsearch_thread_pool_bulk_threads value=4
- elasticsearch_thread_pool_bulk_queue value=5
- elasticsearch_thread_pool_bulk_active value=7
- elasticsearch_thread_pool_bulk_rejected value=3
- elasticsearch_thread_pool_bulk_largest value=1
- elasticsearch_thread_pool_bulk_completed value=4
- elasticsearch_thread_pool_warmer_threads value=2
- elasticsearch_thread_pool_warmer_queue value=7
- elasticsearch_thread_pool_warmer_active value=3
- elasticsearch_thread_pool_warmer_rejected value=2
- elasticsearch_thread_pool_warmer_largest value=3
- elasticsearch_thread_pool_warmer_completed value=1
- elasticsearch_thread_pool_get_largest value=2
- elasticsearch_thread_pool_get_completed value=1
- elasticsearch_thread_pool_get_threads value=1
- elasticsearch_thread_pool_get_queue value=8
- elasticsearch_thread_pool_get_active value=4
- elasticsearch_thread_pool_get_rejected value=3
- elasticsearch_thread_pool_index_threads value=6
- elasticsearch_thread_pool_index_queue value=8
- elasticsearch_thread_pool_index_active value=4
- elasticsearch_thread_pool_index_rejected value=2
- elasticsearch_thread_pool_index_largest value=3
- elasticsearch_thread_pool_index_completed value=6
- elasticsearch_thread_pool_suggest_threads value=2
- elasticsearch_thread_pool_suggest_queue value=7
- elasticsearch_thread_pool_suggest_active value=2
- elasticsearch_thread_pool_suggest_rejected value=1
- elasticsearch_thread_pool_suggest_largest value=8
- elasticsearch_thread_pool_suggest_completed value=3
- elasticsearch_thread_pool_fetch_shard_store_queue value=7
- elasticsearch_thread_pool_fetch_shard_store_active value=4
- elasticsearch_thread_pool_fetch_shard_store_rejected value=2
- elasticsearch_thread_pool_fetch_shard_store_largest value=4
- elasticsearch_thread_pool_fetch_shard_store_completed value=1
- elasticsearch_thread_pool_fetch_shard_store_threads value=1
- elasticsearch_thread_pool_management_threads value=2
- elasticsearch_thread_pool_management_queue value=3
- elasticsearch_thread_pool_management_active value=1
- elasticsearch_thread_pool_management_rejected value=6
- elasticsearch_thread_pool_management_largest value=2
- elasticsearch_thread_pool_management_completed value=22
- elasticsearch_thread_pool_percolate_queue value=23
- elasticsearch_thread_pool_percolate_active value=13
- elasticsearch_thread_pool_percolate_rejected value=235
- elasticsearch_thread_pool_percolate_largest value=23
- elasticsearch_thread_pool_percolate_completed value=33
- elasticsearch_thread_pool_percolate_threads value=123
- elasticsearch_thread_pool_listener_active value=4
- elasticsearch_thread_pool_listener_rejected value=8
- elasticsearch_thread_pool_listener_largest value=1
- elasticsearch_thread_pool_listener_completed value=1
- elasticsearch_thread_pool_listener_threads value=1
- elasticsearch_thread_pool_listener_queue value=2
- elasticsearch_thread_pool_search_rejected value=7
- elasticsearch_thread_pool_search_largest value=2
- elasticsearch_thread_pool_search_completed value=4
- elasticsearch_thread_pool_search_threads value=5
- elasticsearch_thread_pool_search_queue value=7
- elasticsearch_thread_pool_search_active value=2
- elasticsearch_thread_pool_fetch_shard_started_threads value=3
- elasticsearch_thread_pool_fetch_shard_started_queue value=1
- elasticsearch_thread_pool_fetch_shard_started_active value=5
- elasticsearch_thread_pool_fetch_shard_started_rejected value=6
- elasticsearch_thread_pool_fetch_shard_started_largest value=4
- elasticsearch_thread_pool_fetch_shard_started_completed value=54
- elasticsearch_thread_pool_refresh_rejected value=4
- elasticsearch_thread_pool_refresh_largest value=8
- elasticsearch_thread_pool_refresh_completed value=3
- elasticsearch_thread_pool_refresh_threads value=23
- elasticsearch_thread_pool_refresh_queue value=7
- elasticsearch_thread_pool_refresh_active value=3
- elasticsearch_thread_pool_optimize_threads value=3
- elasticsearch_thread_pool_optimize_queue value=4
- elasticsearch_thread_pool_optimize_active value=1
- elasticsearch_thread_pool_optimize_rejected value=2
- elasticsearch_thread_pool_optimize_largest value=7
- elasticsearch_thread_pool_optimize_completed value=3
- elasticsearch_thread_pool_snapshot_largest value=1
- elasticsearch_thread_pool_snapshot_completed value=0
- elasticsearch_thread_pool_snapshot_threads value=8
- elasticsearch_thread_pool_snapshot_queue value=5
- elasticsearch_thread_pool_snapshot_active value=6
- elasticsearch_thread_pool_snapshot_rejected value=2
- elasticsearch_thread_pool_generic_threads value=1
- elasticsearch_thread_pool_generic_queue value=4
- elasticsearch_thread_pool_generic_active value=6
- elasticsearch_thread_pool_generic_rejected value=3
- elasticsearch_thread_pool_generic_largest value=2
- elasticsearch_thread_pool_generic_completed value=27
- elasticsearch_thread_pool_flush_threads value=3
- elasticsearch_thread_pool_flush_queue value=8
- elasticsearch_thread_pool_flush_active value=0
- elasticsearch_thread_pool_flush_rejected value=1
- elasticsearch_thread_pool_flush_largest value=5
- elasticsearch_thread_pool_flush_completed value=3
Transport statistics about sent and received bytes in cluster communication measurement names:
- elasticsearch_transport_server_open value=13
- elasticsearch_transport_rx_count value=6
- elasticsearch_transport_rx_size_in_bytes value=1380
- elasticsearch_transport_tx_count value=6
- elasticsearch_transport_tx_size_in_bytes value=1380


@@ -0,0 +1,250 @@
package elasticsearch
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
const statsPath = "/_nodes/stats"
const statsPathLocal = "/_nodes/_local/stats"
const healthPath = "/_cluster/health"
type node struct {
Host string `json:"host"`
Name string `json:"name"`
Attributes map[string]string `json:"attributes"`
Indices interface{} `json:"indices"`
OS interface{} `json:"os"`
Process interface{} `json:"process"`
JVM interface{} `json:"jvm"`
ThreadPool interface{} `json:"thread_pool"`
FS interface{} `json:"fs"`
Transport interface{} `json:"transport"`
HTTP interface{} `json:"http"`
Breakers interface{} `json:"breakers"`
}
type clusterHealth struct {
ClusterName string `json:"cluster_name"`
Status string `json:"status"`
TimedOut bool `json:"timed_out"`
NumberOfNodes int `json:"number_of_nodes"`
NumberOfDataNodes int `json:"number_of_data_nodes"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
UnassignedShards int `json:"unassigned_shards"`
Indices map[string]indexHealth `json:"indices"`
}
type indexHealth struct {
Status string `json:"status"`
NumberOfShards int `json:"number_of_shards"`
NumberOfReplicas int `json:"number_of_replicas"`
ActivePrimaryShards int `json:"active_primary_shards"`
ActiveShards int `json:"active_shards"`
RelocatingShards int `json:"relocating_shards"`
InitializingShards int `json:"initializing_shards"`
UnassignedShards int `json:"unassigned_shards"`
}
const sampleConfig = `
# specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
# set local to false when you want to read the indices stats from all nodes
# within the cluster
local = true
# set cluster_health to true when you want to also obtain cluster level stats
cluster_health = false
`
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
Local bool
Servers []string
ClusterHealth bool
client *http.Client
}
// NewElasticsearch returns a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
return &Elasticsearch{client: http.DefaultClient}
}
// SampleConfig returns sample configuration for this plugin.
func (e *Elasticsearch) SampleConfig() string {
return sampleConfig
}
// Description returns the plugin description.
func (e *Elasticsearch) Description() string {
return "Read stats from one or more Elasticsearch servers or clusters"
}
// Gather reads the stats from Elasticsearch and writes them to the
// Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
errChan := make(chan error, len(e.Servers))
var wg sync.WaitGroup
wg.Add(len(e.Servers))
for _, serv := range e.Servers {
go func(s string, acc telegraf.Accumulator) {
defer wg.Done()
var url string
if e.Local {
url = s + statsPathLocal
} else {
url = s + statsPath
}
if err := e.gatherNodeStats(url, acc); err != nil {
errChan <- err
return
}
if e.ClusterHealth {
if err := e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc); err != nil {
errChan <- err
}
}
}(serv, acc)
}
wg.Wait()
close(errChan)
// Get all errors and return them as one giant error
errStrings := []string{}
for err := range errChan {
errStrings = append(errStrings, err.Error())
}
if len(errStrings) == 0 {
return nil
}
return errors.New(strings.Join(errStrings, "\n"))
}
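// gatherNodeStats decodes the node stats response and, for each node, flattens
// every stats category into fields, emitting one measurement per category
// (e.g. elasticsearch_jvm, elasticsearch_indices).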
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
Nodes map[string]*node `json:"nodes"`
}{}
if err := e.gatherData(url, nodeStats); err != nil {
return err
}
for id, n := range nodeStats.Nodes {
tags := map[string]string{
"node_id": id,
"node_host": n.Host,
"node_name": n.Name,
"cluster_name": nodeStats.ClusterName,
}
for k, v := range n.Attributes {
tags["node_attribute_"+k] = v
}
stats := map[string]interface{}{
"indices": n.Indices,
"os": n.OS,
"process": n.Process,
"jvm": n.JVM,
"thread_pool": n.ThreadPool,
"fs": n.FS,
"transport": n.Transport,
"http": n.HTTP,
"breakers": n.Breakers,
}
now := time.Now()
for p, s := range stats {
f := internal.JSONFlattener{}
err := f.FlattenJSON("", s)
if err != nil {
return err
}
acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
}
}
return nil
}
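// gatherClusterStats decodes the cluster health response and emits an
// elasticsearch_cluster_health point plus one elasticsearch_indices point per
// index.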
func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
clusterStats := &clusterHealth{}
if err := e.gatherData(url, clusterStats); err != nil {
return err
}
measurementTime := time.Now()
clusterFields := map[string]interface{}{
"status": clusterStats.Status,
"timed_out": clusterStats.TimedOut,
"number_of_nodes": clusterStats.NumberOfNodes,
"number_of_data_nodes": clusterStats.NumberOfDataNodes,
"active_primary_shards": clusterStats.ActivePrimaryShards,
"active_shards": clusterStats.ActiveShards,
"relocating_shards": clusterStats.RelocatingShards,
"initializing_shards": clusterStats.InitializingShards,
"unassigned_shards": clusterStats.UnassignedShards,
}
acc.AddFields(
"elasticsearch_cluster_health",
clusterFields,
map[string]string{"name": clusterStats.ClusterName},
measurementTime,
)
for name, health := range clusterStats.Indices {
indexFields := map[string]interface{}{
"status": health.Status,
"number_of_shards": health.NumberOfShards,
"number_of_replicas": health.NumberOfReplicas,
"active_primary_shards": health.ActivePrimaryShards,
"active_shards": health.ActiveShards,
"relocating_shards": health.RelocatingShards,
"initializing_shards": health.InitializingShards,
"unassigned_shards": health.UnassignedShards,
}
acc.AddFields(
"elasticsearch_indices",
indexFields,
map[string]string{"index": name},
measurementTime,
)
}
return nil
}
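// gatherData performs an HTTP GET against the given url and JSON-decodes the
// response body into v, returning an error for any non-200 status code.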
func (e *Elasticsearch) gatherData(url string, v interface{}) error {
r, err := e.client.Get(url)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
// to let the underlying transport close the connection and re-establish a new one for
// future calls.
return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
r.StatusCode, http.StatusOK)
}
if err = json.NewDecoder(r.Body).Decode(v); err != nil {
return err
}
return nil
}
func init() {
inputs.Add("elasticsearch", func() telegraf.Input {
return NewElasticsearch()
})
}


@@ -0,0 +1,86 @@
package elasticsearch
import (
"io/ioutil"
"net/http"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
type transportMock struct {
statusCode int
body string
}
func newTransportMock(statusCode int, body string) http.RoundTripper {
return &transportMock{
statusCode: statusCode,
body: body,
}
}
func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
res := &http.Response{
Header: make(http.Header),
Request: r,
StatusCode: t.statusCode,
}
res.Header.Set("Content-Type", "application/json")
res.Body = ioutil.NopCloser(strings.NewReader(t.body))
return res, nil
}
func TestElasticsearch(t *testing.T) {
es := NewElasticsearch()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, statsResponse)
var acc testutil.Accumulator
if err := es.Gather(&acc); err != nil {
t.Fatal(err)
}
tags := map[string]string{
"cluster_name": "es-testcluster",
"node_attribute_master": "true",
"node_id": "SDFsfSDFsdfFSDSDfSFDSDF",
"node_name": "test.host.com",
"node_host": "test",
}
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
}
func TestGatherClusterStats(t *testing.T) {
es := NewElasticsearch()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)
var acc testutil.Accumulator
require.NoError(t, es.Gather(&acc))
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
v1IndexExpected,
map[string]string{"index": "v1"})
acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}


@@ -0,0 +1,765 @@
package elasticsearch
const clusterResponse = `
{
"cluster_name": "elasticsearch_telegraf",
"status": "green",
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
"indices": {
"v1": {
"status": "green",
"number_of_shards": 10,
"number_of_replicas": 1,
"active_primary_shards": 10,
"active_shards": 20,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0
},
"v2": {
"status": "red",
"number_of_shards": 10,
"number_of_replicas": 1,
"active_primary_shards": 0,
"active_shards": 0,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 20
}
}
}
`
var clusterHealthExpected = map[string]interface{}{
"status": "green",
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
}
var v1IndexExpected = map[string]interface{}{
"status": "green",
"number_of_shards": 10,
"number_of_replicas": 1,
"active_primary_shards": 10,
"active_shards": 20,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0,
}
var v2IndexExpected = map[string]interface{}{
"status": "red",
"number_of_shards": 10,
"number_of_replicas": 1,
"active_primary_shards": 0,
"active_shards": 0,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 20,
}
const statsResponse = `
{
"cluster_name": "es-testcluster",
"nodes": {
"SDFsfSDFsdfFSDSDfSFDSDF": {
"timestamp": 1436365550135,
"name": "test.host.com",
"transport_address": "inet[/127.0.0.1:9300]",
"host": "test",
"ip": [
"inet[/127.0.0.1:9300]",
"NONE"
],
"attributes": {
"master": "true"
},
"indices": {
"docs": {
"count": 29652,
"deleted": 5229
},
"store": {
"size_in_bytes": 37715234,
"throttle_time_in_millis": 215
},
"indexing": {
"index_total": 84790,
"index_time_in_millis": 29680,
"index_current": 0,
"delete_total": 13879,
"delete_time_in_millis": 1139,
"delete_current": 0,
"noop_update_total": 0,
"is_throttled": false,
"throttle_time_in_millis": 0
},
"get": {
"total": 1,
"time_in_millis": 2,
"exists_total": 0,
"exists_time_in_millis": 0,
"missing_total": 1,
"missing_time_in_millis": 2,
"current": 0
},
"search": {
"open_contexts": 0,
"query_total": 1452,
"query_time_in_millis": 5695,
"query_current": 0,
"fetch_total": 414,
"fetch_time_in_millis": 146,
"fetch_current": 0
},
"merges": {
"current": 0,
"current_docs": 0,
"current_size_in_bytes": 0,
"total": 133,
"total_time_in_millis": 21060,
"total_docs": 203672,
"total_size_in_bytes": 142900226
},
"refresh": {
"total": 1076,
"total_time_in_millis": 20078
},
"flush": {
"total": 115,
"total_time_in_millis": 2401
},
"warmer": {
"current": 0,
"total": 2319,
"total_time_in_millis": 448
},
"filter_cache": {
"memory_size_in_bytes": 7384,
"evictions": 0
},
"id_cache": {
"memory_size_in_bytes": 0
},
"fielddata": {
"memory_size_in_bytes": 12996,
"evictions": 0
},
"percolate": {
"total": 0,
"time_in_millis": 0,
"current": 0,
"memory_size_in_bytes": -1,
"memory_size": "-1b",
"queries": 0
},
"completion": {
"size_in_bytes": 0
},
"segments": {
"count": 134,
"memory_in_bytes": 1285212,
"index_writer_memory_in_bytes": 0,
"index_writer_max_memory_in_bytes": 172368955,
"version_map_memory_in_bytes": 611844,
"fixed_bit_set_memory_in_bytes": 0
},
"translog": {
"operations": 17702,
"size_in_bytes": 17
},
"suggest": {
"total": 0,
"time_in_millis": 0,
"current": 0
},
"query_cache": {
"memory_size_in_bytes": 0,
"evictions": 0,
"hit_count": 0,
"miss_count": 0
},
"recovery": {
"current_as_source": 0,
"current_as_target": 0,
"throttle_time_in_millis": 0
}
},
"os": {
"timestamp": 1436460392944,
"load_average": [
0.01,
0.04,
0.05
],
"mem": {
"free_in_bytes": 477761536,
"used_in_bytes": 1621868544,
"free_percent": 74,
"used_percent": 25,
"actual_free_in_bytes": 1565470720,
"actual_used_in_bytes": 534159360
},
"swap": {
"used_in_bytes": 0,
"free_in_bytes": 487997440
}
},
"process": {
"timestamp": 1436460392945,
"open_file_descriptors": 160,
"cpu": {
"percent": 2,
"sys_in_millis": 1870,
"user_in_millis": 13610,
"total_in_millis": 15480
},
"mem": {
"total_virtual_in_bytes": 4747890688
}
},
"jvm": {
"timestamp": 1436460392945,
"uptime_in_millis": 202245,
"mem": {
"heap_used_in_bytes": 52709568,
"heap_used_percent": 5,
"heap_committed_in_bytes": 259522560,
"heap_max_in_bytes": 1038876672,
"non_heap_used_in_bytes": 39634576,
"non_heap_committed_in_bytes": 40841216,
"pools": {
"young": {
"used_in_bytes": 32685760,
"max_in_bytes": 279183360,
"peak_used_in_bytes": 71630848,
"peak_max_in_bytes": 279183360
},
"survivor": {
"used_in_bytes": 8912880,
"max_in_bytes": 34865152,
"peak_used_in_bytes": 8912888,
"peak_max_in_bytes": 34865152
},
"old": {
"used_in_bytes": 11110928,
"max_in_bytes": 724828160,
"peak_used_in_bytes": 14354608,
"peak_max_in_bytes": 724828160
}
}
},
"threads": {
"count": 44,
"peak_count": 45
},
"gc": {
"collectors": {
"young": {
"collection_count": 2,
"collection_time_in_millis": 98
},
"old": {
"collection_count": 1,
"collection_time_in_millis": 24
}
}
},
"buffer_pools": {
"direct": {
"count": 40,
"used_in_bytes": 6304239,
"total_capacity_in_bytes": 6304239
},
"mapped": {
"count": 0,
"used_in_bytes": 0,
"total_capacity_in_bytes": 0
}
}
},
"thread_pool": {
"percolate": {
"threads": 123,
"queue": 23,
"active": 13,
"rejected": 235,
"largest": 23,
"completed": 33
},
"fetch_shard_started": {
"threads": 3,
"queue": 1,
"active": 5,
"rejected": 6,
"largest": 4,
"completed": 54
},
"listener": {
"threads": 1,
"queue": 2,
"active": 4,
"rejected": 8,
"largest": 1,
"completed": 1
},
"index": {
"threads": 6,
"queue": 8,
"active": 4,
"rejected": 2,
"largest": 3,
"completed": 6
},
"refresh": {
"threads": 23,
"queue": 7,
"active": 3,
"rejected": 4,
"largest": 8,
"completed": 3
},
"suggest": {
"threads": 2,
"queue": 7,
"active": 2,
"rejected": 1,
"largest": 8,
"completed": 3
},
"generic": {
"threads": 1,
"queue": 4,
"active": 6,
"rejected": 3,
"largest": 2,
"completed": 27
},
"warmer": {
"threads": 2,
"queue": 7,
"active": 3,
"rejected": 2,
"largest": 3,
"completed": 1
},
"search": {
"threads": 5,
"queue": 7,
"active": 2,
"rejected": 7,
"largest": 2,
"completed": 4
},
"flush": {
"threads": 3,
"queue": 8,
"active": 0,
"rejected": 1,
"largest": 5,
"completed": 3
},
"optimize": {
"threads": 3,
"queue": 4,
"active": 1,
"rejected": 2,
"largest": 7,
"completed": 3
},
"fetch_shard_store": {
"threads": 1,
"queue": 7,
"active": 4,
"rejected": 2,
"largest": 4,
"completed": 1
},
"management": {
"threads": 2,
"queue": 3,
"active": 1,
"rejected": 6,
"largest": 2,
"completed": 22
},
"get": {
"threads": 1,
"queue": 8,
"active": 4,
"rejected": 3,
"largest": 2,
"completed": 1
},
"merge": {
"threads": 6,
"queue": 4,
"active": 5,
"rejected": 2,
"largest": 5,
"completed": 1
},
"bulk": {
"threads": 4,
"queue": 5,
"active": 7,
"rejected": 3,
"largest": 1,
"completed": 4
},
"snapshot": {
"threads": 8,
"queue": 5,
"active": 6,
"rejected": 2,
"largest": 1,
"completed": 0
}
},
"fs": {
"timestamp": 1436460392946,
"total": {
"total_in_bytes": 19507089408,
"free_in_bytes": 16909316096,
"available_in_bytes": 15894814720
},
"data": [
{
"path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
"mount": "/usr/share/elasticsearch/data",
"type": "ext4",
"total_in_bytes": 19507089408,
"free_in_bytes": 16909316096,
"available_in_bytes": 15894814720
}
]
},
"transport": {
"server_open": 13,
"rx_count": 6,
"rx_size_in_bytes": 1380,
"tx_count": 6,
"tx_size_in_bytes": 1380
},
"http": {
"current_open": 3,
"total_opened": 3
},
"breakers": {
"fielddata": {
"limit_size_in_bytes": 623326003,
"limit_size": "594.4mb",
"estimated_size_in_bytes": 0,
"estimated_size": "0b",
"overhead": 1.03,
"tripped": 0
},
"request": {
"limit_size_in_bytes": 415550668,
"limit_size": "396.2mb",
"estimated_size_in_bytes": 0,
"estimated_size": "0b",
"overhead": 1.0,
"tripped": 0
},
"parent": {
"limit_size_in_bytes": 727213670,
"limit_size": "693.5mb",
"estimated_size_in_bytes": 0,
"estimated_size": "0b",
"overhead": 1.0,
"tripped": 0
}
}
}
}
}
`
var indicesExpected = map[string]interface{}{
"id_cache_memory_size_in_bytes": float64(0),
"completion_size_in_bytes": float64(0),
"suggest_total": float64(0),
"suggest_time_in_millis": float64(0),
"suggest_current": float64(0),
"query_cache_memory_size_in_bytes": float64(0),
"query_cache_evictions": float64(0),
"query_cache_hit_count": float64(0),
"query_cache_miss_count": float64(0),
"store_size_in_bytes": float64(37715234),
"store_throttle_time_in_millis": float64(215),
"merges_current_docs": float64(0),
"merges_current_size_in_bytes": float64(0),
"merges_total": float64(133),
"merges_total_time_in_millis": float64(21060),
"merges_total_docs": float64(203672),
"merges_total_size_in_bytes": float64(142900226),
"merges_current": float64(0),
"filter_cache_memory_size_in_bytes": float64(7384),
"filter_cache_evictions": float64(0),
"indexing_index_total": float64(84790),
"indexing_index_time_in_millis": float64(29680),
"indexing_index_current": float64(0),
"indexing_noop_update_total": float64(0),
"indexing_throttle_time_in_millis": float64(0),
"indexing_delete_total": float64(13879),
"indexing_delete_time_in_millis": float64(1139),
"indexing_delete_current": float64(0),
"get_exists_time_in_millis": float64(0),
"get_missing_total": float64(1),
"get_missing_time_in_millis": float64(2),
"get_current": float64(0),
"get_total": float64(1),
"get_time_in_millis": float64(2),
"get_exists_total": float64(0),
"refresh_total": float64(1076),
"refresh_total_time_in_millis": float64(20078),
"percolate_current": float64(0),
"percolate_memory_size_in_bytes": float64(-1),
"percolate_queries": float64(0),
"percolate_total": float64(0),
"percolate_time_in_millis": float64(0),
"translog_operations": float64(17702),
"translog_size_in_bytes": float64(17),
"recovery_current_as_source": float64(0),
"recovery_current_as_target": float64(0),
"recovery_throttle_time_in_millis": float64(0),
"docs_count": float64(29652),
"docs_deleted": float64(5229),
"flush_total_time_in_millis": float64(2401),
"flush_total": float64(115),
"fielddata_memory_size_in_bytes": float64(12996),
"fielddata_evictions": float64(0),
"search_fetch_current": float64(0),
"search_open_contexts": float64(0),
"search_query_total": float64(1452),
"search_query_time_in_millis": float64(5695),
"search_query_current": float64(0),
"search_fetch_total": float64(414),
"search_fetch_time_in_millis": float64(146),
"warmer_current": float64(0),
"warmer_total": float64(2319),
"warmer_total_time_in_millis": float64(448),
"segments_count": float64(134),
"segments_memory_in_bytes": float64(1285212),
"segments_index_writer_memory_in_bytes": float64(0),
"segments_index_writer_max_memory_in_bytes": float64(172368955),
"segments_version_map_memory_in_bytes": float64(611844),
"segments_fixed_bit_set_memory_in_bytes": float64(0),
}
var osExpected = map[string]interface{}{
"load_average_0": float64(0.01),
"load_average_1": float64(0.04),
"load_average_2": float64(0.05),
"swap_used_in_bytes": float64(0),
"swap_free_in_bytes": float64(487997440),
"timestamp": float64(1436460392944),
"mem_free_percent": float64(74),
"mem_used_percent": float64(25),
"mem_actual_free_in_bytes": float64(1565470720),
"mem_actual_used_in_bytes": float64(534159360),
"mem_free_in_bytes": float64(477761536),
"mem_used_in_bytes": float64(1621868544),
}
var processExpected = map[string]interface{}{
"mem_total_virtual_in_bytes": float64(4747890688),
"timestamp": float64(1436460392945),
"open_file_descriptors": float64(160),
"cpu_total_in_millis": float64(15480),
"cpu_percent": float64(2),
"cpu_sys_in_millis": float64(1870),
"cpu_user_in_millis": float64(13610),
}
var jvmExpected = map[string]interface{}{
"timestamp": float64(1436460392945),
"uptime_in_millis": float64(202245),
"mem_non_heap_used_in_bytes": float64(39634576),
"mem_non_heap_committed_in_bytes": float64(40841216),
"mem_pools_young_max_in_bytes": float64(279183360),
"mem_pools_young_peak_used_in_bytes": float64(71630848),
"mem_pools_young_peak_max_in_bytes": float64(279183360),
"mem_pools_young_used_in_bytes": float64(32685760),
"mem_pools_survivor_peak_used_in_bytes": float64(8912888),
"mem_pools_survivor_peak_max_in_bytes": float64(34865152),
"mem_pools_survivor_used_in_bytes": float64(8912880),
"mem_pools_survivor_max_in_bytes": float64(34865152),
"mem_pools_old_peak_max_in_bytes": float64(724828160),
"mem_pools_old_used_in_bytes": float64(11110928),
"mem_pools_old_max_in_bytes": float64(724828160),
"mem_pools_old_peak_used_in_bytes": float64(14354608),
"mem_heap_used_in_bytes": float64(52709568),
"mem_heap_used_percent": float64(5),
"mem_heap_committed_in_bytes": float64(259522560),
"mem_heap_max_in_bytes": float64(1038876672),
"threads_peak_count": float64(45),
"threads_count": float64(44),
"gc_collectors_young_collection_count": float64(2),
"gc_collectors_young_collection_time_in_millis": float64(98),
"gc_collectors_old_collection_count": float64(1),
"gc_collectors_old_collection_time_in_millis": float64(24),
"buffer_pools_direct_count": float64(40),
"buffer_pools_direct_used_in_bytes": float64(6304239),
"buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
"buffer_pools_mapped_count": float64(0),
"buffer_pools_mapped_used_in_bytes": float64(0),
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
}
var threadPoolExpected = map[string]interface{}{
"merge_threads": float64(6),
"merge_queue": float64(4),
"merge_active": float64(5),
"merge_rejected": float64(2),
"merge_largest": float64(5),
"merge_completed": float64(1),
"bulk_threads": float64(4),
"bulk_queue": float64(5),
"bulk_active": float64(7),
"bulk_rejected": float64(3),
"bulk_largest": float64(1),
"bulk_completed": float64(4),
"warmer_threads": float64(2),
"warmer_queue": float64(7),
"warmer_active": float64(3),
"warmer_rejected": float64(2),
"warmer_largest": float64(3),
"warmer_completed": float64(1),
"get_largest": float64(2),
"get_completed": float64(1),
"get_threads": float64(1),
"get_queue": float64(8),
"get_active": float64(4),
"get_rejected": float64(3),
"index_threads": float64(6),
"index_queue": float64(8),
"index_active": float64(4),
"index_rejected": float64(2),
"index_largest": float64(3),
"index_completed": float64(6),
"suggest_threads": float64(2),
"suggest_queue": float64(7),
"suggest_active": float64(2),
"suggest_rejected": float64(1),
"suggest_largest": float64(8),
"suggest_completed": float64(3),
"fetch_shard_store_queue": float64(7),
"fetch_shard_store_active": float64(4),
"fetch_shard_store_rejected": float64(2),
"fetch_shard_store_largest": float64(4),
"fetch_shard_store_completed": float64(1),
"fetch_shard_store_threads": float64(1),
"management_threads": float64(2),
"management_queue": float64(3),
"management_active": float64(1),
"management_rejected": float64(6),
"management_largest": float64(2),
"management_completed": float64(22),
"percolate_queue": float64(23),
"percolate_active": float64(13),
"percolate_rejected": float64(235),
"percolate_largest": float64(23),
"percolate_completed": float64(33),
"percolate_threads": float64(123),
"listener_active": float64(4),
"listener_rejected": float64(8),
"listener_largest": float64(1),
"listener_completed": float64(1),
"listener_threads": float64(1),
"listener_queue": float64(2),
"search_rejected": float64(7),
"search_largest": float64(2),
"search_completed": float64(4),
"search_threads": float64(5),
"search_queue": float64(7),
"search_active": float64(2),
"fetch_shard_started_threads": float64(3),
"fetch_shard_started_queue": float64(1),
"fetch_shard_started_active": float64(5),
"fetch_shard_started_rejected": float64(6),
"fetch_shard_started_largest": float64(4),
"fetch_shard_started_completed": float64(54),
"refresh_rejected": float64(4),
"refresh_largest": float64(8),
"refresh_completed": float64(3),
"refresh_threads": float64(23),
"refresh_queue": float64(7),
"refresh_active": float64(3),
"optimize_threads": float64(3),
"optimize_queue": float64(4),
"optimize_active": float64(1),
"optimize_rejected": float64(2),
"optimize_largest": float64(7),
"optimize_completed": float64(3),
"snapshot_largest": float64(1),
"snapshot_completed": float64(0),
"snapshot_threads": float64(8),
"snapshot_queue": float64(5),
"snapshot_active": float64(6),
"snapshot_rejected": float64(2),
"generic_threads": float64(1),
"generic_queue": float64(4),
"generic_active": float64(6),
"generic_rejected": float64(3),
"generic_largest": float64(2),
"generic_completed": float64(27),
"flush_threads": float64(3),
"flush_queue": float64(8),
"flush_active": float64(0),
"flush_rejected": float64(1),
"flush_largest": float64(5),
"flush_completed": float64(3),
}
var fsExpected = map[string]interface{}{
"data_0_total_in_bytes": float64(19507089408),
"data_0_free_in_bytes": float64(16909316096),
"data_0_available_in_bytes": float64(15894814720),
"timestamp": float64(1436460392946),
"total_free_in_bytes": float64(16909316096),
"total_available_in_bytes": float64(15894814720),
"total_total_in_bytes": float64(19507089408),
}
var transportExpected = map[string]interface{}{
"server_open": float64(13),
"rx_count": float64(6),
"rx_size_in_bytes": float64(1380),
"tx_count": float64(6),
"tx_size_in_bytes": float64(1380),
}
var httpExpected = map[string]interface{}{
"current_open": float64(3),
"total_opened": float64(3),
}
var breakersExpected = map[string]interface{}{
"fielddata_estimated_size_in_bytes": float64(0),
"fielddata_overhead": float64(1.03),
"fielddata_tripped": float64(0),
"fielddata_limit_size_in_bytes": float64(623326003),
"request_estimated_size_in_bytes": float64(0),
"request_overhead": float64(1.0),
"request_tripped": float64(0),
"request_limit_size_in_bytes": float64(415550668),
"parent_overhead": float64(1.0),
"parent_tripped": float64(0),
"parent_limit_size_in_bytes": float64(727213670),
"parent_estimated_size_in_bytes": float64(0),
}


@@ -0,0 +1,82 @@
# Exec Input Plugin
The exec plugin can execute arbitrary commands that output JSON or
InfluxDB [line-protocol](https://docs.influxdata.com/influxdb/v0.9/write_protocols/line/).
If using JSON, only numeric values are parsed and turned into floats;
booleans and strings are ignored.
### Configuration
```
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# Data format to consume. This can be "json" or "influx" (line-protocol)
# NOTE: json only reads numerical measurements; strings and booleans are ignored.
data_format = "json"
# measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
```
Other options for modifying the measurement names are:
```
name_override = "measurement_name"
name_prefix = "prefix_"
```
### Example 1
Let's say that we have the above configuration, and mycollector outputs the
following JSON:
```json
{
"a": 0.5,
"b": {
"c": 0.1,
"d": 5
}
}
```
The collected metrics will be stored as fields under the measurement
"exec_mycollector":
```
exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567
```
### Example 2
Now let's say we have the following configuration:
```
[[inputs.exec]]
# the command to run
command = "/usr/bin/line_protocol_collector"
# Data format to consume. This can be "json" or "influx" (line-protocol)
# NOTE: json only reads numerical measurements; strings and booleans are ignored.
data_format = "influx"
```
And line_protocol_collector outputs the following line protocol:
```
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
```
You will get data in InfluxDB exactly as it is defined above: tags are
cpu=cpuN, host=foo, and datacenter=us-east, with fields usage_idle and
usage_busy. Each point receives a timestamp at collection time.
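As a trivial illustration, any executable that writes line protocol to stdout can serve as a collector. For example (a hypothetical script):
```sh
#!/bin/sh
# Emits a single line-protocol point; pair with data_format = "influx".
echo "myapp,host=$(hostname) queue_depth=42"
```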

plugins/inputs/exec/exec.go

@@ -0,0 +1,110 @@
package exec
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"time"
"github.com/gonuts/go-shellquote"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
const sampleConfig = `
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# Data format to consume. This can be "json" or "influx" (line-protocol)
# NOTE json only reads numerical measurements, strings and booleans are ignored.
data_format = "json"
# measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
`
type Exec struct {
Command string
DataFormat string
runner Runner
}
type Runner interface {
Run(*Exec) ([]byte, error)
}
type CommandRunner struct{}
func (c CommandRunner) Run(e *Exec) ([]byte, error) {
split_cmd, err := shellquote.Split(e.Command)
if err != nil || len(split_cmd) == 0 {
return nil, fmt.Errorf("exec: unable to parse command, %s", err)
}
cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
var out bytes.Buffer
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("exec: %s for command '%s'", err, e.Command)
}
return out.Bytes(), nil
}
func NewExec() *Exec {
return &Exec{runner: CommandRunner{}}
}
func (e *Exec) SampleConfig() string {
return sampleConfig
}
func (e *Exec) Description() string {
return "Read flattened metrics from one or more commands that output JSON to stdout"
}
func (e *Exec) Gather(acc telegraf.Accumulator) error {
out, err := e.runner.Run(e)
if err != nil {
return err
}
switch e.DataFormat {
case "", "json":
var jsonOut interface{}
err = json.Unmarshal(out, &jsonOut)
if err != nil {
return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
e.Command, err)
}
f := internal.JSONFlattener{}
err = f.FlattenJSON("", jsonOut)
if err != nil {
return err
}
acc.AddFields("exec", f.Fields, nil)
case "influx":
now := time.Now()
metrics, err := telegraf.ParseMetrics(out)
for _, metric := range metrics {
acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), now)
}
return err
default:
return fmt.Errorf("Unsupported data format: %s. Must be either json "+
"or influx.", e.DataFormat)
}
return nil
}
func init() {
inputs.Add("exec", func() telegraf.Input {
return NewExec()
})
}


@@ -0,0 +1,172 @@
package exec
import (
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Midnight 9/22/2015
const baseTimeSeconds = 1442905200
const validJson = `
{
"status": "green",
"num_processes": 82,
"cpu": {
"status": "red",
"nil_status": null,
"used": 8234,
"free": 32
},
"percent": 0.81,
"users": [0, 1, 2, 3]
}`
const malformedJson = `
{
"status": "green",
`
const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1"
const lineProtocolMulti = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
type runnerMock struct {
out []byte
err error
}
func newRunnerMock(out []byte, err error) Runner {
return &runnerMock{
out: out,
err: err,
}
}
func (r runnerMock) Run(e *Exec) ([]byte, error) {
if r.err != nil {
return nil, r.err
}
return r.out, nil
}
func TestExec(t *testing.T) {
e := &Exec{
runner: newRunnerMock([]byte(validJson), nil),
Command: "testcommand arg1",
}
var acc testutil.Accumulator
err := e.Gather(&acc)
require.NoError(t, err)
assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")
fields := map[string]interface{}{
"num_processes": float64(82),
"cpu_used": float64(8234),
"cpu_free": float64(32),
"percent": float64(0.81),
"users_0": float64(0),
"users_1": float64(1),
"users_2": float64(2),
"users_3": float64(3),
}
acc.AssertContainsFields(t, "exec", fields)
}
func TestExecMalformed(t *testing.T) {
e := &Exec{
runner: newRunnerMock([]byte(malformedJson), nil),
Command: "badcommand arg1",
}
var acc testutil.Accumulator
err := e.Gather(&acc)
require.Error(t, err)
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}
func TestCommandError(t *testing.T) {
e := &Exec{
runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")),
Command: "badcommand",
}
var acc testutil.Accumulator
err := e.Gather(&acc)
require.Error(t, err)
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}
func TestLineProtocolParse(t *testing.T) {
e := &Exec{
runner: newRunnerMock([]byte(lineProtocol), nil),
Command: "line-protocol",
DataFormat: "influx",
}
var acc testutil.Accumulator
err := e.Gather(&acc)
require.NoError(t, err)
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
tags := map[string]string{
"host": "foo",
"datacenter": "us-east",
}
acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
}
func TestLineProtocolParseMultiple(t *testing.T) {
e := &Exec{
runner: newRunnerMock([]byte(lineProtocolMulti), nil),
Command: "line-protocol",
DataFormat: "influx",
}
var acc testutil.Accumulator
err := e.Gather(&acc)
require.NoError(t, err)
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
tags := map[string]string{
"host": "foo",
"datacenter": "us-east",
}
cpuTags := []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"}
for _, cpu := range cpuTags {
tags["cpu"] = cpu
acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
}
}
func TestInvalidDataFormat(t *testing.T) {
e := &Exec{
runner: newRunnerMock([]byte(lineProtocol), nil),
Command: "bad data format",
DataFormat: "FooBar",
}
var acc testutil.Accumulator
err := e.Gather(&acc)
require.Error(t, err)
}


@@ -0,0 +1,369 @@
# github_webhooks
This is a Telegraf service plugin that listens for events kicked off by Github's Webhooks service and persists data from them into configured outputs. To set up the listener first generate the proper configuration:
```sh
$ telegraf -sample-config -input-filter github_webhooks -output-filter influxdb > config.conf.new
```
Change the config file to point to the InfluxDB server you are using and adjust the settings to match your environment. Once that is complete:
```sh
$ cp config.conf.new /etc/telegraf/telegraf.conf
$ sudo service telegraf start
```
Once the server is running, you should configure your Organization's Webhooks to point at the `github_webhooks` service. To do this, go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu, set `Payload URL` to `http://<my_ip>:1618`, `Content type` to `application/json`, and under the section `Which events would you like to trigger this webhook?` select 'Send me **everything**'. By default all of the events will write to the `github_webhooks` measurement; this is configurable by setting the `measurement_name` option in the config file.
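Before pointing GitHub at the listener, you can exercise it locally with a hand-crafted payload (a minimal sketch; `watch_event.json` is a hypothetical file containing a sample event body):
```sh
$ curl -X POST http://localhost:1618/ \
    -H "X-Github-Event: watch" \
    -H "Content-Type: application/json" \
    -d @watch_event.json
```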
## Events
The titles of the following sections are links to the full payloads and details for each event. The body contains what information from the event is persisted. The format is as follows:
```
# TAGS
* 'tagKey' = `tagValue` type
# FIELDS
* 'fieldKey' = `fieldValue` type
```
The tag values and field values show the place on the incoming JSON object where the data is sourced from.
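For example, a `watch` event would be persisted roughly as the following line-protocol point (illustrative tag and field values):
```
github_webhooks,event=watch,repository=foo/bar,private=false,user=baz,admin=false stars=42i,forks=7i,issues=3i 1452815002357578567
```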
#### [`commit_comment` event](https://developer.github.com/v3/activity/events/types/#commitcommentevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.comment.commit_id` string
* 'comment' = `event.comment.body` string
#### [`create` event](https://developer.github.com/v3/activity/events/types/#createevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'refType' = `event.ref_type` string
#### [`delete` event](https://developer.github.com/v3/activity/events/types/#deleteevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'refType' = `event.ref_type` string
#### [`deployment` event](https://developer.github.com/v3/activity/events/types/#deploymentevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string
#### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string
* 'depState' = `event.deployment_status.state` string
* 'depDescription' = `event.deployment_status.description` string
#### [`fork` event](https://developer.github.com/v3/activity/events/types/#forkevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'fork' = `event.forkee.full_name` string
#### [`gollum` event](https://developer.github.com/v3/activity/events/types/#gollumevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
#### [`issue_comment` event](https://developer.github.com/v3/activity/events/types/#issuecommentevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'issue' = `event.issue.number` int
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'title' = `event.issue.title` string
* 'comments' = `event.issue.comments` int
* 'body' = `event.comment.body` string
#### [`issues` event](https://developer.github.com/v3/activity/events/types/#issuesevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'issue' = `event.issue.number` int
* 'action' = `event.action` string
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'title' = `event.issue.title` string
* 'comments' = `event.issue.comments` int
#### [`member` event](https://developer.github.com/v3/activity/events/types/#memberevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'newMember' = `event.sender.login` string
* 'newMemberStatus' = `event.sender.site_admin` bool
#### [`membership` event](https://developer.github.com/v3/activity/events/types/#membershipevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'action' = `event.action` string
**Fields:**
* 'newMember' = `event.sender.login` string
* 'newMemberStatus' = `event.sender.site_admin` bool
#### [`page_build` event](https://developer.github.com/v3/activity/events/types/#pagebuildevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
#### [`public` event](https://developer.github.com/v3/activity/events/types/#publicevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
#### [`pull_request_review_comment` event](https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'action' = `event.action` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'prNumber' = `event.pull_request.number` int
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'state' = `event.pull_request.state` string
* 'title' = `event.pull_request.title` string
* 'comments' = `event.pull_request.comments` int
* 'commits' = `event.pull_request.commits` int
* 'additions' = `event.pull_request.additions` int
* 'deletions' = `event.pull_request.deletions` int
* 'changedFiles' = `event.pull_request.changed_files` int
* 'commentFile' = `event.comment.path` string
* 'comment' = `event.comment.body` string
#### [`pull_request` event](https://developer.github.com/v3/activity/events/types/#pullrequestevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'action' = `event.action` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'prNumber' = `event.pull_request.number` int
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'state' = `event.pull_request.state` string
* 'title' = `event.pull_request.title` string
* 'comments' = `event.pull_request.comments` int
* 'commits' = `event.pull_request.commits` int
* 'additions' = `event.pull_request.additions` int
* 'deletions' = `event.pull_request.deletions` int
* 'changedFiles' = `event.pull_request.changed_files` int
#### [`push` event](https://developer.github.com/v3/activity/events/types/#pushevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'before' = `event.before` string
* 'after' = `event.after` string
#### [`repository` event](https://developer.github.com/v3/activity/events/types/#repositoryevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
#### [`release` event](https://developer.github.com/v3/activity/events/types/#releaseevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'tagName' = `event.release.tag_name` string
#### [`status` event](https://developer.github.com/v3/activity/events/types/#statusevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.sha` string
* 'state' = `event.state` string
#### [`team_add` event](https://developer.github.com/v3/activity/events/types/#teamaddevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'teamName' = `event.team.name` string
#### [`watch` event](https://developer.github.com/v3/activity/events/types/#watchevent)
**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int


@@ -0,0 +1,335 @@
package github_webhooks
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"sync"
"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
func init() {
inputs.Add("github_webhooks", func() telegraf.Input { return &GithubWebhooks{} })
}
type GithubWebhooks struct {
ServiceAddress string
// Lock for the struct
sync.Mutex
// Events buffer to store events between Gather calls
events []Event
}
func NewGithubWebhooks() *GithubWebhooks {
return &GithubWebhooks{}
}
func (gh *GithubWebhooks) SampleConfig() string {
return `
# Address and port to host Webhook listener on
service_address = ":1618"
`
}
func (gh *GithubWebhooks) Description() string {
return "A Github Webhook Event collector"
}
// Gather drains the buffered events into the Accumulator
func (gh *GithubWebhooks) Gather(acc telegraf.Accumulator) error {
gh.Lock()
defer gh.Unlock()
for _, event := range gh.events {
p := event.NewMetric()
acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
}
gh.events = make([]Event, 0)
return nil
}
func (gh *GithubWebhooks) Listen() {
r := mux.NewRouter()
r.HandleFunc("/", gh.eventHandler).Methods("POST")
err := http.ListenAndServe(gh.ServiceAddress, r)
if err != nil {
log.Printf("Error starting server: %v", err)
}
}
func (gh *GithubWebhooks) Start() error {
go gh.Listen()
log.Printf("Started the github_webhooks service on %s\n", gh.ServiceAddress)
return nil
}
func (gh *GithubWebhooks) Stop() {
log.Println("Stopping the ghWebhooks service")
}
// Handles the / route
func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
eventType := r.Header.Get("X-Github-Event")
data, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
e, err := NewEvent(data, eventType)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
gh.Lock()
gh.events = append(gh.events, e)
gh.Unlock()
w.WriteHeader(http.StatusOK)
}
func newCommitComment(data []byte) (Event, error) {
commitCommentStruct := CommitCommentEvent{}
err := json.Unmarshal(data, &commitCommentStruct)
if err != nil {
return nil, err
}
return commitCommentStruct, nil
}
func newCreate(data []byte) (Event, error) {
createStruct := CreateEvent{}
err := json.Unmarshal(data, &createStruct)
if err != nil {
return nil, err
}
return createStruct, nil
}
func newDelete(data []byte) (Event, error) {
deleteStruct := DeleteEvent{}
err := json.Unmarshal(data, &deleteStruct)
if err != nil {
return nil, err
}
return deleteStruct, nil
}
func newDeployment(data []byte) (Event, error) {
deploymentStruct := DeploymentEvent{}
err := json.Unmarshal(data, &deploymentStruct)
if err != nil {
return nil, err
}
return deploymentStruct, nil
}
func newDeploymentStatus(data []byte) (Event, error) {
deploymentStatusStruct := DeploymentStatusEvent{}
err := json.Unmarshal(data, &deploymentStatusStruct)
if err != nil {
return nil, err
}
return deploymentStatusStruct, nil
}
func newFork(data []byte) (Event, error) {
forkStruct := ForkEvent{}
err := json.Unmarshal(data, &forkStruct)
if err != nil {
return nil, err
}
return forkStruct, nil
}
func newGollum(data []byte) (Event, error) {
gollumStruct := GollumEvent{}
err := json.Unmarshal(data, &gollumStruct)
if err != nil {
return nil, err
}
return gollumStruct, nil
}
func newIssueComment(data []byte) (Event, error) {
issueCommentStruct := IssueCommentEvent{}
err := json.Unmarshal(data, &issueCommentStruct)
if err != nil {
return nil, err
}
return issueCommentStruct, nil
}
func newIssues(data []byte) (Event, error) {
issuesStruct := IssuesEvent{}
err := json.Unmarshal(data, &issuesStruct)
if err != nil {
return nil, err
}
return issuesStruct, nil
}
func newMember(data []byte) (Event, error) {
memberStruct := MemberEvent{}
err := json.Unmarshal(data, &memberStruct)
if err != nil {
return nil, err
}
return memberStruct, nil
}
func newMembership(data []byte) (Event, error) {
membershipStruct := MembershipEvent{}
err := json.Unmarshal(data, &membershipStruct)
if err != nil {
return nil, err
}
return membershipStruct, nil
}
func newPageBuild(data []byte) (Event, error) {
pageBuildEvent := PageBuildEvent{}
err := json.Unmarshal(data, &pageBuildEvent)
if err != nil {
return nil, err
}
return pageBuildEvent, nil
}
func newPublic(data []byte) (Event, error) {
publicEvent := PublicEvent{}
err := json.Unmarshal(data, &publicEvent)
if err != nil {
return nil, err
}
return publicEvent, nil
}
func newPullRequest(data []byte) (Event, error) {
pullRequestStruct := PullRequestEvent{}
err := json.Unmarshal(data, &pullRequestStruct)
if err != nil {
return nil, err
}
return pullRequestStruct, nil
}
func newPullRequestReviewComment(data []byte) (Event, error) {
pullRequestReviewCommentStruct := PullRequestReviewCommentEvent{}
err := json.Unmarshal(data, &pullRequestReviewCommentStruct)
if err != nil {
return nil, err
}
return pullRequestReviewCommentStruct, nil
}
func newPush(data []byte) (Event, error) {
pushStruct := PushEvent{}
err := json.Unmarshal(data, &pushStruct)
if err != nil {
return nil, err
}
return pushStruct, nil
}
func newRelease(data []byte) (Event, error) {
releaseStruct := ReleaseEvent{}
err := json.Unmarshal(data, &releaseStruct)
if err != nil {
return nil, err
}
return releaseStruct, nil
}
func newRepository(data []byte) (Event, error) {
repositoryStruct := RepositoryEvent{}
err := json.Unmarshal(data, &repositoryStruct)
if err != nil {
return nil, err
}
return repositoryStruct, nil
}
func newStatus(data []byte) (Event, error) {
statusStruct := StatusEvent{}
err := json.Unmarshal(data, &statusStruct)
if err != nil {
return nil, err
}
return statusStruct, nil
}
func newTeamAdd(data []byte) (Event, error) {
teamAddStruct := TeamAddEvent{}
err := json.Unmarshal(data, &teamAddStruct)
if err != nil {
return nil, err
}
return teamAddStruct, nil
}
func newWatch(data []byte) (Event, error) {
watchStruct := WatchEvent{}
err := json.Unmarshal(data, &watchStruct)
if err != nil {
return nil, err
}
return watchStruct, nil
}
type newEventError struct {
s string
}
func (e *newEventError) Error() string {
return e.s
}
func NewEvent(r []byte, t string) (Event, error) {
log.Printf("New %v event recieved", t)
switch t {
case "commit_comment":
return newCommitComment(r)
case "create":
return newCreate(r)
case "delete":
return newDelete(r)
case "deployment":
return newDeployment(r)
case "deployment_status":
return newDeploymentStatus(r)
case "fork":
return newFork(r)
case "gollum":
return newGollum(r)
case "issue_comment":
return newIssueComment(r)
case "issues":
return newIssues(r)
case "member":
return newMember(r)
case "membership":
return newMembership(r)
case "page_build":
return newPageBuild(r)
case "public":
return newPublic(r)
case "pull_request":
return newPullRequest(r)
case "pull_request_review_comment":
return newPullRequestReviewComment(r)
case "push":
return newPush(r)
case "release":
return newRelease(r)
case "repository":
return newRepository(r)
case "status":
return newStatus(r)
case "team_add":
return newTeamAdd(r)
case "watch":
return newWatch(r)
}
return nil, &newEventError{"Not a recgonized event type"}
}

File diff suppressed because it is too large


@@ -0,0 +1,711 @@
package github_webhooks
import (
"fmt"
"log"
"time"
"github.com/influxdata/telegraf"
)
const meas = "github_webhooks"
type Event interface {
NewMetric() telegraf.Metric
}
type Repository struct {
Repository string `json:"full_name"`
Private bool `json:"private"`
Stars int `json:"stargazers_count"`
Forks int `json:"forks_count"`
Issues int `json:"open_issues_count"`
}
type Sender struct {
User string `json:"login"`
Admin bool `json:"site_admin"`
}
type CommitComment struct {
Commit string `json:"commit_id"`
Body string `json:"body"`
}
type Deployment struct {
Commit string `json:"sha"`
Task string `json:"task"`
Environment string `json:"environment"`
Description string `json:"description"`
}
type Page struct {
Name string `json:"page_name"`
Title string `json:"title"`
Action string `json:"action"`
}
type Issue struct {
Number int `json:"number"`
Title string `json:"title"`
Comments int `json:"comments"`
}
type IssueComment struct {
Body string `json:"body"`
}
type Team struct {
Name string `json:"name"`
}
type PullRequest struct {
Number int `json:"number"`
State string `json:"state"`
Title string `json:"title"`
Comments int `json:"comments"`
Commits int `json:"commits"`
Additions int `json:"additions"`
Deletions int `json:"deletions"`
ChangedFiles int `json:"changed_files"`
}
type PullRequestReviewComment struct {
File string `json:"path"`
Comment string `json:"body"`
}
type Release struct {
TagName string `json:"tag_name"`
}
type DeploymentStatus struct {
State string `json:"state"`
Description string `json:"description"`
}
type CommitCommentEvent struct {
Comment CommitComment `json:"comment"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s CommitCommentEvent) NewMetric() telegraf.Metric {
event := "commit_comment"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"commit": s.Comment.Commit,
"comment": s.Comment.Body,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type CreateEvent struct {
Ref string `json:"ref"`
RefType string `json:"ref_type"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s CreateEvent) NewMetric() telegraf.Metric {
event := "create"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"ref": s.Ref,
"refType": s.RefType,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type DeleteEvent struct {
Ref string `json:"ref"`
RefType string `json:"ref_type"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s DeleteEvent) NewMetric() telegraf.Metric {
event := "delete"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"ref": s.Ref,
"refType": s.RefType,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type DeploymentEvent struct {
Deployment Deployment `json:"deployment"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s DeploymentEvent) NewMetric() telegraf.Metric {
event := "deployment"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"commit": s.Deployment.Commit,
"task": s.Deployment.Task,
"environment": s.Deployment.Environment,
"description": s.Deployment.Description,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type DeploymentStatusEvent struct {
Deployment Deployment `json:"deployment"`
DeploymentStatus DeploymentStatus `json:"deployment_status"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s DeploymentStatusEvent) NewMetric() telegraf.Metric {
event := "delete"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"commit": s.Deployment.Commit,
"task": s.Deployment.Task,
"environment": s.Deployment.Environment,
"description": s.Deployment.Description,
"depState": s.DeploymentStatus.State,
"depDescription": s.DeploymentStatus.Description,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type ForkEvent struct {
Forkee Repository `json:"forkee"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s ForkEvent) NewMetric() telegraf.Metric {
event := "fork"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"fork": s.Forkee.Repository,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type GollumEvent struct {
Pages []Page `json:"pages"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
// REVIEW: Going to be lazy and not deal with the pages.
func (s GollumEvent) NewMetric() telegraf.Metric {
event := "gollum"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type IssueCommentEvent struct {
Issue Issue `json:"issue"`
Comment IssueComment `json:"comment"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s IssueCommentEvent) NewMetric() telegraf.Metric {
event := "issue_comment"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
"issue": fmt.Sprintf("%v", s.Issue.Number),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"title": s.Issue.Title,
"comments": s.Issue.Comments,
"body": s.Comment.Body,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type IssuesEvent struct {
Action string `json:"action"`
Issue Issue `json:"issue"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s IssuesEvent) NewMetric() telegraf.Metric {
event := "issue"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
"issue": fmt.Sprintf("%v", s.Issue.Number),
"action": s.Action,
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"title": s.Issue.Title,
"comments": s.Issue.Comments,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type MemberEvent struct {
Member Sender `json:"member"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s MemberEvent) NewMetric() telegraf.Metric {
event := "member"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"newMember": s.Member.User,
"newMemberStatus": s.Member.Admin,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type MembershipEvent struct {
Action string `json:"action"`
Member Sender `json:"member"`
Sender Sender `json:"sender"`
Team Team `json:"team"`
}
func (s MembershipEvent) NewMetric() telegraf.Metric {
event := "membership"
t := map[string]string{
"event": event,
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
"action": s.Action,
}
f := map[string]interface{}{
"newMember": s.Member.User,
"newMemberStatus": s.Member.Admin,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type PageBuildEvent struct {
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s PageBuildEvent) NewMetric() telegraf.Metric {
event := "page_build"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type PublicEvent struct {
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s PublicEvent) NewMetric() telegraf.Metric {
event := "public"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type PullRequestEvent struct {
Action string `json:"action"`
PullRequest PullRequest `json:"pull_request"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s PullRequestEvent) NewMetric() telegraf.Metric {
event := "pull_request"
t := map[string]string{
"event": event,
"action": s.Action,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
"prNumber": fmt.Sprintf("%v", s.PullRequest.Number),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"state": s.PullRequest.State,
"title": s.PullRequest.Title,
"comments": s.PullRequest.Comments,
"commits": s.PullRequest.Commits,
"additions": s.PullRequest.Additions,
"deletions": s.PullRequest.Deletions,
"changedFiles": s.PullRequest.ChangedFiles,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type PullRequestReviewCommentEvent struct {
Comment PullRequestReviewComment `json:"comment"`
PullRequest PullRequest `json:"pull_request"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric {
event := "pull_request_review_comment"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
"prNumber": fmt.Sprintf("%v", s.PullRequest.Number),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"state": s.PullRequest.State,
"title": s.PullRequest.Title,
"comments": s.PullRequest.Comments,
"commits": s.PullRequest.Commits,
"additions": s.PullRequest.Additions,
"deletions": s.PullRequest.Deletions,
"changedFiles": s.PullRequest.ChangedFiles,
"commentFile": s.Comment.File,
"comment": s.Comment.Comment,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type PushEvent struct {
Ref string `json:"ref"`
Before string `json:"before"`
After string `json:"after"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s PushEvent) NewMetric() telegraf.Metric {
event := "push"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"ref": s.Ref,
"before": s.Before,
"after": s.After,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type ReleaseEvent struct {
Release Release `json:"release"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s ReleaseEvent) NewMetric() telegraf.Metric {
event := "release"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"tagName": s.Release.TagName,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type RepositoryEvent struct {
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s RepositoryEvent) NewMetric() telegraf.Metric {
event := "repository"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type StatusEvent struct {
Commit string `json:"sha"`
State string `json:"state"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s StatusEvent) NewMetric() telegraf.Metric {
event := "status"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"commit": s.Commit,
"state": s.State,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type TeamAddEvent struct {
Team Team `json:"team"`
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s TeamAddEvent) NewMetric() telegraf.Metric {
event := "team_add"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
"teamName": s.Team.Name,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
type WatchEvent struct {
Repository Repository `json:"repository"`
Sender Sender `json:"sender"`
}
func (s WatchEvent) NewMetric() telegraf.Metric {
event := "delete"
t := map[string]string{
"event": event,
"repository": s.Repository.Repository,
"private": fmt.Sprintf("%v", s.Repository.Private),
"user": s.Sender.User,
"admin": fmt.Sprintf("%v", s.Sender.Admin),
}
f := map[string]interface{}{
"stars": s.Repository.Stars,
"forks": s.Repository.Forks,
"issues": s.Repository.Issues,
}
m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil {
log.Fatalf("Failed to create %v event", event)
}
return m
}
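Extending the plugin with another event type only requires implementing the `Event` interface and adding a matching case to `NewEvent`. A minimal sketch, assuming GitHub's `ping` event (hypothetical here; this version of the plugin does not handle it):
```go
// Hypothetical sketch: handling GitHub's "ping" event would mean adding
// a struct that implements Event, plus a `case "ping"` arm in NewEvent
// with a corresponding unmarshal helper.
type PingEvent struct {
	Zen    string `json:"zen"`
	Sender Sender `json:"sender"`
}

func (s PingEvent) NewMetric() telegraf.Metric {
	t := map[string]string{
		"event": "ping",
		"user":  s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"zen": s.Zen,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create ping event")
	}
	return m
}
```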


@@ -0,0 +1,237 @@
package github_webhooks
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
)
func TestCommitCommentEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := CommitCommentEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "commit_comment")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestDeleteEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := DeleteEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "delete")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestDeploymentEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := DeploymentEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "deployment")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestDeploymentStatusEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := DeploymentStatusEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "deployment_status")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestForkEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := ForkEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "fork")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestGollumEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := GollumEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "gollum")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestIssueCommentEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := IssueCommentEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "issue_comment")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestIssuesEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := IssuesEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "issues")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestMemberEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := MemberEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "member")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestMembershipEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := MembershipEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "membership")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestPageBuildEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := PageBuildEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "page_build")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestPublicEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := PublicEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "public")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestPullRequestReviewCommentEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := PullRequestReviewCommentEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "pull_request_review_comment")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestPushEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := PushEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "push")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestReleaseEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := ReleaseEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "release")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestRepositoryEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := RepositoryEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "repository")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestStatusEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := StatusEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "status")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestTeamAddEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := TeamAddEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "team_add")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}
func TestWatchEvent(t *testing.T) {
gh := NewGithubWebhooks()
jsonString := WatchEventJSON()
req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
req.Header.Add("X-Github-Event", "watch")
w := httptest.NewRecorder()
gh.eventHandler(w, req)
if w.Code != http.StatusOK {
t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
}
}


@@ -0,0 +1,365 @@
package haproxy
import (
"encoding/csv"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"io"
"net/http"
"net/url"
"strconv"
"sync"
"time"
)
//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
const (
HF_PXNAME = 0 // 0. pxname [LFBS]: proxy name
HF_SVNAME = 1 // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
HF_QCUR = 2 //2. qcur [..BS]: current queued requests. For the backend this reports the number queued without a server assigned.
HF_QMAX = 3 //3. qmax [..BS]: max value of qcur
HF_SCUR = 4 // 4. scur [LFBS]: current sessions
HF_SMAX = 5 //5. smax [LFBS]: max sessions
HF_SLIM = 6 //6. slim [LFBS]: configured session limit
HF_STOT = 7 //7. stot [LFBS]: cumulative number of connections
HF_BIN = 8 //8. bin [LFBS]: bytes in
HF_BOUT = 9 //9. bout [LFBS]: bytes out
HF_DREQ = 10 //10. dreq [LFB.]: requests denied because of security concerns.
HF_DRESP = 11 //11. dresp [LFBS]: responses denied because of security concerns.
HF_EREQ = 12 //12. ereq [LF..]: request errors. Some of the possible causes are:
HF_ECON = 13 //13. econ [..BS]: number of requests that encountered an error trying to
HF_ERESP = 14 //14. eresp [..BS]: response errors. srv_abrt will be counted here also. Some other errors are: - write error on the client socket (won't be counted for the server stat) - failure applying filters to the response.
HF_WRETR = 15 //15. wretr [..BS]: number of times a connection to a server was retried.
HF_WREDIS = 16 //16. wredis [..BS]: number of times a request was redispatched to another server. The server value counts the number of times that server was switched away from.
HF_STATUS = 17 //17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...)
HF_WEIGHT = 18 //18. weight [..BS]: total weight (backend), server weight (server)
HF_ACT = 19 //19. act [..BS]: number of active servers (backend), server is active (server)
HF_BCK = 20 //20. bck [..BS]: number of backup servers (backend), server is backup (server)
HF_CHKFAIL = 21 //21. chkfail [...S]: number of failed checks. (Only counts checks failed when the server is up.)
HF_CHKDOWN = 22 //22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts transitions to the whole backend being down, rather than the sum of the counters for each server.
HF_LASTCHG = 23 //23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition
HF_DOWNTIME = 24 //24. downtime [..BS]: total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime.
HF_QLIMIT = 25 //25. qlimit [...S]: configured maxqueue for the server, or nothing if the value is 0 (default, meaning no limit)
HF_PID = 26 //26. pid [LFBS]: process id (0 for first instance, 1 for second, ...)
HF_IID = 27 //27. iid [LFBS]: unique proxy id
HF_SID = 28 //28. sid [L..S]: server id (unique inside a proxy)
HF_THROTTLE = 29 //29. throttle [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
HF_LBTOT = 30 //30. lbtot [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected.
HF_TRACKED = 31 //31. tracked [...S]: id of proxy/server if tracking is enabled.
HF_TYPE = 32 //32. type [LFBS]: (0 = frontend, 1 = backend, 2 = server, 3 = socket/listener)
HF_RATE = 33 //33. rate [.FBS]: number of sessions per second over last elapsed second
HF_RATE_LIM = 34 //34. rate_lim [.F..]: configured limit on new sessions per second
HF_RATE_MAX = 35 //35. rate_max [.FBS]: max number of new sessions per second
HF_CHECK_STATUS = 36 //36. check_status [...S]: status of last health check, one of:
HF_CHECK_CODE = 37 //37. check_code [...S]: layer5-7 code, if available
HF_CHECK_DURATION = 38 //38. check_duration [...S]: time in ms took to finish last health check
HF_HRSP_1xx = 39 //39. hrsp_1xx [.FBS]: http responses with 1xx code
HF_HRSP_2xx = 40 //40. hrsp_2xx [.FBS]: http responses with 2xx code
HF_HRSP_3xx = 41 //41. hrsp_3xx [.FBS]: http responses with 3xx code
HF_HRSP_4xx = 42 //42. hrsp_4xx [.FBS]: http responses with 4xx code
HF_HRSP_5xx = 43 //43. hrsp_5xx [.FBS]: http responses with 5xx code
HF_HRSP_OTHER = 44 //44. hrsp_other [.FBS]: http responses with other codes (protocol error)
HF_HANAFAIL = 45 //45. hanafail [...S]: failed health checks details
HF_REQ_RATE = 46 //46. req_rate [.F..]: HTTP requests per second over last elapsed second
HF_REQ_RATE_MAX = 47 //47. req_rate_max [.F..]: max number of HTTP requests per second observed
HF_REQ_TOT = 48 //48. req_tot [.F..]: total number of HTTP requests received
HF_CLI_ABRT = 49 //49. cli_abrt [..BS]: number of data transfers aborted by the client
HF_SRV_ABRT = 50 //50. srv_abrt [..BS]: number of data transfers aborted by the server (inc. in eresp)
HF_COMP_IN = 51 //51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor
HF_COMP_OUT = 52 //52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor
HF_COMP_BYP = 53 //53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor (CPU/BW limit)
HF_COMP_RSP = 54 //54. comp_rsp [.FB.]: number of HTTP responses that were compressed
HF_LASTSESS = 55 //55. lastsess [..BS]: number of seconds since last session assigned to server/backend
HF_LAST_CHK = 56 //56. last_chk [...S]: last health check contents or textual error
HF_LAST_AGT = 57 //57. last_agt [...S]: last agent check contents or textual error
HF_QTIME = 58 //58. qtime [..BS]:
HF_CTIME = 59 //59. ctime [..BS]:
HF_RTIME = 60 //60. rtime [..BS]: (0 for TCP)
HF_TTIME = 61 //61. ttime [..BS]: the average total session time in ms over the 1024 last requests
)
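// In the comments above, the bracketed letters mark which HAProxy entity
// types report a given column: L (listeners), F (frontends), B (backends)
// and S (servers); a dot means the column is empty for that type.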
type haproxy struct {
Servers []string
client *http.Client
}
var sampleConfig = `
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
# If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# Or you can also use a local socket (not supported yet)
# servers = ["socket://run/haproxy/admin.sock"]
`
func (r *haproxy) SampleConfig() string {
return sampleConfig
}
func (r *haproxy) Description() string {
return "Read metrics of haproxy, via socket or csv stats page"
}
// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (g *haproxy) Gather(acc telegraf.Accumulator) error {
if len(g.Servers) == 0 {
return g.gatherServer("http://127.0.0.1:1936", acc)
}
var wg sync.WaitGroup
var mu sync.Mutex
var outerr error
for _, serv := range g.Servers {
wg.Add(1)
go func(serv string) {
defer wg.Done()
// gatherServer runs concurrently for each server, so writes to
// outerr are guarded by a mutex to avoid a data race.
if err := g.gatherServer(serv, acc); err != nil {
mu.Lock()
outerr = err
mu.Unlock()
}
}(serv)
}
wg.Wait()
return outerr
}
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
if g.client == nil {
client := &http.Client{}
g.client = client
}
u, err := url.Parse(addr)
if err != nil {
return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
}
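// HAProxy serves its stats as machine-readable CSV when ";csv" is appended
// to the stats URI, which is why the request below targets "<path>/;csv".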
req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s/;csv", u.Scheme, u.Host, u.Path), nil)
if err != nil {
return err
}
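// Credentials embedded in the configured URL (http://user:password@host:port)
// are not part of the request URL built above; they are forwarded as HTTP
// basic auth here instead.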
if u.User != nil {
p, _ := u.User.Password()
req.SetBasicAuth(u.User.Username(), p)
}
res, err := g.client.Do(req)
if err != nil {
return fmt.Errorf("Unable to connect to haproxy server '%s': %s", addr, err)
}
if res.StatusCode != 200 {
return fmt.Errorf("Unable to get valid stat result from '%s', http response code: %d", addr, res.StatusCode)
}
return importCsvResult(res.Body, acc, u.Host)
}
func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
reader := csv.NewReader(r)
result, err := reader.ReadAll()
now := time.Now()
// Maps CSV column indices to the field names reported to the accumulator.
fieldNames := map[int]string{
HF_QCUR: "qcur",
HF_QMAX: "qmax",
HF_SCUR: "scur",
HF_SMAX: "smax",
HF_STOT: "stot",
HF_BIN: "bin",
HF_BOUT: "bout",
HF_DREQ: "dreq",
HF_DRESP: "dresp",
HF_EREQ: "ereq",
HF_ECON: "econ",
HF_ERESP: "eresp",
HF_WRETR: "wretr",
HF_WREDIS: "wredis",
HF_ACT: "active_servers",
HF_BCK: "backup_servers",
HF_DOWNTIME: "downtime",
HF_THROTTLE: "throttle",
HF_LBTOT: "lbtot",
HF_RATE: "rate",
HF_RATE_MAX: "rate_max",
HF_CHECK_DURATION: "check_duration",
HF_HRSP_1xx: "http_response.1xx",
HF_HRSP_2xx: "http_response.2xx",
HF_HRSP_3xx: "http_response.3xx",
HF_HRSP_4xx: "http_response.4xx",
HF_HRSP_5xx: "http_response.5xx",
HF_REQ_RATE: "req_rate",
HF_REQ_RATE_MAX: "req_rate_max",
HF_REQ_TOT: "req_tot",
HF_CLI_ABRT: "cli_abort",
HF_SRV_ABRT: "srv_abort",
HF_QTIME: "qtime",
HF_CTIME: "ctime",
HF_RTIME: "rtime",
HF_TTIME: "ttime",
}
for _, row := range result {
fields := make(map[string]interface{})
tags := map[string]string{
"server": host,
"proxy": row[HF_PXNAME],
"sv": row[HF_SVNAME],
}
for field, v := range row {
name, ok := fieldNames[field]
if !ok {
continue
}
// Skip columns that are empty or non-numeric for this row.
if ival, perr := strconv.ParseUint(v, 10, 64); perr == nil {
fields[name] = ival
}
}
acc.AddFields("haproxy", fields, tags, now)
}
return err
}
func init() {
inputs.Add("haproxy", func() telegraf.Input {
return &haproxy{}
})
}

View File

@@ -0,0 +1,179 @@
package haproxy
import (
"fmt"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"net/http"
"net/http/httptest"
)
func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
//We create a fake server to return test data
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok {
w.WriteHeader(http.StatusUnauthorized)
fmt.Fprint(w, "Unauthorized")
return
}
if username == "user" && password == "password" {
fmt.Fprint(w, csvOutputSample)
} else {
w.WriteHeader(http.StatusUnauthorized)
fmt.Fprint(w, "Unauthorized")
}
}))
defer ts.Close()
//Now we test the above server again, this time passing authentication data
r := &haproxy{
Servers: []string{strings.Replace(ts.URL, "http://", "http://user:password@", 1)},
}
var acc testutil.Accumulator
err := r.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"server": ts.Listener.Addr().String(),
"proxy": "be_app",
"sv": "host0",
}
fields := map[string]interface{}{
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(510913516),
"bout": uint64(2193856571),
"check_duration": uint64(10),
"cli_abort": uint64(73),
"ctime": uint64(2),
"downtime": uint64(0),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(1),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(119534),
"http_response.3xx": uint64(48051),
"http_response.4xx": uint64(2345),
"http_response.5xx": uint64(1056),
"lbtot": uint64(171013),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(0),
"rate": uint64(3),
"rate_max": uint64(12),
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
"wredis": uint64(0),
"wretr": uint64(1),
}
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
//Here we should get an error because we don't pass authentication data
r = &haproxy{
Servers: []string{ts.URL},
}
err = r.Gather(&acc)
require.Error(t, err)
}
func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, csvOutputSample)
}))
defer ts.Close()
r := &haproxy{
Servers: []string{ts.URL},
}
var acc testutil.Accumulator
err := r.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"proxy": "be_app",
"server": ts.Listener.Addr().String(),
"sv": "host0",
}
fields := map[string]interface{}{
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(510913516),
"bout": uint64(2193856571),
"check_duration": uint64(10),
"cli_abort": uint64(73),
"ctime": uint64(2),
"downtime": uint64(0),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(1),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(119534),
"http_response.3xx": uint64(48051),
"http_response.4xx": uint64(2345),
"http_response.5xx": uint64(1056),
"lbtot": uint64(171013),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(0),
"rate": uint64(3),
"rate_max": uint64(12),
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
"wredis": uint64(0),
"wretr": uint64(1),
}
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
}
//When no server config is passed, we default to localhost.
//We just want to make sure we requested stats from localhost.
func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
r := &haproxy{}
var acc testutil.Accumulator
err := r.Gather(&acc)
require.Error(t, err)
assert.Contains(t, err.Error(), "127.0.0.1:1936/;csv")
}
const csvOutputSample = `
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
fe_app,FRONTEND,,81,288,713,2000,1094063,5557055817,24096715169,1102,80,95740,,,17,19,OPEN,,,,,,,,,2,16,113,13,114,,0,18,0,102,,,,0,1314093,537036,123452,11966,1360,,35,140,1987928,,,0,0,0,0,,,,,,,,
be_static,host0,0,0,0,3,,3209,1141294,17389596,,0,,0,0,0,0,no check,1,1,0,,,,,,2,17,1,,3209,,2,0,,7,,,,0,218,1497,1494,0,0,0,,,,0,0,,,,,2,,,0,2,23,545,
be_static,BACKEND,0,0,0,3,200,3209,1141294,17389596,0,0,,0,0,0,0,UP,1,1,0,,0,70698,0,,2,17,0,,3209,,1,0,,7,,,,0,218,1497,1494,0,0,,,,,0,0,0,0,0,0,2,,,0,2,23,545,
be_static,host0,0,0,0,1,,28,17313,466003,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,1,,28,,2,0,,1,L4OK,,1,0,17,6,5,0,0,0,,,,0,0,,,,,2103,,,0,1,1,36,
be_static,host4,0,0,0,1,,28,15358,1281073,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,2,,28,,2,0,,1,L4OK,,1,0,20,5,3,0,0,0,,,,0,0,,,,,2076,,,0,1,1,54,
be_static,host5,0,0,0,1,,28,17547,1970404,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,3,,28,,2,0,,1,L4OK,,0,0,20,5,3,0,0,0,,,,0,0,,,,,1495,,,0,1,1,53,
be_static,host6,0,0,0,1,,28,14105,1328679,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,4,,28,,2,0,,1,L4OK,,0,0,18,8,2,0,0,0,,,,0,0,,,,,1418,,,0,0,1,49,
be_static,host7,0,0,0,1,,28,15258,1965185,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,5,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,935,,,0,0,1,28,
be_static,host8,0,0,0,1,,28,12934,1034779,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,6,,28,,2,0,,1,L4OK,,0,0,17,9,2,0,0,0,,,,0,0,,,,,582,,,0,1,1,66,
be_static,host9,0,0,0,1,,28,13434,134063,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,7,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,539,,,0,0,1,80,
be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,8,,28,,2,0,,1,L4OK,,0,0,22,6,0,0,0,0,,,,0,0,,,,,487,,,0,0,1,36,
be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38,
be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17,
be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381,
be_app,host0,0,0,1,32,,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
be_app,host4,0,0,2,29,,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
`

View File

@@ -0,0 +1,158 @@
# HTTP JSON Plugin
The httpjson plugin can collect data from remote URLs which respond with JSON. It then flattens the JSON and finds all numeric values, treating them as floats.
For example, if you have a service called _mycollector_, which has an HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON
plugin like this:
```
[[httpjson.services]]
name = "mycollector"
servers = [
"http://my.service.com/_stats"
]
# HTTP method to use (case-sensitive)
method = "GET"
```
`name` is used as a prefix for the measurements.
`method` specifies HTTP method to use for requests.
You can also specify which keys from server response should be considered tags:
```
[[httpjson.services]]
...
tag_keys = [
"role",
"version"
]
```
You can also specify additional request parameters for the service:
```
[[httpjson.services]]
...
[httpjson.services.parameters]
event_type = "cpu_spike"
threshold = "0.75"
```
You can also specify additional request header parameters for the service:
```
[[httpjson.services]]
...
[httpjson.services.headers]
X-Auth-Token = "my-xauth-token"
apiVersion = "v1"
```
# Example:
Let's say that we have a service named "mycollector" configured like this:
```
[httpjson]
[[httpjson.services]]
name = "mycollector"
servers = [
"http://my.service.com/_stats"
]
# HTTP method to use (case-sensitive)
method = "GET"
tag_keys = ["service"]
```
which responds with the following JSON:
```json
{
"service": "service01",
"a": 0.5,
"b": {
"c": "some text",
"d": 0.1,
"e": 5
}
}
```
The collected metrics will be:
```
httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
```
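The flattening itself is a small recursive walk. The plugin's real flattener lives in telegraf's `internal` package; the following is only a minimal, self-contained sketch of the idea, applied to the example document above:

```go
package main

import "fmt"

// flatten walks a decoded JSON value, joining nested keys with "_" and
// keeping only numeric leaves (encoding/json decodes every JSON number
// into float64). Strings, booleans and nulls are dropped along the way.
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	case []interface{}:
		for i, child := range t {
			flatten(fmt.Sprintf("%s_%d", prefix, i), child, out)
		}
	case float64:
		out[prefix] = t
	}
}

func main() {
	doc := map[string]interface{}{
		"a": 0.5,
		"b": map[string]interface{}{"c": "some text", "d": 0.1, "e": 5.0},
	}
	out := map[string]float64{}
	flatten("", doc, out)
	fmt.Println(out) // map[a:0.5 b_d:0.1 b_e:5]
}
```

The flattened keys (`a`, `b_d`, `b_e`) are what get prefixed with `httpjson_<name>` in the metrics shown above.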
# Example 2, Multiple Services:
There is also the option to collect JSON from multiple services, here is an
example doing that.
```
[httpjson]
[[httpjson.services]]
name = "mycollector1"
servers = [
"http://my.service1.com/_stats"
]
# HTTP method to use (case-sensitive)
method = "GET"
[[httpjson.services]]
name = "mycollector2"
servers = [
"http://service.net/json/stats"
]
# HTTP method to use (case-sensitive)
method = "POST"
```
The services respond with the following JSON:
mycollector1:
```json
{
"a": 0.5,
"b": {
"c": "some text",
"d": 0.1,
"e": 5
}
}
```
mycollector2:
```json
{
"load": 100,
"users": 1335
}
```
The collected metrics will be:
```
httpjson_mycollector1_a,server='http://my.service1.com/_stats' value=0.5
httpjson_mycollector1_b_d,server='http://my.service1.com/_stats' value=0.1
httpjson_mycollector1_b_e,server='http://my.service1.com/_stats' value=5
httpjson_mycollector2_load,server='http://service.net/json/stats' value=100
httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335
```
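Note also that the plugin measures request latency and injects it as a `response_time` field (in seconds) before flattening, so each service additionally reports something like the following (the value here is illustrative):
```
httpjson_mycollector2_response_time,server='http://service.net/json/stats' value=0.123
```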

View File

@@ -0,0 +1,243 @@
package httpjson
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type HttpJson struct {
Name string
Servers []string
Method string
TagKeys []string
Parameters map[string]string
Headers map[string]string
client HTTPClient
}
type HTTPClient interface {
// Returns the result of an http request
//
// Parameters:
// req: HTTP request object
//
// Returns:
// http.Response: HTTP response object
// error : Any error that may have occurred
MakeRequest(req *http.Request) (*http.Response, error)
}
type RealHTTPClient struct {
client *http.Client
}
func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
return c.client.Do(req)
}
var sampleConfig = `
# NOTE: This plugin only reads numerical measurements; strings and booleans
# will be ignored.
# a name for the service being polled
name = "webserver_stats"
# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
method = "GET"
# List of tag names to extract from top-level of JSON server response
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]
# HTTP parameters (all values must be strings)
[inputs.httpjson.parameters]
event_type = "cpu_spike"
threshold = "0.75"
# HTTP Header parameters (all values must be strings)
# [inputs.httpjson.headers]
# X-Auth-Token = "my-xauth-token"
# apiVersion = "v1"
`
func (h *HttpJson) SampleConfig() string {
return sampleConfig
}
func (h *HttpJson) Description() string {
return "Read flattened metrics from one or more JSON HTTP endpoints"
}
// Gathers data for all servers.
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
errorChannel := make(chan error, len(h.Servers))
for _, server := range h.Servers {
wg.Add(1)
go func(server string) {
defer wg.Done()
if err := h.gatherServer(acc, server); err != nil {
errorChannel <- err
}
}(server)
}
wg.Wait()
close(errorChannel)
// Get all errors and return them as one giant error
errorStrings := []string{}
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
if len(errorStrings) == 0 {
return nil
}
return errors.New(strings.Join(errorStrings, "\n"))
}
// Gathers data from a particular server
// Parameters:
// acc : The telegraf Accumulator to use
// serverURL: endpoint to send request to
// service : the service being queried
//
// Returns:
// error: Any error that may have occurred
func (h *HttpJson) gatherServer(
acc telegraf.Accumulator,
serverURL string,
) error {
resp, responseTime, err := h.sendRequest(serverURL)
if err != nil {
return err
}
var jsonOut map[string]interface{}
if err = json.Unmarshal([]byte(resp), &jsonOut); err != nil {
return errors.New("Error decoding JSON response")
}
tags := map[string]string{
"server": serverURL,
}
for _, tag := range h.TagKeys {
switch v := jsonOut[tag].(type) {
case string:
tags[tag] = v
}
delete(jsonOut, tag)
}
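// Request latency (in seconds) is injected as an ordinary field so it
// gets flattened and reported alongside the server's own metrics.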
if responseTime >= 0 {
jsonOut["response_time"] = responseTime
}
f := internal.JSONFlattener{}
err = f.FlattenJSON("", jsonOut)
if err != nil {
return err
}
var msrmnt_name string
if h.Name == "" {
msrmnt_name = "httpjson"
} else {
msrmnt_name = "httpjson_" + h.Name
}
acc.AddFields(msrmnt_name, f.Fields, tags)
return nil
}
// Sends an HTTP request to the server using the HttpJson object's HTTPClient.
// Parameters:
// serverURL: endpoint to send request to
//
// Returns:
// string : body of the response
// float64: response time in seconds (-1 if the request never completed)
// error : Any error that may have occurred
func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
// Prepare URL
requestURL, err := url.Parse(serverURL)
if err != nil {
return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
}
params := url.Values{}
for k, v := range h.Parameters {
params.Add(k, v)
}
requestURL.RawQuery = params.Encode()
// Create + send request
req, err := http.NewRequest(h.Method, requestURL.String(), nil)
if err != nil {
return "", -1, err
}
// Add header parameters
for k, v := range h.Headers {
if strings.ToLower(k) == "host" {
req.Host = v
} else {
req.Header.Add(k, v)
}
}
start := time.Now()
resp, err := h.client.MakeRequest(req)
if err != nil {
return "", -1, err
}
defer resp.Body.Close()
responseTime := time.Since(start).Seconds()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return string(body), responseTime, err
}
// Process response
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
requestURL.String(),
resp.StatusCode,
http.StatusText(resp.StatusCode),
http.StatusOK,
http.StatusText(http.StatusOK))
return string(body), responseTime, err
}
return string(body), responseTime, err
}
func init() {
inputs.Add("httpjson", func() telegraf.Input {
return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
})
}

View File

@@ -0,0 +1,219 @@
package httpjson
import (
"io/ioutil"
"net/http"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const validJSON = `
{
"parent": {
"child": 3.0,
"ignored_child": "hi"
},
"ignored_null": null,
"integer": 4,
"list": [3, 4],
"ignored_parent": {
"another_ignored_null": null,
"ignored_string": "hello, world!"
},
"another_list": [4]
}`
const validJSONTags = `
{
"value": 15,
"role": "master",
"build": "123"
}`
var expectedFields = map[string]interface{}{
"parent_child": float64(3),
"list_0": float64(3),
"list_1": float64(4),
"another_list_0": float64(4),
"integer": float64(4),
}
const invalidJSON = "I don't think this is JSON"
const empty = ""
type mockHTTPClient struct {
responseBody string
statusCode int
}
// Mock implementation of MakeRequest. Usually returns an http.Response with
// hard-coded responseBody and statusCode. However, if the request uses a
// nonstandard method, it uses status code 405 (method not allowed)
func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
resp := http.Response{}
resp.StatusCode = c.statusCode
// basic error checking on request method
allowedMethods := []string{"GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"}
methodValid := false
for _, method := range allowedMethods {
if req.Method == method {
methodValid = true
break
}
}
if !methodValid {
resp.StatusCode = 405 // Method not allowed
}
resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
return &resp, nil
}
// Generates a pointer to an HttpJson object that uses a mock HTTP client.
// Parameters:
// response : Body of the response that the mock HTTP client should return
// statusCode: HTTP status code the mock HTTP client should return
//
// Returns:
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
func genMockHttpJson(response string, statusCode int) []*HttpJson {
return []*HttpJson{
&HttpJson{
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
Servers: []string{
"http://server1.example.com/metrics/",
"http://server2.example.com/metrics/",
},
Name: "my_webapp",
Method: "GET",
Parameters: map[string]string{
"httpParam1": "12",
"httpParam2": "the second parameter",
},
Headers: map[string]string{
"X-Auth-Token": "the-first-parameter",
"apiVersion": "v1",
},
},
&HttpJson{
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
Servers: []string{
"http://server3.example.com/metrics/",
"http://server4.example.com/metrics/",
},
Name: "other_webapp",
Method: "POST",
Parameters: map[string]string{
"httpParam1": "12",
"httpParam2": "the second parameter",
},
Headers: map[string]string{
"X-Auth-Token": "the-first-parameter",
"apiVersion": "v1",
},
TagKeys: []string{
"role",
"build",
},
},
}
}
// Test that the proper values are ignored or collected
func TestHttpJson200(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 200)
for _, service := range httpjson {
var acc testutil.Accumulator
err := service.Gather(&acc)
require.NoError(t, err)
assert.Equal(t, 12, acc.NFields())
// Set response_time to a fixed value
for _, p := range acc.Metrics {
p.Fields["response_time"] = 1.0
}
for _, srv := range service.Servers {
tags := map[string]string{"server": srv}
mname := "httpjson_" + service.Name
expectedFields["response_time"] = 1.0
acc.AssertContainsTaggedFields(t, mname, expectedFields, tags)
}
}
}
// Test response to HTTP 500
func TestHttpJson500(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 500)
var acc testutil.Accumulator
err := httpjson[0].Gather(&acc)
assert.NotNil(t, err)
assert.Equal(t, 0, acc.NFields())
}
// Test response to HTTP 405
func TestHttpJsonBadMethod(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 200)
httpjson[0].Method = "NOT_A_REAL_METHOD"
var acc testutil.Accumulator
err := httpjson[0].Gather(&acc)
assert.NotNil(t, err)
assert.Equal(t, 0, acc.NFields())
}
// Test response to malformed JSON
func TestHttpJsonBadJson(t *testing.T) {
httpjson := genMockHttpJson(invalidJSON, 200)
var acc testutil.Accumulator
err := httpjson[0].Gather(&acc)
assert.NotNil(t, err)
assert.Equal(t, 0, acc.NFields())
}
// Test response to an empty string as the response body
func TestHttpJsonEmptyResponse(t *testing.T) {
httpjson := genMockHttpJson(empty, 200)
var acc testutil.Accumulator
err := httpjson[0].Gather(&acc)
assert.NotNil(t, err)
assert.Equal(t, 0, acc.NFields())
}
// Test that the proper values are ignored or collected
func TestHttpJson200Tags(t *testing.T) {
httpjson := genMockHttpJson(validJSONTags, 200)
for _, service := range httpjson {
if service.Name == "other_webapp" {
var acc testutil.Accumulator
err := service.Gather(&acc)
// Set response_time to a fixed value
for _, p := range acc.Metrics {
p.Fields["response_time"] = 1.0
}
require.NoError(t, err)
assert.Equal(t, 4, acc.NFields())
for _, srv := range service.Servers {
tags := map[string]string{"server": srv, "role": "master", "build": "123"}
fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)}
mname := "httpjson_" + service.Name
acc.AssertContainsTaggedFields(t, mname, fields, tags)
}
}
}
}

View File

@@ -0,0 +1,72 @@
# influxdb plugin
The influxdb plugin collects InfluxDB-formatted data from JSON endpoints.
With a configuration of:
```toml
[[inputs.influxdb]]
urls = [
"http://127.0.0.1:8086/debug/vars",
"http://192.168.2.1:8086/debug/vars"
]
```
And if 127.0.0.1 responds with this JSON:
```json
{
"k1": {
"name": "fruit",
"tags": {
"kind": "apple"
},
"values": {
"inventory": 371,
"sold": 112
}
},
"k2": {
"name": "fruit",
"tags": {
"kind": "banana"
},
"values": {
"inventory": 1000,
"sold": 403
}
}
}
```
And if 192.168.2.1 responds like so:
```json
{
"k3": {
"name": "transactions",
"tags": {},
"values": {
"total": 100,
"balance": 184.75
}
}
}
```
Then the collected metrics will be:
```
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='apple' inventory=371.0,sold=112.0
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='banana' inventory=1000.0,sold=403.0
influxdb_transactions,url='http://192.168.2.1:8086/debug/vars' total=100.0,balance=184.75
```
There are two important details to note about the collected metrics:
1. Even though the values in JSON are being displayed as integers, the metrics are reported as floats.
JSON encoders usually don't print the fractional part for round floats.
Because you cannot change the type of an existing field in InfluxDB, we assume all numbers are floats, as the sketch after this list demonstrates.
2. The top-level keys' names (in the example above, `"k1"`, `"k2"`, and `"k3"`) are not considered when recording the metrics.
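A quick standard-library experiment (illustrative only, not part of the plugin) shows the decoding behavior behind point 1: when the target type is `interface{}`, `encoding/json` represents every JSON number as `float64`.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var values map[string]interface{}
	// 371 and 112 look like integers in the JSON text...
	_ = json.Unmarshal([]byte(`{"inventory": 371, "sold": 112}`), &values)
	for k, v := range values {
		// ...but both arrive as float64 after decoding.
		fmt.Printf("%s = %v (%T)\n", k, v, v)
	}
}
```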

View File

@@ -0,0 +1,147 @@
package influxdb
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type InfluxDB struct {
URLs []string `toml:"urls"`
}
func (*InfluxDB) Description() string {
return "Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints"
}
func (*InfluxDB) SampleConfig() string {
return `
# Works with InfluxDB debug endpoints out of the box,
# but other services can use this format too.
# See the influxdb plugin's README for more details.
# Multiple URLs from which to read InfluxDB-formatted JSON
urls = [
"http://localhost:8086/debug/vars"
]
`
}
func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
errorChannel := make(chan error, len(i.URLs))
var wg sync.WaitGroup
for _, u := range i.URLs {
wg.Add(1)
go func(url string) {
defer wg.Done()
if err := i.gatherURL(acc, url); err != nil {
errorChannel <- fmt.Errorf("[url=%s]: %s", url, err)
}
}(u)
}
wg.Wait()
close(errorChannel)
// If there weren't any errors, we can return nil now.
if len(errorChannel) == 0 {
return nil
}
// There were errors, so join them all together as one big error.
errorStrings := make([]string, 0, len(errorChannel))
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
return errors.New(strings.Join(errorStrings, "\n"))
}
type point struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Values map[string]interface{} `json:"values"`
}
// Gathers data from a particular URL
// Parameters:
// acc : The telegraf Accumulator to use
// url : endpoint to send request to
//
// Returns:
// error: Any error that may have occurred
func (i *InfluxDB) gatherURL(
acc telegraf.Accumulator,
url string,
) error {
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
// It would be nice to be able to decode into a map[string]point, but
// we'll get a decoder error like:
// `json: cannot unmarshal array into Go value of type influxdb.point`
// if any of the values aren't objects.
// To avoid that error, we decode by hand.
dec := json.NewDecoder(resp.Body)
// Parse beginning of object
if t, err := dec.Token(); err != nil {
return err
} else if t != json.Delim('{') {
return errors.New("document root must be a JSON object")
}
// Loop through rest of object
for {
// Nothing left in this object, we're done
if !dec.More() {
break
}
// Read in a string key. We don't do anything with the top-level keys, so it's discarded.
_, err := dec.Token()
if err != nil {
return err
}
// Attempt to parse a whole object into a point.
// It might be a non-object, like a string or array.
// If we fail to decode it into a point, ignore it and move on.
var p point
if err := dec.Decode(&p); err != nil {
continue
}
// If the object was a point, but was not fully initialized, ignore it and move on.
if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 {
continue
}
// Add a tag to indicate the source of the data.
p.Tags["url"] = url
acc.AddFields(
"influxdb_"+p.Name,
p.Values,
p.Tags,
)
}
return nil
}
func init() {
inputs.Add("influxdb", func() telegraf.Input {
return &InfluxDB{}
})
}

View File

@@ -0,0 +1,97 @@
package influxdb_test
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf/plugins/inputs/influxdb"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestBasic(t *testing.T) {
js := `
{
"_1": {
"name": "foo",
"tags": {
"id": "ex1"
},
"values": {
"i": -1,
"f": 0.5,
"b": true,
"s": "string"
}
},
"ignored": {
"willBeRecorded": false
},
"ignoredAndNested": {
"hash": {
"is": "nested"
}
},
"array": [
"makes parsing more difficult than necessary"
],
"string": "makes parsing more difficult than necessary",
"_2": {
"name": "bar",
"tags": {
"id": "ex2"
},
"values": {
"x": "x"
}
},
"pointWithoutFields_willNotBeIncluded": {
"name": "asdf",
"tags": {
"id": "ex3"
},
"values": {}
}
}
`
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/endpoint" {
_, _ = w.Write([]byte(js))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
defer fakeServer.Close()
plugin := &influxdb.InfluxDB{
URLs: []string{fakeServer.URL + "/endpoint"},
}
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.Len(t, acc.Metrics, 2)
fields := map[string]interface{}{
// JSON will truncate floats to integer representations.
// Since there's no distinction in JSON, we can't assume it's an int.
"i": -1.0,
"f": 0.5,
"b": true,
"s": "string",
}
tags := map[string]string{
"id": "ex1",
"url": fakeServer.URL + "/endpoint",
}
acc.AssertContainsTaggedFields(t, "influxdb_foo", fields, tags)
fields = map[string]interface{}{
"x": "x",
}
tags = map[string]string{
"id": "ex2",
"url": fakeServer.URL + "/endpoint",
}
acc.AssertContainsTaggedFields(t, "influxdb_bar", fields, tags)
}

View File

@@ -0,0 +1,51 @@
# Telegraf plugin: Jolokia
#### Plugin arguments:
- **context** string: Context root used to compose the Jolokia URL
- **servers** []Server: List of servers
+ **name** string: Server's logical name
+ **host** string: Server's ip address or hostname
+ **port** string: Server's listening port
- **metrics** []Metric
+ **name** string: Name of the measure
+ **jmx** string: Jmx path that identifies mbeans attributes
+ **pass** []string: Attributes to retain when collecting values
+ **drop** []string: Attributes to drop when collecting values
#### Description
The Jolokia plugin collects JVM metrics exposed as MBean attributes through the Jolokia REST endpoint. All metrics
are collected for each configured server.
See: https://jolokia.org/
# Measurements:
The Jolokia plugin produces one measurement for each configured metric, adding the server's `name`, `host` and `port` as tags.
Given a configuration like:
```toml
[jolokia]
[[jolokia.servers]]
name = "as-service-1"
host = "127.0.0.1"
port = "8080"
[[jolokia.servers]]
name = "as-service-2"
host = "127.0.0.1"
port = "8180"
[[jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
pass = ["used", "max"]
```
The collected metrics will be:
```
jolokia_heap_memory_usage name=as-service-1,host=127.0.0.1,port=8080 used=xxx,max=yyy
jolokia_heap_memory_usage name=as-service-2,host=127.0.0.1,port=8180 used=vvv,max=zzz
```
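Under the hood the plugin issues a plain HTTP GET per metric against Jolokia's read endpoint. With `context = "/jolokia/read"`, the first server above would be queried at a URL like the one below, and a typical Jolokia response looks like this (values illustrative):
```
GET http://127.0.0.1:8080/jolokia/read/java.lang:type=Memory/HeapMemoryUsage

{
  "request": {"mbean": "java.lang:type=Memory", "attribute": "HeapMemoryUsage", "type": "read"},
  "value": {"init": 67108864, "committed": 456130560, "max": 477626368, "used": 203288528},
  "timestamp": 1446129191,
  "status": 200
}
```
Each key under `"value"` is collected as a separate field (e.g. `used` and `max` above).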

View File

@@ -0,0 +1,164 @@
package jolokia
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Server struct {
Name string
Host string
Username string
Password string
Port string
}
type Metric struct {
Name string
Jmx string
}
type JolokiaClient interface {
MakeRequest(req *http.Request) (*http.Response, error)
}
type JolokiaClientImpl struct {
client *http.Client
}
func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error) {
return c.client.Do(req)
}
type Jolokia struct {
jClient JolokiaClient
Context string
Servers []Server
Metrics []Metric
}
func (j *Jolokia) SampleConfig() string {
return `
# This is the context root used to compose the jolokia url
context = "/jolokia/read"
# List of servers exposing jolokia read service
[[inputs.jolokia.servers]]
name = "stable"
host = "192.168.103.2"
port = "8180"
# username = "myuser"
# password = "mypassword"
# List of metrics collected on above servers
# Each metric consists of a name, a jmx path, and either a pass or drop slice attribute
# This collects all heap memory usage metrics
[[inputs.jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
`
}
func (j *Jolokia) Description() string {
return "Read JMX metrics through Jolokia"
}
func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
// Create + send request
req, err := http.NewRequest("GET", requestUrl.String(), nil)
if err != nil {
return nil, err
}
resp, err := j.jClient.MakeRequest(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// Process response
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
requestUrl,
resp.StatusCode,
http.StatusText(resp.StatusCode),
http.StatusOK,
http.StatusText(http.StatusOK))
return nil, err
}
// read body
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
// Unmarshal json
var jsonOut map[string]interface{}
if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
return nil, errors.New("Error decoding JSON response")
}
return jsonOut, nil
}
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
context := j.Context //"/jolokia/read"
servers := j.Servers
metrics := j.Metrics
for _, server := range servers {
// Build a fresh tag map per server so points don't share state.
tags := map[string]string{
"server": server.Name,
"port": server.Port,
"host": server.Host,
}
fields := make(map[string]interface{})
for _, metric := range metrics {
measurement := metric.Name
jmxPath := metric.Jmx
// Prepare URL
requestUrl, err := url.Parse("http://" + server.Host + ":" +
server.Port + context + jmxPath)
if err != nil {
return err
}
if server.Username != "" || server.Password != "" {
requestUrl.User = url.UserPassword(server.Username, server.Password)
}
out, err := j.getAttr(requestUrl)
if err != nil {
// Skip this metric but keep gathering the rest.
fmt.Printf("Error reading '%s': %s\n", requestUrl.String(), err)
continue
}
if values, ok := out["value"]; ok {
switch t := values.(type) {
case map[string]interface{}:
for k, v := range t {
fields[measurement+"_"+k] = v
}
case interface{}:
fields[measurement] = t
}
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n",
requestUrl.String())
}
}
acc.AddFields("jolokia", fields, tags)
}
return nil
}
func init() {
inputs.Add("jolokia", func() telegraf.Input {
return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}}
})
}

View File

@@ -0,0 +1,116 @@
package jolokia
import (
_ "fmt"
"io/ioutil"
"net/http"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
_ "github.com/stretchr/testify/require"
)
const validMultiValueJSON = `
{
"request":{
"mbean":"java.lang:type=Memory",
"attribute":"HeapMemoryUsage",
"type":"read"
},
"value":{
"init":67108864,
"committed":456130560,
"max":477626368,
"used":203288528
},
"timestamp":1446129191,
"status":200
}`
const validSingleValueJSON = `
{
"request":{
"path":"used",
"mbean":"java.lang:type=Memory",
"attribute":"HeapMemoryUsage",
"type":"read"
},
"value":209274376,
"timestamp":1446129256,
"status":200
}`
const invalidJSON = "I don't think this is JSON"
const empty = ""
var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
var HeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
type jolokiaClientStub struct {
responseBody string
statusCode int
}
func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
resp := http.Response{}
resp.StatusCode = c.statusCode
resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
return &resp, nil
}
// Generates a pointer to a Jolokia object that uses a stub Jolokia client.
// Parameters:
// response : Body of the response that the stub client should return
// statusCode: HTTP status code the stub client should return
//
// Returns:
// *Jolokia: Pointer to a Jolokia object that uses the generated stub client
func genJolokiaClientStub(response string, statusCode int, servers []Server, metrics []Metric) *Jolokia {
return &Jolokia{
jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
Servers: servers,
Metrics: metrics,
}
}
// Test that the proper values are ignored or collected
func TestHttpJsonMultiValue(t *testing.T) {
jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{HeapMetric})
var acc testutil.Accumulator
err := jolokia.Gather(&acc)
assert.Nil(t, err)
assert.Equal(t, 1, len(acc.Metrics))
fields := map[string]interface{}{
"heap_memory_usage_init": 67108864.0,
"heap_memory_usage_committed": 456130560.0,
"heap_memory_usage_max": 477626368.0,
"heap_memory_usage_used": 203288528.0,
}
tags := map[string]string{
"host": "127.0.0.1",
"port": "8080",
"server": "as1",
}
acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
}
// Test that the proper values are ignored or collected
func TestHttpJsonOn404(t *testing.T) {
jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers,
[]Metric{UsedHeapMetric})
var acc testutil.Accumulator
acc.SetDebug(true)
err := jolokia.Gather(&acc)
assert.Nil(t, err)
assert.Equal(t, 0, len(acc.Metrics))
}

View File

@@ -0,0 +1,24 @@
# Kafka Consumer
The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka
topic and adds messages to InfluxDB. The plugin assumes messages follow the
InfluxDB line protocol (an example message is shown below). [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup)
is used to talk to the Kafka cluster, so multiple instances of telegraf can read
from the same topic in parallel.
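For example, a message like the following (an illustrative line-protocol point) would be parsed into a `cpu` measurement with one tag, one field and an explicit timestamp:
```
cpu,host=server01 usage_idle=98.2 1422568543702900257
```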
## Testing
Running integration tests requires running Zookeeper & Kafka. The following
commands assume you're on OS X & using [boot2docker](http://boot2docker.io/) or docker-machine through [Docker Toolbox](https://www.docker.com/docker-toolbox).
To start Kafka & Zookeeper:
```
docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip || docker-machine ip <your_machine_name>` --env ADVERTISED_PORT=9092 spotify/kafka
```
To run tests:
```
go test
```

View File

@@ -0,0 +1,166 @@
package kafka_consumer
import (
"log"
"strings"
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/Shopify/sarama"
"github.com/wvanbergen/kafka/consumergroup"
)
type Kafka struct {
ConsumerGroup string
Topics []string
ZookeeperPeers []string
Consumer *consumergroup.ConsumerGroup
PointBuffer int
Offset string
sync.Mutex
// channel for all incoming kafka messages
in <-chan *sarama.ConsumerMessage
// channel for all kafka consumer errors
errs <-chan *sarama.ConsumerError
// channel for all incoming parsed kafka points
metricC chan telegraf.Metric
done chan struct{}
// doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
// this is mostly for test purposes, but there may be a use-case for it later.
doNotCommitMsgs bool
}
var sampleConfig = `
# topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
# the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
# Maximum number of points to buffer between collection intervals
point_buffer = 100000
# Offset (must be either "oldest" or "newest")
offset = "oldest"
`
func (k *Kafka) SampleConfig() string {
return sampleConfig
}
func (k *Kafka) Description() string {
return "Read line-protocol metrics from Kafka topic(s)"
}
func (k *Kafka) Start() error {
k.Lock()
defer k.Unlock()
var consumerErr error
config := consumergroup.NewConfig()
switch strings.ToLower(k.Offset) {
case "oldest", "":
config.Offsets.Initial = sarama.OffsetOldest
case "newest":
config.Offsets.Initial = sarama.OffsetNewest
default:
log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
k.Offset)
config.Offsets.Initial = sarama.OffsetOldest
}
if k.Consumer == nil || k.Consumer.Closed() {
k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
k.ConsumerGroup,
k.Topics,
k.ZookeeperPeers,
config,
)
if consumerErr != nil {
return consumerErr
}
// Setup message and error channels
k.in = k.Consumer.Messages()
k.errs = k.Consumer.Errors()
}
k.done = make(chan struct{})
if k.PointBuffer == 0 {
k.PointBuffer = 100000
}
k.metricC = make(chan telegraf.Metric, k.PointBuffer)
// Start the kafka message reader
go k.parser()
log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n",
k.ZookeeperPeers, k.Topics)
return nil
}
// parser() reads all incoming messages from the consumer, and parses them into
// influxdb metric points.
func (k *Kafka) parser() {
for {
select {
case <-k.done:
return
case err := <-k.errs:
log.Printf("Kafka Consumer Error: %s\n", err.Error())
case msg := <-k.in:
metrics, err := telegraf.ParseMetrics(msg.Value)
if err != nil {
log.Printf("Could not parse kafka message: %s, error: %s",
string(msg.Value), err.Error())
}
for _, metric := range metrics {
select {
case k.metricC <- metric:
continue
default:
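// The send above is non-blocking: when the metric buffer is full we fall
// through to this case and drop the point instead of stalling the consumer.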
log.Printf("Kafka Consumer buffer is full, dropping a metric." +
" You may want to increase the point_buffer setting")
}
}
if !k.doNotCommitMsgs {
// TODO(cam) this locking can be removed if this PR gets merged:
// https://github.com/wvanbergen/kafka/pull/84
k.Lock()
k.Consumer.CommitUpto(msg)
k.Unlock()
}
}
}
}
func (k *Kafka) Stop() {
k.Lock()
defer k.Unlock()
close(k.done)
if err := k.Consumer.Close(); err != nil {
log.Printf("Error closing kafka consumer: %s\n", err.Error())
}
}
func (k *Kafka) Gather(acc telegraf.Accumulator) error {
k.Lock()
defer k.Unlock()
npoints := len(k.metricC)
for i := 0; i < npoints; i++ {
point := <-k.metricC
acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
}
return nil
}
func init() {
inputs.Add("kafka_consumer", func() telegraf.Input {
return &Kafka{}
})
}

View File

@@ -0,0 +1,91 @@
package kafka_consumer
import (
"fmt"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReadsMetricsFromKafka(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
zkPeers := []string{testutil.GetLocalHost() + ":2181"}
testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())
// Send a Kafka message to the kafka host
msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
producer, err := sarama.NewSyncProducer(brokerPeers, nil)
require.NoError(t, err)
_, _, err = producer.SendMessage(
&sarama.ProducerMessage{
Topic: testTopic,
Value: sarama.StringEncoder(msg),
})
require.NoError(t, err)
defer producer.Close()
// Start the Kafka Consumer
k := &Kafka{
ConsumerGroup: "telegraf_test_consumers",
Topics: []string{testTopic},
ZookeeperPeers: zkPeers,
PointBuffer: 100000,
Offset: "oldest",
}
if err := k.Start(); err != nil {
t.Fatal(err.Error())
} else {
defer k.Stop()
}
waitForPoint(k, t)
// Verify that we can now gather the sent message
var acc testutil.Accumulator
// Sanity check
assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
// Gather points
err = k.Gather(&acc)
require.NoError(t, err)
if len(acc.Metrics) == 1 {
point := acc.Metrics[0]
assert.Equal(t, "cpu_load_short", point.Measurement)
assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
assert.Equal(t, map[string]string{
"host": "server01",
"direction": "in",
"region": "us-west",
}, point.Tags)
assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
} else {
t.Errorf("No points found in accumulator, expected 1")
}
}
// Waits for the metric that was sent to the kafka broker to arrive at the kafka
// consumer
func waitForPoint(k *Kafka, t *testing.T) {
// Give the kafka container up to 5 seconds (1000 ticks of 5ms) to get the
// point to the consumer
ticker := time.NewTicker(5 * time.Millisecond)
defer ticker.Stop()
counter := 0
for range ticker.C {
counter++
if counter > 1000 {
t.Fatal("Waited for 5s, point never arrived to consumer")
} else if len(k.metricC) == 1 {
return
}
}
}

View File

@@ -0,0 +1,99 @@
package kafka_consumer
import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/Shopify/sarama"
"github.com/stretchr/testify/assert"
)
const (
testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"
invalidMsg = "cpu_load_short,host=server01 1422568543702900257"
pointBuffer = 5
)
func NewTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
in := make(chan *sarama.ConsumerMessage, pointBuffer)
k := Kafka{
ConsumerGroup: "test",
Topics: []string{"telegraf"},
ZookeeperPeers: []string{"localhost:2181"},
PointBuffer: pointBuffer,
Offset: "oldest",
in: in,
doNotCommitMsgs: true,
errs: make(chan *sarama.ConsumerError, pointBuffer),
done: make(chan struct{}),
metricC: make(chan telegraf.Metric, pointBuffer),
}
return &k, in
}
// Test that the parser parses kafka messages into points
func TestRunParser(t *testing.T) {
k, in := NewTestKafka()
defer close(k.done)
go k.parser()
in <- saramaMsg(testMsg)
time.Sleep(time.Millisecond)
assert.Equal(t, len(k.metricC), 1)
}
// Test that the parser ignores invalid messages
func TestRunParserInvalidMsg(t *testing.T) {
k, in := NewTestKafka()
defer close(k.done)
go k.parser()
in <- saramaMsg(invalidMsg)
time.Sleep(time.Millisecond)
assert.Equal(t, len(k.metricC), 0)
}
// Test that points are dropped when we hit the buffer limit
func TestRunParserRespectsBuffer(t *testing.T) {
k, in := NewTestKafka()
defer close(k.done)
go k.parser()
for i := 0; i < pointBuffer+1; i++ {
in <- saramaMsg(testMsg)
}
time.Sleep(time.Millisecond)
assert.Equal(t, len(k.metricC), 5)
}
// Test that the parser parses kafka messages into points
func TestRunParserAndGather(t *testing.T) {
k, in := NewTestKafka()
defer close(k.done)
go k.parser()
in <- saramaMsg(testMsg)
time.Sleep(time.Millisecond)
acc := testutil.Accumulator{}
k.Gather(&acc)
assert.Equal(t, len(acc.Metrics), 1)
acc.AssertContainsFields(t, "cpu_load_short",
map[string]interface{}{"value": float64(23422)})
}
func saramaMsg(val string) *sarama.ConsumerMessage {
return &sarama.ConsumerMessage{
Key: nil,
Value: []byte(val),
Offset: 0,
Partition: 0,
}
}

View File

@@ -0,0 +1,232 @@
package leofs
import (
"bufio"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"net/url"
"os/exec"
"strconv"
"strings"
"sync"
)
const oid = ".1.3.6.1.4.1.35450"
// For Manager Master
const defaultEndpoint = "127.0.0.1:4020"
type ServerType int
const (
ServerTypeManagerMaster ServerType = iota
ServerTypeManagerSlave
ServerTypeStorage
ServerTypeGateway
)
type LeoFS struct {
Servers []string
}
var KeyMapping = map[ServerType][]string{
ServerTypeManagerMaster: {
"num_of_processes",
"total_memory_usage",
"system_memory_usage",
"processes_memory_usage",
"ets_memory_usage",
"num_of_processes_5min",
"total_memory_usage_5min",
"system_memory_usage_5min",
"processes_memory_usage_5min",
"ets_memory_usage_5min",
"used_allocated_memory",
"allocated_memory",
"used_allocated_memory_5min",
"allocated_memory_5min",
},
ServerTypeManagerSlave: {
"num_of_processes",
"total_memory_usage",
"system_memory_usage",
"processes_memory_usage",
"ets_memory_usage",
"num_of_processes_5min",
"total_memory_usage_5min",
"system_memory_usage_5min",
"processes_memory_usage_5min",
"ets_memory_usage_5min",
"used_allocated_memory",
"allocated_memory",
"used_allocated_memory_5min",
"allocated_memory_5min",
},
ServerTypeStorage: {
"num_of_processes",
"total_memory_usage",
"system_memory_usage",
"processes_memory_usage",
"ets_memory_usage",
"num_of_processes_5min",
"total_memory_usage_5min",
"system_memory_usage_5min",
"processes_memory_usage_5min",
"ets_memory_usage_5min",
"num_of_writes",
"num_of_reads",
"num_of_deletes",
"num_of_writes_5min",
"num_of_reads_5min",
"num_of_deletes_5min",
"num_of_active_objects",
"total_objects",
"total_size_of_active_objects",
"total_size",
"num_of_replication_messages",
"num_of_sync-vnode_messages",
"num_of_rebalance_messages",
"used_allocated_memory",
"allocated_memory",
"used_allocated_memory_5min",
"allocated_memory_5min",
},
ServerTypeGateway: {
"num_of_processes",
"total_memory_usage",
"system_memory_usage",
"processes_memory_usage",
"ets_memory_usage",
"num_of_processes_5min",
"total_memory_usage_5min",
"system_memory_usage_5min",
"processes_memory_usage_5min",
"ets_memory_usage_5min",
"num_of_writes",
"num_of_reads",
"num_of_deletes",
"num_of_writes_5min",
"num_of_reads_5min",
"num_of_deletes_5min",
"count_of_cache-hit",
"count_of_cache-miss",
"total_of_files",
"total_cached_size",
"used_allocated_memory",
"allocated_memory",
"used_allocated_memory_5min",
"allocated_memory_5min",
},
}
var serverTypeMapping = map[string]ServerType{
"4020": ServerTypeManagerMaster,
"4021": ServerTypeManagerSlave,
"4010": ServerTypeStorage,
"4011": ServerTypeStorage,
"4012": ServerTypeStorage,
"4013": ServerTypeStorage,
"4000": ServerTypeGateway,
"4001": ServerTypeGateway,
}
var sampleConfig = `
# An array of URIs to gather stats about LeoFS.
# Specify an ip or hostname with port. ie 127.0.0.1:4020
#
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
servers = ["127.0.0.1:4021"]
`
func (l *LeoFS) SampleConfig() string {
return sampleConfig
}
func (l *LeoFS) Description() string {
return "Read metrics from a LeoFS Server via SNMP"
}
func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
if len(l.Servers) == 0 {
return l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc)
}
var wg sync.WaitGroup
var mu sync.Mutex
var outerr error
for _, endpoint := range l.Servers {
_, err := url.Parse(endpoint)
if err != nil {
return fmt.Errorf("Unable to parse the address: %s, err: %s", endpoint, err)
}
port, err := retrieveTokenAfterColon(endpoint)
if err != nil {
return err
}
st, ok := serverTypeMapping[port]
if !ok {
st = ServerTypeStorage
}
wg.Add(1)
go func(endpoint string, st ServerType) {
defer wg.Done()
// gatherServer runs concurrently for each endpoint, so writes to
// outerr are guarded by a mutex to avoid a data race.
if err := l.gatherServer(endpoint, st, acc); err != nil {
mu.Lock()
outerr = err
mu.Unlock()
}
}(endpoint, st)
}
wg.Wait()
return outerr
}
func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc telegraf.Accumulator) error {
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
if err := cmd.Start(); err != nil {
return err
}
defer cmd.Wait()
scanner := bufio.NewScanner(stdout)
if !scanner.Scan() {
return fmt.Errorf("Unable to retrieve the node name")
}
nodeName, err := retrieveTokenAfterColon(scanner.Text())
if err != nil {
return err
}
nodeNameTrimmed := strings.Trim(nodeName, "\"")
tags := map[string]string{
"node": nodeNameTrimmed,
}
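// Each subsequent line of snmpwalk output maps positionally onto the key
// list for this server type, e.g. "iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186"
// becomes num_of_processes for a manager node.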
i := 0
fields := make(map[string]interface{})
keys := KeyMapping[serverType]
for scanner.Scan() {
// Stop if snmpwalk returns more lines than we have keys for, to avoid
// indexing past the end of the key table.
if i >= len(keys) {
break
}
key := keys[i]
val, err := retrieveTokenAfterColon(scanner.Text())
if err != nil {
return err
}
fVal, err := strconv.ParseFloat(val, 64)
if err != nil {
return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err)
}
fields[key] = fVal
i++
}
acc.AddFields("leofs", fields, tags)
return nil
}
func retrieveTokenAfterColon(line string) (string, error) {
tokens := strings.Split(line, ":")
if len(tokens) != 2 {
return "", fmt.Errorf("':' not found in the line:%s", line)
}
return strings.TrimSpace(tokens[1]), nil
}
func init() {
inputs.Add("leofs", func() telegraf.Input {
return &LeoFS{}
})
}

View File

@@ -0,0 +1,173 @@
package leofs
import (
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io/ioutil"
"log"
"os"
"os/exec"
"testing"
)
var fakeSNMP4Manager = `
package main
import "fmt"
const output = ` + "`" + `iso.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"
iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186
iso.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519
iso.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525
iso.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068
iso.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968
iso.3.6.1.4.1.35450.15.7.0 = Gauge32: 186
iso.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006
iso.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867
iso.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995
iso.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634
iso.3.6.1.4.1.35450.15.12.0 = Gauge32: 60
iso.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904
iso.3.6.1.4.1.35450.15.14.0 = Gauge32: 60
iso.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" +
`
func main() {
fmt.Println(output)
}
`
var fakeSNMP4Storage = `
package main
import "fmt"
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
iso.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
`
func main() {
fmt.Println(output)
}
`
var fakeSNMP4Gateway = `
package main
import "fmt"
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1"
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 465
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 465
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 827
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 34
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 1
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 53
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 53
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" +
`
func main() {
fmt.Println(output)
}
`
func makeFakeSNMPSrc(code string) string {
path := os.TempDir() + "/test.go"
err := ioutil.WriteFile(path, []byte(code), 0600)
if err != nil {
log.Fatalln(err)
}
return path
}
func buildFakeSNMPCmd(src string) {
err := exec.Command("go", "build", "-o", "snmpwalk", src).Run()
if err != nil {
log.Fatalln(err)
}
}
func testMain(t *testing.T, code string, endpoint string, serverType ServerType) {
// Build the fake snmpwalk for test
src := makeFakeSNMPSrc(code)
defer os.Remove(src)
buildFakeSNMPCmd(src)
defer os.Remove("./snmpwalk")
envPathOrigin := os.Getenv("PATH")
// Refer to the fake snmpwalk
os.Setenv("PATH", ".")
defer os.Setenv("PATH", envPathOrigin)
l := &LeoFS{
Servers: []string{endpoint},
}
var acc testutil.Accumulator
acc.SetDebug(true)
err := l.Gather(&acc)
require.NoError(t, err)
floatMetrics := KeyMapping[serverType]
for _, metric := range floatMetrics {
assert.True(t, acc.HasFloatField("leofs", metric), metric)
}
}
func TestLeoFSManagerMasterMetrics(t *testing.T) {
testMain(t, fakeSNMP4Manager, "localhost:4020", ServerTypeManagerMaster)
}
func TestLeoFSManagerSlaveMetrics(t *testing.T) {
testMain(t, fakeSNMP4Manager, "localhost:4021", ServerTypeManagerSlave)
}
func TestLeoFSStorageMetrics(t *testing.T) {
testMain(t, fakeSNMP4Storage, "localhost:4010", ServerTypeStorage)
}
func TestLeoFSGatewayMetrics(t *testing.T) {
testMain(t, fakeSNMP4Gateway, "localhost:4000", ServerTypeGateway)
}


@@ -0,0 +1,251 @@
/*
Lustre 2.x telegraf plugin
Lustre (http://lustre.org/) is an open-source, parallel file system
for HPC environments. It stores statistics about its activity in
/proc
*/
package lustre2
import (
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Lustre proc files can change between versions, so we want to future-proof
// by letting people choose what to look at.
type Lustre2 struct {
Ost_procfiles []string
Mds_procfiles []string
// allFields maps an OST name to the metric fields associated with that OST
allFields map[string]map[string]interface{}
}
var sampleConfig = `
# An array of /proc globs to search for Lustre stats
# If not specified, the default will work on Lustre 2.5.x
#
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
`
/* The wanted fields would be a []string if not for the
lines that start with read_bytes/write_bytes and contain
both the byte count and the function call count
*/
type mapping struct {
inProc string // What to look for at the start of a line in /proc/fs/lustre/*
field uint32 // Which field to extract from that line
reportAs string // What field name to use when reporting
tag string // Additional tag to add for this metric
}
var wanted_ost_fields = []*mapping{
{
inProc: "write_bytes",
field: 6,
reportAs: "write_bytes",
},
{ // line starts with 'write_bytes', but the write_calls value is in the second column
inProc: "write_bytes",
field: 1,
reportAs: "write_calls",
},
{
inProc: "read_bytes",
field: 6,
reportAs: "read_bytes",
},
{ // line starts with 'read_bytes', but the read_calls value is in the second column
inProc: "read_bytes",
field: 1,
reportAs: "read_calls",
},
{
inProc: "cache_hit",
},
{
inProc: "cache_miss",
},
{
inProc: "cache_access",
},
}
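// Worked example (illustrative): given the obdfilter stats line
//   write_bytes 71893382 samples [bytes] 1 1048576 15201500833981
// strings.Fields yields seven columns (0-indexed), so the mapping with
// field 6 reports the byte total 15201500833981 as "write_bytes", while the
// mapping with field 1 reports the sample count 71893382 as "write_calls".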
var wanted_mds_fields = []*mapping{
{
inProc: "open",
},
{
inProc: "close",
},
{
inProc: "mknod",
},
{
inProc: "link",
},
{
inProc: "unlink",
},
{
inProc: "mkdir",
},
{
inProc: "rmdir",
},
{
inProc: "rename",
},
{
inProc: "getattr",
},
{
inProc: "setattr",
},
{
inProc: "getxattr",
},
{
inProc: "setxattr",
},
{
inProc: "statfs",
},
{
inProc: "sync",
},
{
inProc: "samedir_rename",
},
{
inProc: "crossdir_rename",
},
}
func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc telegraf.Accumulator) error {
files, err := filepath.Glob(fileglob)
if err != nil {
return err
}
for _, file := range files {
/* Turn /proc/fs/lustre/obdfilter/<ost_name>/stats and similar
* into just the object store target name
* Assumption: the target name is always second to last,
* which is true in Lustre 2.1->2.5
*/
path := strings.Split(file, "/")
name := path[len(path)-2]
var fields map[string]interface{}
fields, ok := l.allFields[name]
if !ok {
fields = make(map[string]interface{})
l.allFields[name] = fields
}
lines, err := internal.ReadLines(file)
if err != nil {
return err
}
for _, line := range lines {
parts := strings.Fields(line)
for _, wanted := range wanted_fields {
var data uint64
if parts[0] == wanted.inProc {
wanted_field := wanted.field
// if not set, assume field[1]. Shouldn't be field[0], as
// that's a string
if wanted_field == 0 {
wanted_field = 1
}
// guard against short lines that lack the wanted column
if int(wanted_field) >= len(parts) {
continue
}
data, err = strconv.ParseUint(parts[wanted_field], 10, 64)
if err != nil {
return err
}
report_name := wanted.inProc
if wanted.reportAs != "" {
report_name = wanted.reportAs
}
fields[report_name] = data
}
}
}
}
return nil
}
// SampleConfig returns sample configuration message
func (l *Lustre2) SampleConfig() string {
return sampleConfig
}
// Description returns description of Lustre2 plugin
func (l *Lustre2) Description() string {
return "Read metrics from local Lustre service on OST, MDS"
}
// Gather reads stats from all lustre targets
func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
l.allFields = make(map[string]map[string]interface{})
if len(l.Ost_procfiles) == 0 {
// read/write bytes are in obdfilter/<ost_name>/stats
err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats",
wanted_ost_fields, acc)
if err != nil {
return err
}
// cache counters are in osd-ldiskfs/<ost_name>/stats
err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats",
wanted_ost_fields, acc)
if err != nil {
return err
}
}
if len(l.Mds_procfiles) == 0 {
// Metadata server stats
err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats",
wanted_mds_fields, acc)
if err != nil {
return err
}
}
for _, procfile := range l.Ost_procfiles {
err := l.GetLustreProcStats(procfile, wanted_ost_fields, acc)
if err != nil {
return err
}
}
for _, procfile := range l.Mds_procfiles {
err := l.GetLustreProcStats(procfile, wanted_mds_fields, acc)
if err != nil {
return err
}
}
for name, fields := range l.allFields {
tags := map[string]string{
"name": name,
}
acc.AddFields("lustre2", fields, tags)
}
return nil
}
func init() {
inputs.Add("lustre2", func() telegraf.Input {
return &Lustre2{}
})
}
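A standalone sketch (not part of the diff) of the two parsing steps GetLustreProcStats performs per file: deriving the target name from the /proc path, then extracting the wanted column from a stats line. The sample values come from the test fixtures below.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // Step 1: the target name is the second-to-last path component.
    file := "/proc/fs/lustre/obdfilter/OST0001/stats"
    path := strings.Split(file, "/")
    fmt.Println(path[len(path)-2]) // OST0001

    // Step 2: pull column 6 (the byte total) out of a write_bytes line;
    // column 1 would give the call count instead.
    line := "write_bytes 71893382 samples [bytes] 1 1048576 15201500833981"
    parts := strings.Fields(line)
    data, err := strconv.ParseUint(parts[6], 10, 64)
    if err != nil {
        panic(err)
    }
    fmt.Println(data) // 15201500833981
}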


@@ -0,0 +1,130 @@
package lustre2
import (
"io/ioutil"
"os"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
// Set config file variables to point to fake directory structure instead of /proc?
const obdfilterProcContents = `snapshot_time 1438693064.430544 secs.usecs
read_bytes 203238095 samples [bytes] 4096 1048576 78026117632000
write_bytes 71893382 samples [bytes] 1 1048576 15201500833981
get_info 1182008495 samples [reqs]
set_info_async 2 samples [reqs]
connect 1117 samples [reqs]
reconnect 1160 samples [reqs]
disconnect 1084 samples [reqs]
statfs 3575885 samples [reqs]
create 698 samples [reqs]
destroy 3190060 samples [reqs]
setattr 605647 samples [reqs]
punch 805187 samples [reqs]
sync 6608753 samples [reqs]
preprw 275131477 samples [reqs]
commitrw 275131477 samples [reqs]
quotactl 229231 samples [reqs]
ping 78020757 samples [reqs]
`
const osdldiskfsProcContents = `snapshot_time 1438693135.640551 secs.usecs
get_page 275132812 samples [usec] 0 3147 1320420955 22041662259
cache_access 19047063027 samples [pages] 1 1 19047063027
cache_hit 7393729777 samples [pages] 1 1 7393729777
cache_miss 11653333250 samples [pages] 1 1 11653333250
`
const mdtProcContents = `snapshot_time 1438693238.20113 secs.usecs
open 1024577037 samples [reqs]
close 873243496 samples [reqs]
mknod 349042 samples [reqs]
link 445 samples [reqs]
unlink 3549417 samples [reqs]
mkdir 705499 samples [reqs]
rmdir 227434 samples [reqs]
rename 629196 samples [reqs]
getattr 1503663097 samples [reqs]
setattr 1898364 samples [reqs]
getxattr 6145349681 samples [reqs]
setxattr 83969 samples [reqs]
statfs 2916320 samples [reqs]
sync 434081 samples [reqs]
samedir_rename 259625 samples [reqs]
crossdir_rename 369571 samples [reqs]
`
func TestLustre2GeneratesMetrics(t *testing.T) {
tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/"
ost_name := "OST0001"
mdtdir := tempdir + "/mdt/"
err := os.MkdirAll(mdtdir+"/"+ost_name, 0755)
require.NoError(t, err)
osddir := tempdir + "/osd-ldiskfs/"
err = os.MkdirAll(osddir+"/"+ost_name, 0755)
require.NoError(t, err)
obddir := tempdir + "/obdfilter/"
err = os.MkdirAll(obddir+"/"+ost_name, 0755)
require.NoError(t, err)
err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/md_stats", []byte(mdtProcContents), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(osddir+"/"+ost_name+"/stats", []byte(osdldiskfsProcContents), 0644)
require.NoError(t, err)
err = ioutil.WriteFile(obddir+"/"+ost_name+"/stats", []byte(obdfilterProcContents), 0644)
require.NoError(t, err)
m := &Lustre2{
Ost_procfiles: []string{obddir + "/*/stats", osddir + "/*/stats"},
Mds_procfiles: []string{mdtdir + "/*/md_stats"},
}
var acc testutil.Accumulator
err = m.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"name": ost_name,
}
fields := map[string]interface{}{
"cache_access": uint64(19047063027),
"cache_hit": uint64(7393729777),
"cache_miss": uint64(11653333250),
"close": uint64(873243496),
"crossdir_rename": uint64(369571),
"getattr": uint64(1503663097),
"getxattr": uint64(6145349681),
"link": uint64(445),
"mkdir": uint64(705499),
"mknod": uint64(349042),
"open": uint64(1024577037),
"read_bytes": uint64(78026117632000),
"read_calls": uint64(203238095),
"rename": uint64(629196),
"rmdir": uint64(227434),
"samedir_rename": uint64(259625),
"setattr": uint64(1898364),
"setxattr": uint64(83969),
"statfs": uint64(2916320),
"sync": uint64(434081),
"unlink": uint64(3549417),
"write_bytes": uint64(15201500833981),
"write_calls": uint64(71893382),
}
acc.AssertContainsTaggedFields(t, "lustre2", fields, tags)
err = os.RemoveAll(os.TempDir() + "/telegraf")
require.NoError(t, err)
}


@@ -0,0 +1,234 @@
package mailchimp
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"regexp"
"sync"
)
const (
reports_endpoint string = "/3.0/reports"
reports_endpoint_campaign string = "/3.0/reports/%s"
)
var mailchimp_datacenter = regexp.MustCompile("[a-z]+[0-9]+$")
type ChimpAPI struct {
Transport http.RoundTripper
Debug bool
sync.Mutex
url *url.URL
}
type ReportsParams struct {
Count string
Offset string
SinceSendTime string
BeforeSendTime string
}
func (p *ReportsParams) String() string {
v := url.Values{}
if p.Count != "" {
v.Set("count", p.Count)
}
if p.Offset != "" {
v.Set("offset", p.Offset)
}
if p.BeforeSendTime != "" {
v.Set("before_send_time", p.BeforeSendTime)
}
if p.SinceSendTime != "" {
v.Set("since_send_time", p.SinceSendTime)
}
return v.Encode()
}
func NewChimpAPI(apiKey string) *ChimpAPI {
u := &url.URL{}
u.Scheme = "https"
u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimp_datacenter.FindString(apiKey))
u.User = url.UserPassword("", apiKey)
return &ChimpAPI{url: u}
}
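// Example (with a made-up key): MailChimp API keys carry a datacenter
// suffix, e.g. "0123456789abcdef-us6". mailchimp_datacenter.FindString
// returns "us6", so requests are sent to https://us6.api.mailchimp.com.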
type APIError struct {
Status int `json:"status"`
Type string `json:"type"`
Title string `json:"title"`
Detail string `json:"detail"`
Instance string `json:"instance"`
}
func (e APIError) Error() string {
return fmt.Sprintf("ERROR %v: %v. See %v", e.Status, e.Title, e.Type)
}
func chimpErrorCheck(body []byte) error {
var e APIError
json.Unmarshal(body, &e)
if e.Title != "" || e.Status != 0 {
return e
}
return nil
}
func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) {
a.Lock()
defer a.Unlock()
a.url.Path = reports_endpoint
var response ReportsResponse
rawjson, err := runChimp(a, params)
if err != nil {
return response, err
}
err = json.Unmarshal(rawjson, &response)
if err != nil {
return response, err
}
return response, nil
}
func (a *ChimpAPI) GetReport(campaignID string) (Report, error) {
a.Lock()
defer a.Unlock()
a.url.Path = fmt.Sprintf(reports_endpoint_campaign, campaignID)
var response Report
rawjson, err := runChimp(a, ReportsParams{})
if err != nil {
return response, err
}
err = json.Unmarshal(rawjson, &response)
if err != nil {
return response, err
}
return response, nil
}
func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
client := &http.Client{Transport: api.Transport}
var b bytes.Buffer
req, err := http.NewRequest("GET", api.url.String(), &b)
if err != nil {
return nil, err
}
req.URL.RawQuery = params.String()
req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
if api.Debug {
log.Printf("Request URL: %s", req.URL.String())
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if api.Debug {
log.Printf("Response Body:%s", string(body))
}
if err = chimpErrorCheck(body); err != nil {
return nil, err
}
return body, nil
}
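// Note: runChimp runs every response body through chimpErrorCheck
// regardless of HTTP status, so an error document such as
//   {"status": 401, "title": "API Key Invalid", ...}
// surfaces as an APIError even when the request itself returned 200 (this
// is exactly what TestMailChimpGatherError below simulates).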
type ReportsResponse struct {
Reports []Report `json:"reports"`
TotalItems int `json:"total_items"`
}
type Report struct {
ID string `json:"id"`
CampaignTitle string `json:"campaign_title"`
Type string `json:"type"`
EmailsSent int `json:"emails_sent"`
AbuseReports int `json:"abuse_reports"`
Unsubscribed int `json:"unsubscribed"`
SendTime string `json:"send_time"`
TimeSeries []TimeSerie `json:"timeseries"`
Bounces Bounces `json:"bounces"`
Forwards Forwards `json:"forwards"`
Opens Opens `json:"opens"`
Clicks Clicks `json:"clicks"`
FacebookLikes FacebookLikes `json:"facebook_likes"`
IndustryStats IndustryStats `json:"industry_stats"`
ListStats ListStats `json:"list_stats"`
}
type Bounces struct {
HardBounces int `json:"hard_bounces"`
SoftBounces int `json:"soft_bounces"`
SyntaxErrors int `json:"syntax_errors"`
}
type Forwards struct {
ForwardsCount int `json:"forwards_count"`
ForwardsOpens int `json:"forwards_opens"`
}
type Opens struct {
OpensTotal int `json:"opens_total"`
UniqueOpens int `json:"unique_opens"`
OpenRate float64 `json:"open_rate"`
LastOpen string `json:"last_open"`
}
type Clicks struct {
ClicksTotal int `json:"clicks_total"`
UniqueClicks int `json:"unique_clicks"`
UniqueSubscriberClicks int `json:"unique_subscriber_clicks"`
ClickRate float64 `json:"click_rate"`
LastClick string `json:"last_click"`
}
type FacebookLikes struct {
RecipientLikes int `json:"recipient_likes"`
UniqueLikes int `json:"unique_likes"`
FacebookLikes int `json:"facebook_likes"`
}
type IndustryStats struct {
Type string `json:"type"`
OpenRate float64 `json:"open_rate"`
ClickRate float64 `json:"click_rate"`
BounceRate float64 `json:"bounce_rate"`
UnopenRate float64 `json:"unopen_rate"`
UnsubRate float64 `json:"unsub_rate"`
AbuseRate float64 `json:"abuse_rate"`
}
type ListStats struct {
SubRate float64 `json:"sub_rate"`
UnsubRate float64 `json:"unsub_rate"`
OpenRate float64 `json:"open_rate"`
ClickRate float64 `json:"click_rate"`
}
type TimeSerie struct {
TimeStamp string `json:"timestamp"`
EmailsSent int `json:"emails_sent"`
UniqueOpens int `json:"unique_opens"`
RecipientsClick int `json:"recipients_click"`
}


@@ -0,0 +1,117 @@
package mailchimp
import (
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type MailChimp struct {
api *ChimpAPI
ApiKey string
DaysOld int
CampaignId string
}
var sampleConfig = `
# MailChimp API key
# get from https://admin.mailchimp.com/account/api/
api_key = "" # required
# Reports for campaigns sent more than days_old ago will not be collected.
# 0 means collect all.
days_old = 0
# Campaign ID to get; if empty, gets all campaigns. This option overrides days_old.
# campaign_id = ""
`
func (m *MailChimp) SampleConfig() string {
return sampleConfig
}
func (m *MailChimp) Description() string {
return "Gathers metrics from the /3.0/reports MailChimp API"
}
func (m *MailChimp) Gather(acc telegraf.Accumulator) error {
if m.api == nil {
m.api = NewChimpAPI(m.ApiKey)
}
m.api.Debug = false
if m.CampaignId == "" {
since := ""
if m.DaysOld > 0 {
d := time.Duration(m.DaysOld) * 24 * time.Hour
since = time.Now().Add(-d).Format(time.RFC3339)
}
reports, err := m.api.GetReports(ReportsParams{
SinceSendTime: since,
})
if err != nil {
return err
}
now := time.Now()
for _, report := range reports.Reports {
gatherReport(acc, report, now)
}
} else {
report, err := m.api.GetReport(m.CampaignId)
if err != nil {
return err
}
now := time.Now()
gatherReport(acc, report, now)
}
return nil
}
func gatherReport(acc telegraf.Accumulator, report Report, now time.Time) {
tags := make(map[string]string)
tags["id"] = report.ID
tags["campaign_title"] = report.CampaignTitle
fields := map[string]interface{}{
"emails_sent": report.EmailsSent,
"abuse_reports": report.AbuseReports,
"unsubscribed": report.Unsubscribed,
"hard_bounces": report.Bounces.HardBounces,
"soft_bounces": report.Bounces.SoftBounces,
"syntax_errors": report.Bounces.SyntaxErrors,
"forwards_count": report.Forwards.ForwardsCount,
"forwards_opens": report.Forwards.ForwardsOpens,
"opens_total": report.Opens.OpensTotal,
"unique_opens": report.Opens.UniqueOpens,
"open_rate": report.Opens.OpenRate,
"clicks_total": report.Clicks.ClicksTotal,
"unique_clicks": report.Clicks.UniqueClicks,
"unique_subscriber_clicks": report.Clicks.UniqueSubscriberClicks,
"click_rate": report.Clicks.ClickRate,
"facebook_recipient_likes": report.FacebookLikes.RecipientLikes,
"facebook_unique_likes": report.FacebookLikes.UniqueLikes,
"facebook_likes": report.FacebookLikes.FacebookLikes,
"industry_type": report.IndustryStats.Type,
"industry_open_rate": report.IndustryStats.OpenRate,
"industry_click_rate": report.IndustryStats.ClickRate,
"industry_bounce_rate": report.IndustryStats.BounceRate,
"industry_unopen_rate": report.IndustryStats.UnopenRate,
"industry_unsub_rate": report.IndustryStats.UnsubRate,
"industry_abuse_rate": report.IndustryStats.AbuseRate,
"list_stats_sub_rate": report.ListStats.SubRate,
"list_stats_unsub_rate": report.ListStats.UnsubRate,
"list_stats_open_rate": report.ListStats.OpenRate,
"list_stats_click_rate": report.ListStats.ClickRate,
}
acc.AddFields("mailchimp", fields, tags, now)
}
func init() {
inputs.Add("mailchimp", func() telegraf.Input {
return &MailChimp{}
})
}
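A minimal standalone sketch (not part of the diff) of the days_old cutoff computed in Gather: the configured day count becomes an RFC3339 timestamp that is sent as since_send_time. The daysOld value here is a hypothetical setting.

package main

import (
    "fmt"
    "time"
)

func main() {
    daysOld := 7 // hypothetical days_old setting
    d := time.Duration(daysOld) * 24 * time.Hour
    since := time.Now().Add(-d).Format(time.RFC3339)
    fmt.Println(since) // e.g. "2016-01-28T12:00:00-07:00"
}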


@@ -0,0 +1,774 @@
package mailchimp
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestMailChimpGatherReports(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, sampleReports)
},
))
defer ts.Close()
u, err := url.ParseRequestURI(ts.URL)
require.NoError(t, err)
api := &ChimpAPI{
url: u,
Debug: true,
}
m := MailChimp{
api: api,
}
var acc testutil.Accumulator
err = m.Gather(&acc)
require.NoError(t, err)
tags := make(map[string]string)
tags["id"] = "42694e9e57"
tags["campaign_title"] = "Freddie's Jokes Vol. 1"
fields := map[string]interface{}{
"emails_sent": int(200),
"abuse_reports": int(0),
"unsubscribed": int(2),
"hard_bounces": int(0),
"soft_bounces": int(2),
"syntax_errors": int(0),
"forwards_count": int(0),
"forwards_opens": int(0),
"opens_total": int(186),
"unique_opens": int(100),
"clicks_total": int(42),
"unique_clicks": int(400),
"unique_subscriber_clicks": int(42),
"facebook_recipient_likes": int(5),
"facebook_unique_likes": int(8),
"facebook_likes": int(42),
"open_rate": float64(42),
"click_rate": float64(42),
"industry_open_rate": float64(0.17076777144396),
"industry_click_rate": float64(0.027431311866951),
"industry_bounce_rate": float64(0.0063767751251474),
"industry_unopen_rate": float64(0.82285545343089),
"industry_unsub_rate": float64(0.001436957032815),
"industry_abuse_rate": float64(0.00021111996110887),
"list_stats_sub_rate": float64(10),
"list_stats_unsub_rate": float64(20),
"list_stats_open_rate": float64(42),
"list_stats_click_rate": float64(42),
"industry_type": "Social Networks and Online Communities",
}
acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags)
}
func TestMailChimpGatherReport(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, sampleReport)
},
))
defer ts.Close()
u, err := url.ParseRequestURI(ts.URL)
require.NoError(t, err)
api := &ChimpAPI{
url: u,
Debug: true,
}
m := MailChimp{
api: api,
CampaignId: "test",
}
var acc testutil.Accumulator
err = m.Gather(&acc)
require.NoError(t, err)
tags := make(map[string]string)
tags["id"] = "42694e9e57"
tags["campaign_title"] = "Freddie's Jokes Vol. 1"
fields := map[string]interface{}{
"emails_sent": int(200),
"abuse_reports": int(0),
"unsubscribed": int(2),
"hard_bounces": int(0),
"soft_bounces": int(2),
"syntax_errors": int(0),
"forwards_count": int(0),
"forwards_opens": int(0),
"opens_total": int(186),
"unique_opens": int(100),
"clicks_total": int(42),
"unique_clicks": int(400),
"unique_subscriber_clicks": int(42),
"facebook_recipient_likes": int(5),
"facebook_unique_likes": int(8),
"facebook_likes": int(42),
"open_rate": float64(42),
"click_rate": float64(42),
"industry_open_rate": float64(0.17076777144396),
"industry_click_rate": float64(0.027431311866951),
"industry_bounce_rate": float64(0.0063767751251474),
"industry_unopen_rate": float64(0.82285545343089),
"industry_unsub_rate": float64(0.001436957032815),
"industry_abuse_rate": float64(0.00021111996110887),
"list_stats_sub_rate": float64(10),
"list_stats_unsub_rate": float64(20),
"list_stats_open_rate": float64(42),
"list_stats_click_rate": float64(42),
"industry_type": "Social Networks and Online Communities",
}
acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags)
}
func TestMailChimpGatherError(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, sampleError)
},
))
defer ts.Close()
u, err := url.ParseRequestURI(ts.URL)
require.NoError(t, err)
api := &ChimpAPI{
url: u,
Debug: true,
}
m := MailChimp{
api: api,
CampaignId: "test",
}
var acc testutil.Accumulator
err = m.Gather(&acc)
require.Error(t, err)
}
var sampleReports = `
{
"reports": [
{
"id": "42694e9e57",
"campaign_title": "Freddie's Jokes Vol. 1",
"type": "regular",
"emails_sent": 200,
"abuse_reports": 0,
"unsubscribed": 2,
"send_time": "2015-09-15T19:05:51+00:00",
"bounces": {
"hard_bounces": 0,
"soft_bounces": 2,
"syntax_errors": 0
},
"forwards": {
"forwards_count": 0,
"forwards_opens": 0
},
"opens": {
"opens_total": 186,
"unique_opens": 100,
"open_rate": 42,
"last_open": "2015-09-15T19:15:47+00:00"
},
"clicks": {
"clicks_total": 42,
"unique_clicks": 400,
"unique_subscriber_clicks": 42,
"click_rate": 42,
"last_click": "2015-09-15T19:15:47+00:00"
},
"facebook_likes": {
"recipient_likes": 5,
"unique_likes": 8,
"facebook_likes": 42
},
"industry_stats": {
"type": "Social Networks and Online Communities",
"open_rate": 0.17076777144396,
"click_rate": 0.027431311866951,
"bounce_rate": 0.0063767751251474,
"unopen_rate": 0.82285545343089,
"unsub_rate": 0.001436957032815,
"abuse_rate": 0.00021111996110887
},
"list_stats": {
"sub_rate": 10,
"unsub_rate": 20,
"open_rate": 42,
"click_rate": 42
},
"timeseries": [
{
"timestamp": "2015-09-15T19:00:00+00:00",
"emails_sent": 198,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T20:00:00+00:00",
"emails_sent": 2,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T21:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T22:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T23:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T00:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T01:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T02:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T03:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T04:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T05:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T06:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T07:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T08:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T09:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T10:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T11:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T12:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T13:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T14:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T15:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T16:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T17:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T18:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
}
],
"share_report": {
"share_url": "http://usX.vip-reports.net/reports/summary?u=xxxx&id=xxxx",
"share_password": "freddielikesjokes"
},
"delivery_status": {
"enabled": false
},
"_links": [
{
"rel": "parent",
"href": "https://usX.api.mailchimp.com/3.0/reports",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Collection.json",
"schema": "https://api.mailchimp.com/schema/3.0/CollectionLinks/Reports.json"
},
{
"rel": "self",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Instance.json"
},
{
"rel": "campaign",
"href": "https://usX.api.mailchimp.com/3.0/campaigns/42694e9e57",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Campaigns/Instance.json"
},
{
"rel": "sub-reports",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sub-reports",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Sub/Collection.json"
},
{
"rel": "abuse-reports",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/abuse-reports",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Abuse/Collection.json"
},
{
"rel": "advice",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/advice",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Advice/Collection.json"
},
{
"rel": "click-details",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/click-details",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/ClickDetails/Collection.json"
},
{
"rel": "domain-performance",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/domain-performance",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/DomainPerformance/Collection.json"
},
{
"rel": "eepurl",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/eepurl",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Eepurl/Collection.json"
},
{
"rel": "email-activity",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/email-activity",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/EmailActivity/Collection.json"
},
{
"rel": "locations",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/locations",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Locations/Collection.json"
},
{
"rel": "sent-to",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sent-to",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/SentTo/Collection.json"
},
{
"rel": "unsubscribed",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/unsubscribed",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Unsubs/Collection.json"
}
]
}
],
"_links": [
{
"rel": "parent",
"href": "https://usX.api.mailchimp.com/3.0/",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Root.json"
},
{
"rel": "self",
"href": "https://usX.api.mailchimp.com/3.0/reports",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Collection.json",
"schema": "https://api.mailchimp.com/schema/3.0/CollectionLinks/Reports.json"
}
],
"total_items": 1
}
`
var sampleReport = `
{
"id": "42694e9e57",
"campaign_title": "Freddie's Jokes Vol. 1",
"type": "regular",
"emails_sent": 200,
"abuse_reports": 0,
"unsubscribed": 2,
"send_time": "2015-09-15T19:05:51+00:00",
"bounces": {
"hard_bounces": 0,
"soft_bounces": 2,
"syntax_errors": 0
},
"forwards": {
"forwards_count": 0,
"forwards_opens": 0
},
"opens": {
"opens_total": 186,
"unique_opens": 100,
"open_rate": 42,
"last_open": "2015-09-15T19:15:47+00:00"
},
"clicks": {
"clicks_total": 42,
"unique_clicks": 400,
"unique_subscriber_clicks": 42,
"click_rate": 42,
"last_click": "2015-09-15T19:15:47+00:00"
},
"facebook_likes": {
"recipient_likes": 5,
"unique_likes": 8,
"facebook_likes": 42
},
"industry_stats": {
"type": "Social Networks and Online Communities",
"open_rate": 0.17076777144396,
"click_rate": 0.027431311866951,
"bounce_rate": 0.0063767751251474,
"unopen_rate": 0.82285545343089,
"unsub_rate": 0.001436957032815,
"abuse_rate": 0.00021111996110887
},
"list_stats": {
"sub_rate": 10,
"unsub_rate": 20,
"open_rate": 42,
"click_rate": 42
},
"timeseries": [
{
"timestamp": "2015-09-15T19:00:00+00:00",
"emails_sent": 198,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T20:00:00+00:00",
"emails_sent": 2,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T21:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T22:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-15T23:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T00:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T01:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T02:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T03:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T04:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T05:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T06:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T07:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T08:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T09:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T10:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T11:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T12:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T13:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T14:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T15:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T16:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T17:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
},
{
"timestamp": "2015-09-16T18:00:00+00:00",
"emails_sent": 0,
"unique_opens": 0,
"recipients_clicks": 0
}
],
"share_report": {
"share_url": "http://usX.vip-reports.net/reports/summary?u=xxxx&id=xxxx",
"share_password": "freddielikesjokes"
},
"delivery_status": {
"enabled": false
},
"_links": [
{
"rel": "parent",
"href": "https://usX.api.mailchimp.com/3.0/reports",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Collection.json",
"schema": "https://api.mailchimp.com/schema/3.0/CollectionLinks/Reports.json"
},
{
"rel": "self",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Instance.json"
},
{
"rel": "campaign",
"href": "https://usX.api.mailchimp.com/3.0/campaigns/42694e9e57",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Campaigns/Instance.json"
},
{
"rel": "sub-reports",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sub-reports",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Sub/Collection.json"
},
{
"rel": "abuse-reports",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/abuse-reports",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Abuse/Collection.json"
},
{
"rel": "advice",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/advice",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Advice/Collection.json"
},
{
"rel": "click-details",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/click-details",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/ClickDetails/Collection.json"
},
{
"rel": "domain-performance",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/domain-performance",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/DomainPerformance/Collection.json"
},
{
"rel": "eepurl",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/eepurl",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Eepurl/Collection.json"
},
{
"rel": "email-activity",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/email-activity",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/EmailActivity/Collection.json"
},
{
"rel": "locations",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/locations",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Locations/Collection.json"
},
{
"rel": "sent-to",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/sent-to",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/SentTo/Collection.json"
},
{
"rel": "unsubscribed",
"href": "https://usX.api.mailchimp.com/3.0/reports/42694e9e57/unsubscribed",
"method": "GET",
"targetSchema": "https://api.mailchimp.com/schema/3.0/Reports/Unsubs/Collection.json"
}
]
}
`
var sampleError = `
{
"type": "http://developer.mailchimp.com/documentation/mailchimp/guides/error-glossary/",
"title": "API Key Invalid",
"status": 401,
"detail": "Your API key may be invalid, or you've attempted to access the wrong datacenter.",
"instance": ""
}
`


@@ -0,0 +1,185 @@
package memcached
import (
"bufio"
"bytes"
"fmt"
"net"
"strconv"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Memcached is a memcached plugin
type Memcached struct {
Servers []string
UnixSockets []string
}
var sampleConfig = `
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.0.0.1:11211, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost:11211"]
# unix_sockets = ["/var/run/memcached.sock"]
`
var defaultTimeout = 5 * time.Second
// The list of metrics that should be sent
var sendMetrics = []string{
"get_hits",
"get_misses",
"evictions",
"limit_maxbytes",
"bytes",
"uptime",
"curr_items",
"total_items",
"curr_connections",
"total_connections",
"connection_structures",
"cmd_get",
"cmd_set",
"delete_hits",
"delete_misses",
"incr_hits",
"incr_misses",
"decr_hits",
"decr_misses",
"cas_hits",
"cas_misses",
"evictions",
"bytes_read",
"bytes_written",
"threads",
"conn_yields",
}
// SampleConfig returns sample configuration message
func (m *Memcached) SampleConfig() string {
return sampleConfig
}
// Description returns description of Memcached plugin
func (m *Memcached) Description() string {
return "Read metrics from one or many memcached servers"
}
// Gather reads stats from all configured servers and accumulates them
func (m *Memcached) Gather(acc telegraf.Accumulator) error {
if len(m.Servers) == 0 && len(m.UnixSockets) == 0 {
return m.gatherServer(":11211", false, acc)
}
for _, serverAddress := range m.Servers {
if err := m.gatherServer(serverAddress, false, acc); err != nil {
return err
}
}
for _, unixAddress := range m.UnixSockets {
if err := m.gatherServer(unixAddress, true, acc); err != nil {
return err
}
}
return nil
}
func (m *Memcached) gatherServer(
address string,
unix bool,
acc telegraf.Accumulator,
) error {
var conn net.Conn
if unix {
conn, err := net.DialTimeout("unix", address, defaultTimeout)
if err != nil {
return err
}
defer conn.Close()
} else {
_, _, err := net.SplitHostPort(address)
if err != nil {
address = address + ":11211"
}
conn, err = net.DialTimeout("tcp", address, defaultTimeout)
if err != nil {
return err
}
defer conn.Close()
}
// Set a deadline so a stalled server cannot block the gather
conn.SetDeadline(time.Now().Add(defaultTimeout))
// Read and write buffer
rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
// Send command
if _, err := fmt.Fprint(rw, "stats\r\n"); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
values, err := parseResponse(rw.Reader)
if err != nil {
return err
}
// Add server address as a tag
tags := map[string]string{"server": address}
// Process values
fields := make(map[string]interface{})
for _, key := range sendMetrics {
if value, ok := values[key]; ok {
// Most values are integers; fall back to the raw string otherwise
if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse == nil {
fields[key] = iValue
} else {
fields[key] = value
}
}
}
acc.AddFields("memcached", fields, tags)
return nil
}
func parseResponse(r *bufio.Reader) (map[string]string, error) {
values := make(map[string]string)
for {
// Read line
line, _, errRead := r.ReadLine()
if errRead != nil {
return values, errRead
}
// Done
if bytes.Equal(line, []byte("END")) {
break
}
// Read values
s := bytes.SplitN(line, []byte(" "), 3)
if len(s) != 3 || !bytes.Equal(s[0], []byte("STAT")) {
return values, fmt.Errorf("unexpected line in stats response: %q", line)
}
// Save values
values[string(s[1])] = string(s[2])
}
return values, nil
}
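// Worked example (illustrative): feeding parseResponse the lines
//   STAT uptime 194
//   END
// yields map[string]string{"uptime": "194"}; gatherServer then converts
// numeric values with strconv.ParseInt before accumulating them.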
func init() {
inputs.Add("memcached", func() telegraf.Input {
return &Memcached{}
})
}


@@ -0,0 +1,160 @@
package memcached
import (
"bufio"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMemcachedGeneratesMetrics(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
m := &Memcached{
Servers: []string{testutil.GetLocalHost()},
}
var acc testutil.Accumulator
err := m.Gather(&acc)
require.NoError(t, err)
intMetrics := []string{"get_hits", "get_misses", "evictions",
"limit_maxbytes", "bytes", "uptime", "curr_items", "total_items",
"curr_connections", "total_connections", "connection_structures", "cmd_get",
"cmd_set", "delete_hits", "delete_misses", "incr_hits", "incr_misses",
"decr_hits", "decr_misses", "cas_hits", "cas_misses", "evictions",
"bytes_read", "bytes_written", "threads", "conn_yields"}
for _, metric := range intMetrics {
assert.True(t, acc.HasIntField("memcached", metric), metric)
}
}
func TestMemcachedParseMetrics(t *testing.T) {
r := bufio.NewReader(strings.NewReader(memcachedStats))
values, err := parseResponse(r)
require.NoError(t, err, "Error parsing memcached response")
tests := []struct {
key string
value string
}{
{"pid", "23235"},
{"uptime", "194"},
{"time", "1449174679"},
{"version", "1.4.14 (Ubuntu)"},
{"libevent", "2.0.21-stable"},
{"pointer_size", "64"},
{"rusage_user", "0.000000"},
{"rusage_system", "0.007566"},
{"curr_connections", "5"},
{"total_connections", "6"},
{"connection_structures", "6"},
{"reserved_fds", "20"},
{"cmd_get", "0"},
{"cmd_set", "0"},
{"cmd_flush", "0"},
{"cmd_touch", "0"},
{"get_hits", "0"},
{"get_misses", "0"},
{"delete_misses", "0"},
{"delete_hits", "0"},
{"incr_misses", "0"},
{"incr_hits", "0"},
{"decr_misses", "0"},
{"decr_hits", "0"},
{"cas_misses", "0"},
{"cas_hits", "0"},
{"cas_badval", "0"},
{"touch_hits", "0"},
{"touch_misses", "0"},
{"auth_cmds", "0"},
{"auth_errors", "0"},
{"bytes_read", "7"},
{"bytes_written", "0"},
{"limit_maxbytes", "67108864"},
{"accepting_conns", "1"},
{"listen_disabled_num", "0"},
{"threads", "4"},
{"conn_yields", "0"},
{"hash_power_level", "16"},
{"hash_bytes", "524288"},
{"hash_is_expanding", "0"},
{"expired_unfetched", "0"},
{"evicted_unfetched", "0"},
{"bytes", "0"},
{"curr_items", "0"},
{"total_items", "0"},
{"evictions", "0"},
{"reclaimed", "0"},
}
for _, test := range tests {
value, ok := values[test.key]
if !ok {
t.Errorf("Did not find key for metric %s in values", test.key)
continue
}
if value != test.value {
t.Errorf("Metric: %s, Expected: %s, actual: %s",
test.key, test.value, value)
}
}
}
var memcachedStats = `STAT pid 23235
STAT uptime 194
STAT time 1449174679
STAT version 1.4.14 (Ubuntu)
STAT libevent 2.0.21-stable
STAT pointer_size 64
STAT rusage_user 0.000000
STAT rusage_system 0.007566
STAT curr_connections 5
STAT total_connections 6
STAT connection_structures 6
STAT reserved_fds 20
STAT cmd_get 0
STAT cmd_set 0
STAT cmd_flush 0
STAT cmd_touch 0
STAT get_hits 0
STAT get_misses 0
STAT delete_misses 0
STAT delete_hits 0
STAT incr_misses 0
STAT incr_hits 0
STAT decr_misses 0
STAT decr_hits 0
STAT cas_misses 0
STAT cas_hits 0
STAT cas_badval 0
STAT touch_hits 0
STAT touch_misses 0
STAT auth_cmds 0
STAT auth_errors 0
STAT bytes_read 7
STAT bytes_written 0
STAT limit_maxbytes 67108864
STAT accepting_conns 1
STAT listen_disabled_num 0
STAT threads 4
STAT conn_yields 0
STAT hash_power_level 16
STAT hash_bytes 524288
STAT hash_is_expanding 0
STAT expired_unfetched 0
STAT evicted_unfetched 0
STAT bytes 0
STAT curr_items 0
STAT total_items 0
STAT evictions 0
STAT reclaimed 0
END
`


@@ -0,0 +1,19 @@
package inputs
import (
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/mock"
)
type MockPlugin struct {
mock.Mock
}
func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error {
ret := m.Called(_a0)
r0 := ret.Error(0)
return r0
}


@@ -0,0 +1,147 @@
package mongodb
import (
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/url"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"gopkg.in/mgo.v2"
)
type MongoDB struct {
Servers []string
Ssl Ssl
mongos map[string]*Server
}
type Ssl struct {
Enabled bool
CaCerts []string `toml:"cacerts"`
}
var sampleConfig = `
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
servers = ["127.0.0.1:27017"]
`
func (m *MongoDB) SampleConfig() string {
return sampleConfig
}
func (*MongoDB) Description() string {
return "Read metrics from one or many MongoDB servers"
}
var localhost = &url.URL{Host: "127.0.0.1:27017"}
// Gather reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
if len(m.Servers) == 0 {
return m.gatherServer(m.getMongoServer(localhost), acc)
}
var wg sync.WaitGroup
var outerr error
var mu sync.Mutex
for _, serv := range m.Servers {
u, err := url.Parse(serv)
if err != nil {
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
} else if u.Scheme == "" {
u.Scheme = "mongodb"
// fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Host = serv
if u.Path == u.Host {
u.Path = ""
}
}
wg.Add(1)
// resolve the server here and pass it in to avoid capturing the loop variable
go func(server *Server) {
defer wg.Done()
if err := m.gatherServer(server, acc); err != nil {
mu.Lock()
outerr = err
mu.Unlock()
}
}(m.getMongoServer(u))
}
wg.Wait()
return outerr
}
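// Example (illustrative): a bare host such as "10.10.3.33" parses with an
// empty Scheme and lands in Path, so Gather rewrites it to
// mongodb://10.10.3.33 before resolving it with getMongoServer.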
func (m *MongoDB) getMongoServer(url *url.URL) *Server {
if _, ok := m.mongos[url.Host]; !ok {
m.mongos[url.Host] = &Server{
Url: url,
}
}
return m.mongos[url.Host]
}
func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
if server.Session == nil {
var dialAddrs []string
if server.Url.User != nil {
dialAddrs = []string{server.Url.String()}
} else {
dialAddrs = []string{server.Url.Host}
}
dialInfo, err := mgo.ParseURL(dialAddrs[0])
if err != nil {
return fmt.Errorf("Unable to parse URL (%s), %s\n",
dialAddrs[0], err.Error())
}
dialInfo.Direct = true
dialInfo.Timeout = time.Duration(10) * time.Second
if m.Ssl.Enabled {
tlsConfig := &tls.Config{}
if len(m.Ssl.CaCerts) > 0 {
roots := x509.NewCertPool()
for _, caCert := range m.Ssl.CaCerts {
ok := roots.AppendCertsFromPEM([]byte(caCert))
if !ok {
return fmt.Errorf("failed to parse root certificate")
}
}
tlsConfig.RootCAs = roots
} else {
tlsConfig.InsecureSkipVerify = true
}
dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
if err != nil {
fmt.Printf("error in Dial, %s\n", err.Error())
}
return conn, err
}
}
sess, err := mgo.DialWithInfo(dialInfo)
if err != nil {
fmt.Printf("error dialing over ssl, %s\n", err.Error())
return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error())
}
server.Session = sess
}
return server.gatherData(acc)
}
func init() {
inputs.Add("mongodb", func() telegraf.Input {
return &MongoDB{
mongos: make(map[string]*Server),
}
})
}


@@ -0,0 +1,108 @@
package mongodb
import (
"fmt"
"reflect"
"strconv"
"github.com/influxdata/telegraf"
)
type MongodbData struct {
StatLine *StatLine
Fields map[string]interface{}
Tags map[string]string
}
func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
if statLine.NodeType != "" && statLine.NodeType != "UNK" {
tags["state"] = statLine.NodeType
}
return &MongodbData{
StatLine: statLine,
Tags: tags,
Fields: make(map[string]interface{}),
}
}
var DefaultStats = map[string]string{
"inserts_per_sec": "Insert",
"queries_per_sec": "Query",
"updates_per_sec": "Update",
"deletes_per_sec": "Delete",
"getmores_per_sec": "GetMore",
"commands_per_sec": "Command",
"flushes_per_sec": "Flushes",
"vsize_megabytes": "Virtual",
"resident_megabytes": "Resident",
"queued_reads": "QueuedReaders",
"queued_writes": "QueuedWriters",
"active_reads": "ActiveReaders",
"active_writes": "ActiveWriters",
"net_in_bytes": "NetIn",
"net_out_bytes": "NetOut",
"open_connections": "NumConnections",
}
var DefaultReplStats = map[string]string{
"repl_inserts_per_sec": "InsertR",
"repl_queries_per_sec": "QueryR",
"repl_updates_per_sec": "UpdateR",
"repl_deletes_per_sec": "DeleteR",
"repl_getmores_per_sec": "GetMoreR",
"repl_commands_per_sec": "CommandR",
"member_status": "NodeType",
}
var MmapStats = map[string]string{
"mapped_megabytes": "Mapped",
"non-mapped_megabytes": "NonMapped",
"page_faults_per_sec": "Faults",
}
var WiredTigerStats = map[string]string{
"percent_cache_dirty": "CacheDirtyPercent",
"percent_cache_used": "CacheUsedPercent",
}
func (d *MongodbData) AddDefaultStats() {
statLine := reflect.ValueOf(d.StatLine).Elem()
d.addStat(statLine, DefaultStats)
if d.StatLine.NodeType != "" {
d.addStat(statLine, DefaultReplStats)
}
if d.StatLine.StorageEngine == "mmapv1" {
d.addStat(statLine, MmapStats)
} else if d.StatLine.StorageEngine == "wiredTiger" {
for key, value := range WiredTigerStats {
val := statLine.FieldByName(value).Interface()
percentVal := fmt.Sprintf("%.1f", val.(float64)*100)
floatVal, _ := strconv.ParseFloat(percentVal, 64)
d.add(key, floatVal)
}
}
}
func (d *MongodbData) addStat(
statLine reflect.Value,
stats map[string]string,
) {
for key, value := range stats {
val := statLine.FieldByName(value).Interface()
d.add(key, val)
}
}
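// Example (illustrative): DefaultStats maps the output key
// "inserts_per_sec" to the StatLine field "Insert", so addStat resolves it
// via statLine.FieldByName("Insert") and emits whatever value it holds.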
func (d *MongodbData) add(key string, val interface{}) {
d.Fields[key] = val
}
func (d *MongodbData) flush(acc telegraf.Accumulator) {
acc.AddFields(
"mongodb",
d.Fields,
d.Tags,
d.StatLine.Time,
)
d.Fields = make(map[string]interface{})
}
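A standalone sketch (not part of the diff) of the WiredTiger percentage conversion in AddDefaultStats: the cache ratios arrive as fractions and are rounded to one decimal place via a Sprintf/ParseFloat round trip. The ratio value here is made up.

package main

import (
    "fmt"
    "strconv"
)

func main() {
    ratio := 0.12345 // made-up cache-used ratio
    percentVal := fmt.Sprintf("%.1f", ratio*100) // "12.3"
    floatVal, _ := strconv.ParseFloat(percentVal, 64)
    fmt.Println(floatVal) // 12.3
}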


@@ -0,0 +1,133 @@
package mongodb
import (
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
var tags = make(map[string]string)
func TestAddNonReplStats(t *testing.T) {
d := NewMongodbData(
&StatLine{
StorageEngine: "",
Time: time.Now(),
Insert: 0,
Query: 0,
Update: 0,
Delete: 0,
GetMore: 0,
Command: 0,
Flushes: 0,
Virtual: 0,
Resident: 0,
QueuedReaders: 0,
QueuedWriters: 0,
ActiveReaders: 0,
ActiveWriters: 0,
NetIn: 0,
NetOut: 0,
NumConnections: 0,
},
tags,
)
var acc testutil.Accumulator
d.AddDefaultStats()
d.flush(&acc)
for key := range DefaultStats {
assert.True(t, acc.HasIntField("mongodb", key))
}
}
func TestAddMmapStats(t *testing.T) {
d := NewMongodbData(
&StatLine{
StorageEngine: "mmapv1",
Mapped: 0,
NonMapped: 0,
Faults: 0,
},
tags,
)
var acc testutil.Accumulator
d.AddDefaultStats()
d.flush(&acc)
for key := range MmapStats {
assert.True(t, acc.HasIntField("mongodb", key))
}
}
func TestAddWiredTigerStats(t *testing.T) {
d := NewMongodbData(
&StatLine{
StorageEngine: "wiredTiger",
CacheDirtyPercent: 0,
CacheUsedPercent: 0,
},
tags,
)
var acc testutil.Accumulator
d.AddDefaultStats()
d.flush(&acc)
for key := range WiredTigerStats {
assert.True(t, acc.HasFloatField("mongodb", key))
}
}
func TestStateTag(t *testing.T) {
d := NewMongodbData(
&StatLine{
StorageEngine: "",
Time: time.Now(),
Insert: 0,
Query: 0,
NodeType: "PRI",
},
tags,
)
stateTags := make(map[string]string)
stateTags["state"] = "PRI"
var acc testutil.Accumulator
d.AddDefaultStats()
d.flush(&acc)
fields := map[string]interface{}{
"active_reads": int64(0),
"active_writes": int64(0),
"commands_per_sec": int64(0),
"deletes_per_sec": int64(0),
"flushes_per_sec": int64(0),
"getmores_per_sec": int64(0),
"inserts_per_sec": int64(0),
"member_status": "PRI",
"net_in_bytes": int64(0),
"net_out_bytes": int64(0),
"open_connections": int64(0),
"queries_per_sec": int64(0),
"queued_reads": int64(0),
"queued_writes": int64(0),
"repl_commands_per_sec": int64(0),
"repl_deletes_per_sec": int64(0),
"repl_getmores_per_sec": int64(0),
"repl_inserts_per_sec": int64(0),
"repl_queries_per_sec": int64(0),
"repl_updates_per_sec": int64(0),
"resident_megabytes": int64(0),
"updates_per_sec": int64(0),
"vsize_megabytes": int64(0),
}
acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
}


@@ -0,0 +1,51 @@
package mongodb
import (
"net/url"
"time"
"github.com/influxdata/telegraf"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
type Server struct {
Url *url.URL
Session *mgo.Session
lastResult *ServerStatus
}
func (s *Server) getDefaultTags() map[string]string {
tags := make(map[string]string)
tags["hostname"] = s.Url.Host
return tags
}
func (s *Server) gatherData(acc telegraf.Accumulator) error {
s.Session.SetMode(mgo.Eventual, true)
s.Session.SetSocketTimeout(0)
result := &ServerStatus{}
err := s.Session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 0}}, result)
if err != nil {
return err
}
defer func() {
s.lastResult = result
}()
result.SampleTime = time.Now()
if s.lastResult != nil && result != nil {
duration := result.SampleTime.Sub(s.lastResult.SampleTime)
durationInSeconds := int64(duration.Seconds())
if durationInSeconds == 0 {
durationInSeconds = 1
}
data := NewMongodbData(
NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds),
s.getDefaultTags(),
)
data.AddDefaultStats()
data.flush(acc)
}
return nil
}
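// Note: gatherData needs two samples before it reports anything. The first
// call only stores lastResult; from the second call on, the StatLine diff
// is computed over the elapsed interval (clamped to at least one second),
// which is why the integration test below sleeps and gathers twice.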


@@ -0,0 +1,43 @@
// +build integration
package mongodb
import (
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGetDefaultTags(t *testing.T) {
var tagTests = []struct {
in string
out string
}{
{"hostname", server.Url.Host},
}
defaultTags := server.getDefaultTags()
for _, tt := range tagTests {
if defaultTags[tt.in] != tt.out {
t.Errorf("expected %q, got %q", tt.out, defaultTags[tt.in])
}
}
}
func TestAddDefaultStats(t *testing.T) {
var acc testutil.Accumulator
err := server.gatherData(&acc)
require.NoError(t, err)
time.Sleep(time.Duration(1) * time.Second)
// need to call this twice so it can perform the diff
err = server.gatherData(&acc)
require.NoError(t, err)
for key := range DefaultStats {
assert.True(t, acc.HasIntValue(key))
}
}

Some files were not shown because too many files have changed in this diff.