Commit bbeb5aeadf by ncohensm, 2016-08-23 10:00:36 -07:00
56 changed files with 4858 additions and 2258 deletions

View File

@@ -1,5 +1,5 @@
 ### Required for all PRs:
-- [ ] CHANGELOG.md updated
+- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
 - [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
 - [ ] README.md updated (if adding a new plugin)

View File

@@ -1,24 +1,19 @@
 ## v1.0 [unreleased]
-### Features
-- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
-- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
-- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
-### Bugfixes
-- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures.
-- [#1477](https://github.com/influxdata/telegraf/issues/1477): nstat: fix inaccurate config panic.
-- [#1481](https://github.com/influxdata/telegraf/issues/1481): jolokia: fix handling multiple multi-dimensional attributes.
-- [#1430](https://github.com/influxdata/telegraf/issues/1430): Fix prometheus character sanitizing. Sanitize more win_perf_counters characters.
-- [#1534](https://github.com/influxdata/telegraf/pull/1534): Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does).
-- [#1379](https://github.com/influxdata/telegraf/issues/1379): Fix covering Amazon Linux for post remove flow.
-## v1.0 beta 3 [2016-07-18]
 ### Release Notes
+**Breaking Change**: The SNMP plugin is being deprecated in its current form.
+There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
+which fixes many of the issues and confusion
+of its predecessor. For users wanting to continue to use the deprecated SNMP
+plugin, you will need to change your config file from `[[inputs.snmp]]` to
+`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
+backwards-compatible.
+- Telegraf now supports being installed as an official windows service,
+which can be installed via
+`> C:\Program Files\Telegraf\telegraf.exe --service install`
 **Breaking Change**: Aerospike main server node measurements have been renamed
 aerospike_node. Aerospike namespace measurements have been renamed to
 aerospike_namespace. They will also now be tagged with the node_name
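For users applying the SNMP rename above, only the section header of the existing config changes; a minimal sketch (the legacy options themselves are elided, since they carry over unchanged):

```toml
# Before: the deprecated plugin under its old name
[[inputs.snmp]]
  # ... existing legacy SNMP options, unchanged ...

# After: the same deprecated plugin under its new name
[[inputs.snmp_legacy]]
  # ... existing legacy SNMP options, unchanged ...
```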
@@ -49,8 +44,15 @@ should now look like:
     path = "/"
 ```
+- `flush_jitter` behavior has been changed. The random jitter will now be
+evaluated at every flush interval, rather than once at startup. This makes it
+consistent with the behavior of `collection_jitter`.
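Both jitters sit side by side in the `[agent]` table; a sketch of the relevant settings (the values shown are illustrative):

```toml
[agent]
  ## Each input sleeps a random amount within this window before collecting,
  ## so that all inputs do not poll at the same instant.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs.
  flush_interval = "10s"
  ## Random sleep added on top of flush_interval; with this change it is
  ## re-rolled at every flush rather than fixed once at startup.
  flush_jitter = "5s"
```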
 ### Features
+- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
+- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
+- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
 - [#1503](https://github.com/influxdata/telegraf/pull/1503): Add tls support for certs to RabbitMQ input plugin
 - [#1289](https://github.com/influxdata/telegraf/pull/1289): webhooks input plugin. Thanks @francois2metz and @cduez!
 - [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar webhook plugin.
@@ -66,9 +68,40 @@ should now look like:
 - [#1434](https://github.com/influxdata/telegraf/pull/1434): Add measurement name arg to logparser plugin.
 - [#1479](https://github.com/influxdata/telegraf/pull/1479): logparser: change resp_code from a field to a tag.
 - [#1411](https://github.com/influxdata/telegraf/pull/1411): Implement support for fetching hddtemp data
+- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
+- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
+- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
+- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
+- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
+- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
+- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
+- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
+- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
+- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
+- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
+- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
+- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren!
+- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
+- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
+- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified
+- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
+- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified
+- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
+- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
+- [#1543](https://github.com/influxdata/telegraf/pull/1543): Official Windows service.
+- [#1414](https://github.com/influxdata/telegraf/pull/1414): Forking sensors command to remove C package dependency.
+- [#1389](https://github.com/influxdata/telegraf/pull/1389): Add a new SNMP plugin.
 ### Bugfixes
+- [#1619](https://github.com/influxdata/telegraf/issues/1619): Fix `make windows` build target
+- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures.
+- [#1477](https://github.com/influxdata/telegraf/issues/1477): nstat: fix inaccurate config panic.
+- [#1481](https://github.com/influxdata/telegraf/issues/1481): jolokia: fix handling multiple multi-dimensional attributes.
+- [#1430](https://github.com/influxdata/telegraf/issues/1430): Fix prometheus character sanitizing. Sanitize more win_perf_counters characters.
+- [#1534](https://github.com/influxdata/telegraf/pull/1534): Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does).
+- [#1379](https://github.com/influxdata/telegraf/issues/1379): Fix covering Amazon Linux for post remove flow.
+- [#1584](https://github.com/influxdata/telegraf/issues/1584): procstat missing fields: read/write bytes & count
 - [#1472](https://github.com/influxdata/telegraf/pull/1472): diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality.
 - [#1426](https://github.com/influxdata/telegraf/pull/1426): nil metrics panic fix.
 - [#1384](https://github.com/influxdata/telegraf/pull/1384): Fix datarace in apache input plugin.
@@ -87,19 +120,6 @@ should now look like:
 - [#1418](https://github.com/influxdata/telegraf/issues/1418): logparser: error and exit on file permissions/missing errors.
 - [#1499](https://github.com/influxdata/telegraf/pull/1499): Make the user able to specify full path for HAproxy stats
 - [#1521](https://github.com/influxdata/telegraf/pull/1521): Fix Redis url, an extra "tcp://" was added.
-## v1.0 beta 2 [2016-06-21]
-### Features
-- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
-- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
-- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
-- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
-- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
-### Bugfixes
 - [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.
 - [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection.
 - [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string.
@@ -107,50 +127,6 @@ should now look like:
 - [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
 - [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
 - [#1396](https://github.com/influxdata/telegraf/pull/1396): Prometheus input plugin now supports x509 certs authentication
-## v1.0 beta 1 [2016-06-07]
-### Release Notes
-- `flush_jitter` behavior has been changed. The random jitter will now be
-evaluated at every flush interval, rather than once at startup. This makes it
-consistent with the behavior of `collection_jitter`.
-- All AWS plugins now utilize a standard mechanism for evaluating credentials.
-This allows all AWS plugins to support environment variables, shared credential
-files & profiles, and role assumptions. See the specific plugin README for
-details.
-- The AWS CloudWatch input plugin can now declare a wildcard value for a metric
-dimension. This causes the plugin to read all metrics that contain the specified
-dimension key regardless of value. This is used to export collections of metrics
-without having to know the dimension values ahead of time.
-- The AWS CloudWatch input plugin can now be configured with the `cache_ttl`
-attribute. This configures the TTL of the internal metric cache. This is useful
-in conjunction with wildcard dimension values as it will control the amount of
-time before a new metric is included by the plugin.
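Wildcard dimensions and `cache_ttl` combine like this in the plugin config; a hedged sketch (option names assume the cloudwatch plugin README of this era, and the namespace, metric, and dimension values are purely illustrative):

```toml
[[inputs.cloudwatch]]
  region = "us-east-1"
  namespace = "AWS/ELB"
  period = "5m"
  delay = "5m"
  ## TTL of the internal metric cache: controls how long before a newly
  ## appearing metric (e.g. a new load balancer) is picked up by the wildcard.
  cache_ttl = "10m"

  [[inputs.cloudwatch.metrics]]
    names = ["Latency"]
    ## "*" matches every value of this dimension, exporting the metric for
    ## all load balancers without listing them ahead of time.
    [[inputs.cloudwatch.metrics.dimensions]]
      name = "LoadBalancerName"
      value = "*"
```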
-### Features
-- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
-- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
-- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
-- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
-- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
-- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
-- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
-- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren!
-- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
-- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
-- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified
-- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
-- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified
-- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
-- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
-### Bugfixes
 - [#1252](https://github.com/influxdata/telegraf/pull/1252) & [#1279](https://github.com/influxdata/telegraf/pull/1279): Fix systemd service. Thanks @zbindenren & @PierreF!
 - [#1221](https://github.com/influxdata/telegraf/pull/1221): Fix influxdb n_shards counter.
 - [#1258](https://github.com/influxdata/telegraf/pull/1258): Fix potential kernel plugin integer parse error.
@@ -160,6 +136,11 @@ time before a new metric is included by the plugin.
 - [#1316](https://github.com/influxdata/telegraf/pull/1316): Removed leaked "database" tag on redis metrics. Thanks @PierreF!
 - [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory.
 - [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.
+- [#1586](https://github.com/influxdata/telegraf/pull/1586): Remove IF NOT EXISTS from influxdb output database creation.
+- [#1600](https://github.com/influxdata/telegraf/issues/1600): Fix quoting with text values in postgresql_extensible plugin.
+- [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic.
+- [#1634](https://github.com/influxdata/telegraf/issues/1634): Fix ntpq panic when field is missing.
+- [#1637](https://github.com/influxdata/telegraf/issues/1637): Sanitize graphite output field names.
 ## v0.13.1 [2016-05-24]

View File

@@ -11,6 +11,7 @@ Output plugins READMEs are less structured,
 but any information you can provide on how the data will look is appreciated.
 See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
 for a good example.
+1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
 1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
 ## GoDoc

Godeps
View File

@@ -29,6 +29,8 @@ github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
 github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
+github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
@@ -44,8 +46,8 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil ee66bc560c366dd33b9a4046ba0b644caba46bed
-github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
+github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
+github.com/soniah/gosnmp eb32571c2410868d85849ad67d1e51d01273eb84
 github.com/sparrc/aerospike-client-go d4bb42d2c2d39dae68e054116f4538af189e05d5
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c

View File

@@ -1,59 +1,12 @@
-github.com/Microsoft/go-winio 9f57cbbcbcb41dea496528872a4f0e37a4f7ae98
-github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
-github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
+github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
-github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
-github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
-github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
-github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
-github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
-github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
-github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
-github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
-github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
-github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
-github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
-github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
-github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
-github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
-github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
-github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
-github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
-github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
-github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
-github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
-github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
-github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
-github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
-github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
-github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
-github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
-github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
-github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
-github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
-github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
-github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
-github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
-github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
-github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
-github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
-github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
-github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
-github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
-github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
-github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
-github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
-github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
-github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
-github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
-github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
-github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
-github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
-golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
-golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
-gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
-gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
-gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
-gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
+github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
+github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88
+github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
+golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
+github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
+github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
+github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
+github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
+gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
+gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8

View File

@@ -16,7 +16,7 @@ build:
 	go install -ldflags "-X main.version=$(VERSION)" ./...
 build-windows:
-	go build -o telegraf.exe -ldflags \
+	GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
 		"-X main.version=$(VERSION)" \
 		./cmd/telegraf/telegraf.go
@@ -37,6 +37,7 @@ prepare:
 # Use the windows godeps file to prepare dependencies
 prepare-windows:
 	go get github.com/sparrc/gdm
+	gdm restore
 	gdm restore -f Godeps_windows
 # Run all docker containers necessary for unit tests
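Taken together, the two targets give a repeatable cross-build of the Windows binary from a non-Windows host; a sketch of the flow (assuming `gdm` ends up on the PATH after `go get`):

```
$ make prepare-windows   # gdm restore, then overlay the Godeps_windows pins
$ make build-windows     # GOOS=windows GOARCH=amd64 go build -o telegraf.exe ...
```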

View File

@@ -188,8 +188,9 @@ Currently implemented sources:
 * [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
 * [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
 * [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
-* [sensors ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source)
+* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
 * [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
+* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy)
 * [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
 * [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
 * [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)

View File

@@ -12,7 +12,7 @@ import (
 )
 func NewAccumulator(
-	inputConfig *internal_models.InputConfig,
+	inputConfig *models.InputConfig,
 	metrics chan telegraf.Metric,
 ) *accumulator {
 	acc := accumulator{}
@@ -31,7 +31,7 @@ type accumulator struct {
 	// print every point added to the accumulator
 	trace bool
-	inputConfig *internal_models.InputConfig
+	inputConfig *models.InputConfig
 	precision time.Duration

View File

@@ -21,7 +21,7 @@ func TestAdd(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.Add("acctest", float64(101), map[string]string{})
 	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
@@ -47,7 +47,7 @@ func TestAddNoPrecisionWithInterval(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.SetPrecision(0, time.Second)
 	a.Add("acctest", float64(101), map[string]string{})
@@ -74,7 +74,7 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.SetPrecision(time.Second, time.Millisecond)
 	a.Add("acctest", float64(101), map[string]string{})
@@ -101,7 +101,7 @@ func TestAddDisablePrecision(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.SetPrecision(time.Second, time.Millisecond)
 	a.DisablePrecision()
@@ -129,7 +129,7 @@ func TestDifferentPrecisions(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.SetPrecision(0, time.Second)
 	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
@@ -170,7 +170,7 @@ func TestAddDefaultTags(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.Add("acctest", float64(101), map[string]string{})
 	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
@@ -196,7 +196,7 @@ func TestAddFields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	fields := map[string]interface{}{
 		"usage": float64(99),
@@ -229,7 +229,7 @@ func TestAddInfFields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	fields := map[string]interface{}{
 		"usage": inf,
@@ -257,7 +257,7 @@ func TestAddNaNFields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	fields := map[string]interface{}{
 		"usage": nan,
@@ -281,7 +281,7 @@ func TestAddUint64Fields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	fields := map[string]interface{}{
 		"usage": uint64(99),
@@ -310,7 +310,7 @@ func TestAddUint64Overflow(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	fields := map[string]interface{}{
 		"usage": uint64(9223372036854775808),
@@ -340,7 +340,7 @@ func TestAddInts(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.Add("acctest", int(101), map[string]string{})
 	a.Add("acctest", int32(101), map[string]string{"acc": "test"})
@@ -367,7 +367,7 @@ func TestAddFloats(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.Add("acctest", float32(101), map[string]string{"acc": "test"})
 	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
@@ -389,7 +389,7 @@ func TestAddStrings(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.Add("acctest", "test", map[string]string{"acc": "test"})
 	a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)
@@ -411,7 +411,7 @@ func TestAddBools(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.Add("acctest", true, map[string]string{"acc": "test"})
 	a.Add("acctest", false, map[string]string{"acc": "test"}, now)
@@ -433,11 +433,11 @@ func TestAccFilterTags(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	filter := internal_models.Filter{
+	filter := models.Filter{
 		TagExclude: []string{"acc"},
 	}
 	assert.NoError(t, filter.CompileFilter())
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.inputConfig.Filter = filter
 	a.Add("acctest", float64(101), map[string]string{})
@@ -465,7 +465,7 @@ func TestAccAddError(t *testing.T) {
 	defer log.SetOutput(os.Stderr)
 	a := accumulator{}
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}
 	a.inputConfig.Name = "mock_plugin"
 	a.AddError(fmt.Errorf("foo"))

View File

@@ -88,7 +88,7 @@ func (a *Agent) Close() error {
 	return err
 }
-func panicRecover(input *internal_models.RunningInput) {
+func panicRecover(input *models.RunningInput) {
 	if err := recover(); err != nil {
 		trace := make([]byte, 2048)
 		runtime.Stack(trace, true)
@@ -104,7 +104,7 @@ func panicRecover(input *internal_models.RunningInput) {
 // reporting interval.
 func (a *Agent) gatherer(
 	shutdown chan struct{},
-	input *internal_models.RunningInput,
+	input *models.RunningInput,
 	interval time.Duration,
 	metricC chan telegraf.Metric,
 ) error {
@@ -152,7 +152,7 @@ func (a *Agent) gatherer(
 // over.
 func gatherWithTimeout(
 	shutdown chan struct{},
-	input *internal_models.RunningInput,
+	input *models.RunningInput,
 	acc *accumulator,
 	timeout time.Duration,
 ) {
@@ -240,7 +240,7 @@ func (a *Agent) flush() {
 	wg.Add(len(a.Config.Outputs))
 	for _, o := range a.Config.Outputs {
-		go func(output *internal_models.RunningOutput) {
+		go func(output *models.RunningOutput) {
 			defer wg.Done()
 			err := output.Write()
 			if err != nil {
@@ -351,7 +351,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 		if input.Config.Interval != 0 {
 			interval = input.Config.Interval
 		}
-		go func(in *internal_models.RunningInput, interv time.Duration) {
+		go func(in *models.RunningInput, interv time.Duration) {
 			defer wg.Done()
 			if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
 				log.Printf(err.Error())

View File

@@ -6,6 +6,7 @@ import (
 	"log"
 	"os"
 	"os/signal"
+	"runtime"
 	"strings"
 	"syscall"
@@ -15,6 +16,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/all"
 	"github.com/influxdata/telegraf/plugins/outputs"
 	_ "github.com/influxdata/telegraf/plugins/outputs/all"
+	"github.com/kardianos/service"
 )
 var fDebug = flag.Bool("debug", false,
@@ -39,6 +41,8 @@ var fOutputList = flag.Bool("output-list", false,
 	"print available output plugins.")
 var fUsage = flag.String("usage", "",
 	"print usage for a plugin, ie, 'telegraf -usage mysql'")
+var fService = flag.String("service", "",
+	"operate on the service")
 // Telegraf version, populated linker.
 // ie, -ldflags "-X main.version=`git describe --always --tags`"
@@ -68,6 +72,7 @@ The flags are:
 	-debug      print metrics as they're generated to stdout
 	-quiet      run in quiet mode
 	-version    print the version to stdout
+	-service    Control the service, ie, 'telegraf -service install (windows only)'
 In addition to the -config flag, telegraf will also load the config file from
 an environment variable or default location. Precedence is:
@@ -94,7 +99,22 @@ Examples:
 	telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
 `
-func main() {
+var logger service.Logger
+var stop chan struct{}
+var srvc service.Service
+var svcConfig *service.Config
+
+type program struct{}
+
+func reloadLoop(stop chan struct{}, s service.Service) {
+	defer func() {
+		if service.Interactive() {
+			os.Exit(0)
+		}
+		return
+	}()
 	reload := make(chan bool, 1)
 	reload <- true
 	for <-reload {
@@ -154,6 +174,15 @@
 			}
 		}
 		return
+	case *fService != "" && runtime.GOOS == "windows":
+		if *fConfig != "" {
+			(*svcConfig).Arguments = []string{"-config", *fConfig}
+		}
+		err := service.Control(s, *fService)
+		if err != nil {
+			log.Fatal(err)
+		}
+		return
 	}
 	// If no other options are specified, load the config file and run.
@@ -209,14 +238,18 @@
 	signals := make(chan os.Signal)
 	signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
 	go func() {
-		sig := <-signals
-		if sig == os.Interrupt {
-			close(shutdown)
-		}
-		if sig == syscall.SIGHUP {
-			log.Printf("Reloading Telegraf config\n")
-			<-reload
-			reload <- true
+		select {
+		case sig := <-signals:
+			if sig == os.Interrupt {
+				close(shutdown)
+			}
+			if sig == syscall.SIGHUP {
+				log.Printf("Reloading Telegraf config\n")
+				<-reload
+				reload <- true
+				close(shutdown)
+			}
+		case <-stop:
+			close(shutdown)
 		}
 	}()
@@ -245,3 +278,46 @@ func usageExit(rc int) {
 	fmt.Println(usage)
 	os.Exit(rc)
 }
+
+func (p *program) Start(s service.Service) error {
+	srvc = s
+	go p.run()
+	return nil
+}
+
+func (p *program) run() {
+	stop = make(chan struct{})
+	reloadLoop(stop, srvc)
+}
+
+func (p *program) Stop(s service.Service) error {
+	close(stop)
+	return nil
+}
+
+func main() {
+	if runtime.GOOS == "windows" {
+		svcConfig = &service.Config{
+			Name:        "telegraf",
+			DisplayName: "Telegraf Data Collector Service",
+			Description: "Collects data using a series of plugins and publishes it to " +
+				"another series of plugins.",
+			Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
+		}
+
+		prg := &program{}
+		s, err := service.New(prg, svcConfig)
+		if err != nil {
+			log.Fatal(err)
+		}
+		logger, err = s.Logger(nil)
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = s.Run()
+		if err != nil {
+			logger.Error(err)
+		}
+	} else {
+		stop = make(chan struct{})
+		reloadLoop(stop, nil)
+	}
+}

View File

@@ -16,6 +16,7 @@
 - github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
 - github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
 - github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
+- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
 - github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
 - github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
 - github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)

View File

@@ -1,36 +1,40 @@
 # Running Telegraf as a Windows Service
-If you have tried to install Go binaries as Windows Services with the **sc.exe**
-tool you may have seen that the service errors and stops running after a while.
-**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a
-[number of scenarios](http://nssm.cc/scenarios) including running Go binaries
-that were not specifically designed to run only in Windows platforms.
-## NSSM Installation via Chocolatey
-You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/)
-with these commands
-```powershell
-iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
-choco install -y nssm
-```
-## Installing Telegraf as a Windows Service with NSSM
-You can download the latest Telegraf Windows binaries (still Experimental at
-the moment) from [the Telegraf Github repo](https://github.com/influxdata/telegraf).
-Then you can create a C:\telegraf folder, unzip the binary there and modify the
-**telegraf.conf** sample to allocate the metrics you want to send to **InfluxDB**.
-Once you have NSSM installed in your system, the process is quite straightforward.
-You only need to type this command in your Windows shell
-```powershell
-nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.config
-```
-And now your service will be installed in Windows and you will be able to start and
-stop it gracefully
+Telegraf natively supports running as a Windows Service. Outlined below are
+the general steps to set it up.
+1. Obtain the telegraf windows distribution
+2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
+location simply specify the `-config` parameter with the desired location)
+3. Place the telegraf.exe and the config file into `C:\Program Files\Telegraf`
+4. To install the service into the Windows Service Manager, run (as an
+administrator):
+```
+> C:\Program Files\Telegraf\telegraf.exe --service install
+```
+5. Edit the configuration file to meet your needs
+6. To check that it works, run:
+```
+> C:\Program Files\Telegraf\telegraf.exe --config C:\Program Files\Telegraf\telegraf.conf --test
+```
+7. To start collecting data, run:
+```
+> net start telegraf
+```
+## Other supported operations
+Telegraf can manage its own service through the --service flag:
+| Command                            | Effect                        |
+|------------------------------------|-------------------------------|
+| `telegraf.exe --service install`   | Install telegraf as a service |
+| `telegraf.exe --service uninstall` | Remove the telegraf service   |
+| `telegraf.exe --service start`     | Start the telegraf service    |
+| `telegraf.exe --service stop`      | Stop the telegraf service     |
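Since the install path reads `-config` into the service definition (see the `service.Control` branch added to `cmd/telegraf/telegraf.go` in this commit), a non-default config location can be baked in at install time; a sketch, assuming the binary sits in the default directory:

```
> C:\Program Files\Telegraf\telegraf.exe --service install -config C:\Telegraf\telegraf.conf
> net start telegraf
```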

View File

@@ -55,7 +55,7 @@
   ## By default, precision will be set to the same timestamp order as the
   ## collection interval, with the maximum being 1s.
   ## Precision will NOT be used for service inputs, such as logparser and statsd.
-  ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
+  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
   precision = ""
   ## Run telegraf in debug mode
   debug = false
@@ -83,7 +83,7 @@
   ## Retention policy to write to. Empty string writes to the default rp.
   retention_policy = ""
-  ## Write consistency (clusters only), can be: "any", "one", "quorom", "all"
+  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
   write_consistency = "any"
   ## Write timeout (for the InfluxDB client), formatted as a string.
@@ -321,14 +321,13 @@
 #   api_token = "my-secret-token" # required.
 #   ## Debug
 #   # debug = false
-#   ## Tag Field to populate source attribute (optional)
-#   ## This is typically the _hostname_ from which the metric was obtained.
-#   source_tag = "host"
 #   ## Connection timeout.
 #   # timeout = "5s"
-#   ## Output Name Template (same as graphite buckets)
+#   ## Output source Template (same as graphite buckets)
 #   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
-#   template = "host.tags.measurement.field"
+#   ## This template is used in librato's source (not metric's name)
+#   template = "host"
 #
 # # Configuration for MQTT server to send metrics to
@@ -1151,6 +1150,23 @@
 #   command = "passenger-status -v --show=xml"
+# # Read metrics from one or many pgbouncer servers
+# [[inputs.pgbouncer]]
+#   ## specify address via a url matching:
+#   ##   postgres://[pqgotest[:password]]@localhost:port[/dbname]\
+#   ##       ?sslmode=[disable|verify-ca|verify-full]
+#   ## or a simple string:
+#   ##   host=localhost user=pqotest port=6432 password=... sslmode=... dbname=pgbouncer
+#   ##
+#   ## All connection parameters are optional, except for dbname,
+#   ## you need to set it always as pgbouncer.
+#   address = "host=localhost user=postgres port=6432 sslmode=disable dbname=pgbouncer"
+#
+#   ## A list of databases to pull metrics about. If not specified, metrics for all
+#   ## databases are gathered.
+#   # databases = ["app_production", "testing"]
 # # Read metrics of phpfpm, via HTTP status page or socket
 # [[inputs.phpfpm]]
 #   ## An array of addresses to gather stats about. Specify an ip or hostname
@@ -1377,8 +1393,8 @@
 #   servers = ["http://localhost:8098"]
-# # Reads oids value from one or many snmp agents
-# [[inputs.snmp]]
+# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
+# [[inputs.snmp_legacy]]
 #   ## Use 'oids.txt' file to translate oids to names
 #   ## To generate 'oids.txt' you need to run:
 #   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt

View File

@@ -9,6 +9,7 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
+	"runtime"
 	"sort"
 	"strings"
 	"time"
@@ -47,8 +48,8 @@ type Config struct {
 	OutputFilters []string
 	Agent   *AgentConfig
-	Inputs  []*internal_models.RunningInput
-	Outputs []*internal_models.RunningOutput
+	Inputs  []*models.RunningInput
+	Outputs []*models.RunningOutput
 }
 func NewConfig() *Config {
@@ -61,8 +62,8 @@ func NewConfig() *Config {
 		},
 		Tags:          make(map[string]string),
-		Inputs:        make([]*internal_models.RunningInput, 0),
-		Outputs:       make([]*internal_models.RunningOutput, 0),
+		Inputs:        make([]*models.RunningInput, 0),
+		Outputs:       make([]*models.RunningOutput, 0),
 		InputFilters:  make([]string, 0),
 		OutputFilters: make([]string, 0),
 	}
@@ -219,7 +220,7 @@ var header = `# Telegraf Configuration
   ## By default, precision will be set to the same timestamp order as the
   ## collection interval, with the maximum being 1s.
   ## Precision will NOT be used for service inputs, such as logparser and statsd.
-  ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
+  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
   precision = ""
   ## Run telegraf in debug mode
   debug = false
@@ -432,6 +433,9 @@ func getDefaultConfigPath() (string, error) {
 	envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
 	homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
 	etcfile := "/etc/telegraf/telegraf.conf"
+	if runtime.GOOS == "windows" {
+		etcfile = `C:\Program Files\Telegraf\telegraf.conf`
+	}
 	for _, path := range []string{envfile, homefile, etcfile} {
 		if _, err := os.Stat(path); err == nil {
 			log.Printf("Using config file: %s", path)
@@ -598,7 +602,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
 		return err
 	}
-	ro := internal_models.NewRunningOutput(name, output, outputConfig,
+	ro := models.NewRunningOutput(name, output, outputConfig,
 		c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
 	c.Outputs = append(c.Outputs, ro)
 	return nil
@@ -639,7 +643,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
 		return err
 	}
-	rp := &internal_models.RunningInput{
+	rp := &models.RunningInput{
 		Name:   name,
 		Input:  input,
 		Config: pluginConfig,
@@ -650,10 +654,10 @@ func (c *Config) addInput(name string, table *ast.Table) error {
 // buildFilter builds a Filter
 // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
-// be inserted into the internal_models.OutputConfig/internal_models.InputConfig
+// be inserted into the models.OutputConfig/models.InputConfig
 // to be used for glob filtering on tags and measurements
-func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
-	f := internal_models.Filter{}
+func buildFilter(tbl *ast.Table) (models.Filter, error) {
+	f := models.Filter{}
 	if node, ok := tbl.Fields["namepass"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
@@ -717,7 +721,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
 	if subtbl, ok := node.(*ast.Table); ok {
 		for name, val := range subtbl.Fields {
 			if kv, ok := val.(*ast.KeyValue); ok {
-				tagfilter := &internal_models.TagFilter{Name: name}
+				tagfilter := &models.TagFilter{Name: name}
 				if ary, ok := kv.Value.(*ast.Array); ok {
 					for _, elem := range ary.Value {
 						if str, ok := elem.(*ast.String); ok {
@@ -736,7 +740,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
 	if subtbl, ok := node.(*ast.Table); ok {
 		for name, val := range subtbl.Fields {
 			if kv, ok := val.(*ast.KeyValue); ok {
-				tagfilter := &internal_models.TagFilter{Name: name}
+				tagfilter := &models.TagFilter{Name: name}
 				if ary, ok := kv.Value.(*ast.Array); ok {
 					for _, elem := range ary.Value {
 						if str, ok := elem.(*ast.String); ok {
@@ -793,9 +797,9 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
 // buildInput parses input specific items from the ast.Table,
 // builds the filter and returns a
-// internal_models.InputConfig to be inserted into internal_models.RunningInput
-func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) {
-	cp := &internal_models.InputConfig{Name: name}
+// models.InputConfig to be inserted into models.RunningInput
+func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
+	cp := &models.InputConfig{Name: name}
 	if node, ok := tbl.Fields["interval"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if str, ok := kv.Value.(*ast.String); ok {
@@ -969,14 +973,14 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
 // buildOutput parses output specific items from the ast.Table,
 // builds the filter and returns an
-// internal_models.OutputConfig to be inserted into internal_models.RunningInput
+// models.OutputConfig to be inserted into models.RunningInput
 // Note: error exists in the return for future calls that might require error
-func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
+func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
 	filter, err := buildFilter(tbl)
 	if err != nil {
 		return nil, err
 	}
-	oc := &internal_models.OutputConfig{
+	oc := &models.OutputConfig{
 		Name:   name,
 		Filter: filter,
 	}

View File

@ -26,19 +26,19 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"192.168.1.1"} memcached.Servers = []string{"192.168.1.1"}
filter := internal_models.Filter{ filter := models.Filter{
NameDrop: []string{"metricname2"}, NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"}, NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"}, FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"}, FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{ TagDrop: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "badtag", Name: "badtag",
Filter: []string{"othertag"}, Filter: []string{"othertag"},
}, },
}, },
TagPass: []internal_models.TagFilter{ TagPass: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "goodtag", Name: "goodtag",
Filter: []string{"mytag"}, Filter: []string{"mytag"},
}, },
@ -46,7 +46,7 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
IsActive: true, IsActive: true,
} }
assert.NoError(t, filter.CompileFilter()) assert.NoError(t, filter.CompileFilter())
mConfig := &internal_models.InputConfig{ mConfig := &models.InputConfig{
Name: "memcached", Name: "memcached",
Filter: filter, Filter: filter,
Interval: 10 * time.Second, Interval: 10 * time.Second,
@ -66,19 +66,19 @@ func TestConfig_LoadSingleInput(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"} memcached.Servers = []string{"localhost"}
filter := internal_models.Filter{ filter := models.Filter{
NameDrop: []string{"metricname2"}, NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"}, NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"}, FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"}, FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{ TagDrop: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "badtag", Name: "badtag",
Filter: []string{"othertag"}, Filter: []string{"othertag"},
}, },
}, },
TagPass: []internal_models.TagFilter{ TagPass: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "goodtag", Name: "goodtag",
Filter: []string{"mytag"}, Filter: []string{"mytag"},
}, },
@ -86,7 +86,7 @@ func TestConfig_LoadSingleInput(t *testing.T) {
IsActive: true, IsActive: true,
} }
assert.NoError(t, filter.CompileFilter()) assert.NoError(t, filter.CompileFilter())
mConfig := &internal_models.InputConfig{ mConfig := &models.InputConfig{
Name: "memcached", Name: "memcached",
Filter: filter, Filter: filter,
Interval: 5 * time.Second, Interval: 5 * time.Second,
@ -113,19 +113,19 @@ func TestConfig_LoadDirectory(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"} memcached.Servers = []string{"localhost"}
filter := internal_models.Filter{ filter := models.Filter{
NameDrop: []string{"metricname2"}, NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"}, NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"}, FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"}, FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{ TagDrop: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "badtag", Name: "badtag",
Filter: []string{"othertag"}, Filter: []string{"othertag"},
}, },
}, },
TagPass: []internal_models.TagFilter{ TagPass: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "goodtag", Name: "goodtag",
Filter: []string{"mytag"}, Filter: []string{"mytag"},
}, },
@ -133,7 +133,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
IsActive: true, IsActive: true,
} }
assert.NoError(t, filter.CompileFilter()) assert.NoError(t, filter.CompileFilter())
mConfig := &internal_models.InputConfig{ mConfig := &models.InputConfig{
Name: "memcached", Name: "memcached",
Filter: filter, Filter: filter,
Interval: 5 * time.Second, Interval: 5 * time.Second,
@ -150,7 +150,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
ex.SetParser(p) ex.SetParser(p)
ex.Command = "/usr/bin/myothercollector --foo=bar" ex.Command = "/usr/bin/myothercollector --foo=bar"
eConfig := &internal_models.InputConfig{ eConfig := &models.InputConfig{
Name: "exec", Name: "exec",
MeasurementSuffix: "_myothercollector", MeasurementSuffix: "_myothercollector",
} }
@ -169,7 +169,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
pstat.PidFile = "/var/run/grafana-server.pid" pstat.PidFile = "/var/run/grafana-server.pid"
pConfig := &internal_models.InputConfig{Name: "procstat"} pConfig := &models.InputConfig{Name: "procstat"}
pConfig.Tags = make(map[string]string) pConfig.Tags = make(map[string]string)
assert.Equal(t, pstat, c.Inputs[3].Input, assert.Equal(t, pstat, c.Inputs[3].Input,

View File

@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"fmt" "fmt"

View File

@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"testing" "testing"

View File

@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"time" "time"

View File

@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"log" "log"

View File

@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"fmt" "fmt"

View File

@ -27,6 +27,14 @@ The example plugin gathers metrics about example things
- tag2 - tag2
- measurement2 has the following tags: - measurement2 has the following tags:
- tag3 - tag3
### Sample Queries:
These are some useful queries (to generate dashboards or other visualizations) to run against data from this plugin:
```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1='bar' AND time > now() - 1h GROUP BY tag1
```
### Example Output: ### Example Output:

View File

@ -62,6 +62,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors" _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd" _ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/sysstat"

View File

@ -39,9 +39,9 @@ For more information, please check the [Mesos Observability Metrics](http://meso
# slave_tasks = true # slave_tasks = true
``` ```
By dafault this plugin is not configured to gather metrics from mesos. Since mesos cluster can be deployed in numerous ways it does not provide ane default By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default
values in that matter. User needs to specify master/slave nodes this plugin will gather metrics from. Additionally by enabling `slave_tasks` will allow values. The user needs to specify the master/slave nodes this plugin will gather metrics from. Additionally, enabling `slave_tasks` will allow
agthering metrics from takss runing on specified slaves (this options is disabled by default). gathering metrics from tasks running on specified slaves (this option is disabled by default).
### Measurements & Fields: ### Measurements & Fields:

View File

@ -6,10 +6,22 @@ import (
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
) )
// MockPlugin struct should be named the same as the Plugin
type MockPlugin struct { type MockPlugin struct {
mock.Mock mock.Mock
} }
// Description will appear directly above the plugin definition in the config file
func (m *MockPlugin) Description() string {
return `This is an example plugin`
}
// SampleConfig will populate the sample configuration portion of the plugin's configuration
func (m *MockPlugin) SampleConfig() string {
return ` sampleVar = 'foo'`
}
// Gather defines what data the plugin will gather.
func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error {
ret := m.Called(_a0) ret := m.Called(_a0)
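For context, here is a minimal sketch of how a testify-based mock like this might be driven in a test. It assumes the stretchr/testify `mock` API and telegraf's `testutil.Accumulator`, and that it compiles alongside `MockPlugin` (the package name and test wiring are illustrative, not part of this commit):
```go
package inputs

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestMockPluginGather(t *testing.T) {
	m := &MockPlugin{}
	// Expect one Gather call and have it report success.
	m.On("Gather", mock.Anything).Return(nil)

	var acc testutil.Accumulator
	require.NoError(t, m.Gather(&acc))
	m.AssertExpectations(t)
}
```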

View File

@ -1376,6 +1376,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
&rowsAffected, &rowsSent, &rowsExamined, &rowsAffected, &rowsSent, &rowsExamined,
&tmpTables, &tmpDiskTables, &tmpTables, &tmpDiskTables,
&sortMergePasses, &sortRows, &sortMergePasses, &sortRows,
&noIndexUsed,
) )
if err != nil { if err != nil {

View File

@ -119,7 +119,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
// Get integer metrics from output // Get integer metrics from output
for key, index := range intI { for key, index := range intI {
if index == -1 { if index == -1 || index >= len(fields) {
continue continue
} }
if fields[index] == "-" { if fields[index] == "-" {
@ -169,7 +169,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
// get float metrics from output // get float metrics from output
for key, index := range floatI { for key, index := range floatI {
if index == -1 { if index == -1 || index >= len(fields) {
continue continue
} }
if fields[index] == "-" { if fields[index] == "-" {

View File

@ -41,6 +41,35 @@ func TestSingleNTPQ(t *testing.T) {
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
} }
func TestMissingJitterField(t *testing.T) {
tt := tester{
ret: []byte(missingJitterField),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(101),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
}
tags := map[string]string{
"remote": "uschi5-ntp-002.",
"state_prefix": "*",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestBadIntNTPQ(t *testing.T) { func TestBadIntNTPQ(t *testing.T) {
tt := tester{ tt := tester{
ret: []byte(badIntParseNTPQ), ret: []byte(badIntParseNTPQ),
@ -381,6 +410,11 @@ var singleNTPQ = ` remote refid st t when poll reach delay
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462 *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
` `
var missingJitterField = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010
`
var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter
============================================================================== ==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462 *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462

View File

@ -0,0 +1,36 @@
# Ping input plugin
This input plugin measures the round-trip time.
## Windows:
### Configuration:
```
## urls to ping
urls = ["www.google.com"] # required
## number of pings to send per collection (ping -n <COUNT>)
count = 4 # required
## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
timeout = 0
```
### Measurements & Fields:
- packets_transmitted ( from ping output )
- reply_received ( increments only on a valid echo reply; e.g. a 'Destination net unreachable' reply will increment packets_received but not reply_received )
- packets_received ( from ping output )
- percent_reply_loss ( computed from packets_transmitted and reply_received; see the sketch below )
- percent_packets_loss ( computed from packets_transmitted and packets_received )
- errors ( set when the host cannot be found or wrong parameters are passed to the application )
- response time
- average_response_ms ( from ping output )
- minimum_response_ms ( from ping output )
- maximum_response_ms ( from ping output )
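The loss percentages above follow straightforward arithmetic; here is a standalone sketch (the `lossPercent` helper is hypothetical, not the plugin's actual code):
```go
package main

import "fmt"

// lossPercent mirrors the percentage math described above:
// 100 * (transmitted - received) / transmitted.
func lossPercent(transmitted, received int) float64 {
	return float64(transmitted-received) / float64(transmitted) * 100.0
}

func main() {
	fmt.Println(lossPercent(4, 1)) // e.g. percent_packet_loss = 75
	fmt.Println(lossPercent(4, 0)) // e.g. percent_reply_loss = 100
}
```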
### Tags:
- server
### Example Output:
```
* Plugin: ping, Collection 1
ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000
```

View File

@ -65,16 +65,20 @@ func hostPinger(timeout float64, args ...string) (string, error) {
// processPingOutput takes in a string output from the ping command // processPingOutput takes in a string output from the ping command
// based on linux implementation but using regex ( multilanguage support ) ( shouldn't affect the performance of the program ) // based on linux implementation but using regex ( multilanguage support ) ( shouldn't affect the performance of the program )
// It returns (<transmitted packets>, <received packets>, <average response>, <min response>, <max response>) // It returns (<transmitted packets>, <received reply>, <received packet>, <average response>, <min response>, <max response>)
func processPingOutput(out string) (int, int, int, int, int, error) { func processPingOutput(out string) (int, int, int, int, int, int, error) {
// So find a line containing 3 numbers, except reply lines // So find a line containing 3 numbers, except reply lines
var stats, aproxs []string = nil, nil var stats, aproxs []string = nil, nil
err := errors.New("Fatal error processing ping output") err := errors.New("Fatal error processing ping output")
stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`) stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`)
aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`) aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
ttlLine := regexp.MustCompile(`TTL=\d+`)
lines := strings.Split(out, "\n") lines := strings.Split(out, "\n")
var receivedReply int = 0
for _, line := range lines { for _, line := range lines {
if !strings.Contains(line, "TTL") { if ttlLine.MatchString(line) {
receivedReply++
} else {
if stats == nil { if stats == nil {
stats = stat.FindStringSubmatch(line) stats = stat.FindStringSubmatch(line)
} }
@ -86,35 +90,35 @@ func processPingOutput(out string) (int, int, int, int, int, error) {
// stats data should contain 4 members: entireExpression + ( Send, Receive, Lost ) // stats data should contain 4 members: entireExpression + ( Send, Receive, Lost )
if len(stats) != 4 { if len(stats) != 4 {
return 0, 0, 0, 0, 0, err return 0, 0, 0, 0, 0, 0, err
} }
trans, err := strconv.Atoi(stats[1]) trans, err := strconv.Atoi(stats[1])
if err != nil { if err != nil {
return 0, 0, 0, 0, 0, err return 0, 0, 0, 0, 0, 0, err
} }
rec, err := strconv.Atoi(stats[2]) receivedPacket, err := strconv.Atoi(stats[2])
if err != nil { if err != nil {
return 0, 0, 0, 0, 0, err return 0, 0, 0, 0, 0, 0, err
} }
// aproxs data should contain 4 members: entireExpression + ( min, max, avg ) // aproxs data should contain 4 members: entireExpression + ( min, max, avg )
if len(aproxs) != 4 { if len(aproxs) != 4 {
return trans, rec, 0, 0, 0, err return trans, receivedReply, receivedPacket, 0, 0, 0, err
} }
min, err := strconv.Atoi(aproxs[1]) min, err := strconv.Atoi(aproxs[1])
if err != nil { if err != nil {
return trans, rec, 0, 0, 0, err return trans, receivedReply, receivedPacket, 0, 0, 0, err
} }
max, err := strconv.Atoi(aproxs[2]) max, err := strconv.Atoi(aproxs[2])
if err != nil { if err != nil {
return trans, rec, 0, 0, 0, err return trans, receivedReply, receivedPacket, 0, 0, 0, err
} }
avg, err := strconv.Atoi(aproxs[3]) avg, err := strconv.Atoi(aproxs[3])
if err != nil { if err != nil {
return 0, 0, 0, 0, 0, err return 0, 0, 0, 0, 0, 0, err
} }
return trans, rec, avg, min, max, err return trans, receivedReply, receivedPacket, avg, min, max, err
} }
func (p *Ping) timeout() float64 { func (p *Ping) timeout() float64 {
@ -159,21 +163,30 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error()) pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error())
} }
tags := map[string]string{"url": u} tags := map[string]string{"url": u}
trans, rec, avg, min, max, err := processPingOutput(out) trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out)
if err != nil { if err != nil {
// fatal error // fatal error
if pendingError != nil { if pendingError != nil {
errorChannel <- pendingError errorChannel <- pendingError
} }
errorChannel <- err errorChannel <- err
fields := map[string]interface{}{
"errors": 100.0,
}
acc.AddFields("ping", fields, tags)
return return
} }
// Calculate packet loss percentage // Calculate packet loss percentage
loss := float64(trans-rec) / float64(trans) * 100.0 lossReply := float64(trans-recReply) / float64(trans) * 100.0
lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": trans, "packets_transmitted": trans,
"packets_received": rec, "reply_received": recReply,
"percent_packet_loss": loss, "packets_received": receivePacket,
"percent_packet_loss": lossPackets,
"percent_reply_loss": lossReply,
} }
if avg > 0 { if avg > 0 {
fields["average_response_ms"] = avg fields["average_response_ms"] = avg

View File

@ -38,18 +38,20 @@ Approximate round trip times in milli-seconds:
` `
func TestHost(t *testing.T) { func TestHost(t *testing.T) {
trans, rec, avg, min, max, err := processPingOutput(winPLPingOutput) trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted") assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, rec, "4 packets were received") assert.Equal(t, 4, recReply, "4 packets were reply")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50") assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 46, min, "Min 46") assert.Equal(t, 46, min, "Min 46")
assert.Equal(t, 57, max, "max 57") assert.Equal(t, 57, max, "max 57")
trans, rec, avg, min, max, err = processPingOutput(winENPingOutput) trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted") assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, rec, "4 packets were received") assert.Equal(t, 4, recReply, "4 packets were reply")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50") assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 50, min, "Min 50") assert.Equal(t, 50, min, "Min 50")
assert.Equal(t, 52, max, "Max 52") assert.Equal(t, 52, max, "Max 52")
@ -72,7 +74,9 @@ func TestPingGather(t *testing.T) {
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 4, "packets_transmitted": 4,
"packets_received": 4, "packets_received": 4,
"reply_received": 4,
"percent_packet_loss": 0.0, "percent_packet_loss": 0.0,
"percent_reply_loss": 0.0,
"average_response_ms": 50, "average_response_ms": 50,
"minimum_response_ms": 50, "minimum_response_ms": 50,
"maximum_response_ms": 52, "maximum_response_ms": 52,
@ -113,7 +117,9 @@ func TestBadPingGather(t *testing.T) {
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 4, "packets_transmitted": 4,
"packets_received": 0, "packets_received": 0,
"reply_received": 0,
"percent_packet_loss": 100.0, "percent_packet_loss": 100.0,
"percent_reply_loss": 100.0,
} }
acc.AssertContainsTaggedFields(t, "ping", fields, tags) acc.AssertContainsTaggedFields(t, "ping", fields, tags)
} }
@ -154,7 +160,9 @@ func TestLossyPingGather(t *testing.T) {
fields := map[string]interface{}{ fields := map[string]interface{}{
"packets_transmitted": 9, "packets_transmitted": 9,
"packets_received": 7, "packets_received": 7,
"reply_received": 7,
"percent_packet_loss": 22.22222222222222, "percent_packet_loss": 22.22222222222222,
"percent_reply_loss": 22.22222222222222,
"average_response_ms": 115, "average_response_ms": 115,
"minimum_response_ms": 114, "minimum_response_ms": 114,
"maximum_response_ms": 119, "maximum_response_ms": 119,
@ -207,12 +215,114 @@ func TestFatalPingGather(t *testing.T) {
} }
p.Gather(&acc) p.Gather(&acc)
assert.False(t, acc.HasMeasurement("packets_transmitted"), assert.True(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should have packet measurements")
assert.False(t, acc.HasIntField("ping", "packets_transmitted"),
"Fatal ping should not have packet measurements") "Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("packets_received"), assert.False(t, acc.HasIntField("ping", "packets_received"),
"Fatal ping should not have packet measurements") "Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("percent_packet_loss"), assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"),
"Fatal ping should not have packet measurements") "Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("average_response_ms"), assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
var UnreachablePingOutput = `
Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
Request timed out.
Request timed out.
Reply from 194.204.175.50: Destination net unreachable.
Request timed out.
Ping statistics for 8.8.8.8:
Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
`
func mockUnreachableHostPinger(timeout float64, args ...string) (string, error) {
return UnreachablePingOutput, errors.New("So very bad")
}
//Reply from 185.28.251.217: TTL expired in transit.
// In the case of 'Destination net unreachable', the ping application reports a received packet,
// but it does not contain a valid metric, so treat it as lost.
func TestUnreachablePingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com"},
pingHost: mockUnreachableHostPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 1,
"reply_received": 0,
"percent_packet_loss": 75.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
"Unreachable ping should not have an errors field")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Unreachable ping should not have timing measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Unreachable ping should not have timing measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Unreachable ping should not have timing measurements")
}
var TTLExpiredPingOutput = `
Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
Request timed out.
Request timed out.
Reply from 185.28.251.217: TTL expired in transit.
Request timed out.
Ping statistics for 8.8.8.8:
Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
`
func mockTTLExpiredPinger(timeout float64, args ...string) (string, error) {
return TTLExpiredPingOutput, errors.New("So very bad")
}
// In the case of 'TTL expired in transit', the ping application reports a received packet,
// but it does not contain a valid metric, so treat it as lost.
func TestTTLExpiredPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com"},
pingHost: mockTTLExpiredPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 1,
"reply_received": 0,
"percent_packet_loss": 75.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
"TTL-expired ping should not have an errors field")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"TTL-expired ping should not have timing measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"TTL-expired ping should not have timing measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements") "TTL-expired ping should not have timing measurements")
} }

View File

@ -266,29 +266,33 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula
tags := map[string]string{} tags := map[string]string{}
tags["server"] = tagAddress tags["server"] = tagAddress
tags["db"] = dbname.String() tags["db"] = dbname.String()
var isATag int
fields := make(map[string]interface{}) fields := make(map[string]interface{})
COLUMN:
for col, val := range columnMap { for col, val := range columnMap {
if acc.Debug() { if acc.Debug() {
log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val) log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
} }
_, ignore := ignoredColumns[col] _, ignore := ignoredColumns[col]
if !ignore && *val != nil { if ignore || *val == nil {
isATag = 0 continue
for tag := range p.AdditionalTags { }
if col == p.AdditionalTags[tag] { for _, tag := range p.AdditionalTags {
isATag = 1 if col != tag {
value_type_p := fmt.Sprintf(`%T`, *val) continue
if value_type_p == "[]uint8" {
tags[col] = fmt.Sprintf(`%s`, *val)
} else if value_type_p == "int64" {
tags[col] = fmt.Sprintf(`%v`, *val)
}
}
} }
if isATag == 0 { switch v := (*val).(type) {
fields[col] = *val case []byte:
tags[col] = string(v)
case int64:
tags[col] = fmt.Sprintf("%d", v)
} }
continue COLUMN
}
if v, ok := (*val).([]byte); ok {
fields[col] = string(v)
} else {
fields[col] = *val
} }
} }
acc.AddFields(meas_name, fields, tags) acc.AddFields(meas_name, fields, tags)

View File

@ -71,7 +71,7 @@ func (p *SpecProcessor) pushMetrics() {
fields[prefix+"read_count"] = io.ReadCount fields[prefix+"read_count"] = io.ReadCount
fields[prefix+"write_count"] = io.WriteCount fields[prefix+"write_count"] = io.WriteCount
fields[prefix+"read_bytes"] = io.ReadBytes fields[prefix+"read_bytes"] = io.ReadBytes
fields[prefix+"write_bytes"] = io.WriteCount fields[prefix+"write_bytes"] = io.WriteBytes
} }
cpu_time, err := p.proc.Times() cpu_time, err := p.proc.Times()

View File

@ -0,0 +1,47 @@
# sensors Input Plugin
Collect [lm-sensors](https://en.wikipedia.org/wiki/Lm_sensors) metrics - requires the lm-sensors
package installed.
This plugin collects sensor metrics with the `sensors` executable from the lm-sensors package.
### Configuration:
```
# Monitor sensors, requires lm-sensors package
[[inputs.sensors]]
## Remove numbers from field names.
## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# remove_numbers = true
```
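A standalone sketch of the `remove_numbers` behavior (it mirrors the plugin's `[0-9]+` regexp shown later in this commit; the toy program itself is illustrative):
```go
package main

import (
	"fmt"
	"regexp"
)

// The plugin strips digit runs from field names when remove_numbers = true.
var numbers = regexp.MustCompile("[0-9]+")

func main() {
	fmt.Println(numbers.ReplaceAllString("temp1_input", ""))    // temp_input
	fmt.Println(numbers.ReplaceAllString("power1_average", "")) // power_average
}
```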
### Measurements & Fields:
Fields are created dynamically depending on the detected sensors. All fields are floats.
### Tags:
- All measurements have the following tags:
- chip
- feature
### Example Output:
#### Default
```
$ telegraf -config telegraf.conf -input-filter sensors -test
* Plugin: sensors, Collection 1
> sensors,chip=power_meter-acpi-0,feature=power1 power_average=0,power_average_interval=300 1466751326000000000
> sensors,chip=k10temp-pci-00c3,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29,temp_max=70 1466751326000000000
> sensors,chip=k10temp-pci-00cb,feature=temp1 temp_input=29,temp_max=70 1466751326000000000
> sensors,chip=k10temp-pci-00d3,feature=temp1 temp_input=27.5,temp_max=70 1466751326000000000
> sensors,chip=k10temp-pci-00db,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29.5,temp_max=70 1466751326000000000
```
#### With remove_numbers=false
```
* Plugin: sensors, Collection 1
> sensors,chip=power_meter-acpi-0,feature=power1 power1_average=0,power1_average_interval=300 1466753424000000000
> sensors,chip=k10temp-pci-00c3,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=29.125,temp1_max=70 1466753424000000000
> sensors,chip=k10temp-pci-00cb,feature=temp1 temp1_input=29,temp1_max=70 1466753424000000000
> sensors,chip=k10temp-pci-00d3,feature=temp1 temp1_input=29.5,temp1_max=70 1466753424000000000
> sensors,chip=k10temp-pci-00db,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=30,temp1_max=70 1466753424000000000
```

View File

@ -1,91 +1,118 @@
// +build linux,sensors // +build linux
package sensors package sensors
import ( import (
"errors"
"fmt"
"os/exec"
"regexp"
"strconv"
"strings" "strings"
"time"
"github.com/md14454/gosensors"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
var (
execCommand = exec.Command // execCommand is used to mock commands in tests.
numberRegp = regexp.MustCompile("[0-9]+")
)
type Sensors struct { type Sensors struct {
Sensors []string RemoveNumbers bool `toml:"remove_numbers"`
path string
} }
func (_ *Sensors) Description() string { func (*Sensors) Description() string {
return "Monitor sensors using lm-sensors package" return "Monitor sensors, requires lm-sensors package"
} }
var sensorsSampleConfig = ` func (*Sensors) SampleConfig() string {
## By default, telegraf gathers stats from all sensors detected by the return `
## lm-sensors module. ## Remove numbers from field names.
## ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
## Only collect stats from the selected sensors. Sensors are listed as # remove_numbers = true
## <chip name>:<feature name>. This information can be found by running the
## sensors command, e.g. sensors -u
##
## A * as the feature name will return all features of the chip
##
# sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"]
` `
func (_ *Sensors) SampleConfig() string {
return sensorsSampleConfig
} }
func (s *Sensors) Gather(acc telegraf.Accumulator) error { func (s *Sensors) Gather(acc telegraf.Accumulator) error {
gosensors.Init() if len(s.path) == 0 {
defer gosensors.Cleanup() return errors.New("sensors not found: verify that lm-sensors package is installed and that sensors is in your PATH")
for _, chip := range gosensors.GetDetectedChips() {
for _, feature := range chip.GetFeatures() {
chipName := chip.String()
featureLabel := feature.GetLabel()
if len(s.Sensors) != 0 {
var found bool
for _, sensor := range s.Sensors {
parts := strings.SplitN(sensor, ":", 2)
if parts[0] == chipName {
if parts[1] == "*" || parts[1] == featureLabel {
found = true
break
}
}
}
if !found {
continue
}
}
tags := map[string]string{
"chip": chipName,
"adapter": chip.AdapterName(),
"feature-name": feature.Name,
"feature-label": featureLabel,
}
fieldName := chipName + ":" + featureLabel
fields := map[string]interface{}{
fieldName: feature.GetValue(),
}
acc.AddFields("sensors", fields, tags)
}
} }
return s.parse(acc)
}
// parse forks the command:
// sensors -u -A
// and parses the output to add it to the telegraf.Accumulator.
func (s *Sensors) parse(acc telegraf.Accumulator) error {
tags := map[string]string{}
fields := map[string]interface{}{}
chip := ""
cmd := execCommand(s.path, "-A", "-u")
out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
}
lines := strings.Split(strings.TrimSpace(string(out)), "\n")
for _, line := range lines {
if len(line) == 0 {
acc.AddFields("sensors", fields, tags)
chip = ""
tags = map[string]string{}
fields = map[string]interface{}{}
continue
}
if len(chip) == 0 {
chip = line
tags["chip"] = chip
continue
}
if !strings.HasPrefix(line, " ") {
if len(tags) > 1 {
acc.AddFields("sensors", fields, tags)
}
fields = map[string]interface{}{}
tags = map[string]string{
"chip": chip,
"feature": strings.TrimRight(snake(line), ":"),
}
} else {
splitted := strings.Split(line, ":")
fieldName := strings.TrimSpace(splitted[0])
if s.RemoveNumbers {
fieldName = numberRegp.ReplaceAllString(fieldName, "")
}
fieldValue, err := strconv.ParseFloat(strings.TrimSpace(splitted[1]), 64)
if err != nil {
return err
}
fields[fieldName] = fieldValue
}
}
acc.AddFields("sensors", fields, tags)
return nil return nil
} }
func init() { func init() {
s := Sensors{
RemoveNumbers: true,
}
path, _ := exec.LookPath("sensors")
if len(path) > 0 {
s.path = path
}
inputs.Add("sensors", func() telegraf.Input { inputs.Add("sensors", func() telegraf.Input {
return &Sensors{} return &s
}) })
} }
// snake converts string to snake case
func snake(input string) string {
return strings.ToLower(strings.Replace(input, " ", "_", -1))
}

View File

@ -1,3 +0,0 @@
// +build !linux !sensors
package sensors

View File

@ -0,0 +1,3 @@
// +build !linux
package sensors

View File

@ -0,0 +1,328 @@
// +build linux
package sensors
import (
"fmt"
"os"
"os/exec"
"testing"
"github.com/influxdata/telegraf/testutil"
)
func TestGatherDefault(t *testing.T) {
s := Sensors{
RemoveNumbers: true,
path: "sensors",
}
// overwriting exec commands with mock commands
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := s.Gather(&acc)
if err != nil {
t.Fatal(err)
}
var tests = []struct {
tags map[string]string
fields map[string]interface{}
}{
{
map[string]string{
"chip": "acpitz-virtual-0",
"feature": "temp1",
},
map[string]interface{}{
"temp_input": 8.3,
"temp_crit": 31.3,
},
},
{
map[string]string{
"chip": "power_meter-acpi-0",
"feature": "power1",
},
map[string]interface{}{
"power_average": 0.0,
"power_average_interval": 300.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "physical_id_0",
},
map[string]interface{}{
"temp_input": 77.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_0",
},
map[string]interface{}{
"temp_input": 75.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_1",
},
map[string]interface{}{
"temp_input": 77.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "physical_id_1",
},
map[string]interface{}{
"temp_input": 70.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_0",
},
map[string]interface{}{
"temp_input": 66.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_1",
},
map[string]interface{}{
"temp_input": 70.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, "sensors", test.fields, test.tags)
}
}
func TestGatherNotRemoveNumbers(t *testing.T) {
s := Sensors{
RemoveNumbers: false,
path: "sensors",
}
// overwriting exec commands with mock commands
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := s.Gather(&acc)
if err != nil {
t.Fatal(err)
}
var tests = []struct {
tags map[string]string
fields map[string]interface{}
}{
{
map[string]string{
"chip": "acpitz-virtual-0",
"feature": "temp1",
},
map[string]interface{}{
"temp1_input": 8.3,
"temp1_crit": 31.3,
},
},
{
map[string]string{
"chip": "power_meter-acpi-0",
"feature": "power1",
},
map[string]interface{}{
"power1_average": 0.0,
"power1_average_interval": 300.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "physical_id_0",
},
map[string]interface{}{
"temp1_input": 77.0,
"temp1_max": 82.0,
"temp1_crit": 92.0,
"temp1_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_0",
},
map[string]interface{}{
"temp2_input": 75.0,
"temp2_max": 82.0,
"temp2_crit": 92.0,
"temp2_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_1",
},
map[string]interface{}{
"temp3_input": 77.0,
"temp3_max": 82.0,
"temp3_crit": 92.0,
"temp3_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "physical_id_1",
},
map[string]interface{}{
"temp1_input": 70.0,
"temp1_max": 82.0,
"temp1_crit": 92.0,
"temp1_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_0",
},
map[string]interface{}{
"temp2_input": 66.0,
"temp2_max": 82.0,
"temp2_crit": 92.0,
"temp2_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_1",
},
map[string]interface{}{
"temp3_input": 70.0,
"temp3_max": 82.0,
"temp3_crit": 92.0,
"temp3_crit_alarm": 0.0,
},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, "sensors", test.fields, test.tags)
}
}
// fakeExecCommand is a helper function that mocks
// the exec.Command call (and calls the test binary)
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
// TestHelperProcess isn't a real test. It's used to mock exec.Command
// For example, if you run:
// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- sensors -A -u
// it returns the mockData below.
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
mockData := `acpitz-virtual-0
temp1:
temp1_input: 8.300
temp1_crit: 31.300
power_meter-acpi-0
power1:
power1_average: 0.000
power1_average_interval: 300.000
coretemp-isa-0000
Physical id 0:
temp1_input: 77.000
temp1_max: 82.000
temp1_crit: 92.000
temp1_crit_alarm: 0.000
Core 0:
temp2_input: 75.000
temp2_max: 82.000
temp2_crit: 92.000
temp2_crit_alarm: 0.000
Core 1:
temp3_input: 77.000
temp3_max: 82.000
temp3_crit: 92.000
temp3_crit_alarm: 0.000
coretemp-isa-0001
Physical id 1:
temp1_input: 70.000
temp1_max: 82.000
temp1_crit: 92.000
temp1_crit_alarm: 0.000
Core 0:
temp2_input: 66.000
temp2_max: 82.000
temp2_crit: 92.000
temp2_crit_alarm: 0.000
Core 1:
temp3_input: 70.000
temp3_max: 82.000
temp3_crit: 92.000
temp3_crit_alarm: 0.000
`
args := os.Args
// The preceding arguments are test-runner plumbing, which looks like:
// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
cmd, args := args[3], args[4:]
if cmd == "sensors" {
fmt.Fprint(os.Stdout, mockData)
} else {
fmt.Fprint(os.Stdout, "command not found")
os.Exit(1)
}
os.Exit(0)
}

View File

@ -1,549 +1,167 @@
# SNMP Input Plugin # SNMP Plugin
The SNMP input plugin gathers metrics from SNMP agents The SNMP input plugin gathers metrics from SNMP agents.
### Configuration: ## Configuration:
### Example:
#### Very simple example SNMP data:
```
In this example, the plugin will gather value of OIDS: .1.0.0.0.1.1.0 octet_str "foo"
.1.0.0.0.1.1.1 octet_str "bar"
- `.1.3.6.1.2.1.2.2.1.4.1` .1.0.0.0.1.102 octet_str "bad"
.1.0.0.0.1.2.0 integer 1
```toml .1.0.0.0.1.2.1 integer 2
# Very Simple Example .1.0.0.0.1.3.0 octet_str "0.123"
[[inputs.snmp]] .1.0.0.0.1.3.1 octet_str "0.456"
.1.0.0.0.1.3.2 octet_str "9.999"
[[inputs.snmp.host]] .1.0.0.1.1 octet_str "baz"
address = "127.0.0.1:161" .1.0.0.1.2 uinteger 54321
# SNMP community .1.0.0.1.3 uinteger 234
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Simple list of OIDs to get, in addition to "collect"
get_oids = [".1.3.6.1.2.1.2.2.1.4.1"]
``` ```
Telegraf config:
#### Simple example
In this example, Telegraf gathers value of OIDS:
- named **ifnumber**
- named **interface_speed**
With **inputs.snmp.get** section the plugin gets the oid number:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*
As you can see *ifSpeed* is not a valid OID. In order to get
the valid OID, the plugin uses `snmptranslate_file` to match the OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
Also as the plugin will append `instance` to the corresponding OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`
In this example, the plugin will gather value of OIDS:
- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`
```toml ```toml
# Simple example
[[inputs.snmp]] [[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names agents = [ "127.0.0.1:161" ]
## To generate 'oids.txt' you need to run: version = 2
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt community = "public"
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["ifnumber", "interface_speed"]
[[inputs.snmp.get]] name = "system"
name = "ifnumber" [[inputs.snmp.field]]
oid = ".1.3.6.1.2.1.2.1.0" name = "hostname"
oid = ".1.0.0.1.1"
is_tag = true
[[inputs.snmp.field]]
name = "uptime"
oid = ".1.0.0.1.2"
[[inputs.snmp.field]]
name = "loadavg"
oid = ".1.0.0.1.3"
conversion = "float(2)"
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "1"
```
#### Simple bulk example
In this example, Telegraf gathers value of OIDS:
- named **ifnumber**
- named **interface_speed**
- named **if_out_octets**
With **inputs.snmp.get** section the plugin gets oid number:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*
With **inputs.snmp.bulk** section the plugin gets the oid number:
- **if_out_octets** => *ifOutOctets*
As you can see *ifSpeed* and *ifOutOctets* are not a valid OID.
In order to get the valid OID, the plugin uses `snmptranslate_file`
to match the OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
- **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16`
Also, the plugin will append `instance` to the corresponding OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`
And **if_out_octets** is a bulk request, the plugin will gathers all
OIDS in the table.
- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`
In this example, the plugin will gather value of OIDS:
- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`
- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`
```toml
# Simple bulk example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["interface_speed", "if_number", "if_out_octets"]
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "1"
[[inputs.snmp.get]]
name = "if_number"
oid = "ifNumber"
[[inputs.snmp.bulk]]
name = "if_out_octets"
oid = "ifOutOctets"
```
#### Table example
In this example, we remove collect attribute to the host section,
but you can still use it in combination of the following part.
Note: This example is like a bulk request a but using an
other configuration
Telegraf gathers value of OIDS of the table:
- named **iftable1**
With **inputs.snmp.table** section the plugin gets oid number:
- **iftable1** => `.1.3.6.1.2.1.31.1.1.1`
Also **iftable1** is a table, the plugin will gathers all
OIDS in the table and in the subtables
- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`
```toml
# Table example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable1"
# table without mapping neither subtables
# This is like bulk request
[[inputs.snmp.table]] [[inputs.snmp.table]]
name = "iftable1" name = "remote_servers"
oid = ".1.3.6.1.2.1.31.1.1.1" inherit_tags = [ "hostname" ]
[[inputs.snmp.table.field]]
name = "server"
oid = ".1.0.0.0.1.1"
is_tag = true
[[inputs.snmp.table.field]]
name = "connections"
oid = ".1.0.0.0.1.2"
[[inputs.snmp.table.field]]
name = "latency"
oid = ".1.0.0.0.1.3"
conversion = "float"
``` ```
Resulting output:
```
* Plugin: snmp, Collection 1
> system,agent_host=127.0.0.1,host=mylocalhost,hostname=baz loadavg=2.34,uptime=54321i 1468953135000000000
> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency=0.123 1468953135000000000
> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency=0.456 1468953135000000000
```
#### Table with subtable example #### Configuration via MIB:
In this example, we remove collect attribute to the host section,
but you can still use it in combination of the following part.
Note: This example is like a bulk request a but using an
other configuration
Telegraf gathers value of OIDS of the table:
- named **iftable2**
With **inputs.snmp.table** section *AND* **sub_tables** attribute,
the plugin will get OIDS from subtables:
- **iftable2** => `.1.3.6.1.2.1.2.2.1.13`
Also **iftable2** is a table, the plugin will gathers all
OIDS in subtables:
- `.1.3.6.1.2.1.2.2.1.13.1`
- `.1.3.6.1.2.1.2.2.1.13.2`
- `.1.3.6.1.2.1.2.2.1.13.3`
- `.1.3.6.1.2.1.2.2.1.13.4`
- `.1.3.6.1.2.1.2.2.1.13....`
This example uses the SNMP data above, but is configured via the MIB.
The example MIB file can be found in the `testdata` directory. See the [MIB lookups](#mib-lookups) section for more information.
Telegraf config:
```toml ```toml
# Table with subtable example
[[inputs.snmp]] [[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names agents = [ "127.0.0.1:161" ]
## To generate 'oids.txt' you need to run: version = 2
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt community = "public"
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt [[inputs.snmp.field]]
snmptranslate_file = "/tmp/oids.txt" oid = "TEST::hostname"
[[inputs.snmp.host]] is_tag = true
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable2"
# table without mapping but with subtables
[[inputs.snmp.table]] [[inputs.snmp.table]]
name = "iftable2" oid = "TEST::testTable"
sub_tables = [".1.3.6.1.2.1.2.2.1.13"] inherit_tags = "hostname"
# note
# oid attribute is useless
``` ```
Resulting output:
#### Table with mapping example ```
* Plugin: snmp, Collection 1
In this example, we remove collect attribute to the host section, > testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency="0.123" 1468953135000000000
but you can still use it in combination of the following part. > testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency="0.456" 1468953135000000000
Telegraf gathers value of OIDS of the table:
- named **iftable3**
With **inputs.snmp.table** section the plugin gets oid number:
- **iftable3** => `.1.3.6.1.2.1.31.1.1.1`
Also **iftable2** is a table, the plugin will gathers all
OIDS in the table and in the subtables
- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`
But the **include_instances** attribute will filter which OIDS
will be gathered; As you see, there is an other attribute, `mapping_table`.
`include_instances` and `mapping_table` permit to build a hash table
to filter only OIDS you want.
Let's say, we have the following data on SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1`
The plugin will build the following hash table:
| instance name | instance id |
|---------------|-------------|
| `enp5s0` | `1` |
| `enp5s1` | `2` |
| `enp5s2` | `3` |
| `eth0` | `4` |
| `eth1` | `5` |
With the **include_instances** attribute, the plugin will gather
the following OIDS:
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.2.1`
- `.1.3.6.1.2.1.31.1.1.1.2.5`
- `.1.3.6.1.2.1.31.1.1.1.3.1`
- `.1.3.6.1.2.1.31.1.1.1.3.5`
- `.1.3.6.1.2.1.31.1.1.1.4.1`
- `.1.3.6.1.2.1.31.1.1.1.4.5`
- `.1.3.6.1.2.1.31.1.1.1.5.1`
- `.1.3.6.1.2.1.31.1.1.1.5.5`
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `...`
Note: the plugin will add instance name as tag *instance*
```toml
# Simple table with mapping example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable3"
include_instances = ["enp5s0", "eth1"]
# table with mapping but without subtables
[[inputs.snmp.table]]
name = "iftable3"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty. get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
``` ```
### Config parameters
#### Table with both mapping and subtable example * `agents`: Default: `[]`
List of SNMP agents to connect to in the form of `IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`.
In this example, we remove collect attribute to the host section, * `version`: Default: `2`
but you can still use it in combination of the following part. SNMP protocol version to use.
Telegraf gathers value of OIDS of the table: * `community`: Default: `"public"`
SNMP community to use.
- named **iftable4** * `max_repetitions`: Default: `50`
Maximum number of iterations for repeating variables.
With **inputs.snmp.table** section *AND* **sub_tables** attribute, * `sec_name`:
the plugin will get OIDS from subtables: Security name for authenticated SNMPv3 requests.
- **iftable4** => `.1.3.6.1.2.1.31.1.1.1` * `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""`
Authentication protocol for authenticated SNMPv3 requests.
Also **iftable2** is a table, the plugin will gathers all * `auth_password`:
OIDS in the table and in the subtables Authentication password for authenticated SNMPv3 requests.
- `.1.3.6.1.2.1.31.1.1.1.6.1 * `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"`
- `.1.3.6.1.2.1.31.1.1.1.6.2` Security level used for SNMPv3 messages.
- `.1.3.6.1.2.1.31.1.1.1.6.3`
- `.1.3.6.1.2.1.31.1.1.1.6.4`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.2`
- `.1.3.6.1.2.1.31.1.1.1.10.3`
- `.1.3.6.1.2.1.31.1.1.1.10.4`
- `.1.3.6.1.2.1.31.1.1.1.10....`
But the **include_instances** attribute will filter which OIDS * `context_name`:
will be gathered; As you see, there is an other attribute, `mapping_table`. Context name used for SNMPv3 requests.
`include_instances` and `mapping_table` permit to build a hash table
to filter only OIDS you want.
Let's say, we have the following data on SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1`
The plugin will build the following hash table: * `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""`
Privacy protocol used for encrypted SNMPv3 messages.
| instance name | instance id | * `priv_password`:
|---------------|-------------| Privacy password used for encrypted SNMPv3 messages.
| `enp5s0` | `1` |
| `enp5s1` | `2` |
| `enp5s2` | `3` |
| `eth0` | `4` |
| `eth1` | `5` |
With the **include_instances** attribute, the plugin will gather
the following OIDS:
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.5`
Note: the plugin will add instance name as tag *instance*
* `name`:
Output measurement name.
```toml #### Field parameters:
# Table with both mapping and subtable example * `oid`:
[[inputs.snmp]] OID to get. May be a numeric or textual OID.
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have an other MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable4"
include_instances = ["enp5s0", "eth1"]
# table with both mapping and subtables * `name`:
[[inputs.snmp.table]] Output field/tag name.
name = "iftable4" If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made.
# if empty get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty get all subtables
# sub_tables could be not "real subtables"
sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
# note
# oid attribute is useless
# SNMP SUBTABLES * `is_tag`:
[[inputs.snmp.subtable]] Output this field as a tag.
name = "bytes_recv"
oid = ".1.3.6.1.2.1.31.1.1.1.6"
unit = "octets"
[[inputs.snmp.subtable]] * `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`""`. Default: `""`
name = "bytes_send" Converts the value according to the given specification.
oid = ".1.3.6.1.2.1.31.1.1.1.10"
unit = "octets"
```
#### Configuration notes - `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Efficively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`.
- `float`: Converts the value into a float with no adjustment. Same as `float(0)`.
- `int`: Convertes the value into an integer.
- In **inputs.snmp.table** section, the `oid` attribute is useless if #### Table parameters:
the `sub_tables` attributes is defined * `oid`:
Automatically populates the table's fields using data from the MIB.
- In **inputs.snmp.subtable** section, you can put a name from `snmptranslate_file` * `name`:
as `oid` attribute instead of a valid OID Output measurement name.
If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made.
### Measurements & Fields: * `inherit_tags`:
Which tags to inherit from the top-level config and to use in the output of this table's measurement.
With the last example (Table with both mapping and subtable example): ### MIB lookups
If the plugin is configured such that it needs to perform lookups from the MIB, it will use the net-snmp utilities `snmptranslate` and `snmptable`.
- ifHCOutOctets When performing the lookups, the plugin will load all available MIBs. If your MIB files are in a custom path, you may add the path using the `MIBDIRS` environment variable. See [`man 1 snmpcmd`](http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK) for more information on the variable.
- ifHCOutOctets
- ifInDiscards
- ifInDiscards
- ifHCInOctets
- ifHCInOctets
### Tags:
With the last example (Table with both mapping and subtable example):
- ifHCOutOctets
- host
- instance
- unit
- ifInDiscards
- host
- instance
- ifHCInOctets
- host
- instance
- unit
### Example Output:
With the last example (Table with both mapping and subtable example):
```
ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901
ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264
ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312
```
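As a concrete illustration of the `conversion` rules above, here is a minimal Go sketch; `applyConversion` is a hypothetical helper written for this document, not the plugin's actual code.
```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// applyConversion sketches the documented semantics: "int" parses an
// integer, "float" is the same as "float(0)", and "float(X)" parses a
// float and moves the decimal point X places to the left.
func applyConversion(spec, raw string) (interface{}, error) {
	switch {
	case spec == "":
		return raw, nil // no conversion requested; pass through unchanged
	case spec == "int":
		return strconv.ParseInt(raw, 10, 64)
	case spec == "float":
		spec = "float(0)"
	}
	var shift int
	if _, err := fmt.Sscanf(spec, "float(%d)", &shift); err != nil {
		return nil, fmt.Errorf("unknown conversion %q", spec)
	}
	v, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		return nil, err
	}
	return v / math.Pow10(shift), nil
}

func main() {
	v, _ := applyConversion("float(2)", "123")
	fmt.Println(v) // prints 1.23
}
```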
File diff suppressed because it is too large
File diff suppressed because it is too large
17
plugins/inputs/snmp/testdata/snmpd.conf vendored Normal file
View File
@ -0,0 +1,17 @@
# This config provides the data represented in the plugin documentation
# Requires net-snmp >= 5.7
#agentaddress UDP:127.0.0.1:1161
rocommunity public
override .1.0.0.0.1.1.0 octet_str "foo"
override .1.0.0.0.1.1.1 octet_str "bar"
override .1.0.0.0.1.102 octet_str "bad"
override .1.0.0.0.1.2.0 integer 1
override .1.0.0.0.1.2.1 integer 2
override .1.0.0.0.1.3.0 octet_str "0.123"
override .1.0.0.0.1.3.1 octet_str "0.456"
override .1.0.0.0.1.3.2 octet_str "9.999"
override .1.0.0.1.1 octet_str "baz"
override .1.0.0.1.2 uinteger 54321
override .1.0.0.1.3 uinteger 234
51
plugins/inputs/snmp/testdata/test.mib vendored Normal file
View File
@ -0,0 +1,51 @@
TEST DEFINITIONS ::= BEGIN
testOID ::= { 1 0 0 }
testTable OBJECT-TYPE
SYNTAX SEQUENCE OF testTableEntry
MAX-ACCESS not-accessible
STATUS current
::= { testOID 0 }
testTableEntry OBJECT-TYPE
SYNTAX TestTableEntry
MAX-ACCESS not-accessible
STATUS current
INDEX {
server
}
::= { testTable 1 }
TestTableEntry ::=
SEQUENCE {
server OCTET STRING,
connections INTEGER,
latency OCTET STRING
}
server OBJECT-TYPE
SYNTAX OCTET STRING
MAX-ACCESS read-only
STATUS current
::= { testTableEntry 1 }
connections OBJECT-TYPE
SYNTAX INTEGER
MAX-ACCESS read-only
STATUS current
::= { testTableEntry 2 }
latency OBJECT-TYPE
SYNTAX OCTET STRING
MAX-ACCESS read-only
STATUS current
::= { testTableEntry 3 }
hostname OBJECT-TYPE
SYNTAX OCTET STRING
MAX-ACCESS read-only
STATUS current
::= { testOID 1 1 }
END
View File
@ -0,0 +1,549 @@
# SNMP Input Plugin
The SNMP input plugin gathers metrics from SNMP agents.
### Configuration:
#### Very simple example
In this example, the plugin will gather the values of the OIDs:
- `.1.3.6.1.2.1.2.2.1.4.1`
```toml
# Very Simple Example
[[inputs.snmp]]
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Simple list of OIDs to get, in addition to "collect"
get_oids = [".1.3.6.1.2.1.2.2.1.4.1"]
```
#### Simple example
In this example, Telegraf gathers the values of the OIDs:
- named **ifnumber**
- named **interface_speed**
With the **inputs.snmp.get** section, the plugin gets the OID numbers:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*
As you can see, *ifSpeed* is not a valid OID. In order to get
the valid OID, the plugin uses `snmptranslate_file` to match the OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
The plugin will also append `instance` to the corresponding OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`
In this example, the plugin will gather the values of the OIDs:
- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`
```toml
# Simple example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["ifnumber", "interface_speed"]
[[inputs.snmp.get]]
name = "ifnumber"
oid = ".1.3.6.1.2.1.2.1.0"
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "1"
```
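The translation step described above can be sketched in a few lines of Go; `resolveOID` and its arguments are illustrative names for this document, not part of the plugin:
```go
package main

import "fmt"

// resolveOID maps a symbolic name from the snmptranslate file to its
// numeric OID and appends the instance (".0" when none is configured).
func resolveOID(nameToOid map[string]string, oid, instance string) string {
	if numeric, ok := nameToOid[oid]; ok {
		if instance == "" {
			instance = "0"
		}
		return "." + numeric + "." + instance
	}
	return oid // already a numeric OID; used as-is
}

func main() {
	nameToOid := map[string]string{"ifSpeed": "1.3.6.1.2.1.2.2.1.5"}
	fmt.Println(resolveOID(nameToOid, "ifSpeed", "1"))           // .1.3.6.1.2.1.2.2.1.5.1
	fmt.Println(resolveOID(nameToOid, ".1.3.6.1.2.1.2.1.0", "")) // unchanged
}
```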
#### Simple bulk example
In this example, Telegraf gathers the values of the OIDs:
- named **ifnumber**
- named **interface_speed**
- named **if_out_octets**
With the **inputs.snmp.get** section, the plugin gets the OID numbers:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed*
With the **inputs.snmp.bulk** section, the plugin gets the OID number:
- **if_out_octets** => *ifOutOctets*
As you can see, *ifSpeed* and *ifOutOctets* are not valid OIDs.
In order to get the valid OIDs, the plugin uses `snmptranslate_file`
to match them:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
- **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16`
Also, the plugin will append `instance` to the corresponding OID:
- **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`
Since **if_out_octets** is a bulk request, the plugin will gather all
OIDs in the table:
- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`
In this example, the plugin will gather the values of the OIDs:
- `.1.3.6.1.2.1.2.1.0`
- `.1.3.6.1.2.1.2.2.1.5.1`
- `.1.3.6.1.2.1.2.2.1.16.1`
- `.1.3.6.1.2.1.2.2.1.16.2`
- `.1.3.6.1.2.1.2.2.1.16.3`
- `.1.3.6.1.2.1.2.2.1.16.4`
- `.1.3.6.1.2.1.2.2.1.16.5`
- `...`
```toml
# Simple bulk example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["interface_speed", "if_number", "if_out_octets"]
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "1"
[[inputs.snmp.get]]
name = "if_number"
oid = "ifNumber"
[[inputs.snmp.bulk]]
name = "if_out_octets"
oid = "ifOutOctets"
```
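A bulk request is essentially a walk of a subtree. The loop below is a minimal sketch of that behaviour using the same `gosnmp` calls the plugin relies on; assume it approximates, rather than reproduces, the plugin's logic.
```go
package example

import (
	"strings"

	"github.com/soniah/gosnmp"
)

// bulkWalk issues GETBULK repeatedly, resuming from the last OID the
// agent returned, until the response leaves the requested subtree.
func bulkWalk(client *gosnmp.GoSNMP, root string, maxRep uint8) ([]gosnmp.SnmpPDU, error) {
	var pdus []gosnmp.SnmpPDU
	next := root
	for {
		result, err := client.GetBulk([]string{next}, 0, maxRep)
		if err != nil {
			return nil, err
		}
		last := ""
		for _, v := range result.Variables {
			last = v.Name
			if strings.HasPrefix(v.Name, root) {
				pdus = append(pdus, v)
			}
		}
		if last == "" || !strings.HasPrefix(last, root) {
			return pdus, nil // walked past the end of the subtree
		}
		next = last
	}
}
```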
#### Table example
In this example, we removed the `collect` attribute from the host section,
but you can still use it in combination with the following part.
Note: this example is like a bulk request, but using a different
configuration.
Telegraf gathers the values of the OIDs of the table:
- named **iftable1**
With the **inputs.snmp.table** section, the plugin gets the OID number:
- **iftable1** => `.1.3.6.1.2.1.31.1.1.1`
Since **iftable1** is a table, the plugin will gather all
OIDs in the table and in the subtables:
- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`
```toml
# Table example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which get/bulk do you want to collect for this host
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable1"
# table with neither mapping nor subtables
# This is like bulk request
[[inputs.snmp.table]]
name = "iftable1"
oid = ".1.3.6.1.2.1.31.1.1.1"
```
#### Table with subtable example
In this example, we removed the `collect` attribute from the host section,
but you can still use it in combination with the following part.
Note: this example is like a bulk request, but using a different
configuration.
Telegraf gathers the values of the OIDs of the table:
- named **iftable2**
With the **inputs.snmp.table** section *AND* the **sub_tables** attribute,
the plugin will get OIDs from subtables:
- **iftable2** => `.1.3.6.1.2.1.2.2.1.13`
Since **iftable2** is a table, the plugin will gather all
OIDs in the subtables:
- `.1.3.6.1.2.1.2.2.1.13.1`
- `.1.3.6.1.2.1.2.2.1.13.2`
- `.1.3.6.1.2.1.2.2.1.13.3`
- `.1.3.6.1.2.1.2.2.1.13.4`
- `.1.3.6.1.2.1.2.2.1.13....`
```toml
# Table with subtable example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable2"
# table without mapping but with subtables
[[inputs.snmp.table]]
name = "iftable2"
sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
# note
# oid attribute is useless
```
#### Table with mapping example
In this example, we removed the `collect` attribute from the host section,
but you can still use it in combination with the following part.
Telegraf gathers the values of the OIDs of the table:
- named **iftable3**
With the **inputs.snmp.table** section, the plugin gets the OID number:
- **iftable3** => `.1.3.6.1.2.1.31.1.1.1`
Since **iftable3** is a table, the plugin will gather all
OIDs in the table and in the subtables:
- `.1.3.6.1.2.1.31.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.1....`
- `.1.3.6.1.2.1.31.1.1.1.2`
- `.1.3.6.1.2.1.31.1.1.1.2....`
- `.1.3.6.1.2.1.31.1.1.1.3`
- `.1.3.6.1.2.1.31.1.1.1.3....`
- `.1.3.6.1.2.1.31.1.1.1.4`
- `.1.3.6.1.2.1.31.1.1.1.4....`
- `.1.3.6.1.2.1.31.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.5....`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `...`
But the **include_instances** attribute will filter which OIDs
will be gathered. As you can see, there is another attribute, `mapping_table`.
Together, `include_instances` and `mapping_table` build a hash table
that filters only the OIDs you want.
Let's say we have the following data on the SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has the value `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has the value `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has the value `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has the value `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has the value `eth1`
The plugin will build the following hash table:
| instance name | instance id |
|---------------|-------------|
| `enp5s0` | `1` |
| `enp5s1` | `2` |
| `enp5s2` | `3` |
| `eth0` | `4` |
| `eth1` | `5` |
With the **include_instances** attribute, the plugin will gather
the following OIDs:
- `.1.3.6.1.2.1.31.1.1.1.1.1`
- `.1.3.6.1.2.1.31.1.1.1.1.5`
- `.1.3.6.1.2.1.31.1.1.1.2.1`
- `.1.3.6.1.2.1.31.1.1.1.2.5`
- `.1.3.6.1.2.1.31.1.1.1.3.1`
- `.1.3.6.1.2.1.31.1.1.1.3.5`
- `.1.3.6.1.2.1.31.1.1.1.4.1`
- `.1.3.6.1.2.1.31.1.1.1.4.5`
- `.1.3.6.1.2.1.31.1.1.1.5.1`
- `.1.3.6.1.2.1.31.1.1.1.5.5`
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `...`
Note: the plugin will add the instance name as the tag *instance*.
```toml
# Simple table with mapping example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable3"
include_instances = ["enp5s0", "eth1"]
# table with mapping but without subtables
[[inputs.snmp.table]]
name = "iftable3"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
```
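A minimal sketch of how such a hash table can be built, assuming the mapping table rows have already been fetched; `buildMapping` and `contains` are names invented for this example:
```go
package example

import "strings"

// buildMapping pairs each instance id (the OID suffix left after removing
// the mapping table prefix) with its instance name, keeping only names
// listed in include (when include is non-empty).
func buildMapping(rows map[string]string, mappingOid string, include []string) map[string]string {
	mapping := make(map[string]string)
	for oid, name := range rows {
		if len(include) > 0 && !contains(include, name) {
			continue // instance not selected by include_instances
		}
		// e.g. ".1.3.6.1.2.1.31.1.1.1.1.5" minus the table prefix -> "5"
		id := strings.Trim(strings.TrimPrefix(oid, mappingOid), ".")
		mapping[id] = name
	}
	return mapping
}

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}
```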
#### Table with both mapping and subtable example
In this example, we removed the `collect` attribute from the host section,
but you can still use it in combination with the following part.
Telegraf gathers the values of the OIDs of the table:
- named **iftable4**
With the **inputs.snmp.table** section *AND* the **sub_tables** attribute,
the plugin will get OIDs from subtables:
- **iftable4** => `.1.3.6.1.2.1.31.1.1.1`
Since **iftable4** is a table, the plugin will gather all
OIDs in the table and in the subtables:
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.2`
- `.1.3.6.1.2.1.31.1.1.1.6.3`
- `.1.3.6.1.2.1.31.1.1.1.6.4`
- `.1.3.6.1.2.1.31.1.1.1.6....`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.2`
- `.1.3.6.1.2.1.31.1.1.1.10.3`
- `.1.3.6.1.2.1.31.1.1.1.10.4`
- `.1.3.6.1.2.1.31.1.1.1.10....`
But the **include_instances** attribute will filter which OIDs
will be gathered. As you can see, there is another attribute, `mapping_table`.
Together, `include_instances` and `mapping_table` build a hash table
that filters only the OIDs you want.
Let's say we have the following data on the SNMP server:
- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has the value `enp5s0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has the value `enp5s1`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has the value `enp5s2`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has the value `eth0`
- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has the value `eth1`
The plugin will build the following hash table:
| instance name | instance id |
|---------------|-------------|
| `enp5s0` | `1` |
| `enp5s1` | `2` |
| `enp5s2` | `3` |
| `eth0` | `4` |
| `eth1` | `5` |
With the **include_instances** attribute, the plugin will gather
the following OIDs:
- `.1.3.6.1.2.1.31.1.1.1.6.1`
- `.1.3.6.1.2.1.31.1.1.1.6.5`
- `.1.3.6.1.2.1.31.1.1.1.10.1`
- `.1.3.6.1.2.1.31.1.1.1.10.5`
Note: the plugin will add the instance name as the tag *instance*.
```toml
# Table with both mapping and subtable example
[[inputs.snmp]]
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "127.0.0.1:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# Which table do you want to collect
[[inputs.snmp.host.table]]
name = "iftable4"
include_instances = ["enp5s0", "eth1"]
# table with both mapping and subtables
[[inputs.snmp.table]]
name = "iftable4"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# sub_tables do not have to be "real" subtables
sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
# note
# oid attribute is useless
# SNMP SUBTABLES
[[inputs.snmp.subtable]]
name = "bytes_recv"
oid = ".1.3.6.1.2.1.31.1.1.1.6"
unit = "octets"
[[inputs.snmp.subtable]]
name = "bytes_send"
oid = ".1.3.6.1.2.1.31.1.1.1.10"
unit = "octets"
```
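To see how the subtable OIDs and the mapping combine, here is a small illustrative sketch (the function name is hypothetical): each subtable OID is joined with each instance key kept by the mapping step, yielding exactly the GET OIDs listed above.
```go
package example

// subtableOids joins every subtable OID with every instance key, e.g.
// ".1.3.6.1.2.1.31.1.1.1.6" + ".1" -> ".1.3.6.1.2.1.31.1.1.1.6.1".
func subtableOids(subTables, instanceKeys []string) []string {
	var oids []string
	for _, sub := range subTables {
		for _, key := range instanceKeys {
			oids = append(oids, sub+key) // key carries its leading "."
		}
	}
	return oids
}
```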
#### Configuration notes
- In the **inputs.snmp.table** section, the `oid` attribute is useless if
the `sub_tables` attribute is defined
- In **inputs.snmp.subtable** section, you can put a name from `snmptranslate_file`
as `oid` attribute instead of a valid OID
### Measurements & Fields:
With the last example (Table with both mapping and subtable example):
- ifHCOutOctets
  - ifHCOutOctets
- ifInDiscards
  - ifInDiscards
- ifHCInOctets
  - ifHCInOctets
### Tags:
With the last example (Table with both mapping and subtable example):
- ifHCOutOctets
  - host
  - instance
  - unit
- ifInDiscards
  - host
  - instance
- ifHCInOctets
  - host
  - instance
  - unit
### Example Output:
With the last example (Table with both mapping and subtable example):
```
ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901
ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264
ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312
```
View File
@ -0,0 +1,818 @@
package snmp_legacy
import (
"io/ioutil"
"log"
"net"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/soniah/gosnmp"
)
// Snmp is a snmp plugin
type Snmp struct {
Host []Host
Get []Data
Bulk []Data
Table []Table
Subtable []Subtable
SnmptranslateFile string
nameToOid map[string]string
initNode Node
subTableMap map[string]Subtable
}
type Host struct {
Address string
Community string
// SNMP version. Default 2
Version int
// SNMP timeout, in seconds. 0 means no timeout
Timeout float64
// SNMP retries
Retries int
// Data to collect (list of Data names)
Collect []string
// easy get oids
GetOids []string
// Table
Table []HostTable
// Oids
getOids []Data
bulkOids []Data
tables []HostTable
// array of processed oids
// to skip oid duplication
processedOids []string
OidInstanceMapping map[string]map[string]string
}
type Table struct {
// name = "iftable"
Name string
// oid = ".1.3.6.1.2.1.31.1.1.1"
Oid string
//if empty, get all instances
//mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
MappingTable string
// if empty, get all subtables
// sub_tables do not have to be "real" subtables
//sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
SubTables []string
}
type HostTable struct {
// name = "iftable"
Name string
// Includes only these instances
// include_instances = ["eth0", "eth1"]
IncludeInstances []string
// Excludes only these instances
// exclude_instances = ["eth20", "eth21"]
ExcludeInstances []string
// From Table struct
oid string
mappingTable string
subTables []string
}
// TODO find better names
type Subtable struct {
//name = "bytes_send"
Name string
//oid = ".1.3.6.1.2.1.31.1.1.1.10"
Oid string
//unit = "octets"
Unit string
}
type Data struct {
Name string
// OID (could be numbers or name)
Oid string
// Unit
Unit string
// SNMP getbulk max repetition
MaxRepetition uint8 `toml:"max_repetition"`
// SNMP Instance (default 0)
// (only used with GET request and if
// OID is a name from snmptranslate file)
Instance string
// OID (only number) (used for computation)
rawOid string
}
type Node struct {
id string
name string
subnodes map[string]Node
}
var sampleConfig = `
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
## Or if you have another MIB folder with custom MIBs
## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "192.168.2.2:161"
# SNMP community
community = "public" # default public
# SNMP version (1, 2 or 3)
# Version 3 not supported yet
version = 2 # default 2
# SNMP response timeout
timeout = 2.0 # default 2.0
# SNMP request retries
retries = 2 # default 2
# Which get/bulk do you want to collect for this host
collect = ["mybulk", "sysservices", "sysdescr"]
# Simple list of OIDs to get, in addition to "collect"
get_oids = []
[[inputs.snmp.host]]
address = "192.168.2.3:161"
community = "public"
version = 2
timeout = 2.0
retries = 2
collect = ["mybulk"]
get_oids = [
"ifNumber",
".1.3.6.1.2.1.1.3.0",
]
[[inputs.snmp.get]]
name = "ifnumber"
oid = "ifNumber"
[[inputs.snmp.get]]
name = "interface_speed"
oid = "ifSpeed"
instance = "0"
[[inputs.snmp.get]]
name = "sysuptime"
oid = ".1.3.6.1.2.1.1.3.0"
unit = "second"
[[inputs.snmp.bulk]]
name = "mybulk"
max_repetition = 127
oid = ".1.3.6.1.2.1.1"
[[inputs.snmp.bulk]]
name = "ifoutoctets"
max_repetition = 127
oid = "ifOutOctets"
[[inputs.snmp.host]]
address = "192.168.2.13:161"
#address = "127.0.0.1:161"
community = "public"
version = 2
timeout = 2.0
retries = 2
#collect = ["mybulk", "sysservices", "sysdescr", "systype"]
collect = ["sysuptime" ]
[[inputs.snmp.host.table]]
name = "iftable3"
include_instances = ["enp5s0", "eth1"]
# SNMP TABLEs
# table with neither mapping nor subtables
[[inputs.snmp.table]]
name = "iftable1"
oid = ".1.3.6.1.2.1.31.1.1.1"
# table without mapping but with subtables
[[inputs.snmp.table]]
name = "iftable2"
oid = ".1.3.6.1.2.1.31.1.1.1"
sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
# table with mapping but without subtables
[[inputs.snmp.table]]
name = "iftable3"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# table with both mapping and subtables
[[inputs.snmp.table]]
name = "iftable4"
oid = ".1.3.6.1.2.1.31.1.1.1"
# if empty, get all instances
mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# if empty, get all subtables
# sub_tables do not have to be "real" subtables
sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
`
// SampleConfig returns sample configuration message
func (s *Snmp) SampleConfig() string {
return sampleConfig
}
// Description returns description of the snmp_legacy plugin
func (s *Snmp) Description() string {
return `DEPRECATED! PLEASE USE inputs.snmp INSTEAD.`
}
func fillnode(parentNode Node, oid_name string, ids []string) {
// ids = ["1", "3", "6", ...]
id, ids := ids[0], ids[1:]
node, ok := parentNode.subnodes[id]
if !ok {
node = Node{
id: id,
name: "",
subnodes: make(map[string]Node),
}
if len(ids) == 0 {
node.name = oid_name
}
parentNode.subnodes[id] = node
}
if len(ids) > 0 {
fillnode(node, oid_name, ids)
}
}
func findnodename(node Node, ids []string) (string, string) {
// ids = ["1", "3", "6", ...]
if len(ids) == 1 {
return node.name, ids[0]
}
id, ids := ids[0], ids[1:]
// Get node
subnode, ok := node.subnodes[id]
if ok {
return findnodename(subnode, ids)
}
// We got a node
// Get node name
if node.name != "" && len(ids) == 0 && id == "0" {
// node with instance 0
return node.name, "0"
} else if node.name != "" && len(ids) == 0 && id != "0" {
// node with an instance
return node.name, string(id)
} else if node.name != "" && len(ids) > 0 {
// node with subinstances
return node.name, strings.Join(ids, ".")
}
// return an empty node name
return node.name, ""
}
func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// TODO put this in cache on first run
// Create subtables mapping
if len(s.subTableMap) == 0 {
s.subTableMap = make(map[string]Subtable)
for _, sb := range s.Subtable {
s.subTableMap[sb.Name] = sb
}
}
// TODO put this in cache on first run
// Create oid tree
if s.SnmptranslateFile != "" && len(s.initNode.subnodes) == 0 {
s.nameToOid = make(map[string]string)
s.initNode = Node{
id: "1",
name: "",
subnodes: make(map[string]Node),
}
data, err := ioutil.ReadFile(s.SnmptranslateFile)
if err != nil {
log.Printf("Reading SNMPtranslate file error: %s", err)
return err
} else {
for _, line := range strings.Split(string(data), "\n") {
oids := strings.Fields(string(line))
if len(oids) == 2 && oids[1] != "" {
oid_name := oids[0]
oid := oids[1]
fillnode(s.initNode, oid_name, strings.Split(string(oid), "."))
s.nameToOid[oid_name] = oid
}
}
}
}
// Fetching data
for _, host := range s.Host {
// Set default args
if len(host.Address) == 0 {
host.Address = "127.0.0.1:161"
}
if host.Community == "" {
host.Community = "public"
}
if host.Timeout <= 0 {
host.Timeout = 2.0
}
if host.Retries <= 0 {
host.Retries = 2
}
// Prepare host
// Get Easy GET oids
for _, oidstring := range host.GetOids {
oid := Data{}
if val, ok := s.nameToOid[oidstring]; ok {
// TODO should we add the 0 instance ?
oid.Name = oidstring
oid.Oid = val
oid.rawOid = "." + val + ".0"
} else {
oid.Name = oidstring
oid.Oid = oidstring
if string(oidstring[:1]) != "." {
oid.rawOid = "." + oidstring
} else {
oid.rawOid = oidstring
}
}
host.getOids = append(host.getOids, oid)
}
for _, oid_name := range host.Collect {
// Get GET oids
for _, oid := range s.Get {
if oid.Name == oid_name {
if val, ok := s.nameToOid[oid.Oid]; ok {
// TODO should we add the 0 instance ?
if oid.Instance != "" {
oid.rawOid = "." + val + "." + oid.Instance
} else {
oid.rawOid = "." + val + ".0"
}
} else {
oid.rawOid = oid.Oid
}
host.getOids = append(host.getOids, oid)
}
}
// Get GETBULK oids
for _, oid := range s.Bulk {
if oid.Name == oid_name {
if val, ok := s.nameToOid[oid.Oid]; ok {
oid.rawOid = "." + val
} else {
oid.rawOid = oid.Oid
}
host.bulkOids = append(host.bulkOids, oid)
}
}
}
// Table
for _, hostTable := range host.Table {
for _, snmpTable := range s.Table {
if hostTable.Name == snmpTable.Name {
table := hostTable
table.oid = snmpTable.Oid
table.mappingTable = snmpTable.MappingTable
table.subTables = snmpTable.SubTables
host.tables = append(host.tables, table)
}
}
}
// Launch Mapping
// TODO put this in cache on first run
// TODO save mapping and computed oids
// to do it only the first time
// only if len(s.OidInstanceMapping) == 0
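// NOTE: this condition is always true, so the mapping below
// currently runs on every gather interval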
if len(host.OidInstanceMapping) >= 0 {
if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
log.Printf("SNMP Mapping error for host '%s': %s", host.Address, err)
continue
}
}
// Launch Get requests
if err := host.SNMPGet(acc, s.initNode); err != nil {
log.Printf("SNMP Error for host '%s': %s", host.Address, err)
}
if err := host.SNMPBulk(acc, s.initNode); err != nil {
log.Printf("SNMP Error for host '%s': %s", host.Address, err)
}
}
return nil
}
func (h *Host) SNMPMap(
acc telegraf.Accumulator,
nameToOid map[string]string,
subTableMap map[string]Subtable,
) error {
if h.OidInstanceMapping == nil {
h.OidInstanceMapping = make(map[string]map[string]string)
}
// Get snmp client
snmpClient, err := h.GetSNMPClient()
if err != nil {
return err
}
// Disconnect when done
defer snmpClient.Conn.Close()
// Prepare OIDs
for _, table := range h.tables {
// We don't have mapping
if table.mappingTable == "" {
if len(table.subTables) == 0 {
// If We don't have mapping table
// neither subtables list
// This is just a bulk request
oid := Data{}
oid.Oid = table.oid
if val, ok := nameToOid[oid.Oid]; ok {
oid.rawOid = "." + val
} else {
oid.rawOid = oid.Oid
}
h.bulkOids = append(h.bulkOids, oid)
} else {
// If We don't have mapping table
// but we have subtables
// This is a bunch of bulk requests
// For each subtable ...
for _, sb := range table.subTables {
// ... we create a new Data (oid) object
oid := Data{}
// Looking for more information about this subtable
ssb, exists := subTableMap[sb]
if exists {
// We found a subtable section in config files
oid.Oid = ssb.Oid
oid.rawOid = ssb.Oid
oid.Unit = ssb.Unit
} else {
// We did NOT find a subtable section in config files
oid.Oid = sb
oid.rawOid = sb
}
// TODO check oid validity
// Add the new oid to getOids list
h.bulkOids = append(h.bulkOids, oid)
}
}
} else {
// We have a mapping table
// We need to query this table
// To get mapping between instance id
// and instance name
oid_asked := table.mappingTable
oid_next := oid_asked
need_more_requests := true
// Set max repetition
maxRepetition := uint8(32)
// Launch requests
for need_more_requests {
// Launch request
result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition)
if err3 != nil {
return err3
}
lastOid := ""
for _, variable := range result.Variables {
lastOid = variable.Name
if strings.HasPrefix(variable.Name, oid_asked) {
switch variable.Type {
// handle instance names
case gosnmp.OctetString:
// Check if instance is in includes instances
getInstances := true
if len(table.IncludeInstances) > 0 {
getInstances = false
for _, instance := range table.IncludeInstances {
if instance == string(variable.Value.([]byte)) {
getInstances = true
}
}
}
// Check if instance is in excludes instances
if len(table.ExcludeInstances) > 0 {
getInstances = true
for _, instance := range table.ExcludeInstances {
if instance == string(variable.Value.([]byte)) {
getInstances = false
}
}
}
// We don't want this instance
if !getInstances {
continue
}
// remove oid table from the complete oid
// in order to get the current instance id
key := strings.Replace(variable.Name, oid_asked, "", 1)
if len(table.subTables) == 0 {
// We have a mapping table
// but no subtables
// This is just a bulk request
// Building mapping table
mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))}
_, exists := h.OidInstanceMapping[table.oid]
if exists {
h.OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte))
} else {
h.OidInstanceMapping[table.oid] = mapping
}
// Add table oid in bulk oid list
oid := Data{}
oid.Oid = table.oid
if val, ok := nameToOid[oid.Oid]; ok {
oid.rawOid = "." + val
} else {
oid.rawOid = oid.Oid
}
h.bulkOids = append(h.bulkOids, oid)
} else {
// We have a mapping table
// and some subtables
// This is a bunch of get requests
// This is the best case :)
// For each subtable ...
for _, sb := range table.subTables {
// ... we create a new Data (oid) object
oid := Data{}
// Looking for more information about this subtable
ssb, exists := subTableMap[sb]
if exists {
// We found a subtable section in config files
oid.Oid = ssb.Oid + key
oid.rawOid = ssb.Oid + key
oid.Unit = ssb.Unit
oid.Instance = string(variable.Value.([]byte))
} else {
// We did NOT find a subtable section in config files
oid.Oid = sb + key
oid.rawOid = sb + key
oid.Instance = string(variable.Value.([]byte))
}
// TODO check oid validity
// Add the new oid to getOids list
h.getOids = append(h.getOids, oid)
}
}
default:
}
} else {
break
}
}
// Determine if we need more requests
if strings.HasPrefix(lastOid, oid_asked) {
need_more_requests = true
oid_next = lastOid
} else {
need_more_requests = false
}
}
}
}
// Mapping finished
// Create newoids based on mapping
return nil
}
func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error {
// Get snmp client
snmpClient, err := h.GetSNMPClient()
if err != nil {
return err
}
// Disconnect when done
defer snmpClient.Conn.Close()
// Prepare OIDs
oidsList := make(map[string]Data)
for _, oid := range h.getOids {
oidsList[oid.rawOid] = oid
}
oidsNameList := make([]string, 0, len(oidsList))
for _, oid := range oidsList {
oidsNameList = append(oidsNameList, oid.rawOid)
}
// gosnmp.MAX_OIDS == 60
// TODO use gosnmp.MAX_OIDS instead of hard coded value
max_oids := 60
// limit 60 (MAX_OIDS) oids by requests
for i := 0; i < len(oidsList); i = i + max_oids {
// Launch request
max_index := i + max_oids
if i+max_oids > len(oidsList) {
max_index = len(oidsList)
}
result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to g.MAX_OIDS
if err3 != nil {
return err3
}
// Handle response
_, err = h.HandleResponse(oidsList, result, acc, initNode)
if err != nil {
return err
}
}
return nil
}
func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error {
// Get snmp client
snmpClient, err := h.GetSNMPClient()
if err != nil {
return err
}
// Disconnect when done
defer snmpClient.Conn.Close()
// Prepare OIDs
oidsList := make(map[string]Data)
for _, oid := range h.bulkOids {
oidsList[oid.rawOid] = oid
}
oidsNameList := make([]string, 0, len(oidsList))
for _, oid := range oidsList {
oidsNameList = append(oidsNameList, oid.rawOid)
}
// TODO Trying to make requests with more than one OID
// to reduce the number of requests
for _, oid := range oidsNameList {
oid_asked := oid
need_more_requests := true
// Set max repetition
maxRepetition := oidsList[oid].MaxRepetition
if maxRepetition <= 0 {
maxRepetition = 32
}
// Launch requests
for need_more_requests {
// Launch request
result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition)
if err3 != nil {
return err3
}
// Handle response
last_oid, err := h.HandleResponse(oidsList, result, acc, initNode)
if err != nil {
return err
}
// Determine if we need more requests
if strings.HasPrefix(last_oid, oid_asked) {
need_more_requests = true
oid = last_oid
} else {
need_more_requests = false
}
}
}
return nil
}
func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) {
// Prepare Version
var version gosnmp.SnmpVersion
if h.Version == 1 {
version = gosnmp.Version1
} else if h.Version == 3 {
version = gosnmp.Version3
} else {
version = gosnmp.Version2c
}
// Prepare host and port
host, port_str, err := net.SplitHostPort(h.Address)
if err != nil {
port_str = string("161")
}
// convert port_str to port in uint16
port_64, err := strconv.ParseUint(port_str, 10, 16)
port := uint16(port_64)
// Get SNMP client
snmpClient := &gosnmp.GoSNMP{
Target: host,
Port: port,
Community: h.Community,
Version: version,
Timeout: time.Duration(h.Timeout) * time.Second,
Retries: h.Retries,
}
// Connection
err2 := snmpClient.Connect()
if err2 != nil {
return nil, err2
}
// Return snmpClient
return snmpClient, nil
}
func (h *Host) HandleResponse(
oids map[string]Data,
result *gosnmp.SnmpPacket,
acc telegraf.Accumulator,
initNode Node,
) (string, error) {
var lastOid string
for _, variable := range result.Variables {
lastOid = variable.Name
nextresult:
// Get only oid wanted
for oid_key, oid := range oids {
// Skip oids already processed
for _, processedOid := range h.processedOids {
if variable.Name == processedOid {
break nextresult
}
}
// If variable.Name is the same as oid_key
// OR
// the result is SNMP table which "." comes right after oid_key.
// ex: oid_key: .1.3.6.1.2.1.2.2.1.16, variable.Name: .1.3.6.1.2.1.2.2.1.16.1
if variable.Name == oid_key || strings.HasPrefix(variable.Name, oid_key+".") {
switch variable.Type {
// handle Metrics
case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32,
gosnmp.TimeTicks, gosnmp.Counter64, gosnmp.Uinteger32, gosnmp.OctetString:
// Prepare tags
tags := make(map[string]string)
if oid.Unit != "" {
tags["unit"] = oid.Unit
}
// Get name and instance
var oid_name string
var instance string
// Get oidname and instance from translate file
oid_name, instance = findnodename(initNode,
strings.Split(string(variable.Name[1:]), "."))
// Set instance tag
// From mapping table
mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key]
if inMappingNoSubTable {
// filter if the instance in not in
// OidInstanceMapping mapping map
if instance_name, exists := mapping[instance]; exists {
tags["instance"] = instance_name
} else {
continue
}
} else if oid.Instance != "" {
// From config files
tags["instance"] = oid.Instance
} else if instance != "" {
// Using last id of the current oid, ie:
// with .1.3.6.1.2.1.31.1.1.1.10.3
// instance is 3
tags["instance"] = instance
}
// Set name
var field_name string
if oid_name != "" {
// Set fieldname as oid name from translate file
field_name = oid_name
} else {
// Set fieldname as oid name from inputs.snmp.get section
// Because the result oid is equal to inputs.snmp.get section
field_name = oid.Name
}
tags["snmp_host"], _, _ = net.SplitHostPort(h.Address)
fields := make(map[string]interface{})
fields[string(field_name)] = variable.Value
h.processedOids = append(h.processedOids, variable.Name)
acc.AddFields(field_name, fields, tags)
case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
// Oid not found
log.Printf("[snmp input] Oid not found: %s", oid_key)
default:
// delete other data
}
break
}
}
}
return lastOid, nil
}
func init() {
inputs.Add("snmp_legacy", func() telegraf.Input {
return &Snmp{}
})
}
View File
@ -0,0 +1,482 @@
package snmp_legacy
import (
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSNMPErrorGet1(t *testing.T) {
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16.1",
}
h := Host{
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "bad_oid.txt",
Host: []Host{h},
Get: []Data{get1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.Error(t, err)
}
func TestSNMPErrorGet2(t *testing.T) {
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16.1",
}
h := Host{
Collect: []string{"oid1"},
}
s := Snmp{
Host: []Host{h},
Get: []Data{get1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
assert.Equal(t, 0, len(acc.Metrics))
}
func TestSNMPErrorBulk(t *testing.T) {
bulk1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16",
}
h := Host{
Address: testutil.GetLocalHost(),
Collect: []string{"oid1"},
}
s := Snmp{
Host: []Host{h},
Bulk: []Data{bulk1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
assert.Equal(t, 0, len(acc.Metrics))
}
func TestSNMPGet1(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16.1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
Host: []Host{h},
Get: []Data{get1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"oid1",
map[string]interface{}{
"oid1": uint(543846),
},
map[string]string{
"unit": "octets",
"snmp_host": testutil.GetLocalHost(),
},
)
}
func TestSNMPGet2(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Oid: "ifNumber",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
func TestSNMPGet3(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifSpeed",
Instance: "1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"ifSpeed",
map[string]interface{}{
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
}
func TestSNMPEasyGet4(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifSpeed",
Instance: "1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
GetOids: []string{"ifNumber"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"ifSpeed",
map[string]interface{}{
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
func TestSNMPEasyGet5(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
get1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifSpeed",
Instance: "1",
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
GetOids: []string{".1.3.6.1.2.1.2.1.0"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Get: []Data{get1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"ifSpeed",
map[string]interface{}{
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
func TestSNMPEasyGet6(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
GetOids: []string{"1.3.6.1.2.1.2.1.0"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"ifNumber",
map[string]interface{}{
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
func TestSNMPBulk1(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
bulk1 := Data{
Name: "oid1",
Unit: "octets",
Oid: ".1.3.6.1.2.1.2.2.1.16",
MaxRepetition: 2,
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Bulk: []Data{bulk1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(543846),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(26475179),
},
map[string]string{
"unit": "octets",
"instance": "2",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(108963968),
},
map[string]string{
"unit": "octets",
"instance": "3",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(12991453),
},
map[string]string{
"unit": "octets",
"instance": "36",
"snmp_host": testutil.GetLocalHost(),
},
)
}
// TODO find out why Circle CI stops with the following error
// when this test is active:
// bash scripts/circle-test.sh died unexpectedly
// Maybe the test is too long ??
func dTestSNMPBulk2(t *testing.T) {
bulk1 := Data{
Name: "oid1",
Unit: "octets",
Oid: "ifOutOctets",
MaxRepetition: 2,
}
h := Host{
Address: testutil.GetLocalHost() + ":31161",
Community: "telegraf",
Version: 2,
Timeout: 2.0,
Retries: 2,
Collect: []string{"oid1"},
}
s := Snmp{
SnmptranslateFile: "./testdata/oids.txt",
Host: []Host{h},
Bulk: []Data{bulk1},
}
var acc testutil.Accumulator
err := s.Gather(&acc)
require.NoError(t, err)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(543846),
},
map[string]string{
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(26475179),
},
map[string]string{
"unit": "octets",
"instance": "2",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(108963968),
},
map[string]string{
"unit": "octets",
"instance": "3",
"snmp_host": testutil.GetLocalHost(),
},
)
acc.AssertContainsTaggedFields(t,
"ifOutOctets",
map[string]interface{}{
"ifOutOctets": uint(12991453),
},
map[string]string{
"unit": "octets",
"instance": "36",
"snmp_host": testutil.GetLocalHost(),
},
)
}
View File
@ -272,6 +272,9 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
&bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
if ret == win.PDH_MORE_DATA {
filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
if len(filledBuf) == 0 {
continue
}
ret = win.PdhGetFormattedCounterArrayDouble(metric.counterHandle,
&bufSize, &bufCount, &filledBuf[0])
for i := 0; i < int(bufCount); i++ {
View File
@ -27,40 +27,39 @@ echo mntr | nc localhost 2181
zk_max_file_descriptor_count 1024 - only available on Unix platforms
```
## Configuration
```
# Reads 'mntr' stats from one or many zookeeper servers
[[inputs.zookeeper]]
  ## An array of address to gather stats about. Specify an ip or hostname
  ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
  ## If no servers are specified, then localhost is used as the host.
  ## If no port is specified, 2181 is used
  servers = [":2181"]
```
## InfluxDB Measurement:
```
M zookeeper
  T host
  T port
  T state
  F approximate_data_size      integer
  F avg_latency                integer
  F ephemerals_count           integer
  F max_file_descriptor_count  integer
  F max_latency                integer
  F min_latency                integer
  F num_alive_connections      integer
  F open_file_descriptor_count integer
  F outstanding_requests       integer
  F packets_received           integer
  F packets_sent               integer
  F version                    string
  F watch_count                integer
  F znode_count                integer
```
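For context, the 'mntr' exchange shown at the top of this file can be sketched in a few lines of Go; this illustrates the four-letter-word protocol only, it is not the plugin's exact implementation, and `fetchMntr` is a hypothetical name.
```go
package example

import (
	"bufio"
	"fmt"
	"net"
	"strings"
	"time"
)

// fetchMntr connects to a zookeeper server, sends the 'mntr' command, and
// parses the tab-separated key/value lines it returns (e.g. "zk_avg_latency\t0").
func fetchMntr(addr string) (map[string]string, error) {
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	if _, err := fmt.Fprint(conn, "mntr\n"); err != nil {
		return nil, err
	}
	stats := make(map[string]string)
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), "\t", 2)
		if len(parts) == 2 {
			// strip the zk_ prefix so keys match the field names above
			stats[strings.TrimPrefix(parts[0], "zk_")] = parts[1]
		}
	}
	return stats, scanner.Err()
}
```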
View File
@ -2,6 +2,42 @@
This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
### Configuration:
```toml
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The full HTTP or UDP endpoint URL for your InfluxDB instance.
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
## The target database for metrics (telegraf will create it if not exists).
database = "telegraf" # required
## Retention policy to write to. Empty string writes to the default rp.
retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
```
### Required parameters:
* `urls`: List of strings, this is for InfluxDB clustering
@ -12,16 +48,14 @@ to write to. Each URL should start with either `http://` or `udp://`
### Optional parameters:
* `write_consistency`: Write consistency (clusters only), can be: "any", "one", "quorum", "all".
* `retention_policy`: Retention policy to write to.
* `timeout`: Write timeout (for the InfluxDB client), formatted as a string. If not provided, will default to 5s. 0s means no timeout (not recommended).
* `username`: Username for influxdb
* `password`: Password for influxdb
* `user_agent`: Set the user agent for HTTP POSTs (can be useful for log differentiation)
* `udp_payload`: Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
* `ssl_ca`: SSL CA
* `ssl_cert`: SSL CERT
* `ssl_key`: SSL key
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
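A minimal sketch of the url-selection behaviour described above ("only ONE of the urls will be written to each interval"); choosing at random is an assumption for illustration, and `pickURL` is not part of the plugin:
```go
package example

import "math/rand"

// pickURL selects one of the configured urls to receive the whole batch
// for this flush interval.
func pickURL(urls []string) string {
	return urls[rand.Intn(len(urls))]
}
```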
View File
@ -55,7 +55,7 @@ var sampleConfig = `
## Retention policy to write to. Empty string writes to the default rp.
retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any"
## Write timeout (for the InfluxDB client), formatted as a string.
@ -146,7 +146,7 @@ func (i *InfluxDB) Connect() error {
func createDatabase(c client.Client, database string) error {
// Create Database if it doesn't exist
_, err := c.Query(client.Query{
Command: fmt.Sprintf("CREATE DATABASE \"%s\"", database),
})
return err
}
View File
@ -0,0 +1,67 @@
# Kafka Producer Output Plugin
This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting as a Kafka Producer.
```
[[outputs.kafka]]
## URLs of kafka brokers
brokers = ["localhost:9092"]
## Kafka topic for producer messages
topic = "telegraf"
## Telegraf tag to use as a routing key
## ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
## CompressionCodec represents the various compression codecs recognized by
## Kafka in messages.
## 0 : No compression
## 1 : Gzip compression
## 2 : Snappy compression
compression_codec = 0
## RequiredAcks is used in Produce Requests to tell the broker how many
## replica acknowledgements it must see before responding
## 0 : the producer never waits for an acknowledgement from the broker.
## This option provides the lowest latency but the weakest durability
## guarantees (some data will be lost when a server fails).
## 1 : the producer gets an acknowledgement after the leader replica has
## received the data. This option provides better durability as the
## client waits until the server acknowledges the request as successful
## (only messages that were written to the now-dead leader but not yet
## replicated will be lost).
## -1: the producer gets an acknowledgement after all in-sync replicas have
## received the data. This option provides the best durability: no
## messages will be lost as long as at least one in-sync replica remains.
required_acks = -1
## The total number of times to retry sending a message
max_retry = 3
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
data_format = "influx"
```
### Required parameters:
* `brokers`: List of strings specifying the `kafka` brokers to connect to. On each flush interval, Telegraf will randomly choose one of the URLs to write to. Each URL should include only host and port, e.g. `["{host}:{port}","{host2}:{port2}"]`
* `topic`: The `kafka` topic to publish to.
### Optional parameters:
* `routing_tag`: if this tag exists, its value will be used as the routing key (see the sketch after this list)
* `compression_codec`: What level of compression to use: `0` -> no compression, `1` -> gzip compression, `2` -> snappy compression
* `required_acks`: a setting for how many `acks` are required from the `kafka` broker cluster.
* `max_retry`: Maximum number of times to retry a failed write
* `ssl_ca`: SSL CA
* `ssl_cert`: SSL CERT
* `ssl_key`: SSL key
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
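As noted in the `routing_tag` item above, the routing key comes from the metric's tags. A minimal sketch of that lookup, using a hypothetical `routingKey` helper and example tags (an illustration only, not the plugin's actual code):
```
package main

import "fmt"

// routingKey mirrors the routing_tag behavior described above: if the
// configured tag exists on a metric, its value becomes the Kafka routing
// key; otherwise the key is left empty and the broker partitions freely.
func routingKey(tags map[string]string, routingTag string) string {
	if v, ok := tags[routingTag]; ok {
		return v
	}
	return ""
}

func main() {
	tags := map[string]string{"host": "web-01", "cpu": "cpu0"}
	fmt.Println(routingKey(tags, "host")) // web-01
	fmt.Println(routingKey(tags, "rack")) // "" (tag missing, no routing key)
}
```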

View File

@ -7,6 +7,7 @@ import (
"io/ioutil" "io/ioutil"
"log" "log"
"net/http" "net/http"
"regexp"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
@ -14,19 +15,22 @@ import (
"github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/graphite"
) )
// Librato structure for configuration and client
type Librato struct { type Librato struct {
ApiUser string APIUser string
ApiToken string APIToken string
Debug bool Debug bool
NameFromTags bool SourceTag string // Deprecated, keeping for backward-compatibility
SourceTag string Timeout internal.Duration
Timeout internal.Duration Template string
Template string
apiUrl string APIUrl string
client *http.Client client *http.Client
} }
// https://www.librato.com/docs/kb/faq/best_practices/naming_convention_metrics_sources.html#naming-limitations-for-sources-and-metrics
var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]")
var sampleConfig = ` var sampleConfig = `
## Librato API Docs ## Librato API Docs
## http://dev.librato.com/v1/metrics-authentication ## http://dev.librato.com/v1/metrics-authentication
@ -36,20 +40,21 @@ var sampleConfig = `
api_token = "my-secret-token" # required. api_token = "my-secret-token" # required.
## Debug ## Debug
# debug = false # debug = false
## Tag Field to populate source attribute (optional)
## This is typically the _hostname_ from which the metric was obtained.
source_tag = "host"
## Connection timeout. ## Connection timeout.
# timeout = "5s" # timeout = "5s"
## Output Name Template (same as graphite buckets) ## Output source Template (same as graphite buckets)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
template = "host.tags.measurement.field" ## This template is used in librato's source (not metric's name)
template = "host"
` `
// LMetrics is the default struct for Librato's API format
type LMetrics struct { type LMetrics struct {
Gauges []*Gauge `json:"gauges"` Gauges []*Gauge `json:"gauges"`
} }
// Gauge is the gauge structure for Librato's API format
type Gauge struct { type Gauge struct {
Name string `json:"name"` Name string `json:"name"`
Value float64 `json:"value"` Value float64 `json:"value"`
@ -57,17 +62,22 @@ type Gauge struct {
MeasureTime int64 `json:"measure_time"` MeasureTime int64 `json:"measure_time"`
} }
const librato_api = "https://metrics-api.librato.com/v1/metrics" const libratoAPI = "https://metrics-api.librato.com/v1/metrics"
func NewLibrato(apiUrl string) *Librato { // NewLibrato is the main constructor for librato output plugins
func NewLibrato(apiURL string) *Librato {
return &Librato{ return &Librato{
apiUrl: apiUrl, APIUrl: apiURL,
Template: "host",
} }
} }
// Connect is the default output plugin connection function that makes sure it
// can connect to the endpoint
func (l *Librato) Connect() error { func (l *Librato) Connect() error {
if l.ApiUser == "" || l.ApiToken == "" { if l.APIUser == "" || l.APIToken == "" {
return fmt.Errorf("api_user and api_token are required fields for librato output") return fmt.Errorf(
"api_user and api_token are required fields for librato output")
} }
l.client = &http.Client{ l.client = &http.Client{
Timeout: l.Timeout.Duration, Timeout: l.Timeout.Duration,
@ -76,18 +86,23 @@ func (l *Librato) Connect() error {
} }
func (l *Librato) Write(metrics []telegraf.Metric) error { func (l *Librato) Write(metrics []telegraf.Metric) error {
if len(metrics) == 0 { if len(metrics) == 0 {
return nil return nil
} }
lmetrics := LMetrics{} if l.Template == "" {
l.Template = "host"
}
if l.SourceTag != "" {
l.Template = l.SourceTag
}
tempGauges := []*Gauge{} tempGauges := []*Gauge{}
metricCounter := 0
for _, m := range metrics { for _, m := range metrics {
if gauges, err := l.buildGauges(m); err == nil { if gauges, err := l.buildGauges(m); err == nil {
for _, gauge := range gauges { for _, gauge := range gauges {
tempGauges = append(tempGauges, gauge) tempGauges = append(tempGauges, gauge)
metricCounter++
if l.Debug { if l.Debug {
log.Printf("[DEBUG] Got a gauge: %v\n", gauge) log.Printf("[DEBUG] Got a gauge: %v\n", gauge)
} }
@ -100,81 +115,115 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
} }
} }
lmetrics.Gauges = make([]*Gauge, metricCounter) metricCounter := len(tempGauges)
copy(lmetrics.Gauges, tempGauges[0:]) // make sure we send batches of at most 300 gauges
metricsBytes, err := json.Marshal(lmetrics) sizeBatch := 300
if err != nil { for start := 0; start < metricCounter; start += sizeBatch {
return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error()) lmetrics := LMetrics{}
} else { end := start + sizeBatch
if end > metricCounter {
end = metricCounter
sizeBatch = end - start
}
lmetrics.Gauges = make([]*Gauge, sizeBatch)
copy(lmetrics.Gauges, tempGauges[start:end])
metricsBytes, err := json.Marshal(lmetrics)
if err != nil {
return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
}
if l.Debug { if l.Debug {
log.Printf("[DEBUG] Librato request: %v\n", string(metricsBytes)) log.Printf("[DEBUG] Librato request: %v\n", string(metricsBytes))
} }
}
req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
if err != nil {
return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
}
req.Header.Add("Content-Type", "application/json")
req.SetBasicAuth(l.ApiUser, l.ApiToken)
resp, err := l.client.Do(req) req, err := http.NewRequest(
if err != nil { "POST",
if l.Debug { l.APIUrl,
log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error()) bytes.NewBuffer(metricsBytes))
if err != nil {
return fmt.Errorf(
"unable to create http.Request, %s\n",
err.Error())
} }
return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) req.Header.Add("Content-Type", "application/json")
} else { req.SetBasicAuth(l.APIUser, l.APIToken)
if l.Debug {
resp, err := l.client.Do(req)
if err != nil {
if l.Debug {
log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error())
}
return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
}
defer resp.Body.Close()
if resp.StatusCode != 200 || l.Debug {
htmlData, err := ioutil.ReadAll(resp.Body) htmlData, err := ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
log.Printf("[DEBUG] Couldn't get response! (%v)\n", err) log.Printf("[DEBUG] Couldn't get response! (%v)\n", err)
} else { }
if resp.StatusCode != 200 {
return fmt.Errorf(
"received bad status code, %d\n %s",
resp.StatusCode,
string(htmlData))
}
if l.Debug {
log.Printf("[DEBUG] Librato response: %v\n", string(htmlData)) log.Printf("[DEBUG] Librato response: %v\n", string(htmlData))
} }
} }
} }
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
}
return nil return nil
} }
// SampleConfig returns the sample configuration for this output
func (l *Librato) SampleConfig() string { func (l *Librato) SampleConfig() string {
return sampleConfig return sampleConfig
} }
// Description returns the description of this output
func (l *Librato) Description() string { func (l *Librato) Description() string {
return "Configuration for Librato API to send metrics to." return "Configuration for Librato API to send metrics to."
} }
func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
gauges := []*Gauge{} gauges := []*Gauge{}
bucket := graphite.SerializeBucketName(m.Name(), m.Tags(), l.Template, "") if m.Time().Unix() == 0 {
return gauges, fmt.Errorf(
"Measure time must not be zero\n <%s> \n",
m.String())
}
metricSource := graphite.InsertField(
graphite.SerializeBucketName("", m.Tags(), l.Template, ""),
"value")
if metricSource == "" {
return gauges,
fmt.Errorf("undeterminable Source type from Field, %s\n",
l.Template)
}
for fieldName, value := range m.Fields() { for fieldName, value := range m.Fields() {
metricName := m.Name()
if fieldName != "value" {
metricName = fmt.Sprintf("%s.%s", m.Name(), fieldName)
}
gauge := &Gauge{ gauge := &Gauge{
Name: graphite.InsertField(bucket, fieldName), Source: reUnacceptedChar.ReplaceAllString(metricSource, "-"),
Name: reUnacceptedChar.ReplaceAllString(metricName, "-"),
MeasureTime: m.Time().Unix(), MeasureTime: m.Time().Unix(),
} }
if !gauge.verifyValue(value) { if !verifyValue(value) {
continue continue
} }
if err := gauge.setValue(value); err != nil { if err := gauge.setValue(value); err != nil {
return gauges, fmt.Errorf("unable to extract value from Fields, %s\n", return gauges, fmt.Errorf(
"unable to extract value from Fields, %s\n",
err.Error()) err.Error())
} }
if l.SourceTag != "" {
if source, ok := m.Tags()[l.SourceTag]; ok {
gauge.Source = source
} else {
return gauges,
fmt.Errorf("undeterminable Source type from Field, %s\n",
l.SourceTag)
}
}
gauges = append(gauges, gauge) gauges = append(gauges, gauge)
} }
if l.Debug { if l.Debug {
@ -183,7 +232,7 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
return gauges, nil return gauges, nil
} }
func (g *Gauge) verifyValue(v interface{}) bool { func verifyValue(v interface{}) bool {
switch v.(type) { switch v.(type) {
case string: case string:
return false return false
@ -209,12 +258,13 @@ func (g *Gauge) setValue(v interface{}) error {
return nil return nil
} }
// Close closes the connection to the Librato output
func (l *Librato) Close() error { func (l *Librato) Close() error {
return nil return nil
} }
func init() { func init() {
outputs.Add("librato", func() telegraf.Output { outputs.Add("librato", func() telegraf.Output {
return NewLibrato(librato_api) return NewLibrato(libratoAPI)
}) })
} }
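The `reUnacceptedChar` pattern near the top of this file enforces Librato's naming limitations by replacing anything outside `[.a-zA-Z0-9_-]` with a dash. A standalone sketch of its effect, using the same pattern with hypothetical inputs:
```
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the plugin: characters Librato does not accept in
// metric or source names are replaced with a dash.
var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]")

func main() {
	fmt.Println(reUnacceptedChar.ReplaceAllString("cpu usage%", "-")) // cpu-usage-
	fmt.Println(reUnacceptedChar.ReplaceAllString("host/web#1", "-")) // host-web-1
}
```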

View File

@ -1,7 +1,6 @@
package librato package librato
import ( import (
"encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -10,141 +9,137 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers/graphite"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var ( var (
fakeUrl = "http://test.librato.com" fakeURL = "http://test.librato.com"
fakeUser = "telegraf@influxdb.com" fakeUser = "telegraf@influxdb.com"
fakeToken = "123456" fakeToken = "123456"
) )
func fakeLibrato() *Librato { func fakeLibrato() *Librato {
l := NewLibrato(fakeUrl) l := NewLibrato(fakeURL)
l.ApiUser = fakeUser l.APIUser = fakeUser
l.ApiToken = fakeToken l.APIToken = fakeToken
return l return l
} }
func TestBuildTags(t *testing.T) {
testMetric := testutil.TestMetric(0.0, "test1")
graphiteSerializer := graphite.GraphiteSerializer{}
tags, err := graphiteSerializer.Serialize(testMetric)
fmt.Printf("Tags: %v", tags)
require.NoError(t, err)
}
func TestUriOverride(t *testing.T) { func TestUriOverride(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ts := httptest.NewServer(
w.WriteHeader(http.StatusOK) http.HandlerFunc(
})) func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
defer ts.Close() defer ts.Close()
l := NewLibrato(ts.URL) l := NewLibrato(ts.URL)
l.ApiUser = "telegraf@influxdb.com" l.APIUser = "telegraf@influxdb.com"
l.ApiToken = "123456" l.APIToken = "123456"
err := l.Connect() err := l.Connect()
require.NoError(t, err) require.NoError(t, err)
err = l.Write(testutil.MockMetrics()) err = l.Write([]telegraf.Metric{newHostMetric(int32(0), "name", "host")})
require.NoError(t, err) require.NoError(t, err)
} }
func TestBadStatusCode(t *testing.T) { func TestBadStatusCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ts := httptest.NewServer(
w.WriteHeader(http.StatusServiceUnavailable) http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(`{ w.WriteHeader(http.StatusServiceUnavailable)
"errors": { }))
"system": [
"The API is currently down for maintenance. It'll be back shortly."
]
}
}`)
}))
defer ts.Close() defer ts.Close()
l := NewLibrato(ts.URL) l := NewLibrato(ts.URL)
l.ApiUser = "telegraf@influxdb.com" l.APIUser = "telegraf@influxdb.com"
l.ApiToken = "123456" l.APIToken = "123456"
err := l.Connect() err := l.Connect()
require.NoError(t, err) require.NoError(t, err)
err = l.Write(testutil.MockMetrics()) err = l.Write([]telegraf.Metric{newHostMetric(int32(0), "name", "host")})
if err == nil { if err == nil {
t.Errorf("error expected but none returned") t.Errorf("error expected but none returned")
} else { } else {
require.EqualError(t, fmt.Errorf("received bad status code, 503\n"), err.Error()) require.EqualError(
t,
fmt.Errorf("received bad status code, 503\n "), err.Error())
} }
} }
func TestBuildGauge(t *testing.T) { func TestBuildGauge(t *testing.T) {
mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()
var gaugeTests = []struct { var gaugeTests = []struct {
ptIn telegraf.Metric ptIn telegraf.Metric
outGauge *Gauge outGauge *Gauge
err error err error
}{ }{
{ {
testutil.TestMetric(0.0, "test1"), newHostMetric(0.0, "test1", "host1"),
&Gauge{ &Gauge{
Name: "value1.test1", Name: "test1",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime,
Value: 0.0, Value: 0.0,
Source: "host1",
}, },
nil, nil,
}, },
{ {
testutil.TestMetric(1.0, "test2"), newHostMetric(1.0, "test2", "host2"),
&Gauge{ &Gauge{
Name: "value1.test2", Name: "test2",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime,
Value: 1.0, Value: 1.0,
Source: "host2",
}, },
nil, nil,
}, },
{ {
testutil.TestMetric(10, "test3"), newHostMetric(10, "test3", "host3"),
&Gauge{ &Gauge{
Name: "value1.test3", Name: "test3",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime,
Value: 10.0, Value: 10.0,
Source: "host3",
}, },
nil, nil,
}, },
{ {
testutil.TestMetric(int32(112345), "test4"), newHostMetric(int32(112345), "test4", "host4"),
&Gauge{ &Gauge{
Name: "value1.test4", Name: "test4",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime,
Value: 112345.0, Value: 112345.0,
Source: "host4",
}, },
nil, nil,
}, },
{ {
testutil.TestMetric(int64(112345), "test5"), newHostMetric(int64(112345), "test5", "host5"),
&Gauge{ &Gauge{
Name: "value1.test5", Name: "test5",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime,
Value: 112345.0, Value: 112345.0,
Source: "host5",
}, },
nil, nil,
}, },
{ {
testutil.TestMetric(float32(11234.5), "test6"), newHostMetric(float32(11234.5), "test6", "host6"),
&Gauge{ &Gauge{
Name: "value1.test6", Name: "test6",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime,
Value: 11234.5, Value: 11234.5,
Source: "host6",
}, },
nil, nil,
}, },
{ {
testutil.TestMetric("11234.5", "test7"), newHostMetric("11234.5", "test7", "host7"),
nil, nil,
nil, nil,
}, },
} }
l := NewLibrato(fakeUrl) l := NewLibrato(fakeURL)
for _, gt := range gaugeTests { for _, gt := range gaugeTests {
gauges, err := l.buildGauges(gt.ptIn) gauges, err := l.buildGauges(gt.ptIn)
if err != nil && gt.err == nil { if err != nil && gt.err == nil {
@ -167,61 +162,121 @@ func TestBuildGauge(t *testing.T) {
} }
} }
func newHostMetric(value interface{}, name, host string) (metric telegraf.Metric) {
metric, _ = telegraf.NewMetric(
name,
map[string]string{"host": host},
map[string]interface{}{"value": value},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
)
return
}
func TestBuildGaugeWithSource(t *testing.T) { func TestBuildGaugeWithSource(t *testing.T) {
mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
pt1, _ := telegraf.NewMetric( pt1, _ := telegraf.NewMetric(
"test1", "test1",
map[string]string{"hostname": "192.168.0.1", "tag1": "value1"}, map[string]string{"hostname": "192.168.0.1", "tag1": "value1"},
map[string]interface{}{"value": 0.0}, map[string]interface{}{"value": 0.0},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), mtime,
) )
pt2, _ := telegraf.NewMetric( pt2, _ := telegraf.NewMetric(
"test2", "test2",
map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"}, map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"},
map[string]interface{}{"value": 1.0}, map[string]interface{}{"value": 1.0},
time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC), mtime,
)
pt3, _ := telegraf.NewMetric(
"test3",
map[string]string{
"hostname": "192.168.0.1",
"tag2": "value2",
"tag1": "value1"},
map[string]interface{}{"value": 1.0},
mtime,
)
pt4, _ := telegraf.NewMetric(
"test4",
map[string]string{
"hostname": "192.168.0.1",
"tag2": "value2",
"tag1": "value1"},
map[string]interface{}{"value": 1.0},
mtime,
) )
var gaugeTests = []struct { var gaugeTests = []struct {
ptIn telegraf.Metric ptIn telegraf.Metric
template string
outGauge *Gauge outGauge *Gauge
err error err error
}{ }{
{ {
pt1, pt1,
"hostname",
&Gauge{ &Gauge{
Name: "192_168_0_1.value1.test1", Name: "test1",
MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime.Unix(),
Value: 0.0, Value: 0.0,
Source: "192.168.0.1", Source: "192_168_0_1",
}, },
nil, nil,
}, },
{ {
pt2, pt2,
"hostname",
&Gauge{ &Gauge{
Name: "192_168_0_1.value1.test1", Name: "test2",
MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: mtime.Unix(),
Value: 1.0, Value: 1.0,
}, },
fmt.Errorf("undeterminable Source type from Field, hostname"), fmt.Errorf("undeterminable Source type from Field, hostname"),
}, },
{
pt3,
"tags",
&Gauge{
Name: "test3",
MeasureTime: mtime.Unix(),
Value: 1.0,
Source: "192_168_0_1.value1.value2",
},
nil,
},
{
pt4,
"hostname.tag2",
&Gauge{
Name: "test4",
MeasureTime: mtime.Unix(),
Value: 1.0,
Source: "192_168_0_1.value2",
},
nil,
},
} }
l := NewLibrato(fakeUrl) l := NewLibrato(fakeURL)
l.SourceTag = "hostname"
for _, gt := range gaugeTests { for _, gt := range gaugeTests {
l.Template = gt.template
gauges, err := l.buildGauges(gt.ptIn) gauges, err := l.buildGauges(gt.ptIn)
if err != nil && gt.err == nil { if err != nil && gt.err == nil {
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err) t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
} }
if gt.err != nil && err == nil { if gt.err != nil && err == nil {
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error()) t.Errorf(
"%s: expected an error (%s) but none returned",
gt.ptIn.Name(),
gt.err.Error())
} }
if len(gauges) == 0 { if len(gauges) == 0 {
continue continue
} }
if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) { if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) {
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauges[0]) t.Errorf(
"%s: \nexpected %+v\ngot %+v\n",
gt.ptIn.Name(),
gt.outGauge, gauges[0])
} }
} }
} }

View File

@ -12,7 +12,7 @@ const DEFAULT_TEMPLATE = "host.tags.measurement.field"
var ( var (
fieldDeleter = strings.NewReplacer(".FIELDNAME", "", "FIELDNAME.", "") fieldDeleter = strings.NewReplacer(".FIELDNAME", "", "FIELDNAME.", "")
sanitizedChars = strings.NewReplacer("/", "-", "@", "-", "*", "-", " ", "_", "..", ".") sanitizedChars = strings.NewReplacer("/", "-", "@", "-", "*", "-", " ", "_", "..", ".", `\`, "")
) )
type GraphiteSerializer struct { type GraphiteSerializer struct {
@ -36,8 +36,8 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error)
valueS := fmt.Sprintf("%#v", value) valueS := fmt.Sprintf("%#v", value)
point := fmt.Sprintf("%s %s %d", point := fmt.Sprintf("%s %s %d",
// insert "field" section of template // insert "field" section of template
InsertField(bucket, fieldName), sanitizedChars.Replace(InsertField(bucket, fieldName)),
valueS, sanitizedChars.Replace(valueS),
timestamp) timestamp)
out = append(out, point) out = append(out, point)
} }
@ -100,9 +100,9 @@ func SerializeBucketName(
} }
if prefix == "" { if prefix == "" {
return sanitizedChars.Replace(strings.Join(out, ".")) return strings.Join(out, ".")
} }
return sanitizedChars.Replace(prefix + "." + strings.Join(out, ".")) return prefix + "." + strings.Join(out, ".")
} }
// InsertField takes the bucket string from SerializeBucketName and replaces the // InsertField takes the bucket string from SerializeBucketName and replaces the

View File

@ -160,6 +160,58 @@ func TestSerializeValueField2(t *testing.T) {
assert.Equal(t, expS, mS) assert.Equal(t, expS, mS)
} }
// test that fields with spaces get fixed.
func TestSerializeFieldWithSpaces(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"cpu": "cpu0",
"datacenter": "us-west-2",
}
fields := map[string]interface{}{
`field\ with\ spaces`: float64(91.5),
}
m, err := telegraf.NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
s := GraphiteSerializer{
Template: "host.tags.measurement.field",
}
mS, err := s.Serialize(m)
assert.NoError(t, err)
expS := []string{
fmt.Sprintf("localhost.cpu0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()),
}
assert.Equal(t, expS, mS)
}
// test that tags with spaces get fixed.
func TestSerializeTagWithSpaces(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"cpu": `cpu\ 0`,
"datacenter": "us-west-2",
}
fields := map[string]interface{}{
`field_with_spaces`: float64(91.5),
}
m, err := telegraf.NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
s := GraphiteSerializer{
Template: "host.tags.measurement.field",
}
mS, err := s.Serialize(m)
assert.NoError(t, err)
expS := []string{
fmt.Sprintf("localhost.cpu_0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()),
}
assert.Equal(t, expS, mS)
}
// test that a field named "value" gets ignored at beginning of template. // test that a field named "value" gets ignored at beginning of template.
func TestSerializeValueField3(t *testing.T) { func TestSerializeValueField3(t *testing.T) {
now := time.Now() now := time.Now()
@ -186,6 +238,32 @@ func TestSerializeValueField3(t *testing.T) {
assert.Equal(t, expS, mS) assert.Equal(t, expS, mS)
} }
// test that a field named "value" gets ignored at beginning of template.
func TestSerializeValueField5(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"cpu": "cpu0",
"datacenter": "us-west-2",
}
fields := map[string]interface{}{
"value": float64(91.5),
}
m, err := telegraf.NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
s := GraphiteSerializer{
Template: template5,
}
mS, err := s.Serialize(m)
assert.NoError(t, err)
expS := []string{
fmt.Sprintf("localhost.us-west-2.cpu0.cpu 91.5 %d", now.Unix()),
}
assert.Equal(t, expS, mS)
}
func TestSerializeMetricPrefix(t *testing.T) { func TestSerializeMetricPrefix(t *testing.T) {
now := time.Now() now := time.Now()
tags := map[string]string{ tags := map[string]string{
@ -315,20 +393,6 @@ func TestTemplate4(t *testing.T) {
assert.Equal(t, expS, mS) assert.Equal(t, expS, mS)
} }
func TestTemplate5(t *testing.T) {
now := time.Now()
fields := map[string]interface{}{
"usage_idle": float64(91.5),
}
m, err := telegraf.NewMetric("cpu", defaultTags, fields, now)
assert.NoError(t, err)
mS := SerializeBucketName(m.Name(), m.Tags(), template5, "")
expS := "localhost.us-west-2.cpu0.cpu.FIELDNAME"
assert.Equal(t, expS, mS)
}
func TestTemplate6(t *testing.T) { func TestTemplate6(t *testing.T) {
now := time.Now() now := time.Now()
fields := map[string]interface{}{ fields := map[string]interface{}{