merge upstream changes

Edie Zhang 2016-10-07 22:21:22 +11:00
commit 823df18095
185 changed files with 11300 additions and 3831 deletions

.github/PULL_REQUEST_TEMPLATE.md

@ -1,5 +1,5 @@
### Required for all PRs:
- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
- [ ] README.md updated (if adding a new plugin)

CHANGELOG.md

@ -1,19 +1,81 @@
## v1.1 [unreleased]
### Release Notes
- On systemd, Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
On most systems, the logs will be directed to the systemd journal and can be
accessed by `journalctl -u telegraf.service`. Consult the systemd journal
documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf#L70)
available in 1.1, which will allow users to easily configure telegraf to
continue sending logs to /var/log/telegraf/telegraf.log.
### Features
- [#1732](https://github.com/influxdata/telegraf/pull/1732): Telegraf systemd service, log to journal.
- [#1782](https://github.com/influxdata/telegraf/pull/1782): Allow numeric and non-string values for tag_keys.
- [#1694](https://github.com/influxdata/telegraf/pull/1694): Adding Gauge and Counter metric types.
- [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carriage returns from exec plugin output on Windows.
- [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout.
- [#1607](https://github.com/influxdata/telegraf/pull/1607): Massage metric names in Instrumental output plugin
- [#1572](https://github.com/influxdata/telegraf/pull/1572): mesos improvements.
- [#1513](https://github.com/influxdata/telegraf/issues/1513): Add Ceph Cluster Performance Statistics
- [#1650](https://github.com/influxdata/telegraf/issues/1650): Ability to configure response_timeout in httpjson input.
- [#1685](https://github.com/influxdata/telegraf/issues/1685): Add additional redis metrics.
- [#1539](https://github.com/influxdata/telegraf/pull/1539): Added capability to send metrics through Http API for OpenTSDB.
- [#1471](https://github.com/influxdata/telegraf/pull/1471): iptables input plugin.
- [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin.
- [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname for each docker measurement.
- [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin.
- [#1407](https://github.com/influxdata/telegraf/pull/1407): HTTP service listener input plugin.
- [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql
- [#1791](https://github.com/influxdata/telegraf/pull/1791): Add Docker container state metrics to Docker input plugin output
- [#1755](https://github.com/influxdata/telegraf/issues/1755): Add support to SNMP for IP & MAC address conversion.
- [#1729](https://github.com/influxdata/telegraf/issues/1729): Add support to SNMP for OID index suffixes.
- [#1813](https://github.com/influxdata/telegraf/pull/1813): Change default arguments for SNMP plugin.
- [#1686](https://github.com/influxdata/telegraf/pull/1686): Mesos input plugin: very high-cardinality mesos-task metrics removed.
- [#1838](https://github.com/influxdata/telegraf/pull/1838): Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
### Bugfixes
- [#1746](https://github.com/influxdata/telegraf/issues/1746): Fix handling of non-string values for JSON keys listed in tag_keys.
- [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2.
- [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing
- [#1716](https://github.com/influxdata/telegraf/issues/1716): Fix sensors plugin `strconv.ParseFloat: parsing "": invalid syntax` error.
- [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic
- [#1764](https://github.com/influxdata/telegraf/issues/1764): Fix kafka consumer panic when nil error is returned down errs channel.
- [#1768](https://github.com/influxdata/telegraf/pull/1768): Speed up statsd parsing.
- [#1751](https://github.com/influxdata/telegraf/issues/1751): Fix powerdns integer parse error handling.
- [#1752](https://github.com/influxdata/telegraf/issues/1752): Fix varnish plugin defaults not being used.
- [#1517](https://github.com/influxdata/telegraf/issues/1517): Fix windows glob paths.
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows.
- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated.
- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with huge payloads.
- [#1833](https://github.com/influxdata/telegraf/issues/1833): Fix translating SNMP fields not in MIB.
- [#1835](https://github.com/influxdata/telegraf/issues/1835): Fix SNMP emitting empty fields.
- [#1854](https://github.com/influxdata/telegraf/pull/1853): SQL Server waitstats truncation bug.
## v1.0.1 [2016-09-26]
### Bugfixes
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes.
- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
- [#1773](https://github.com/influxdata/telegraf/issues/1773): Add configurable timeout to influxdb input plugin.
- [#1785](https://github.com/influxdata/telegraf/pull/1785): Fix statsd no default value panic.
## v1.0 [2016-09-08]
### Release Notes
**Breaking Change**: The SNMP plugin is being deprecated in its current form.
There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
which fixes many of the issues and confusion
of its predecessor. For users wanting to continue to use the deprecated SNMP
plugin, you will need to change your config file from `[[inputs.snmp]]` to
`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
backwards-compatible.
**Breaking Change**: Aerospike main server node measurements have been renamed
aerospike_node. Aerospike namespace measurements have been renamed to
aerospike_namespace. They will also now be tagged with the node_name
@ -44,8 +106,19 @@ should now look like:
path = "/"
```
- Telegraf now supports being installed as an official windows service,
which can be installed via
`> C:\Program Files\Telegraf\telegraf.exe --service install`
- `flush_jitter` behavior has been changed. The random jitter will now be
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter`, as sketched below.
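For illustration, a minimal Go sketch of the new behavior (the `flushLoop` helper and its parameters are hypothetical, not Telegraf's actual scheduler):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// flushLoop draws a fresh random jitter before every flush, rather than
// a single jitter chosen once at startup.
func flushLoop(flushInterval, flushJitter time.Duration, flush func()) {
	ticker := time.NewTicker(flushInterval)
	defer ticker.Stop()
	for range ticker.C {
		if flushJitter > 0 {
			// Re-evaluated on every tick, matching collection_jitter.
			time.Sleep(time.Duration(rand.Int63n(int64(flushJitter))))
		}
		flush()
	}
}

func main() {
	// With flush_interval = 10s and flush_jitter = 5s, each flush is
	// delayed by a different random 0-5s offset.
	flushLoop(10*time.Second, 5*time.Second, func() { fmt.Println("flush") })
}
```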
### Features
- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
- [#1503](https://github.com/influxdata/telegraf/pull/1503): Add tls support for certs to RabbitMQ input plugin
- [#1289](https://github.com/influxdata/telegraf/pull/1289): webhooks input plugin. Thanks @francois2metz and @cduez!
- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar webhook plugin.
@ -59,10 +132,41 @@ should now look like:
- [#1500](https://github.com/influxdata/telegraf/pull/1500): Aerospike plugin refactored to use official client lib.
- [#1434](https://github.com/influxdata/telegraf/pull/1434): Add measurement name arg to logparser plugin.
- [#1479](https://github.com/influxdata/telegraf/pull/1479): logparser: change resp_code from a field to a tag.
- [#1411](https://github.com/influxdata/telegraf/pull/1411): Implement support for fetching hddtemp data
- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren!
- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified
- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified
- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
- [#1543](https://github.com/influxdata/telegraf/pull/1543): Official Windows service.
- [#1414](https://github.com/influxdata/telegraf/pull/1414): Forking sensors command to remove C package dependency.
- [#1389](https://github.com/influxdata/telegraf/pull/1389): Add a new SNMP plugin.
### Bugfixes
- [#1619](https://github.com/influxdata/telegraf/issues/1619): Fix `make windows` build target
- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures.
- [#1477](https://github.com/influxdata/telegraf/issues/1477): nstat: fix inaccurate config panic.
- [#1481](https://github.com/influxdata/telegraf/issues/1481): jolokia: fix handling multiple multi-dimensional attributes.
- [#1430](https://github.com/influxdata/telegraf/issues/1430): Fix prometheus character sanitizing. Sanitize more win_perf_counters characters.
- [#1534](https://github.com/influxdata/telegraf/pull/1534): Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does).
- [#1379](https://github.com/influxdata/telegraf/issues/1379): Fix covering Amazon Linux for post remove flow.
- [#1584](https://github.com/influxdata/telegraf/issues/1584): procstat missing fields: read/write bytes & count
- [#1472](https://github.com/influxdata/telegraf/pull/1472): diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality.
- [#1426](https://github.com/influxdata/telegraf/pull/1426): nil metrics panic fix.
- [#1384](https://github.com/influxdata/telegraf/pull/1384): Fix datarace in apache input plugin.
@ -81,19 +185,6 @@ should now look like:
- [#1418](https://github.com/influxdata/telegraf/issues/1418): logparser: error and exit on file permissions/missing errors.
- [#1499](https://github.com/influxdata/telegraf/pull/1499): Allow the user to specify the full path for HAproxy stats.
- [#1521](https://github.com/influxdata/telegraf/pull/1521): Fix Redis url, an extra "tcp://" was added.
## v1.0 beta 2 [2016-06-21]
### Features
- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL
### Bugfixes
- [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.
- [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection.
- [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string.
@ -101,50 +192,6 @@ should now look like:
- [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
- [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
- [#1396](https://github.com/influxdata/telegraf/pull/1396): Prometheus input plugin now supports x509 certs authentication
## v1.0 beta 1 [2016-06-07]
### Release Notes
- `flush_jitter` behavior has been changed. The random jitter will now be
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter`.
- All AWS plugins now utilize a standard mechanism for evaluating credentials.
This allows all AWS plugins to support environment variables, shared credential
files & profiles, and role assumptions. See the specific plugin README for
details.
- The AWS CloudWatch input plugin can now declare a wildcard value for a metric
dimension. This causes the plugin to read all metrics that contain the specified
dimension key regardless of value. This is used to export collections of metrics
without having to know the dimension values ahead of time.
- The AWS CloudWatch input plugin can now be configured with the `cache_ttl`
attribute. This configures the TTL of the internal metric cache. This is useful
in conjunction with wildcard dimension values as it will control the amount of
time before a new metric is included by the plugin.
### Features
- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren!
- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified
- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified
- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
### Bugfixes
- [#1252](https://github.com/influxdata/telegraf/pull/1252) & [#1279](https://github.com/influxdata/telegraf/pull/1279): Fix systemd service. Thanks @zbindenren & @PierreF!
- [#1221](https://github.com/influxdata/telegraf/pull/1221): Fix influxdb n_shards counter.
- [#1258](https://github.com/influxdata/telegraf/pull/1258): Fix potential kernel plugin integer parse error.
@ -154,6 +201,12 @@ time before a new metric is included by the plugin.
- [#1316](https://github.com/influxdata/telegraf/pull/1316): Removed leaked "database" tag on redis metrics. Thanks @PierreF!
- [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory.
- [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.
- [#1586](https://github.com/influxdata/telegraf/pull/1586): Remove IF NOT EXISTS from influxdb output database creation.
- [#1600](https://github.com/influxdata/telegraf/issues/1600): Fix quoting with text values in postgresql_extensible plugin.
- [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic.
- [#1634](https://github.com/influxdata/telegraf/issues/1634): Fix ntpq panic when field is missing.
- [#1637](https://github.com/influxdata/telegraf/issues/1637): Sanitize graphite output field names.
- [#1695](https://github.com/influxdata/telegraf/pull/1695): Fix MySQL plugin not sending 0 value fields.
## v0.13.1 [2016-05-24]

CONTRIBUTING.md

@ -11,6 +11,8 @@ Output plugins READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
## GoDoc
@ -30,7 +32,7 @@ Assuming you can already build the project, run these in the telegraf directory:
1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `GOOS=linux gdm save`
## Input Plugins
@ -82,9 +84,9 @@ func (s *Simple) SampleConfig() string {
func (s *Simple) Gather(acc telegraf.Accumulator) error {
    if s.Ok {
        acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
    } else {
        acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
    }
    return nil
@ -95,6 +97,13 @@ func init() {
}
```
## Adding Typed Metrics
In addition to the `AddFields` function, the accumulator also supports
`AddGauge` and `AddCounter` functions. These functions are for adding _typed_
metrics. Metric types are ignored for the InfluxDB output, but can be used
for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
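As a quick sketch of how a plugin's `Gather` might use these (the measurement
names, fields, and tags below are invented for illustration):

```go
package simple

import "github.com/influxdata/telegraf"

type Simple struct {
	Ok bool
}

func (s *Simple) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "example"}

	// A gauge: a snapshot value that can go up or down between collections.
	acc.AddGauge("temperature", map[string]interface{}{"celsius": 21.5}, tags)

	// A counter: a monotonically increasing value.
	acc.AddCounter("requests", map[string]interface{}{"total": 1027}, tags)

	return nil
}
```

For the InfluxDB output both calls behave exactly like `AddFields`; outputs that
distinguish metric types can make use of the type information.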
## Input Plugins Accepting Arbitrary Data Formats
Some input plugins (such as

Godeps

@ -1,6 +1,6 @@
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
@ -29,14 +29,17 @@ github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
@ -44,9 +47,8 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/sparrc/aerospike-client-go d4bb42d2c2d39dae68e054116f4538af189e05d5
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2

Godeps_windows

@ -1,59 +1,12 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

Makefile

@ -1,4 +1,6 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
@ -13,17 +15,18 @@ windows: prepare-windows build-windows
# Only run the build (no dependency grabbing)
build:
go install -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...
build-windows:
GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
build-for-docker:
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
# run package script
package:
@ -37,10 +40,12 @@ prepare:
# Use the windows godeps file to prepare dependencies
prepare-windows:
go get github.com/sparrc/gdm
gdm restore
gdm restore -f Godeps_windows
# Run all docker containers necessary for unit tests
docker-run:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
@ -51,29 +56,28 @@ docker-run:
docker run --name postgres -p "5432:5432" -d postgres
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
docker run --name redis -p "6379:6379" -d redis
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name nats -p "4222:4222" -d nats
# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name nats -p "4222:4222" -d nats
# Kill all docker containers, ignore errors
docker-kill:
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run

README.md

@ -20,12 +20,12 @@ new plugins.
### Linux deb and rpm Packages:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm
Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm
##### Package Instructions:
@ -46,14 +46,14 @@ to use this repo to install & update telegraf.
### Linux tarballs:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz
### FreeBSD tarball:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz
### Ansible Role:
@ -69,7 +69,7 @@ brew install telegraf
### Windows Binaries (EXPERIMENTAL)
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip
### From Source:
@ -156,10 +156,12 @@ Currently implemented sources:
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, supports JSON, influx, graphite and nagios)
* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
* [hddtemp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hddtemp)
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
@ -187,8 +189,9 @@ Currently implemented sources:
* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy)
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
@ -210,18 +213,21 @@ Currently implemented sources:
Telegraf can also collect metrics via the following service plugins:
* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
* [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
* [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
* [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
* [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
@ -241,6 +247,7 @@ want to add support for another service or third-party API.
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)

accumulator.go

@ -2,20 +2,35 @@ package telegraf
import "time"
// Accumulator is an interface for "accumulating" metrics from input plugin(s).
// The metrics are sent down a channel shared between all input plugins and then
// flushed on the configured flush_interval.
type Accumulator interface {
// AddFields adds a metric to the accumulator with the given measurement
// name, fields, and tags (and timestamp). If a timestamp is not provided,
// then the accumulator sets it to "now".
// Create a point with a value, decorating it with tags // Create a point with a value, decorating it with tags
// NOTE: tags is expected to be owned by the caller, don't mutate // NOTE: tags is expected to be owned by the caller, don't mutate
// it after passing to Add. // it after passing to Add.
Add(measurement string,
value interface{},
tags map[string]string,
t ...time.Time)
AddFields(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
AddGauge(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddCounter is the same as AddFields, but will add the metric as a "Counter" type
AddCounter(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
AddError(err error)
Debug() bool
SetDebug(enabled bool)
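As a hedged sketch of how an input plugin might use the new `AddError` method to report per-item failures without aborting the whole collection (the `MyPlugin` type and `fetchStats` helper are hypothetical):

```go
package myplugin

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// MyPlugin is a hypothetical input used only to illustrate AddError.
type MyPlugin struct {
	Servers []string
}

// fetchStats stands in for whatever per-server collection the plugin does.
func (p *MyPlugin) fetchStats(server string) (map[string]interface{}, error) {
	return nil, fmt.Errorf("connection refused: %s", server)
}

func (p *MyPlugin) Gather(acc telegraf.Accumulator) error {
	for _, server := range p.Servers {
		stats, err := p.fetchStats(server)
		if err != nil {
			// Counted and logged by the accumulator; the remaining
			// servers are still gathered.
			acc.AddError(err)
			continue
		}
		acc.AddFields("myplugin", stats, map[string]string{"server": server})
	}
	return nil
}
```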

agent/accumulator.go

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"log" "log"
"math" "math"
"sync/atomic"
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
@ -11,7 +12,7 @@ import (
)
func NewAccumulator(
inputConfig *models.InputConfig,
metrics chan telegraf.Metric,
) *accumulator {
acc := accumulator{}
@ -30,27 +31,11 @@ type accumulator struct {
// print every point added to the accumulator
trace bool
inputConfig *models.InputConfig
prefix string
precision time.Duration
errCount uint64
}

func (ac *accumulator) Add(
measurement string,
value interface{},
tags map[string]string,
t ...time.Time,
) {
fields := make(map[string]interface{})
fields["value"] = value
if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
}
ac.AddFields(measurement, fields, tags, t...)
}

func (ac *accumulator) AddFields(
@ -59,16 +44,47 @@ func (ac *accumulator) AddFields(
tags map[string]string,
t ...time.Time,
) {
if m := ac.makeMetric(measurement, fields, tags, telegraf.Untyped, t...); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddGauge(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.makeMetric(measurement, fields, tags, telegraf.Gauge, t...); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddCounter(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.makeMetric(measurement, fields, tags, telegraf.Counter, t...); m != nil {
ac.metrics <- m
}
}
// makeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (ac *accumulator) makeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t ...time.Time,
) telegraf.Metric {
if len(fields) == 0 || len(measurement) == 0 {
return nil
}
if tags == nil {
tags = make(map[string]string)
}

if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
}
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
return
}
// Override measurement name if set
@ -83,9 +99,6 @@ func (ac *accumulator) AddFields(
measurement = measurement + ac.inputConfig.MeasurementSuffix
}
if tags == nil {
tags = make(map[string]string)
}
// Apply plugin-wide tags if set
for k, v := range ac.inputConfig.Tags {
if _, ok := tags[k]; !ok {
@ -98,44 +111,37 @@ func (ac *accumulator) AddFields(
tags[k] = v
}
}
ac.inputConfig.Filter.FilterTags(tags)
// Apply the metric filter(s)
if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok {
return nil
}
for k, v := range fields {
// Filter out any filtered fields
if ac.inputConfig != nil {
if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
continue
}
}
// Validate uint64 and float64 fields
switch val := v.(type) {
case uint64:
// InfluxDB does not support writing uint64
if val < uint64(9223372036854775808) {
fields[k] = int64(val)
} else {
fields[k] = int64(9223372036854775807)
}
continue
case float64:
// NaNs are invalid values in influxdb, skip measurement
if math.IsNaN(val) || math.IsInf(val, 0) {
if ac.debug {
log.Printf("I! Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
}
delete(fields, k)
continue
}
}
fields[k] = v
}
fields = nil
if len(result) == 0 {
return
} }
var timestamp time.Time
@ -146,19 +152,37 @@ func (ac *accumulator) AddFields(
}
timestamp = timestamp.Round(ac.precision)
var m telegraf.Metric
var err error
switch mType {
case telegraf.Counter:
m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp)
case telegraf.Gauge:
m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp)
default:
m, err = telegraf.NewMetric(measurement, tags, fields, timestamp)
}
if err != nil {
log.Printf("E! Error adding point [%s]: %s\n", measurement, err.Error())
return nil
} }
m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return
}
if ac.trace {
fmt.Println("> " + m.String())
}
ac.metrics <- m
return m
}
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {
if err == nil {
return
}
atomic.AddUint64(&ac.errCount, 1)
//TODO suppress/throttle consecutive duplicate errors?
log.Printf("E! Error in input [%s]: %s", ac.inputConfig.Name, err)
} }
func (ac *accumulator) Debug() bool {

agent/accumulator_test.go

@ -1,8 +1,11 @@
package agent

import (
"bytes"
"fmt" "fmt"
"log"
"math" "math"
"os"
"testing" "testing"
"time" "time"
@ -10,6 +13,7 @@ import (
"github.com/influxdata/telegraf/internal/models" "github.com/influxdata/telegraf/internal/models"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestAdd(t *testing.T) {
@ -17,11 +21,17 @@ func TestAdd(t *testing.T) {
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}

a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics testm := <-a.metrics
actual := testm.String() actual := testm.String()
@ -38,17 +48,93 @@ func TestAdd(t *testing.T) {
actual) actual)
} }
func TestAddGauge(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Gauge)
}
func TestAddCounter(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Counter)
}
 func TestAddNoPrecisionWithInterval(t *testing.T) {
 	a := accumulator{}
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	a.SetPrecision(0, time.Second)
-	a.Add("acctest", float64(101), map[string]string{})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -70,12 +156,18 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	a.SetPrecision(time.Second, time.Millisecond)
-	a.Add("acctest", float64(101), map[string]string{})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -97,13 +189,19 @@ func TestAddDisablePrecision(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	a.SetPrecision(time.Second, time.Millisecond)
 	a.DisablePrecision()
-	a.Add("acctest", float64(101), map[string]string{})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -125,10 +223,12 @@ func TestDifferentPrecisions(t *testing.T) {
 	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	a.SetPrecision(0, time.Second)
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)
 	testm := <-a.metrics
 	actual := testm.String()
 	assert.Equal(t,
@ -136,7 +236,9 @@ func TestDifferentPrecisions(t *testing.T) {
 		actual)

 	a.SetPrecision(0, time.Millisecond)
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
@ -144,7 +246,9 @@ func TestDifferentPrecisions(t *testing.T) {
 		actual)

 	a.SetPrecision(0, time.Microsecond)
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
@ -152,7 +256,9 @@ func TestDifferentPrecisions(t *testing.T) {
 		actual)

 	a.SetPrecision(0, time.Nanosecond)
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)
 	testm = <-a.metrics
 	actual = testm.String()
 	assert.Equal(t,
@ -166,11 +272,17 @@ func TestAddDefaultTags(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

-	a.Add("acctest", float64(101), map[string]string{})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -192,7 +304,7 @@ func TestAddFields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	fields := map[string]interface{}{
 		"usage": float64(99),
@ -225,7 +337,7 @@ func TestAddInfFields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	fields := map[string]interface{}{
 		"usage": inf,
@ -253,7 +365,7 @@ func TestAddNaNFields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	fields := map[string]interface{}{
 		"usage": nan,
@ -277,7 +389,7 @@ func TestAddUint64Fields(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	fields := map[string]interface{}{
 		"usage": uint64(99),
@ -306,7 +418,7 @@ func TestAddUint64Overflow(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

 	fields := map[string]interface{}{
 		"usage": uint64(9223372036854775808),
@ -336,11 +448,17 @@ func TestAddInts(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

-	a.Add("acctest", int(101), map[string]string{})
-	a.Add("acctest", int32(101), map[string]string{"acc": "test"})
-	a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": int(101)},
+		map[string]string{})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": int32(101)},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": int64(101)},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -363,10 +481,14 @@ func TestAddFloats(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

-	a.Add("acctest", float32(101), map[string]string{"acc": "test"})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float32(101)},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -385,10 +507,14 @@ func TestAddStrings(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

-	a.Add("acctest", "test", map[string]string{"acc": "test"})
-	a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": "test"},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": "foo"},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -407,10 +533,12 @@ func TestAddBools(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	a.inputConfig = &internal_models.InputConfig{}
+	a.inputConfig = &models.InputConfig{}

-	a.Add("acctest", true, map[string]string{"acc": "test"})
-	a.Add("acctest", false, map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": true}, map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": false}, map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -429,16 +557,22 @@ func TestAccFilterTags(t *testing.T) {
 	now := time.Now()
 	a.metrics = make(chan telegraf.Metric, 10)
 	defer close(a.metrics)
-	filter := internal_models.Filter{
+	filter := models.Filter{
 		TagExclude: []string{"acc"},
 	}
-	assert.NoError(t, filter.CompileFilter())
-	a.inputConfig = &internal_models.InputConfig{}
+	assert.NoError(t, filter.Compile())
+	a.inputConfig = &models.InputConfig{}
 	a.inputConfig.Filter = filter

-	a.Add("acctest", float64(101), map[string]string{})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
-	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"})
+	a.AddFields("acctest",
+		map[string]interface{}{"value": float64(101)},
+		map[string]string{"acc": "test"}, now)

 	testm := <-a.metrics
 	actual := testm.String()
@ -454,3 +588,27 @@ func TestAccFilterTags(t *testing.T) {
 		fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
 		actual)
 }
+func TestAccAddError(t *testing.T) {
+	errBuf := bytes.NewBuffer(nil)
+	log.SetOutput(errBuf)
+	defer log.SetOutput(os.Stderr)
+
+	a := accumulator{}
+	a.inputConfig = &models.InputConfig{}
+	a.inputConfig.Name = "mock_plugin"
+
+	a.AddError(fmt.Errorf("foo"))
+	a.AddError(fmt.Errorf("bar"))
+	a.AddError(fmt.Errorf("baz"))
+
+	errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
+	assert.EqualValues(t, 3, a.errCount)
+	require.Len(t, errs, 4) // 4 because of trailing newline
+	assert.Contains(t, string(errs[0]), "mock_plugin")
+	assert.Contains(t, string(errs[0]), "foo")
+	assert.Contains(t, string(errs[1]), "mock_plugin")
+	assert.Contains(t, string(errs[1]), "bar")
+	assert.Contains(t, string(errs[2]), "mock_plugin")
+	assert.Contains(t, string(errs[2]), "baz")
+}


@ -49,18 +49,16 @@ func (a *Agent) Connect() error {
 		switch ot := o.Output.(type) {
 		case telegraf.ServiceOutput:
 			if err := ot.Start(); err != nil {
-				log.Printf("Service for output %s failed to start, exiting\n%s\n",
+				log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
 					o.Name, err.Error())
 				return err
 			}
 		}

-		if a.Config.Agent.Debug {
-			log.Printf("Attempting connection to output: %s\n", o.Name)
-		}
+		log.Printf("D! Attempting connection to output: %s\n", o.Name)
 		err := o.Output.Connect()
 		if err != nil {
-			log.Printf("Failed to connect to output %s, retrying in 15s, "+
+			log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
 				"error was '%s' \n", o.Name, err)
 			time.Sleep(15 * time.Second)
 			err = o.Output.Connect()
@ -68,9 +66,7 @@ func (a *Agent) Connect() error {
 				return err
 			}
 		}
-		if a.Config.Agent.Debug {
-			log.Printf("Successfully connected to output: %s\n", o.Name)
-		}
+		log.Printf("D! Successfully connected to output: %s\n", o.Name)
 	}
 	return nil
 }
@ -88,13 +84,13 @@ func (a *Agent) Close() error {
 	return err
 }

-func panicRecover(input *internal_models.RunningInput) {
+func panicRecover(input *models.RunningInput) {
 	if err := recover(); err != nil {
 		trace := make([]byte, 2048)
 		runtime.Stack(trace, true)
-		log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
+		log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
 			input.Name, err, trace)
-		log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
+		log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
 			"stack trace, configuration, and OS information: " +
 			"https://github.com/influxdata/telegraf/issues/new")
 	}
@ -104,7 +100,7 @@ func panicRecover(input *internal_models.RunningInput) {
 // reporting interval.
 func (a *Agent) gatherer(
 	shutdown chan struct{},
-	input *internal_models.RunningInput,
+	input *models.RunningInput,
 	interval time.Duration,
 	metricC chan telegraf.Metric,
 ) error {
@ -117,7 +113,6 @@ func (a *Agent) gatherer(
 		var outerr error

 		acc := NewAccumulator(input.Config, metricC)
-		acc.SetDebug(a.Config.Agent.Debug)
 		acc.SetPrecision(a.Config.Agent.Precision.Duration,
 			a.Config.Agent.Interval.Duration)
 		acc.setDefaultTags(a.Config.Tags)
@ -131,10 +126,8 @@ func (a *Agent) gatherer(
 		if outerr != nil {
 			return outerr
 		}
-		if a.Config.Agent.Debug {
-			log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n",
-				input.Name, interval, elapsed)
-		}
+		log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
+			input.Name, interval, elapsed)

 		select {
 		case <-shutdown:
@ -152,7 +145,7 @@ func (a *Agent) gatherer(
 // over.
 func gatherWithTimeout(
 	shutdown chan struct{},
-	input *internal_models.RunningInput,
+	input *models.RunningInput,
 	acc *accumulator,
 	timeout time.Duration,
 ) {
@ -167,11 +160,11 @@ func gatherWithTimeout(
 	select {
 	case err := <-done:
 		if err != nil {
-			log.Printf("ERROR in input [%s]: %s", input.Name, err)
+			log.Printf("E! ERROR in input [%s]: %s", input.Name, err)
 		}
 		return
 	case <-ticker.C:
-		log.Printf("ERROR: input [%s] took longer to collect than "+
+		log.Printf("E! ERROR: input [%s] took longer to collect than "+
 			"collection interval (%s)",
 			input.Name, timeout)
 		continue
@ -215,6 +208,9 @@ func (a *Agent) Test() error {
 		if err := input.Input.Gather(acc); err != nil {
 			return err
 		}
+		if acc.errCount > 0 {
+			return fmt.Errorf("Errors encountered during processing")
+		}

 		// Special instructions for some inputs. cpu, for example, needs to be
 		// run twice in order to return cpu usage percentages.
@ -237,11 +233,11 @@ func (a *Agent) flush() {
 	wg.Add(len(a.Config.Outputs))
 	for _, o := range a.Config.Outputs {
-		go func(output *internal_models.RunningOutput) {
+		go func(output *models.RunningOutput) {
 			defer wg.Done()
 			err := output.Write()
 			if err != nil {
-				log.Printf("Error writing to output [%s]: %s\n",
+				log.Printf("E! Error writing to output [%s]: %s\n",
 					output.Name, err.Error())
 			}
 		}(o)
@ -261,7 +257,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
 	for {
 		select {
 		case <-shutdown:
-			log.Println("Hang on, flushing any cached metrics before shutdown")
+			log.Println("I! Hang on, flushing any cached metrics before shutdown")
 			a.flush()
 			return nil
 		case <-ticker.C:
@ -299,9 +295,9 @@ func copyMetric(m telegraf.Metric) telegraf.Metric {
 func (a *Agent) Run(shutdown chan struct{}) error {
 	var wg sync.WaitGroup

-	log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
+	log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
 		"Flush Interval:%s \n",
-		a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
+		a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
 		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

 	// channel shared between all input threads for accumulating metrics
@ -312,13 +308,12 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 		switch p := input.Input.(type) {
 		case telegraf.ServiceInput:
 			acc := NewAccumulator(input.Config, metricC)
-			acc.SetDebug(a.Config.Agent.Debug)
 			// Service input plugins should set their own precision of their
 			// metrics.
 			acc.DisablePrecision()
 			acc.setDefaultTags(a.Config.Tags)
 			if err := p.Start(acc); err != nil {
-				log.Printf("Service for input %s failed to start, exiting\n%s\n",
+				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
 					input.Name, err.Error())
 				return err
 			}
@ -336,7 +331,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 	go func() {
 		defer wg.Done()
 		if err := a.flusher(shutdown, metricC); err != nil {
-			log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
+			log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
 			close(shutdown)
 		}
 	}()
@ -348,10 +343,10 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 		if input.Config.Interval != 0 {
 			interval = input.Config.Interval
 		}
-		go func(in *internal_models.RunningInput, interv time.Duration) {
+		go func(in *models.RunningInput, interv time.Duration) {
 			defer wg.Done()
 			if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
-				log.Printf(err.Error())
+				log.Printf("E! " + err.Error())
 			}
 		}(input, interval)


@ -4,9 +4,9 @@ machine:
   post:
     - sudo service zookeeper stop
     - go version
-    - go version | grep 1.6.2 || sudo rm -rf /usr/local/go
-    - wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz
-    - sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz
+    - go version | grep 1.7.1 || sudo rm -rf /usr/local/go
+    - wget https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz
+    - sudo tar -C /usr/local -xzf go1.7.1.linux-amd64.tar.gz
     - go version

 dependencies:


@ -6,19 +6,23 @@ import (
"log" "log"
"os" "os"
"os/signal" "os/signal"
"runtime"
"strings" "strings"
"syscall" "syscall"
"github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config" "github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all" _ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all" _ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/kardianos/service"
) )
var fDebug = flag.Bool("debug", false, var fDebug = flag.Bool("debug", false,
"show metrics as they're generated to stdout") "turn on debug logging")
var fQuiet = flag.Bool("quiet", false, var fQuiet = flag.Bool("quiet", false,
"run in quiet mode") "run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@ -39,12 +43,8 @@ var fOutputList = flag.Bool("output-list", false,
"print available output plugins.") "print available output plugins.")
var fUsage = flag.String("usage", "", var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'") "print usage for a plugin, ie, 'telegraf -usage mysql'")
var fInputFiltersLegacy = flag.String("filter", "", var fService = flag.String("service", "",
"filter the inputs to enable, separator is :") "operate on the service")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
"filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
"directory containing additional *.conf files")
// Telegraf version, populated linker. // Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`" // ie, -ldflags "-X main.version=`git describe --always --tags`"
@ -54,6 +54,16 @@ var (
branch string branch string
) )
func init() {
// If commit or branch are not set, make that clear.
if commit == "" {
commit = "unknown"
}
if branch == "" {
branch = "unknown"
}
}
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics. const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage: Usage:
@ -74,6 +84,7 @@ The flags are:
-debug print metrics as they're generated to stdout -debug print metrics as they're generated to stdout
-quiet run in quiet mode -quiet run in quiet mode
-version print the version to stdout -version print the version to stdout
-service Control the service, ie, 'telegraf -service install (windows only)'
In addition to the -config flag, telegraf will also load the config file from In addition to the -config flag, telegraf will also load the config file from
an environment variable or default location. Precedence is: an environment variable or default location. Precedence is:
@ -100,7 +111,19 @@ Examples:
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
` `
func main() { var stop chan struct{}
var srvc service.Service
type program struct{}
func reloadLoop(stop chan struct{}, s service.Service) {
defer func() {
if service.Interactive() {
os.Exit(0)
}
return
}()
reload := make(chan bool, 1) reload := make(chan bool, 1)
reload <- true reload <- true
for <-reload { for <-reload {
@ -110,24 +133,11 @@ func main() {
args := flag.Args() args := flag.Args()
var inputFilters []string var inputFilters []string
if *fInputFiltersLegacy != "" {
fmt.Printf("WARNING '--filter' flag is deprecated, please use" +
" '--input-filter'")
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
inputFilters = strings.Split(":"+inputFilter+":", ":")
}
if *fInputFilters != "" { if *fInputFilters != "" {
inputFilter := strings.TrimSpace(*fInputFilters) inputFilter := strings.TrimSpace(*fInputFilters)
inputFilters = strings.Split(":"+inputFilter+":", ":") inputFilters = strings.Split(":"+inputFilter+":", ":")
} }
var outputFilters []string var outputFilters []string
if *fOutputFiltersLegacy != "" {
fmt.Printf("WARNING '--outputfilter' flag is deprecated, please use" +
" '--output-filter'")
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
if *fOutputFilters != "" { if *fOutputFilters != "" {
outputFilter := strings.TrimSpace(*fOutputFilters) outputFilter := strings.TrimSpace(*fOutputFilters)
outputFilters = strings.Split(":"+outputFilter+":", ":") outputFilters = strings.Split(":"+outputFilter+":", ":")
@ -136,8 +146,7 @@ func main() {
if len(args) > 0 { if len(args) > 0 {
switch args[0] { switch args[0] {
case "version": case "version":
v := fmt.Sprintf("Telegraf - version %s", version) fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
fmt.Println(v)
return return
case "config": case "config":
config.PrintSampleConfig(inputFilters, outputFilters) config.PrintSampleConfig(inputFilters, outputFilters)
@ -145,34 +154,27 @@ func main() {
} }
} }
if *fOutputList { // switch for flags which just do something and exit immediately
switch {
case *fOutputList:
fmt.Println("Available Output Plugins:") fmt.Println("Available Output Plugins:")
for k, _ := range outputs.Outputs { for k, _ := range outputs.Outputs {
fmt.Printf(" %s\n", k) fmt.Printf(" %s\n", k)
} }
return return
} case *fInputList:
if *fInputList {
fmt.Println("Available Input Plugins:") fmt.Println("Available Input Plugins:")
for k, _ := range inputs.Inputs { for k, _ := range inputs.Inputs {
fmt.Printf(" %s\n", k) fmt.Printf(" %s\n", k)
} }
return return
} case *fVersion:
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
if *fVersion {
v := fmt.Sprintf("Telegraf - version %s", version)
fmt.Println(v)
return return
} case *fSampleConfig:
if *fSampleConfig {
config.PrintSampleConfig(inputFilters, outputFilters) config.PrintSampleConfig(inputFilters, outputFilters)
return return
} case *fUsage != "":
if *fUsage != "" {
if err := config.PrintInputConfig(*fUsage); err != nil { if err := config.PrintInputConfig(*fUsage); err != nil {
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
log.Fatalf("%s and %s", err, err2) log.Fatalf("%s and %s", err, err2)
@ -191,15 +193,6 @@ func main() {
os.Exit(1) os.Exit(1)
} }
if *fConfigDirectoryLegacy != "" {
fmt.Printf("WARNING '--configdirectory' flag is deprecated, please use" +
" '--config-directory'")
err = c.LoadDirectory(*fConfigDirectoryLegacy)
if err != nil {
log.Fatal(err)
}
}
if *fConfigDirectory != "" { if *fConfigDirectory != "" {
err = c.LoadDirectory(*fConfigDirectory) err = c.LoadDirectory(*fConfigDirectory)
if err != nil { if err != nil {
@ -218,13 +211,12 @@ func main() {
log.Fatal(err) log.Fatal(err)
} }
if *fDebug { // Setup logging
ag.Config.Agent.Debug = true logger.SetupLogging(
} ag.Config.Agent.Debug || *fDebug,
ag.Config.Agent.Quiet || *fQuiet,
if *fQuiet { ag.Config.Agent.Logfile,
ag.Config.Agent.Quiet = true )
}
if *fTest { if *fTest {
err = ag.Test() err = ag.Test()
@ -243,22 +235,26 @@ func main() {
signals := make(chan os.Signal) signals := make(chan os.Signal)
signal.Notify(signals, os.Interrupt, syscall.SIGHUP) signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
go func() { go func() {
sig := <-signals select {
if sig == os.Interrupt { case sig := <-signals:
close(shutdown) if sig == os.Interrupt {
} close(shutdown)
if sig == syscall.SIGHUP { }
log.Printf("Reloading Telegraf config\n") if sig == syscall.SIGHUP {
<-reload log.Printf("I! Reloading Telegraf config\n")
reload <- true <-reload
reload <- true
close(shutdown)
}
case <-stop:
close(shutdown) close(shutdown)
} }
}() }()
log.Printf("Starting Telegraf (version %s)\n", version) log.Printf("I! Starting Telegraf (version %s)\n", version)
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " ")) log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("Tags enabled: %s", c.ListTags()) log.Printf("I! Tags enabled: %s", c.ListTags())
if *fPidfile != "" { if *fPidfile != "" {
f, err := os.Create(*fPidfile) f, err := os.Create(*fPidfile)
@ -279,3 +275,55 @@ func usageExit(rc int) {
fmt.Println(usage) fmt.Println(usage)
os.Exit(rc) os.Exit(rc)
} }
func (p *program) Start(s service.Service) error {
srvc = s
go p.run()
return nil
}
func (p *program) run() {
stop = make(chan struct{})
reloadLoop(stop, srvc)
}
func (p *program) Stop(s service.Service) error {
close(stop)
return nil
}
func main() {
flag.Parse()
if runtime.GOOS == "windows" {
svcConfig := &service.Config{
Name: "telegraf",
DisplayName: "Telegraf Data Collector Service",
Description: "Collects data using a series of plugins and publishes it to" +
"another series of plugins.",
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
}
prg := &program{}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal(err)
}
// Handle the -service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
(*svcConfig).Arguments = []string{"-config", *fConfig}
}
err := service.Control(s, *fService)
if err != nil {
log.Fatal(err)
}
} else {
err = s.Run()
if err != nil {
log.Println("E! " + err.Error())
}
}
} else {
stop = make(chan struct{})
reloadLoop(stop, nil)
}
}
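The buffered reload channel is what lets SIGHUP re-enter the loop with freshly loaded config while an interrupt falls through and exits. Distilled into a runnable sketch, with run standing in for building the agent and blocking in agent.Run:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// run stands in for loading config, building the agent and calling
// agent.Run; it blocks until shutdown is closed.
func run(shutdown chan struct{}) { <-shutdown }

func main() {
	reload := make(chan bool, 1)
	reload <- true
	for <-reload {
		reload <- false
		shutdown := make(chan struct{})

		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
		go func() {
			if sig := <-signals; sig == syscall.SIGHUP {
				log.Println("I! Reloading config")
				<-reload       // drop the queued false
				reload <- true // schedule another pass through the loop
			}
			close(shutdown) // either way, stop the current run
		}()

		run(shutdown)
	}
}
```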


@ -86,6 +86,10 @@ as it is more efficient to filter out tags at the ingestion point.
 * **taginclude**: taginclude is the inverse of tagexclude. It will only include
 the tag keys in the final measurement.

+**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
+the plugin definition, otherwise subsequent plugin config options will be
+interpreted as part of the tagpass/tagdrop map.
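For example, a hypothetical cpu input with a tagpass filter must keep the table last, because everything that follows it would be parsed into the map:

```toml
[[inputs.cpu]]
  percpu = true
  ## tagpass must stay at the end of the plugin definition: an option
  ## written below this table would be read as part of the tagpass map.
  [inputs.cpu.tagpass]
    cpu = ["cpu0", "cpu1"]
```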
 ## Input Configuration

 Some configuration options are configurable per input:
@ -129,6 +133,10 @@ fields which begin with `time_`.

 #### Input Config: tagpass and tagdrop

+**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
+the plugin definition, otherwise subsequent plugin config options will be
+interpreted as part of the tagpass/tagdrop map.

 ```toml
 [[inputs.cpu]]
   percpu = true


@ -16,6 +16,7 @@
 - github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
 - github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
 - github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
+- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
 - github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
 - github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
 - github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)


@ -1,36 +1,39 @@
 # Running Telegraf as a Windows Service

-If you have tried to install Go binaries as Windows Services with the **sc.exe**
-tool you may have seen that the service errors and stops running after a while.
-
-**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a
-[number of scenarios](http://nssm.cc/scenarios) including running Go binaries
-that were not specifically designed to run only in Windows platforms.
-
-## NSSM Installation via Chocolatey
-
-You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/)
-with these commands
-
-```powershell
-iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
-choco install -y nssm
-```
-
-## Installing Telegraf as a Windows Service with NSSM
-
-You can download the latest Telegraf Windows binaries (still Experimental at
-the moment) from [the Telegraf Github repo](https://github.com/influxdata/telegraf).
-
-Then you can create a C:\telegraf folder, unzip the binary there and modify the
-**telegraf.conf** sample to allocate the metrics you want to send to **InfluxDB**.
-
-Once you have NSSM installed in your system, the process is quite straightforward.
-You only need to type this command in your Windows shell
-
-```powershell
-nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.config
-```
-
-And now your service will be installed in Windows and you will be able to start and
-stop it gracefully
+Telegraf natively supports running as a Windows Service. Outlined below are
+the general steps to set it up.
+
+1. Obtain the telegraf windows distribution
+2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
+   location simply specify the `-config` parameter with the desired location)
+3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
+4. To install the service into the Windows Service Manager, run the following
+   in PowerShell as an administrator (if necessary, you can wrap any spaces in
+   the file paths in double quotes ""):
+
+   ```
+   > C:\"Program Files"\Telegraf\telegraf.exe --service install
+   ```
+
+5. Edit the configuration file to meet your needs
+6. To check that it works, run:
+
+   ```
+   > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
+   ```
+
+7. To start collecting data, run:
+
+   ```
+   > net start telegraf
+   ```
+
+## Other supported operations
+
+Telegraf can manage its own service through the --service flag:
+
+| Command                            | Effect                        |
+|------------------------------------|-------------------------------|
+| `telegraf.exe --service install`   | Install telegraf as a service |
+| `telegraf.exe --service uninstall` | Remove the telegraf service   |
+| `telegraf.exe --service start`     | Start the telegraf service    |
+| `telegraf.exe --service stop`      | Stop the telegraf service     |
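Going by the -service flag handling in cmd/telegraf shown earlier, the install step also accepts an explicit config path, which is copied into the registered service definition (the path below is illustrative):

```
> C:\"Program Files"\Telegraf\telegraf.exe --config C:\MyConfigs\telegraf.conf --service install
```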


@ -30,12 +30,15 @@
## ie, if interval="10s" then always collect on :00, :10, :20, etc. ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true round_interval = true
## Telegraf will send metrics to outputs in batches of at ## Telegraf will send metrics to outputs in batches of at most
## most metric_batch_size metrics. ## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000 metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics ## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills. ## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000 metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount. ## Collection jitter is used to jitter the collection by a random amount.
@ -55,12 +58,17 @@
## By default, precision will be set to the same timestamp order as the ## By default, precision will be set to the same timestamp order as the
## collection interval, with the maximum being 1s. ## collection interval, with the maximum being 1s.
## Precision will NOT be used for service inputs, such as logparser and statsd. ## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns". ## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = "" precision = ""
## Run telegraf in debug mode
## Logging configuration:
## Run telegraf with debug log messages.
debug = false debug = false
## Run telegraf in quiet mode ## Run telegraf in quiet mode (error log messages only).
quiet = false quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = ""
## Override default hostname, if empty use os.Hostname() ## Override default hostname, if empty use os.Hostname()
hostname = "" hostname = ""
## If set to true, do no set the "host" tag in the telegraf agent. ## If set to true, do no set the "host" tag in the telegraf agent.
@ -83,7 +91,7 @@
## Retention policy to write to. Empty string writes to the default rp. ## Retention policy to write to. Empty string writes to the default rp.
retention_policy = "" retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorom", "all" ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any" write_consistency = "any"
## Write timeout (for the InfluxDB client), formatted as a string. ## Write timeout (for the InfluxDB client), formatted as a string.
@ -197,7 +205,7 @@
# # Configuration for Graphite server to send metrics to # # Configuration for Graphite server to send metrics to
# [[outputs.graphite]] # [[outputs.graphite]]
# ## TCP endpoint for your graphite instance. # ## TCP endpoint for your graphite instance.
# ## If multiple endpoints are configured, the output will be load balanced. # ## If multiple endpoints are configured, output will be load balanced.
# ## Only one of the endpoints will be written to with each iteration. # ## Only one of the endpoints will be written to with each iteration.
# servers = ["localhost:2003"] # servers = ["localhost:2003"]
# ## Prefix metrics name # ## Prefix metrics name
@ -321,14 +329,13 @@
# api_token = "my-secret-token" # required. # api_token = "my-secret-token" # required.
# ## Debug # ## Debug
# # debug = false # # debug = false
# ## Tag Field to populate source attribute (optional)
# ## This is typically the _hostname_ from which the metric was obtained.
# source_tag = "host"
# ## Connection timeout. # ## Connection timeout.
# # timeout = "5s" # # timeout = "5s"
# ## Output Name Template (same as graphite buckets) # ## Output source Template (same as graphite buckets)
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# template = "host.tags.measurement.field" # ## This template is used in librato's source (not metric's name)
# template = "host"
#
# # Configuration for MQTT server to send metrics to # # Configuration for MQTT server to send metrics to
@ -358,6 +365,30 @@
# data_format = "influx" # data_format = "influx"
# # Send telegraf measurements to NATS
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
# ## Optional credentials
# # username = ""
# # password = ""
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
 # # Send telegraf measurements to NSQD
 # [[outputs.nsq]]
 #   ## Location of nsqd instance listening on TCP
@ -377,13 +408,18 @@
 #   ## prefix for metrics keys
 #   prefix = "my.specific.prefix."
 #
-#   ## Telnet Mode ##
-#   ## DNS name of the OpenTSDB server in telnet mode
+#   ## DNS name of the OpenTSDB server
+#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
+#   ## telnet API. "http://opentsdb.example.com" will use the Http API.
 #   host = "opentsdb.example.com"
 #
-#   ## Port of the OpenTSDB server in telnet mode
+#   ## Port of the OpenTSDB server
 #   port = 4242
 #
+#   ## Number of data points to send to OpenTSDB in Http requests.
+#   ## Not used with telnet API.
+#   httpBatchSize = 50
+#
 #   ## Debug true - Prints OpenTSDB communication
 #   debug = false
@ -415,8 +451,8 @@
   percpu = true
   ## Whether to report total system cpu stats or not
   totalcpu = true
-  ## Comment this line if you want the raw CPU time metrics
-  fielddrop = ["time_*"]
+  ## If true, collect raw CPU time metrics.
+  collect_cpu_time = false

 # Read metrics about disk usage by mount point
@ -436,8 +472,8 @@
   ## disk partitions.
   ## Setting devices will restrict the stats to the specified devices.
   # devices = ["sda", "sdb"]
-  ## Uncomment the following line if you do not need disk serial numbers.
-  # skip_serial_number = true
+  ## Uncomment the following line if you need disk serial numbers.
+  # skip_serial_number = false

 # Get kernel statistics from /proc/stat
@ -465,7 +501,7 @@
   # no configuration

-# # Read stats from an aerospike server
+# # Read stats from aerospike server(s)
 # [[inputs.aerospike]]
 #   ## Aerospike servers to connect to (with port)
 #   ## This plugin will query all namespaces the aerospike
@ -512,6 +548,10 @@
 # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
 # [[inputs.ceph]]
+#   ## This is the recommended interval to poll. Too frequent and you will lose
+#   ## data points due to timeouts during rebalancing and recovery
+#   interval = '1m'
+#
 #   ## All configuration values are optional, defaults are shown below
 #
 #   ## location of ceph binary
@ -526,6 +566,18 @@
 #
 #   ## suffix used to identify socket files
 #   socket_suffix = "asok"
+#
+#   ## Ceph user to authenticate as
+#   ceph_user = "client.admin"
+#
+#   ## Ceph configuration to use to locate the cluster
+#   ceph_config = "/etc/ceph/ceph.conf"
+#
+#   ## Whether to gather statistics via the admin socket
+#   gather_admin_socket_stats = true
+#
+#   ## Whether to gather statistics via ceph commands
+#   gather_cluster_stats = true

 # # Read specific statistics per cgroup
@ -578,6 +630,11 @@
 #   ## Metric Statistic Namespace (required)
 #   namespace = 'AWS/ELB'
 #
+#   ## Maximum requests per second. Note that the global default AWS rate limit is
+#   ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
+#   ## maximum of 10. Optional - default value is 10.
+#   ratelimit = 10
+#
 #   ## Metrics to Pull (optional)
 #   ## Defaults to all Metrics in Namespace if nothing is provided
 #   ## Refreshes Namespace available metrics every 1h
@ -666,6 +723,13 @@
 #   container_names = []
 #   ## Timeout for docker list, info, and stats commands
 #   timeout = "5s"
+#
+#   ## Whether to report for each container per-device blkio (8:0, 8:1...) and
+#   ## network (eth0, eth1, ...) stats or not
+#   perdevice = true
+#   ## Whether to report for each container total blkio and network stats or not
+#   total = false
+#

 # # Read statistics from one or many dovecot servers
@ -688,6 +752,9 @@
 #   ## specify a list of one or more Elasticsearch servers
 #   servers = ["http://localhost:9200"]
 #
+#   ## Timeout for HTTP requests to the elastic search server(s)
+#   http_timeout = "5s"
+#
 #   ## set local to false when you want to read the indices stats from all nodes
 #   ## within the cluster
 #   local = true
@ -782,9 +849,11 @@
 # [[inputs.haproxy]]
 #   ## An array of address to gather stats about. Specify an ip on hostname
 #   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
-#
-#   ## If no servers are specified, then default to 127.0.0.1:1936
-#   servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
+#   ## Make sure you specify the complete path to the stats endpoint
+#   ## ie 10.10.3.33:1936/haproxy?stats
+#   #
+#   ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+#   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
 #   ## Or you can also use local socket
 #   ## servers = ["socket:/run/haproxy/admin.sock"]
@ -828,6 +897,8 @@
 #       "http://localhost:9999/stats/",
 #       "http://localhost:9998/stats/",
 #     ]
+#   ## Set response_timeout (default 5 seconds)
+#   response_timeout = "5s"
 #
 #   ## HTTP method to use: GET or POST (case-sensitive)
 #   method = "GET"
@ -867,6 +938,9 @@
 #   urls = [
 #     "http://localhost:8086/debug/vars"
 #   ]
+#
+#   ## http request & header timeout
+#   timeout = "5s"

 # # Read metrics from one or many bare metal servers
@ -882,6 +956,7 @@
 # # Read JMX metrics through Jolokia
 # [[inputs.jolokia]]
 #   ## This is the context root used to compose the jolokia url
+#   ## NOTE that your jolokia security policy must allow for POST requests.
 #   context = "/jolokia"
 #
 #   ## This specifies the mode used
@ -970,21 +1045,33 @@
 # # Telegraf plugin for gathering metrics from N Mesos masters
 # [[inputs.mesos]]
-#   # Timeout, in ms.
+#   ## Timeout, in ms.
 #   timeout = 100
-#   # A list of Mesos masters, default value is localhost:5050.
+#   ## A list of Mesos masters.
 #   masters = ["localhost:5050"]
-#   # Metrics groups to be collected, by default, all enabled.
+#   ## Master metrics groups to be collected, by default, all enabled.
 #   master_collections = [
 #     "resources",
 #     "master",
 #     "system",
-#     "slaves",
+#     "agents",
 #     "frameworks",
+#     "tasks",
 #     "messages",
 #     "evqueue",
 #     "registrar",
 #   ]
+#   ## A list of Mesos slaves, default is []
+#   # slaves = []
+#   ## Slave metrics groups to be collected, by default, all enabled.
+#   # slave_collections = [
+#   #   "resources",
+#   #   "agent",
+#   #   "system",
+#   #   "executors",
+#   #   "tasks",
+#   #   "messages",
+#   # ]

 # # Read metrics from one or many MongoDB servers
@ -995,6 +1082,7 @@
 #   ##   mongodb://10.10.3.33:18832,
 #   ##   10.0.0.1:10000, etc.
 #   servers = ["127.0.0.1:27017"]
+#   gather_perdb_stats = false

 # # Read metrics from one or many mysql servers
@ -1101,9 +1189,9 @@
 #   ## file paths for proc files. If empty default paths will be used:
 #   ##    /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
 #   ## These can also be overridden with env variables, see README.
-#   proc_net_netstat = ""
-#   proc_net_snmp = ""
-#   proc_net_snmp6 = ""
+#   proc_net_netstat = "/proc/net/netstat"
+#   proc_net_snmp = "/proc/net/snmp"
+#   proc_net_snmp6 = "/proc/net/snmp6"
 #   ## dump metrics with 0 values too
 #   dump_zeros       = true
@ -1185,8 +1273,12 @@
 #   ##
 #   address = "host=localhost user=postgres sslmode=disable"
 #
+#   ## A list of databases to explicitly ignore. If not specified, metrics for all
+#   ## databases are gathered. Do NOT use with the 'databases' option.
+#   # ignored_databases = ["postgres", "template0", "template1"]
+#
 #   ## A list of databases to pull metrics about. If not specified, metrics for all
-#   ## databases are gathered.
+#   ## databases are gathered. Do NOT use with the 'ignore_databases' option.
 #   # databases = ["app_production", "testing"]
@ -1305,6 +1397,13 @@
 #   # username = "guest"
 #   # password = "guest"
 #
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
 #   ## A list of nodes to pull metrics about. If not specified, metrics for
 #   ## all nodes are gathered.
 #   # nodes = ["rabbit@node1", "rabbit@node2"]
@ -1323,6 +1422,7 @@
 #   ## e.g.
 #   ##  tcp://localhost:6379
 #   ##  tcp://:password@192.168.99.100
+#   ##  unix:///var/run/redis.sock
 #   ##
 #   ## If no servers are specified, then localhost is used as the host.
 #   ## If no port is specified, 6379 is used
@ -1345,8 +1445,67 @@
 #   servers = ["http://localhost:8098"]

-# # Reads oids value from one or many snmp agents
+# # Retrieves SNMP values from remote agents
 # [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
# ## Timeout for each SNMP query.
# timeout = "5s"
# ## Number of retries to attempt within timeout.
# retries = 3
# ## SNMP version, values can be 1, 2, or 3
# version = 2
#
# ## SNMP community string.
# community = "public"
#
# ## The GETBULK max-repetitions parameter
# max_repetitions = 10
#
# ## SNMPv3 auth parameters
# #sec_name = "myuser"
# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
# #auth_password = "pass"
# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = "" # Values: "DES", "AES", ""
# #priv_password = ""
#
# ## measurement name
# name = "system"
# [[inputs.snmp.field]]
# name = "hostname"
# oid = ".1.0.0.1.1"
# [[inputs.snmp.field]]
# name = "uptime"
# oid = ".1.0.0.1.2"
# [[inputs.snmp.field]]
# name = "load"
# oid = ".1.0.0.1.3"
# [[inputs.snmp.field]]
# oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
# [[inputs.snmp.table]]
# ## measurement name
# name = "remote_servers"
# inherit_tags = [ "hostname" ]
# [[inputs.snmp.table.field]]
# name = "server"
# oid = ".1.0.0.0.1.0"
# is_tag = true
# [[inputs.snmp.table.field]]
# name = "connections"
# oid = ".1.0.0.0.1.1"
# [[inputs.snmp.table.field]]
# name = "latency"
# oid = ".1.0.0.0.1.2"
#
# [[inputs.snmp.table]]
# ## auto populate table's fields using the MIB
# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
# ## Use 'oids.txt' file to translate oids to names # ## Use 'oids.txt' file to translate oids to names
# ## To generate 'oids.txt' you need to run: # ## To generate 'oids.txt' you need to run:
# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
@ -1517,6 +1676,16 @@
# SERVICE INPUT PLUGINS # # SERVICE INPUT PLUGINS #
############################################################################### ###############################################################################
# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## timeouts
# read_timeout = "10s"
# write_timeout = "10s"
# # Read metrics from Kafka topic(s) # # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]] # [[inputs.kafka_consumer]]
# ## topic(s) to consume # ## topic(s) to consume
@ -1545,7 +1714,7 @@
# ## /var/log/**.log -> recursively find all .log files in /var/log # ## /var/log/**.log -> recursively find all .log files in /var/log
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only tail the apache log file # ## /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/influxdb/influxdb.log"] # files = ["/var/log/apache/access.log"]
# ## Read file from beginning. # ## Read file from beginning.
# from_beginning = false # from_beginning = false
# #
@ -1558,7 +1727,9 @@
# ## Other common built-in patterns are: # ## Other common built-in patterns are:
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) # ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) # ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{INFLUXDB_HTTPD_LOG}"] # patterns = ["%{COMBINED_LOG_FORMAT}"]
# ## Name of the measurement to output.
# measurement = "apache_access_log"
# ## Full path(s) to custom pattern files. # ## Full path(s) to custom pattern files.
# custom_pattern_files = [] # custom_pattern_files = []
# ## Custom patterns can also be defined here. Put one pattern per line. # ## Custom patterns can also be defined here. Put one pattern per line.
@ -1622,6 +1793,21 @@
# data_format = "influx" # data_format = "influx"
# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
# ## A string representing the NSQD TCP endpoint
# server = "localhost:4150"
# topic = "telegraf"
# channel = "consumer"
# max_in_flight = 100
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # Statsd Server # # Statsd Server
# [[inputs.statsd]] # [[inputs.statsd]]
# ## Address and port to host UDP listener on # ## Address and port to host UDP listener on
@ -1722,9 +1908,15 @@
# ## Address and port to host Webhook listener on # ## Address and port to host Webhook listener on
# service_address = ":1619" # service_address = ":1619"
# #
# [inputs.webhooks.filestack]
# path = "/filestack"
#
# [inputs.webhooks.github] # [inputs.webhooks.github]
# path = "/github" # path = "/github"
# #
# [inputs.webhooks.mandrill]
# path = "/mandrill"
#
# [inputs.webhooks.rollbar] # [inputs.webhooks.rollbar]
# path = "/rollbar" # path = "/rollbar"
View File
@ -42,10 +42,14 @@
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s" flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode ## Run telegraf in debug mode
debug = false debug = false
## Run telegraf in quiet mode ## Run telegraf in quiet mode
quiet = false quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "/Program Files/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname() ## Override default hostname, if empty use os.Hostname()
hostname = "" hostname = ""
@ -85,7 +89,7 @@
# Windows Performance Counters plugin. # Windows Performance Counters plugin.
# These are the recommended method of monitoring system metrics on windows, # These are the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, # as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources. # which utilizes more system resources.
# #
# See more configuration examples at: # See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
@ -95,70 +99,104 @@
# Processor usage, alternative to native, reports on a per core. # Processor usage, alternative to native, reports on a per core.
ObjectName = "Processor" ObjectName = "Processor"
Instances = ["*"] Instances = ["*"]
Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"] Counters = [
"% Idle Time",
"% Interrupt Time",
"% Privileged Time",
"% User Time",
"% Processor Time",
]
Measurement = "win_cpu" Measurement = "win_cpu"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*). # Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]] [[inputs.win_perf_counters.object]]
# Disk times and queues # Disk times and queues
ObjectName = "LogicalDisk" ObjectName = "LogicalDisk"
Instances = ["*"] Instances = ["*"]
Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"] Counters = [
"% Idle Time",
"% Disk Time","% Disk Read Time",
"% Disk Write Time",
"% User Time",
"Current Disk Queue Length",
]
Measurement = "win_disk" Measurement = "win_disk"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*). # Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]] [[inputs.win_perf_counters.object]]
ObjectName = "System" ObjectName = "System"
Counters = ["Context Switches/sec","System Calls/sec"] Counters = [
"Context Switches/sec",
"System Calls/sec",
]
Instances = ["------"] Instances = ["------"]
Measurement = "win_system" Measurement = "win_system"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*). # Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]] [[inputs.win_perf_counters.object]]
# Example query where the Instance portion must be removed to get data back, such as from the Memory object. # Example query where the Instance portion must be removed to get data back,
# such as from the Memory object.
ObjectName = "Memory" ObjectName = "Memory"
Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"] Counters = [
Instances = ["------"] # Use 6 x - to remove the Instance bit from the query. "Available Bytes",
"Cache Faults/sec",
"Demand Zero Faults/sec",
"Page Faults/sec",
"Pages/sec",
"Transition Faults/sec",
"Pool Nonpaged Bytes",
"Pool Paged Bytes",
]
# Use 6 x - to remove the Instance bit from the query.
Instances = ["------"]
Measurement = "win_mem" Measurement = "win_mem"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*). # Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
# Windows system plugins using WMI (disabled by default, using # Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended) # win_perf_counters over WMI is recommended)
# Read metrics about cpu usage # # Read metrics about cpu usage
#[[inputs.cpu]] # [[inputs.cpu]]
## Whether to report per-cpu stats or not # ## Whether to report per-cpu stats or not
#percpu = true # percpu = true
## Whether to report total system cpu stats or not # ## Whether to report total system cpu stats or not
#totalcpu = true # totalcpu = true
## Comment this line if you want the raw CPU time metrics # ## Comment this line if you want the raw CPU time metrics
#fielddrop = ["time_*"] # fielddrop = ["time_*"]
# Read metrics about disk usage by mount point
#[[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
## mount_points=["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually # # Read metrics about disk usage by mount point
## present on /run, /var/run, /dev/shm or /dev). # [[inputs.disk]]
#ignore_fs = ["tmpfs", "devtmpfs"] # ## By default, telegraf gather stats for all mountpoints.
# ## Setting mountpoints will restrict the stats to the specified mountpoints.
# ## mount_points=["/"]
#
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
#[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
## devices = ["sda", "sdb"]
## Uncomment the following line if you do not need disk serial numbers.
## skip_serial_number = true
# Read metrics about memory usage # # Read metrics about disk IO by device
#[[inputs.mem]] # [[inputs.diskio]]
# no configuration # ## By default, telegraf will gather stats for all devices including
# ## disk partitions.
# ## Setting devices will restrict the stats to the specified devices.
# ## devices = ["sda", "sdb"]
# ## Uncomment the following line if you do not need disk serial numbers.
# ## skip_serial_number = true
# Read metrics about swap memory usage
#[[inputs.swap]] # # Read metrics about memory usage
# no configuration # [[inputs.mem]]
# # no configuration
# # Read metrics about swap memory usage
# [[inputs.swap]]
# # no configuration
View File
@ -10,16 +10,16 @@ type Filter interface {
Match(string) bool Match(string) bool
} }
// CompileFilter takes a list of string filters and returns a Filter interface // Compile takes a list of string filters and returns a Filter interface
// for matching a given string against the filter list. The filter list // for matching a given string against the filter list. The filter list
// supports glob matching too, ie: // supports glob matching too, ie:
// //
// f, _ := CompileFilter([]string{"cpu", "mem", "net*"}) // f, _ := Compile([]string{"cpu", "mem", "net*"})
// f.Match("cpu") // true // f.Match("cpu") // true
// f.Match("network") // true // f.Match("network") // true
// f.Match("memory") // false // f.Match("memory") // false
// //
func CompileFilter(filters []string) (Filter, error) { func Compile(filters []string) (Filter, error) {
// return if there is nothing to compile // return if there is nothing to compile
if len(filters) == 0 { if len(filters) == 0 {
return nil, nil return nil, nil
View File
@ -6,30 +6,30 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestCompileFilter(t *testing.T) { func TestCompile(t *testing.T) {
f, err := CompileFilter([]string{}) f, err := Compile([]string{})
assert.NoError(t, err) assert.NoError(t, err)
assert.Nil(t, f) assert.Nil(t, f)
f, err = CompileFilter([]string{"cpu"}) f, err = Compile([]string{"cpu"})
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, f.Match("cpu")) assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0")) assert.False(t, f.Match("cpu0"))
assert.False(t, f.Match("mem")) assert.False(t, f.Match("mem"))
f, err = CompileFilter([]string{"cpu*"}) f, err = Compile([]string{"cpu*"})
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, f.Match("cpu")) assert.True(t, f.Match("cpu"))
assert.True(t, f.Match("cpu0")) assert.True(t, f.Match("cpu0"))
assert.False(t, f.Match("mem")) assert.False(t, f.Match("mem"))
f, err = CompileFilter([]string{"cpu", "mem"}) f, err = Compile([]string{"cpu", "mem"})
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, f.Match("cpu")) assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0")) assert.False(t, f.Match("cpu0"))
assert.True(t, f.Match("mem")) assert.True(t, f.Match("mem"))
f, err = CompileFilter([]string{"cpu", "mem", "net*"}) f, err = Compile([]string{"cpu", "mem", "net*"})
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, f.Match("cpu")) assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0")) assert.False(t, f.Match("cpu0"))
@ -40,7 +40,7 @@ func TestCompileFilter(t *testing.T) {
var benchbool bool var benchbool bool
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) { func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
f, _ := CompileFilter([]string{"cpu"}) f, _ := Compile([]string{"cpu"})
var tmp bool var tmp bool
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
tmp = f.Match("network") tmp = f.Match("network")
@ -49,7 +49,7 @@ func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
} }
func BenchmarkFilterSingleNoGlobTrue(b *testing.B) { func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
f, _ := CompileFilter([]string{"cpu"}) f, _ := Compile([]string{"cpu"})
var tmp bool var tmp bool
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
tmp = f.Match("cpu") tmp = f.Match("cpu")
@ -58,7 +58,7 @@ func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
} }
func BenchmarkFilter(b *testing.B) { func BenchmarkFilter(b *testing.B) {
f, _ := CompileFilter([]string{"cpu", "mem", "net*"}) f, _ := Compile([]string{"cpu", "mem", "net*"})
var tmp bool var tmp bool
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
tmp = f.Match("network") tmp = f.Match("network")
@ -67,7 +67,7 @@ func BenchmarkFilter(b *testing.B) {
} }
func BenchmarkFilterNoGlob(b *testing.B) { func BenchmarkFilterNoGlob(b *testing.B) {
f, _ := CompileFilter([]string{"cpu", "mem", "net"}) f, _ := Compile([]string{"cpu", "mem", "net"})
var tmp bool var tmp bool
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
tmp = f.Match("net") tmp = f.Match("net")
@ -76,7 +76,7 @@ func BenchmarkFilterNoGlob(b *testing.B) {
} }
func BenchmarkFilter2(b *testing.B) { func BenchmarkFilter2(b *testing.B) {
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq", f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
"aw", "az", "axxx", "ab", "cpu", "mem", "net*"}) "aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
var tmp bool var tmp bool
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
@ -86,7 +86,7 @@ func BenchmarkFilter2(b *testing.B) {
} }
func BenchmarkFilter2NoGlob(b *testing.B) { func BenchmarkFilter2NoGlob(b *testing.B) {
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq", f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
"aw", "az", "axxx", "ab", "cpu", "mem", "net"}) "aw", "az", "axxx", "ab", "cpu", "mem", "net"})
var tmp bool var tmp bool
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
View File
@ -9,6 +9,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime"
"sort" "sort"
"strings" "strings"
"time" "time"
@ -47,8 +48,8 @@ type Config struct {
OutputFilters []string OutputFilters []string
Agent *AgentConfig Agent *AgentConfig
Inputs []*internal_models.RunningInput Inputs []*models.RunningInput
Outputs []*internal_models.RunningOutput Outputs []*models.RunningOutput
} }
func NewConfig() *Config { func NewConfig() *Config {
@ -61,8 +62,8 @@ func NewConfig() *Config {
}, },
Tags: make(map[string]string), Tags: make(map[string]string),
Inputs: make([]*internal_models.RunningInput, 0), Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*internal_models.RunningOutput, 0), Outputs: make([]*models.RunningOutput, 0),
InputFilters: make([]string, 0), InputFilters: make([]string, 0),
OutputFilters: make([]string, 0), OutputFilters: make([]string, 0),
} }
@ -124,6 +125,9 @@ type AgentConfig struct {
// Debug is the option for running in debug mode // Debug is the option for running in debug mode
Debug bool Debug bool
// Logfile specifies the file to send logs to
Logfile string
// Quiet is the option for running in quiet mode // Quiet is the option for running in quiet mode
Quiet bool Quiet bool
Hostname string Hostname string
@ -139,7 +143,7 @@ func (c *Config) InputNames() []string {
return name return name
} }
// Outputs returns a list of strings of the configured inputs. // Outputs returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string { func (c *Config) OutputNames() []string {
var name []string var name []string
for _, output := range c.Outputs { for _, output := range c.Outputs {
@ -194,12 +198,15 @@ var header = `# Telegraf Configuration
## ie, if interval="10s" then always collect on :00, :10, :20, etc. ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true round_interval = true
## Telegraf will send metrics to outputs in batches of at ## Telegraf will send metrics to outputs in batches of at most
## most metric_batch_size metrics. ## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000 metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics ## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills. ## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000 metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount. ## Collection jitter is used to jitter the collection by a random amount.
@ -219,12 +226,17 @@ var header = `# Telegraf Configuration
## By default, precision will be set to the same timestamp order as the ## By default, precision will be set to the same timestamp order as the
## collection interval, with the maximum being 1s. ## collection interval, with the maximum being 1s.
## Precision will NOT be used for service inputs, such as logparser and statsd. ## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns". ## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = "" precision = ""
## Run telegraf in debug mode
## Logging configuration:
## Run telegraf with debug log messages.
debug = false debug = false
## Run telegraf in quiet mode ## Run telegraf in quiet mode (error log messages only).
quiet = false quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = ""
## Override default hostname, if empty use os.Hostname() ## Override default hostname, if empty use os.Hostname()
hostname = "" hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent. ## If set to true, do not set the "host" tag in the telegraf agent.
@ -403,24 +415,21 @@ func PrintOutputConfig(name string) error {
} }
func (c *Config) LoadDirectory(path string) error { func (c *Config) LoadDirectory(path string) error {
directoryEntries, err := ioutil.ReadDir(path) walkfn := func(thispath string, info os.FileInfo, _ error) error {
if err != nil { if info.IsDir() {
return err return nil
}
for _, entry := range directoryEntries {
if entry.IsDir() {
continue
} }
name := entry.Name() name := info.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" { if len(name) < 6 || name[len(name)-5:] != ".conf" {
continue return nil
} }
err := c.LoadConfig(filepath.Join(path, name)) err := c.LoadConfig(thispath)
if err != nil { if err != nil {
return err return err
} }
return nil
} }
return nil return filepath.Walk(path, walkfn)
} }
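A side effect of switching from ioutil.ReadDir to filepath.Walk is that '.conf' files in nested subdirectories of the config directory are now loaded as well, not just those at the top level. A minimal standalone sketch of the same pattern (the directory path is made up):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        // Recursively print every .conf file under a config directory,
        // mirroring the walkfn used by LoadDirectory above.
        filepath.Walk("/etc/telegraf/telegraf.d", func(path string, info os.FileInfo, err error) error {
            if err != nil || info.IsDir() {
                return nil // skip unreadable entries and the directories themselves
            }
            if !strings.HasSuffix(info.Name(), ".conf") {
                return nil
            }
            fmt.Println(path)
            return nil
        })
    }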
// Try to find a default config file at these locations (in order): // Try to find a default config file at these locations (in order):
@ -432,9 +441,12 @@ func getDefaultConfigPath() (string, error) {
envfile := os.Getenv("TELEGRAF_CONFIG_PATH") envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf") homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
etcfile := "/etc/telegraf/telegraf.conf" etcfile := "/etc/telegraf/telegraf.conf"
if runtime.GOOS == "windows" {
etcfile = `C:\Program Files\Telegraf\telegraf.conf`
}
for _, path := range []string{envfile, homefile, etcfile} { for _, path := range []string{envfile, homefile, etcfile} {
if _, err := os.Stat(path); err == nil { if _, err := os.Stat(path); err == nil {
log.Printf("Using config file: %s", path) log.Printf("I! Using config file: %s", path)
return path, nil return path, nil
} }
} }
@ -465,7 +477,7 @@ func (c *Config) LoadConfig(path string) error {
return fmt.Errorf("%s: invalid configuration", path) return fmt.Errorf("%s: invalid configuration", path)
} }
if err = config.UnmarshalTable(subTable, c.Tags); err != nil { if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("Could not parse [global_tags] config\n") log.Printf("E! Could not parse [global_tags] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err) return fmt.Errorf("Error parsing %s, %s", path, err)
} }
} }
@ -478,7 +490,7 @@ func (c *Config) LoadConfig(path string) error {
return fmt.Errorf("%s: invalid configuration", path) return fmt.Errorf("%s: invalid configuration", path)
} }
if err = config.UnmarshalTable(subTable, c.Agent); err != nil { if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("Could not parse [agent] config\n") log.Printf("E! Could not parse [agent] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err) return fmt.Errorf("Error parsing %s, %s", path, err)
} }
} }
@ -598,7 +610,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
return err return err
} }
ro := internal_models.NewRunningOutput(name, output, outputConfig, ro := models.NewRunningOutput(name, output, outputConfig,
c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
c.Outputs = append(c.Outputs, ro) c.Outputs = append(c.Outputs, ro)
return nil return nil
@ -639,7 +651,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
return err return err
} }
rp := &internal_models.RunningInput{ rp := &models.RunningInput{
Name: name, Name: name,
Input: input, Input: input,
Config: pluginConfig, Config: pluginConfig,
@ -650,10 +662,10 @@ func (c *Config) addInput(name string, table *ast.Table) error {
// buildFilter builds a Filter // buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig // be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements // to be used for glob filtering on tags and measurements
func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { func buildFilter(tbl *ast.Table) (models.Filter, error) {
f := internal_models.Filter{} f := models.Filter{}
if node, ok := tbl.Fields["namepass"]; ok { if node, ok := tbl.Fields["namepass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok { if kv, ok := node.(*ast.KeyValue); ok {
@ -661,7 +673,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
for _, elem := range ary.Value { for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok { if str, ok := elem.(*ast.String); ok {
f.NamePass = append(f.NamePass, str.Value) f.NamePass = append(f.NamePass, str.Value)
f.IsActive = true
} }
} }
} }
@ -674,7 +685,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
for _, elem := range ary.Value { for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok { if str, ok := elem.(*ast.String); ok {
f.NameDrop = append(f.NameDrop, str.Value) f.NameDrop = append(f.NameDrop, str.Value)
f.IsActive = true
} }
} }
} }
@ -689,7 +699,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
for _, elem := range ary.Value { for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok { if str, ok := elem.(*ast.String); ok {
f.FieldPass = append(f.FieldPass, str.Value) f.FieldPass = append(f.FieldPass, str.Value)
f.IsActive = true
} }
} }
} }
@ -705,7 +714,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
for _, elem := range ary.Value { for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok { if str, ok := elem.(*ast.String); ok {
f.FieldDrop = append(f.FieldDrop, str.Value) f.FieldDrop = append(f.FieldDrop, str.Value)
f.IsActive = true
} }
} }
} }
@ -717,7 +725,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
if subtbl, ok := node.(*ast.Table); ok { if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields { for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok { if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &internal_models.TagFilter{Name: name} tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok { if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value { for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok { if str, ok := elem.(*ast.String); ok {
@ -726,7 +734,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
} }
} }
f.TagPass = append(f.TagPass, *tagfilter) f.TagPass = append(f.TagPass, *tagfilter)
f.IsActive = true
} }
} }
} }
@ -736,7 +743,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
if subtbl, ok := node.(*ast.Table); ok { if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields { for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok { if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &internal_models.TagFilter{Name: name} tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok { if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value { for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok { if str, ok := elem.(*ast.String); ok {
@ -745,7 +752,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
} }
} }
f.TagDrop = append(f.TagDrop, *tagfilter) f.TagDrop = append(f.TagDrop, *tagfilter)
f.IsActive = true
} }
} }
} }
@ -774,7 +780,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
} }
} }
} }
if err := f.CompileFilter(); err != nil { if err := f.Compile(); err != nil {
return f, err return f, err
} }
@ -793,9 +799,9 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
// buildInput parses input specific items from the ast.Table, // buildInput parses input specific items from the ast.Table,
// builds the filter and returns a // builds the filter and returns a
// internal_models.InputConfig to be inserted into internal_models.RunningInput // models.InputConfig to be inserted into models.RunningInput
func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) { func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp := &internal_models.InputConfig{Name: name} cp := &models.InputConfig{Name: name}
if node, ok := tbl.Fields["interval"]; ok { if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok { if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok { if str, ok := kv.Value.(*ast.String); ok {
@ -837,7 +843,7 @@ func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, erro
if node, ok := tbl.Fields["tags"]; ok { if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok { if subtbl, ok := node.(*ast.Table); ok {
if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil { if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name) log.Printf("E! Could not parse tags for input %s\n", name)
} }
} }
} }
@ -969,14 +975,14 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
// buildOutput parses output specific items from the ast.Table, // buildOutput parses output specific items from the ast.Table,
// builds the filter and returns an // builds the filter and returns an
// internal_models.OutputConfig to be inserted into internal_models.RunningInput // models.OutputConfig to be inserted into models.RunningInput
// Note: error exists in the return for future calls that might require error // Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) { func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
filter, err := buildFilter(tbl) filter, err := buildFilter(tbl)
if err != nil { if err != nil {
return nil, err return nil, err
} }
oc := &internal_models.OutputConfig{ oc := &models.OutputConfig{
Name: name, Name: name,
Filter: filter, Filter: filter,
} }
View File
@ -26,27 +26,26 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"192.168.1.1"} memcached.Servers = []string{"192.168.1.1"}
filter := internal_models.Filter{ filter := models.Filter{
NameDrop: []string{"metricname2"}, NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"}, NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"}, FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"}, FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{ TagDrop: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "badtag", Name: "badtag",
Filter: []string{"othertag"}, Filter: []string{"othertag"},
}, },
}, },
TagPass: []internal_models.TagFilter{ TagPass: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "goodtag", Name: "goodtag",
Filter: []string{"mytag"}, Filter: []string{"mytag"},
}, },
}, },
IsActive: true,
} }
assert.NoError(t, filter.CompileFilter()) assert.NoError(t, filter.Compile())
mConfig := &internal_models.InputConfig{ mConfig := &models.InputConfig{
Name: "memcached", Name: "memcached",
Filter: filter, Filter: filter,
Interval: 10 * time.Second, Interval: 10 * time.Second,
@ -66,27 +65,26 @@ func TestConfig_LoadSingleInput(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"} memcached.Servers = []string{"localhost"}
filter := internal_models.Filter{ filter := models.Filter{
NameDrop: []string{"metricname2"}, NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"}, NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"}, FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"}, FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{ TagDrop: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "badtag", Name: "badtag",
Filter: []string{"othertag"}, Filter: []string{"othertag"},
}, },
}, },
TagPass: []internal_models.TagFilter{ TagPass: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "goodtag", Name: "goodtag",
Filter: []string{"mytag"}, Filter: []string{"mytag"},
}, },
}, },
IsActive: true,
} }
assert.NoError(t, filter.CompileFilter()) assert.NoError(t, filter.Compile())
mConfig := &internal_models.InputConfig{ mConfig := &models.InputConfig{
Name: "memcached", Name: "memcached",
Filter: filter, Filter: filter,
Interval: 5 * time.Second, Interval: 5 * time.Second,
@ -113,27 +111,26 @@ func TestConfig_LoadDirectory(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"} memcached.Servers = []string{"localhost"}
filter := internal_models.Filter{ filter := models.Filter{
NameDrop: []string{"metricname2"}, NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"}, NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"}, FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"}, FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{ TagDrop: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "badtag", Name: "badtag",
Filter: []string{"othertag"}, Filter: []string{"othertag"},
}, },
}, },
TagPass: []internal_models.TagFilter{ TagPass: []models.TagFilter{
internal_models.TagFilter{ models.TagFilter{
Name: "goodtag", Name: "goodtag",
Filter: []string{"mytag"}, Filter: []string{"mytag"},
}, },
}, },
IsActive: true,
} }
assert.NoError(t, filter.CompileFilter()) assert.NoError(t, filter.Compile())
mConfig := &internal_models.InputConfig{ mConfig := &models.InputConfig{
Name: "memcached", Name: "memcached",
Filter: filter, Filter: filter,
Interval: 5 * time.Second, Interval: 5 * time.Second,
@ -150,7 +147,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
ex.SetParser(p) ex.SetParser(p)
ex.Command = "/usr/bin/myothercollector --foo=bar" ex.Command = "/usr/bin/myothercollector --foo=bar"
eConfig := &internal_models.InputConfig{ eConfig := &models.InputConfig{
Name: "exec", Name: "exec",
MeasurementSuffix: "_myothercollector", MeasurementSuffix: "_myothercollector",
} }
@ -169,7 +166,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
pstat.PidFile = "/var/run/grafana-server.pid" pstat.PidFile = "/var/run/grafana-server.pid"
pConfig := &internal_models.InputConfig{Name: "procstat"} pConfig := &models.InputConfig{Name: "procstat"}
pConfig.Tags = make(map[string]string) pConfig.Tags = make(map[string]string)
assert.Equal(t, pstat, c.Inputs[3].Input, assert.Equal(t, pstat, c.Inputs[3].Input,
View File
@ -12,21 +12,23 @@ import (
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator)) var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
type GlobPath struct { type GlobPath struct {
path string path string
hasMeta bool hasMeta bool
g glob.Glob hasSuperMeta bool
root string g glob.Glob
root string
} }
func Compile(path string) (*GlobPath, error) { func Compile(path string) (*GlobPath, error) {
out := GlobPath{ out := GlobPath{
hasMeta: hasMeta(path), hasMeta: hasMeta(path),
path: path, hasSuperMeta: hasSuperMeta(path),
path: path,
} }
// if there are no glob meta characters in the path, don't bother compiling // if there are no glob meta characters in the path, don't bother compiling
// a glob object or finding the root directory. (see short-circuit in Match) // a glob object or finding the root directory. (see short-circuit in Match)
if !out.hasMeta { if !out.hasMeta || !out.hasSuperMeta {
return &out, nil return &out, nil
} }
@ -48,6 +50,17 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
} }
return out return out
} }
if !g.hasSuperMeta {
out := make(map[string]os.FileInfo)
files, _ := filepath.Glob(g.path)
for _, file := range files {
info, err := os.Stat(file)
if !os.IsNotExist(err) {
out[file] = info
}
}
return out
}
return walkFilePath(g.root, g.g) return walkFilePath(g.root, g.g)
} }
@ -96,3 +109,8 @@ func findRootDir(path string) string {
func hasMeta(path string) bool { func hasMeta(path string) bool {
return strings.IndexAny(path, "*?[") >= 0 return strings.IndexAny(path, "*?[") >= 0
} }
// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
return strings.Index(path, "**") >= 0
}
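The distinction matters because the standard library can expand single-level metas on its own: filepath.Glob understands '*', '?', and '[...]', but filepath.Match treats '**' as an ordinary '*' that never crosses a path separator, so only super-meta paths need the compiled glob plus a directory walk. A small sketch of that boundary:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Single-level metas: filepath.Glob is enough (the fast path above).
        matches, err := filepath.Glob("/var/log/*.log")
        fmt.Println(matches, err)

        // "**" degrades to "*" here and does not match nested files such as
        // /var/log/nginx/access.log; such patterns fall through to the
        // compiled glob plus walkFilePath.
        ok, _ := filepath.Match("/var/log/**", "/var/log/nginx/access.log")
        fmt.Println(ok) // false
    }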
View File
@ -198,7 +198,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
return err return err
case <-timer.C: case <-timer.C:
if err := c.Process.Kill(); err != nil { if err := c.Process.Kill(); err != nil {
log.Printf("FATAL error killing process: %s", err) log.Printf("E! FATAL error killing process: %s", err)
return err return err
} }
View File

@ -118,7 +118,7 @@ func TestRandomSleep(t *testing.T) {
s = time.Now() s = time.Now()
RandomSleep(time.Millisecond*50, make(chan struct{})) RandomSleep(time.Millisecond*50, make(chan struct{}))
elapsed = time.Since(s) elapsed = time.Since(s)
assert.True(t, elapsed < time.Millisecond*50) assert.True(t, elapsed < time.Millisecond*100)
// test that shutdown is respected // test that shutdown is respected
s = time.Now() s = time.Now()
View File
@ -1,9 +1,8 @@
package internal_models package models
import ( import (
"fmt" "fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/filter"
) )
@ -34,47 +33,59 @@ type Filter struct {
TagInclude []string TagInclude []string
tagInclude filter.Filter tagInclude filter.Filter
IsActive bool isActive bool
} }
// Compile all Filter lists into filter.Filter objects. // Compile all Filter lists into filter.Filter objects.
func (f *Filter) CompileFilter() error { func (f *Filter) Compile() error {
if len(f.NameDrop) == 0 &&
len(f.NamePass) == 0 &&
len(f.FieldDrop) == 0 &&
len(f.FieldPass) == 0 &&
len(f.TagInclude) == 0 &&
len(f.TagExclude) == 0 &&
len(f.TagPass) == 0 &&
len(f.TagDrop) == 0 {
return nil
}
f.isActive = true
var err error var err error
f.nameDrop, err = filter.CompileFilter(f.NameDrop) f.nameDrop, err = filter.Compile(f.NameDrop)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'namedrop', %s", err) return fmt.Errorf("Error compiling 'namedrop', %s", err)
} }
f.namePass, err = filter.CompileFilter(f.NamePass) f.namePass, err = filter.Compile(f.NamePass)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'namepass', %s", err) return fmt.Errorf("Error compiling 'namepass', %s", err)
} }
f.fieldDrop, err = filter.CompileFilter(f.FieldDrop) f.fieldDrop, err = filter.Compile(f.FieldDrop)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'fielddrop', %s", err) return fmt.Errorf("Error compiling 'fielddrop', %s", err)
} }
f.fieldPass, err = filter.CompileFilter(f.FieldPass) f.fieldPass, err = filter.Compile(f.FieldPass)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'fieldpass', %s", err) return fmt.Errorf("Error compiling 'fieldpass', %s", err)
} }
f.tagExclude, err = filter.CompileFilter(f.TagExclude) f.tagExclude, err = filter.Compile(f.TagExclude)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'tagexclude', %s", err) return fmt.Errorf("Error compiling 'tagexclude', %s", err)
} }
f.tagInclude, err = filter.CompileFilter(f.TagInclude) f.tagInclude, err = filter.Compile(f.TagInclude)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'taginclude', %s", err) return fmt.Errorf("Error compiling 'taginclude', %s", err)
} }
for i, _ := range f.TagDrop { for i, _ := range f.TagDrop {
f.TagDrop[i].filter, err = filter.CompileFilter(f.TagDrop[i].Filter) f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'tagdrop', %s", err) return fmt.Errorf("Error compiling 'tagdrop', %s", err)
} }
} }
for i, _ := range f.TagPass { for i, _ := range f.TagPass {
f.TagPass[i].filter, err = filter.CompileFilter(f.TagPass[i].Filter) f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
if err != nil { if err != nil {
return fmt.Errorf("Error compiling 'tagpass', %s", err) return fmt.Errorf("Error compiling 'tagpass', %s", err)
} }
@ -82,16 +93,52 @@ func (f *Filter) CompileFilter() error {
return nil return nil
} }
func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool { // Apply applies the filter to the given measurement name, fields map, and
if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) { // tags map. It will return false if the metric should be "filtered out", and
// true if the metric should "pass".
// It will modify tags in-place if they need to be deleted.
func (f *Filter) Apply(
measurement string,
fields map[string]interface{},
tags map[string]string,
) bool {
if !f.isActive {
return true return true
} }
return false
// check if the measurement name should pass
if !f.shouldNamePass(measurement) {
return false
}
// check if the tags should pass
if !f.shouldTagsPass(tags) {
return false
}
// filter fields
for fieldkey, _ := range fields {
if !f.shouldFieldPass(fieldkey) {
delete(fields, fieldkey)
}
}
if len(fields) == 0 {
return false
}
// filter tags
f.filterTags(tags)
return true
} }
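A sketch of the new call sequence as seen from inside the models package (the metric values are made up; the tests in the next file exercise the same flow):

    f := Filter{
        NameDrop:  []string{"foo*"},
        FieldDrop: []string{"time_*"},
    }
    if err := f.Compile(); err != nil {
        panic(err) // a real caller surfaces this as a config error
    }
    fields := map[string]interface{}{"usage_idle": 97.2, "time_user": 1.0}
    tags := map[string]string{"host": "localhost"}
    if f.Apply("cpu", fields, tags) {
        // the metric passes: "time_user" was deleted from fields in-place,
        // and any TagInclude/TagExclude rules were applied to tags
    }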
// ShouldFieldsPass returns true if the metric should pass, false if should drop func (f *Filter) IsActive() bool {
return f.isActive
}
// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters // based on the drop/pass filter parameters
func (f *Filter) ShouldNamePass(key string) bool { func (f *Filter) shouldNamePass(key string) bool {
if f.namePass != nil { if f.namePass != nil {
if f.namePass.Match(key) { if f.namePass.Match(key) {
return true return true
@ -107,9 +154,9 @@ func (f *Filter) ShouldNamePass(key string) bool {
return true return true
} }
// ShouldFieldsPass returns true if the metric should pass, false if should drop // shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters // based on the drop/pass filter parameters
func (f *Filter) ShouldFieldsPass(key string) bool { func (f *Filter) shouldFieldPass(key string) bool {
if f.fieldPass != nil { if f.fieldPass != nil {
if f.fieldPass.Match(key) { if f.fieldPass.Match(key) {
return true return true
@ -125,9 +172,9 @@ func (f *Filter) ShouldFieldsPass(key string) bool {
return true return true
} }
// ShouldTagsPass returns true if the metric should pass, false if should drop // shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters // based on the tagdrop/tagpass filter parameters
func (f *Filter) ShouldTagsPass(tags map[string]string) bool { func (f *Filter) shouldTagsPass(tags map[string]string) bool {
if f.TagPass != nil { if f.TagPass != nil {
for _, pat := range f.TagPass { for _, pat := range f.TagPass {
if pat.filter == nil { if pat.filter == nil {
@ -161,7 +208,7 @@ func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
// Apply TagInclude and TagExclude filters. // Apply TagInclude and TagExclude filters.
// modifies the tags map in-place. // modifies the tags map in-place.
func (f *Filter) FilterTags(tags map[string]string) { func (f *Filter) filterTags(tags map[string]string) {
if f.tagInclude != nil { if f.tagInclude != nil {
for k, _ := range tags { for k, _ := range tags {
if !f.tagInclude.Match(k) { if !f.tagInclude.Match(k) {
View File
@ -1,14 +1,64 @@
package internal_models package models
import ( import (
"testing" "testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestFilter_ApplyEmpty(t *testing.T) {
f := Filter{}
require.NoError(t, f.Compile())
assert.False(t, f.IsActive())
assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
}
func TestFilter_ApplyTagsDontPass(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
}
f := Filter{
TagDrop: filters,
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
assert.False(t, f.Apply("m",
map[string]interface{}{"value": int64(1)},
map[string]string{"cpu": "cpu-total"}))
}
func TestFilter_ApplyDeleteFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value"},
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.True(t, f.Apply("m", fields, nil))
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
}
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value*"},
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.False(t, f.Apply("m", fields, nil))
}
func TestFilter_Empty(t *testing.T) { func TestFilter_Empty(t *testing.T) {
f := Filter{} f := Filter{}
@ -23,7 +73,7 @@ func TestFilter_Empty(t *testing.T) {
} }
for _, measurement := range measurements { for _, measurement := range measurements {
if !f.ShouldFieldsPass(measurement) { if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement) t.Errorf("Expected measurement %s to pass", measurement)
} }
} }
@ -33,7 +83,7 @@ func TestFilter_NamePass(t *testing.T) {
f := Filter{ f := Filter{
NamePass: []string{"foo*", "cpu_usage_idle"}, NamePass: []string{"foo*", "cpu_usage_idle"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
passes := []string{ passes := []string{
"foo", "foo",
@ -51,13 +101,13 @@ func TestFilter_NamePass(t *testing.T) {
} }
for _, measurement := range passes { for _, measurement := range passes {
if !f.ShouldNamePass(measurement) { if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement) t.Errorf("Expected measurement %s to pass", measurement)
} }
} }
for _, measurement := range drops { for _, measurement := range drops {
if f.ShouldNamePass(measurement) { if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement) t.Errorf("Expected measurement %s to drop", measurement)
} }
} }
@ -67,7 +117,7 @@ func TestFilter_NameDrop(t *testing.T) {
f := Filter{ f := Filter{
NameDrop: []string{"foo*", "cpu_usage_idle"}, NameDrop: []string{"foo*", "cpu_usage_idle"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
drops := []string{ drops := []string{
"foo", "foo",
@ -85,13 +135,13 @@ func TestFilter_NameDrop(t *testing.T) {
} }
for _, measurement := range passes { for _, measurement := range passes {
if !f.ShouldNamePass(measurement) { if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement) t.Errorf("Expected measurement %s to pass", measurement)
} }
} }
for _, measurement := range drops { for _, measurement := range drops {
if f.ShouldNamePass(measurement) { if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement) t.Errorf("Expected measurement %s to drop", measurement)
} }
} }
@ -101,7 +151,7 @@ func TestFilter_FieldPass(t *testing.T) {
f := Filter{ f := Filter{
FieldPass: []string{"foo*", "cpu_usage_idle"}, FieldPass: []string{"foo*", "cpu_usage_idle"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
passes := []string{ passes := []string{
"foo", "foo",
@ -119,13 +169,13 @@ func TestFilter_FieldPass(t *testing.T) {
} }
for _, measurement := range passes { for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) { if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement) t.Errorf("Expected measurement %s to pass", measurement)
} }
} }
for _, measurement := range drops { for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) { if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement) t.Errorf("Expected measurement %s to drop", measurement)
} }
} }
@ -135,7 +185,7 @@ func TestFilter_FieldDrop(t *testing.T) {
f := Filter{ f := Filter{
FieldDrop: []string{"foo*", "cpu_usage_idle"}, FieldDrop: []string{"foo*", "cpu_usage_idle"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
drops := []string{ drops := []string{
"foo", "foo",
@ -153,13 +203,13 @@ func TestFilter_FieldDrop(t *testing.T) {
} }
for _, measurement := range passes { for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) { if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement) t.Errorf("Expected measurement %s to pass", measurement)
} }
} }
for _, measurement := range drops { for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) { if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement) t.Errorf("Expected measurement %s to drop", measurement)
} }
} }
@ -178,7 +228,7 @@ func TestFilter_TagPass(t *testing.T) {
f := Filter{ f := Filter{
TagPass: filters, TagPass: filters,
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
passes := []map[string]string{ passes := []map[string]string{
{"cpu": "cpu-total"}, {"cpu": "cpu-total"},
@ -197,13 +247,13 @@ func TestFilter_TagPass(t *testing.T) {
} }
for _, tags := range passes { for _, tags := range passes {
if !f.ShouldTagsPass(tags) { if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags) t.Errorf("Expected tags %v to pass", tags)
} }
} }
for _, tags := range drops { for _, tags := range drops {
if f.ShouldTagsPass(tags) { if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags) t.Errorf("Expected tags %v to drop", tags)
} }
} }
@ -222,7 +272,7 @@ func TestFilter_TagDrop(t *testing.T) {
f := Filter{ f := Filter{
TagDrop: filters, TagDrop: filters,
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
drops := []map[string]string{ drops := []map[string]string{
{"cpu": "cpu-total"}, {"cpu": "cpu-total"},
@ -241,30 +291,18 @@ func TestFilter_TagDrop(t *testing.T) {
} }
for _, tags := range passes { for _, tags := range passes {
if !f.ShouldTagsPass(tags) { if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags) t.Errorf("Expected tags %v to pass", tags)
} }
} }
for _, tags := range drops { for _, tags := range drops {
if f.ShouldTagsPass(tags) { if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags) t.Errorf("Expected tags %v to drop", tags)
} }
} }
} }
func TestFilter_ShouldMetricsPass(t *testing.T) {
m := testutil.TestMetric(1, "testmetric")
f := Filter{
NameDrop: []string{"foobar"},
}
require.NoError(t, f.CompileFilter())
require.True(t, f.ShouldMetricPass(m))
m = testutil.TestMetric(1, "foobar")
require.False(t, f.ShouldMetricPass(m))
}
func TestFilter_FilterTagsNoMatches(t *testing.T) { func TestFilter_FilterTagsNoMatches(t *testing.T) {
pretags := map[string]string{ pretags := map[string]string{
"host": "localhost", "host": "localhost",
@ -273,9 +311,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
f := Filter{ f := Filter{
TagExclude: []string{"nomatch"}, TagExclude: []string{"nomatch"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
f.FilterTags(pretags) f.filterTags(pretags)
assert.Equal(t, map[string]string{ assert.Equal(t, map[string]string{
"host": "localhost", "host": "localhost",
"mytag": "foobar", "mytag": "foobar",
@ -284,9 +322,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
f = Filter{ f = Filter{
TagInclude: []string{"nomatch"}, TagInclude: []string{"nomatch"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
f.FilterTags(pretags) f.filterTags(pretags)
assert.Equal(t, map[string]string{}, pretags) assert.Equal(t, map[string]string{}, pretags)
} }
@ -298,9 +336,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
f := Filter{ f := Filter{
TagExclude: []string{"ho*"}, TagExclude: []string{"ho*"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
f.FilterTags(pretags) f.filterTags(pretags)
assert.Equal(t, map[string]string{ assert.Equal(t, map[string]string{
"mytag": "foobar", "mytag": "foobar",
}, pretags) }, pretags)
@ -312,9 +350,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
f = Filter{ f = Filter{
TagInclude: []string{"my*"}, TagInclude: []string{"my*"},
} }
require.NoError(t, f.CompileFilter()) require.NoError(t, f.Compile())
f.FilterTags(pretags) f.filterTags(pretags)
assert.Equal(t, map[string]string{ assert.Equal(t, map[string]string{
"mytag": "foobar", "mytag": "foobar",
}, pretags) }, pretags)
View File
@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"time" "time"
View File
@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"log" "log"
@ -57,21 +57,17 @@ func NewRunningOutput(
// AddMetric adds a metric to the output. This function can also write cached // AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true. // points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
if ro.Config.Filter.IsActive {
if !ro.Config.Filter.ShouldMetricPass(metric) {
return
}
}
// Filter any tagexclude/taginclude parameters before adding metric // Filter any tagexclude/taginclude parameters before adding metric
if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 { if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since // In order to filter out tags, we need to create a new metric, since
// metrics are immutable once created. // metrics are immutable once created.
name := metric.Name()
tags := metric.Tags() tags := metric.Tags()
fields := metric.Fields() fields := metric.Fields()
t := metric.Time() t := metric.Time()
name := metric.Name() if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
ro.Config.Filter.FilterTags(tags) return
}
// error is not possible if creating from another metric, so ignore. // error is not possible if creating from another metric, so ignore.
metric, _ = telegraf.NewMetric(name, tags, fields, t) metric, _ = telegraf.NewMetric(name, tags, fields, t)
} }
@ -89,7 +85,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
// Write writes all cached points to this output. // Write writes all cached points to this output.
func (ro *RunningOutput) Write() error { func (ro *RunningOutput) Write() error {
if !ro.Quiet { if !ro.Quiet {
log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+ log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
"Total gathered metrics: %d. Total dropped metrics: %d.", "Total gathered metrics: %d. Total dropped metrics: %d.",
ro.Name, ro.Name,
ro.failMetrics.Len()+ro.metrics.Len(), ro.failMetrics.Len()+ro.metrics.Len(),
@ -146,7 +142,7 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
elapsed := time.Since(start) elapsed := time.Since(start)
if err == nil { if err == nil {
if !ro.Quiet { if !ro.Quiet {
log.Printf("Output [%s] wrote batch of %d metrics in %s\n", log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, len(metrics), elapsed) ro.Name, len(metrics), elapsed)
} }
} }

View File

@ -1,4 +1,4 @@
package internal_models package models
import ( import (
"fmt" "fmt"
@ -31,9 +31,7 @@ var next5 = []telegraf.Metric{
// Benchmark adding metrics. // Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) { func BenchmarkRunningOutputAddWrite(b *testing.B) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &perfOutput{} m := &perfOutput{}
@ -49,9 +47,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {
// Benchmark adding metrics. // Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &perfOutput{} m := &perfOutput{}
@ -69,9 +65,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
// Benchmark adding metrics. // Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) { func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &perfOutput{} m := &perfOutput{}
@ -88,11 +82,10 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
func TestRunningOutput_DropFilter(t *testing.T) { func TestRunningOutput_DropFilter(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{
IsActive: true,
NameDrop: []string{"metric1", "metric2"}, NameDrop: []string{"metric1", "metric2"},
}, },
} }
assert.NoError(t, conf.Filter.CompileFilter()) assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{} m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000) ro := NewRunningOutput("test", m, conf, 1000, 10000)
@ -114,11 +107,10 @@ func TestRunningOutput_DropFilter(t *testing.T) {
func TestRunningOutput_PassFilter(t *testing.T) { func TestRunningOutput_PassFilter(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{
IsActive: true,
NameDrop: []string{"metric1000", "foo*"}, NameDrop: []string{"metric1000", "foo*"},
}, },
} }
assert.NoError(t, conf.Filter.CompileFilter()) assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{} m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000) ro := NewRunningOutput("test", m, conf, 1000, 10000)
@ -140,11 +132,11 @@ func TestRunningOutput_PassFilter(t *testing.T) {
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{
IsActive: true,
TagInclude: []string{"nothing*"}, TagInclude: []string{"nothing*"},
}, },
} }
assert.NoError(t, conf.Filter.CompileFilter()) assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{} m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000) ro := NewRunningOutput("test", m, conf, 1000, 10000)
@ -162,11 +154,11 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
func TestRunningOutput_TagExcludeMatch(t *testing.T) { func TestRunningOutput_TagExcludeMatch(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{
IsActive: true,
TagExclude: []string{"tag*"}, TagExclude: []string{"tag*"},
}, },
} }
assert.NoError(t, conf.Filter.CompileFilter()) assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{} m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000) ro := NewRunningOutput("test", m, conf, 1000, 10000)
@ -184,11 +176,11 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{
IsActive: true,
TagExclude: []string{"nothing*"}, TagExclude: []string{"nothing*"},
}, },
} }
assert.NoError(t, conf.Filter.CompileFilter()) assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{} m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000) ro := NewRunningOutput("test", m, conf, 1000, 10000)
@ -206,11 +198,11 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
func TestRunningOutput_TagIncludeMatch(t *testing.T) { func TestRunningOutput_TagIncludeMatch(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{
IsActive: true,
TagInclude: []string{"tag*"}, TagInclude: []string{"tag*"},
}, },
} }
assert.NoError(t, conf.Filter.CompileFilter()) assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{} m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000) ro := NewRunningOutput("test", m, conf, 1000, 10000)
@ -227,9 +219,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
// Test that we can write metrics with simple default setup. // Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) { func TestRunningOutputDefault(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &mockOutput{} m := &mockOutput{}
@ -252,9 +242,7 @@ func TestRunningOutputDefault(t *testing.T) {
// FlushBufferWhenFull is set. // FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) { func TestRunningOutputFlushWhenFull(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &mockOutput{} m := &mockOutput{}
@ -283,9 +271,7 @@ func TestRunningOutputFlushWhenFull(t *testing.T) {
// FlushBufferWhenFull is set, twice. // FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) { func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &mockOutput{} m := &mockOutput{}
@ -304,9 +290,7 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
func TestRunningOutputWriteFail(t *testing.T) { func TestRunningOutputWriteFail(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &mockOutput{} m := &mockOutput{}
@ -339,9 +323,7 @@ func TestRunningOutputWriteFail(t *testing.T) {
// Verify that the order of points is preserved during a write failure. // Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) { func TestRunningOutputWriteFailOrder(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &mockOutput{} m := &mockOutput{}
@ -379,9 +361,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
// Verify that the order of points is preserved during many write failures. // Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) { func TestRunningOutputWriteFailOrder2(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &mockOutput{} m := &mockOutput{}
@ -452,9 +432,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
// //
func TestRunningOutputWriteFailOrder3(t *testing.T) { func TestRunningOutputWriteFailOrder3(t *testing.T) {
conf := &OutputConfig{ conf := &OutputConfig{
Filter: Filter{ Filter: Filter{},
IsActive: false,
},
} }
m := &mockOutput{} m := &mockOutput{}

logger/logger.go Normal file
View File

@ -0,0 +1,58 @@
package logger
import (
"io"
"log"
"os"
"github.com/influxdata/wlog"
)
// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
return &telegrafLog{
writer: wlog.NewWriter(w),
}
}
type telegrafLog struct {
writer io.Writer
}
func (t *telegrafLog) Write(p []byte) (n int, err error) {
return t.writer.Write(p)
}
// SetupLogging configures the logging output.
// debug will set the log level to DEBUG
// quiet will set the log level to ERROR
// logfile will direct the logging output to a file. Empty string is
// interpreted as stdout. If there is an error opening the file, the
// logger will fall back to stdout.
func SetupLogging(debug, quiet bool, logfile string) {
if debug {
wlog.SetLevel(wlog.DEBUG)
}
if quiet {
wlog.SetLevel(wlog.ERROR)
}
var oFile *os.File
if logfile != "" {
if _, err := os.Stat(logfile); os.IsNotExist(err) {
if oFile, err = os.Create(logfile); err != nil {
log.Printf("E! Unable to create %s (%s), using stdout", logfile, err)
oFile = os.Stdout
}
} else {
if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
log.Printf("E! Unable to append to %s (%s), using stdout", logfile, err)
oFile = os.Stdout
}
}
} else {
oFile = os.Stdout
}
log.SetOutput(newTelegrafWriter(oFile))
}
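
All of Telegraf's log output now flows through this writer, and wlog filters each message by its level prefix (the `E!`/`I!` markers being added throughout this diff). A minimal usage sketch, assuming the repo-local import path `github.com/influxdata/telegraf/logger`:

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/logger"
)

func main() {
	// quiet=true sets the wlog level to ERROR; the third argument appends
	// to the named file instead of logging to stdout.
	logger.SetupLogging(false, true, "/tmp/telegraf-example.log")

	log.Printf("D! dropped, below the ERROR level")
	log.Printf("I! dropped, below the ERROR level")
	log.Printf("E! written, error-level messages pass the filter")
}
```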

View File

@ -6,6 +6,17 @@ import (
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/influxdb/client/v2"
) )
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int
// Possible values for the ValueType enum.
const (
_ ValueType = iota
Counter
Gauge
Untyped
)
type Metric interface { type Metric interface {
// Name returns the measurement name of the metric // Name returns the measurement name of the metric
Name() string Name() string
@ -16,6 +27,9 @@ type Metric interface {
// Time return the timestamp for the metric // Time return the timestamp for the metric
Time() time.Time Time() time.Time
// Type returns the metric type. Can be telegraf.Counter, telegraf.Gauge, or telegraf.Untyped.
Type() ValueType
// UnixNano returns the unix nano time of the metric // UnixNano returns the unix nano time of the metric
UnixNano() int64 UnixNano() int64
@ -35,12 +49,11 @@ type Metric interface {
// metric is a wrapper of the influxdb client.Point struct // metric is a wrapper of the influxdb client.Point struct
type metric struct { type metric struct {
pt *client.Point pt *client.Point
mType ValueType
} }
// NewMetric returns a metric with the given timestamp. If a timestamp is not // NewMetric returns an untyped metric.
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetric( func NewMetric(
name string, name string,
tags map[string]string, tags map[string]string,
@ -52,7 +65,46 @@ func NewMetric(
return nil, err return nil, err
} }
return &metric{ return &metric{
pt: pt, pt: pt,
mType: Untyped,
}, nil
}
// NewGaugeMetric returns a gauge metric.
// Gauge metrics should be used when the metric can arbitrarily go up and
// down, e.g. temperature, memory usage, cpu usage, etc.
func NewGaugeMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := client.NewPoint(name, tags, fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Gauge,
}, nil
}
// NewCounterMetric returns a Counter metric.
// Counter metrics should be used when the metric being created is an
// always-increasing counter, e.g. net bytes received, requests served, errors, etc.
func NewCounterMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := client.NewPoint(name, tags, fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Counter,
}, nil }, nil
} }
@ -68,6 +120,10 @@ func (m *metric) Time() time.Time {
return m.pt.Time() return m.pt.Time()
} }
func (m *metric) Type() ValueType {
return m.mType
}
func (m *metric) UnixNano() int64 { func (m *metric) UnixNano() int64 {
return m.pt.UnixNano() return m.pt.UnixNano()
} }
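
A consumer can now branch on the metric type via the `Type()` accessor. A hedged sketch (`typeSuffix` is a hypothetical helper, not part of this commit):

```go
// typeSuffix maps the new ValueType enum to a string label, e.g. for a
// Prometheus-style exposition format or for type-aware serialization.
func typeSuffix(m telegraf.Metric) string {
	switch m.Type() {
	case telegraf.Counter:
		return "counter"
	case telegraf.Gauge:
		return "gauge"
	default: // telegraf.Untyped
		return "untyped"
	}
}
```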

View File

@ -23,6 +23,51 @@ func TestNewMetric(t *testing.T) {
m, err := NewMetric("cpu", tags, fields, now) m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewGaugeMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewGaugeMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewCounterMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewCounterMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Counter, m.Type())
assert.Equal(t, tags, m.Tags()) assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields()) assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name()) assert.Equal(t, "cpu", m.Name())

View File

@ -27,6 +27,14 @@ The example plugin gathers metrics about example things
- tag2 - tag2
- measurement2 has the following tags: - measurement2 has the following tags:
- tag3 - tag3
### Sample Queries:
These are some useful queries (to generate dashboards or for other purposes) to run against data from this plugin:
```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1='bar' AND time > now() - 1h GROUP BY tag1
```
### Example Output: ### Example Output:

View File

@ -1,6 +1,8 @@
package aerospike package aerospike
import ( import (
"errors"
"log"
"net" "net"
"strconv" "strconv"
"strings" "strings"
@ -11,7 +13,7 @@ import (
"github.com/influxdata/telegraf/internal/errchan" "github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
as "github.com/sparrc/aerospike-client-go" as "github.com/aerospike/aerospike-client-go"
) )
type Aerospike struct { type Aerospike struct {
@ -82,7 +84,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
return err return err
} }
for k, v := range stats { for k, v := range stats {
fields[strings.Replace(k, "-", "_", -1)] = parseValue(v) val, err := parseValue(v)
if err == nil {
fields[strings.Replace(k, "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", k)
}
} }
acc.AddFields("aerospike_node", fields, tags, time.Now()) acc.AddFields("aerospike_node", fields, tags, time.Now())
@ -110,7 +117,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if len(parts) < 2 { if len(parts) < 2 {
continue continue
} }
nFields[strings.Replace(parts[0], "-", "_", -1)] = parseValue(parts[1]) val, err := parseValue(parts[1])
if err == nil {
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
}
} }
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
} }
@ -118,13 +130,16 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
return nil return nil
} }
func parseValue(v string) interface{} { func parseValue(v string) (interface{}, error) {
if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
return parsed return parsed, nil
} else if _, err := strconv.ParseUint(v, 10, 64); err == nil {
// int64 overflow, yet valid uint64
return nil, errors.New("Number is too large")
} else if parsed, err := strconv.ParseBool(v); err == nil { } else if parsed, err := strconv.ParseBool(v); err == nil {
return parsed return parsed, nil
} else { } else {
return v return v, nil
} }
} }

View File

@ -10,7 +10,7 @@ import (
func TestAerospikeStatistics(t *testing.T) { func TestAerospikeStatistics(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("Skipping integration test in short mode") t.Skip("Skipping aerospike integration tests.")
} }
a := &Aerospike{ a := &Aerospike{
@ -29,7 +29,7 @@ func TestAerospikeStatistics(t *testing.T) {
func TestAerospikeStatisticsPartialErr(t *testing.T) { func TestAerospikeStatisticsPartialErr(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("Skipping integration test in short mode") t.Skip("Skipping aerospike integration tests.")
} }
a := &Aerospike{ a := &Aerospike{
@ -48,3 +48,20 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
assert.True(t, acc.HasMeasurement("aerospike_namespace")) assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error")) assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
} }
func TestAerospikeParseValue(t *testing.T) {
// uint64 with value bigger than int64 max
val, err := parseValue("18446744041841121751")
assert.Nil(t, val)
assert.Error(t, err)
// int values
val, err = parseValue("42")
assert.NoError(t, err)
assert.Equal(t, val, int64(42), "must be parsed as int")
// string values
val, err = parseValue("BB977942A2CA502")
assert.NoError(t, err)
assert.Equal(t, val, `BB977942A2CA502`, "must be left as string")
}

View File

@ -22,10 +22,13 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/filestat" _ "github.com/influxdata/telegraf/plugins/inputs/filestat"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog" _ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs" _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
@ -60,6 +63,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors" _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd" _ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/sysstat"

View File

@ -274,7 +274,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
m = newCassandraMetric(serverTokens["host"], metric, acc) m = newCassandraMetric(serverTokens["host"], metric, acc)
} else { } else {
// unsupported metric type // unsupported metric type
log.Printf("Unsupported Cassandra metric [%s], skipping", log.Printf("I! Unsupported Cassandra metric [%s], skipping",
metric) metric)
continue continue
} }

View File

@ -2,7 +2,9 @@
Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
The plugin works by scanning the configured SocketDir for OSD and MON socket files. When it finds *Admin Socket Stats*
This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump** a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**
The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are
@ -27,11 +29,26 @@ Would be parsed into the following metrics, all of which would be tagged with co
- refresh_latency.sum: 5378.794002000 - refresh_latency.sum: 5378.794002000
*Cluster Stats*
This gatherer works by invoking ceph commands against the cluster, and thus only requires the ceph
client, a valid ceph configuration, and an access key to function (the ceph_config and ceph_user
configuration variables work in conjunction to specify these prerequisites). It may be run on any
server with access to the cluster. The currently supported commands are:
* ceph status
* ceph df
* ceph osd pool stats
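
Each command is executed with the configured identity and JSON output format, so `ceph osd pool stats` becomes roughly `ceph --conf /etc/ceph/ceph.conf --name client.admin --format json osd pool stats` (a sketch of the invocation, assuming the default ceph_user and ceph_config shown below).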
### Configuration: ### Configuration:
``` ```
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]] [[inputs.ceph]]
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'
## All configuration values are optional, defaults are shown below ## All configuration values are optional, defaults are shown below
## location of ceph binary ## location of ceph binary
@ -46,15 +63,86 @@ Would be parsed into the following metrics, all of which would be tagged with co
## suffix used to identify socket files ## suffix used to identify socket files
socket_suffix = "asok" socket_suffix = "asok"
## Ceph user to authenticate as, ceph will search for the corresponding keyring
## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
## client section of ceph.conf for example:
##
## [client.telegraf]
## keyring = /etc/ceph/client.telegraf.keyring
##
## Consult the ceph documentation for more detail on keyring generation.
ceph_user = "client.admin"
## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"
## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = true
``` ```
### Measurements & Fields: ### Measurements & Fields:
*Admin Socket Stats*
All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go. All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.
*Cluster Stats*
* ceph\_osdmap
* epoch (float)
* full (boolean)
* nearfull (boolean)
* num\_in\_osds (float)
* num\_osds (float)
* num\_remapped\_pgs (float)
* num\_up\_osds (float)
* ceph\_pgmap
* bytes\_avail (float)
* bytes\_total (float)
* bytes\_used (float)
* data\_bytes (float)
* num\_pgs (float)
* op\_per\_sec (float)
* read\_bytes\_sec (float)
* version (float)
* write\_bytes\_sec (float)
* recovering\_bytes\_per\_sec (float)
* recovering\_keys\_per\_sec (float)
* recovering\_objects\_per\_sec (float)
* ceph\_pgmap\_state
* state name e.g. active+clean (float)
* ceph\_usage
* total\_avail\_bytes (float)
* total\_bytes (float)
* total\_used\_bytes (float)
* ceph\_pool\_usage
* bytes\_used (float)
* kb\_used (float)
* max\_avail (float)
* objects (float)
* ceph\_pool\_stats
* op\_per\_sec (float)
* read\_bytes\_sec (float)
* write\_bytes\_sec (float)
* recovering\_objects\_per\_sec (float)
* recovering\_bytes\_per\_sec (float)
* recovering\_keys\_per\_sec (float)
### Tags: ### Tags:
*Admin Socket Stats*
All measurements will have the following tags: All measurements will have the following tags:
- type: either 'osd' or 'mon' to indicate which type of node was queried - type: either 'osd' or 'mon' to indicate which type of node was queried
@ -96,9 +184,21 @@ All measurements will have the following tags:
- throttle-osd_client_bytes - throttle-osd_client_bytes
- throttle-osd_client_messages - throttle-osd_client_messages
*Cluster Stats*
* ceph\_pgmap\_state has the following tags:
* state (state for which the value applies e.g. active+clean, active+remapped+backfill)
* ceph\_pool\_usage has the following tags:
* id
* name
* ceph\_pool\_stats has the following tags:
* id
* name
### Example Output: ### Example Output:
*Admin Socket Stats*
<pre> <pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
* Plugin: ceph, Collection 1 * Plugin: ceph, Collection 1
@ -107,3 +207,16 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661 > ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064 > ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
</pre> </pre>
*Cluster Stats*
<pre>
> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
</pre>

View File

@ -23,33 +23,15 @@ const (
) )
type Ceph struct { type Ceph struct {
CephBinary string CephBinary string
OsdPrefix string OsdPrefix string
MonPrefix string MonPrefix string
SocketDir string SocketDir string
SocketSuffix string SocketSuffix string
} CephUser string
CephConfig string
func (c *Ceph) setDefaults() { GatherAdminSocketStats bool
if c.CephBinary == "" { GatherClusterStats bool
c.CephBinary = "/usr/bin/ceph"
}
if c.OsdPrefix == "" {
c.OsdPrefix = osdPrefix
}
if c.MonPrefix == "" {
c.MonPrefix = monPrefix
}
if c.SocketDir == "" {
c.SocketDir = "/var/run/ceph"
}
if c.SocketSuffix == "" {
c.SocketSuffix = sockSuffix
}
} }
func (c *Ceph) Description() string { func (c *Ceph) Description() string {
@ -57,6 +39,10 @@ func (c *Ceph) Description() string {
} }
var sampleConfig = ` var sampleConfig = `
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'
## All configuration values are optional, defaults are shown below ## All configuration values are optional, defaults are shown below
## location of ceph binary ## location of ceph binary
@ -71,6 +57,18 @@ var sampleConfig = `
## suffix used to identify socket files ## suffix used to identify socket files
socket_suffix = "asok" socket_suffix = "asok"
## Ceph user to authenticate as
ceph_user = "client.admin"
## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"
## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands
gather_cluster_stats = true
` `
func (c *Ceph) SampleConfig() string { func (c *Ceph) SampleConfig() string {
@ -78,7 +76,22 @@ func (c *Ceph) SampleConfig() string {
} }
func (c *Ceph) Gather(acc telegraf.Accumulator) error { func (c *Ceph) Gather(acc telegraf.Accumulator) error {
c.setDefaults() if c.GatherAdminSocketStats {
if err := c.gatherAdminSocketStats(acc); err != nil {
return err
}
}
if c.GatherClusterStats {
if err := c.gatherClusterStats(acc); err != nil {
return err
}
}
return nil
}
func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
sockets, err := findSockets(c) sockets, err := findSockets(c)
if err != nil { if err != nil {
return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err) return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
@ -87,12 +100,12 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error {
for _, s := range sockets { for _, s := range sockets {
dump, err := perfDump(c.CephBinary, s) dump, err := perfDump(c.CephBinary, s)
if err != nil { if err != nil {
log.Printf("error reading from socket '%s': %v", s.socket, err) log.Printf("E! error reading from socket '%s': %v", s.socket, err)
continue continue
} }
data, err := parseDump(dump) data, err := parseDump(dump)
if err != nil { if err != nil {
log.Printf("error parsing dump from socket '%s': %v", s.socket, err) log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
continue continue
} }
for tag, metrics := range *data { for tag, metrics := range *data {
@ -104,8 +117,46 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error {
jobs := []struct {
command string
parser func(telegraf.Accumulator, string) error
}{
{"status", decodeStatus},
{"df", decodeDf},
{"osd pool stats", decodeOsdPoolStats},
}
// For each job, execute against the cluster, parse and accumulate the data points
for _, job := range jobs {
output, err := c.exec(job.command)
if err != nil {
return fmt.Errorf("error executing command: %v", err)
}
err = job.parser(acc, output)
if err != nil {
return fmt.Errorf("error parsing output: %v", err)
}
}
return nil
}
func init() { func init() {
inputs.Add(measurement, func() telegraf.Input { return &Ceph{} }) c := Ceph{
CephBinary: "/usr/bin/ceph",
OsdPrefix: osdPrefix,
MonPrefix: monPrefix,
SocketDir: "/var/run/ceph",
SocketSuffix: sockSuffix,
CephUser: "client.admin",
CephConfig: "/etc/ceph/ceph.conf",
GatherAdminSocketStats: true,
GatherClusterStats: false,
}
inputs.Add(measurement, func() telegraf.Input { return &c })
} }
var perfDump = func(binary string, socket *socket) (string, error) { var perfDump = func(binary string, socket *socket) (string, error) {
@ -242,8 +293,197 @@ func flatten(data interface{}) []*metric {
} }
} }
default: default:
log.Printf("Ignoring unexpected type '%T' for value %v", val, val) log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
} }
return metrics return metrics
} }
func (c *Ceph) exec(command string) (string, error) {
cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"}
cmdArgs = append(cmdArgs, strings.Split(command, " ")...)
cmd := exec.Command(c.CephBinary, cmdArgs...)
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return "", fmt.Errorf("error running ceph %v: %s", command, err)
}
output := out.String()
// Ceph doesn't sanitize its output, and may return invalid JSON. Patch this
// up for them, as having some inaccurate data is better than none.
output = strings.Replace(output, "-inf", "0", -1)
output = strings.Replace(output, "inf", "0", -1)
return output, nil
}
func decodeStatus(acc telegraf.Accumulator, input string) error {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(input), &data)
if err != nil {
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
}
err = decodeStatusOsdmap(acc, data)
if err != nil {
return err
}
err = decodeStatusPgmap(acc, data)
if err != nil {
return err
}
err = decodeStatusPgmapState(acc, data)
if err != nil {
return err
}
return nil
}
func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error {
osdmap, ok := data["osdmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
}
fields, ok := osdmap["osdmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
}
acc.AddFields("ceph_osdmap", fields, map[string]string{})
return nil
}
func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error {
pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
}
fields := make(map[string]interface{})
for key, value := range pgmap {
switch value.(type) {
case float64:
fields[key] = value
}
}
acc.AddFields("ceph_pgmap", fields, map[string]string{})
return nil
}
func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
}
fields := make(map[string]interface{})
for key, value := range pgmap {
switch value.(type) {
case []interface{}:
if key != "pgs_by_state" {
continue
}
for _, state := range value.([]interface{}) {
state_map, ok := state.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
}
state_name, ok := state_map["state_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
}
state_count, ok := state_map["count"].(float64)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
}
fields[state_name] = state_count
}
}
}
acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
return nil
}
func decodeDf(acc telegraf.Accumulator, input string) error {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(input), &data)
if err != nil {
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
}
// ceph.usage: records global utilization and number of objects
stats_fields, ok := data["stats"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df stats", measurement)
}
acc.AddFields("ceph_usage", stats_fields, map[string]string{})
// ceph.pool.usage: records per pool utilization and number of objects
pools, ok := data["pools"].([]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pools", measurement)
}
for _, pool := range pools {
pool_map, ok := pool.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pool", measurement)
}
pool_name, ok := pool_map["name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pool name", measurement)
}
fields, ok := pool_map["stats"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement)
}
tags := map[string]string{
"name": pool_name,
}
acc.AddFields("ceph_pool_usage", fields, tags)
}
return nil
}
func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error {
data := make([]map[string]interface{}, 0)
err := json.Unmarshal([]byte(input), &data)
if err != nil {
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
}
// ceph.pool.stats: records per pool IO and recovery throughput
for _, pool := range data {
pool_name, ok := pool["pool_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement)
}
// Note: the 'recovery' object looks broken (in hammer), so it's omitted
objects := []string{
"client_io_rate",
"recovery_rate",
}
fields := make(map[string]interface{})
for _, object := range objects {
perfdata, ok := pool[object].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement)
}
for key, value := range perfdata {
fields[key] = value
}
}
tags := map[string]string{
"name": pool_name,
}
acc.AddFields("ceph_pool_stats", fields, tags)
}
return nil
}
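
For reference, decodeOsdPoolStats expects `ceph osd pool stats` JSON of roughly the following shape; this is a sketch reconstructed from the keys the parser reads, not verbatim ceph output:

```
[
  {
    "pool_name": "cinder.volumes",
    "client_io_rate": {
      "op_per_sec": 1706,
      "read_bytes_sec": 28671674,
      "write_bytes_sec": 29994541
    },
    "recovery_rate": {}
  }
]
```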

View File

@ -65,12 +65,17 @@ func TestFindSockets(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
}() }()
c := &Ceph{ c := &Ceph{
CephBinary: "foo", CephBinary: "foo",
SocketDir: tmpdir, OsdPrefix: "ceph-osd",
MonPrefix: "ceph-mon",
SocketDir: tmpdir,
SocketSuffix: "asok",
CephUser: "client.admin",
CephConfig: "/etc/ceph/ceph.conf",
GatherAdminSocketStats: true,
GatherClusterStats: false,
} }
c.setDefaults()
for _, st := range sockTestParams { for _, st := range sockTestParams {
createTestFiles(tmpdir, st) createTestFiles(tmpdir, st)

View File

@ -34,6 +34,11 @@ API endpoint. In the following order the plugin will attempt to authenticate.
## Metric Statistic Namespace (required) ## Metric Statistic Namespace (required)
namespace = 'AWS/ELB' namespace = 'AWS/ELB'
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## Metrics to Pull (optional) ## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided ## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h ## Refreshes Namespace available metrics every 1h

View File

@ -33,6 +33,7 @@ type (
Namespace string `toml:"namespace"` Namespace string `toml:"namespace"`
Metrics []*Metric `toml:"metrics"` Metrics []*Metric `toml:"metrics"`
CacheTTL internal.Duration `toml:"cache_ttl"` CacheTTL internal.Duration `toml:"cache_ttl"`
RateLimit int `toml:"ratelimit"`
client cloudwatchClient client cloudwatchClient
metricCache *MetricCache metricCache *MetricCache
} }
@ -96,6 +97,11 @@ func (c *CloudWatch) SampleConfig() string {
## Metric Statistic Namespace (required) ## Metric Statistic Namespace (required)
namespace = 'AWS/ELB' namespace = 'AWS/ELB'
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## Metrics to Pull (optional) ## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided ## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h ## Refreshes Namespace available metrics every 1h
@ -175,7 +181,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
// limit concurrency or we can easily exhaust user connection limit // limit concurrency or we can easily exhaust user connection limit
// see cloudwatch API request limits: // see cloudwatch API request limits:
// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
lmtr := limiter.NewRateLimiter(10, time.Second) lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
defer lmtr.Stop() defer lmtr.Stop()
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(len(metrics)) wg.Add(len(metrics))
@ -195,7 +201,8 @@ func init() {
inputs.Add("cloudwatch", func() telegraf.Input { inputs.Add("cloudwatch", func() telegraf.Input {
ttl, _ := time.ParseDuration("1h") ttl, _ := time.ParseDuration("1h")
return &CloudWatch{ return &CloudWatch{
CacheTTL: internal.Duration{Duration: ttl}, CacheTTL: internal.Duration{Duration: ttl},
RateLimit: 10,
} }
}) })
} }

View File

@ -58,6 +58,7 @@ func TestGather(t *testing.T) {
Namespace: "AWS/ELB", Namespace: "AWS/ELB",
Delay: internalDuration, Delay: internalDuration,
Period: internalDuration, Period: internalDuration,
RateLimit: 10,
} }
var acc testutil.Accumulator var acc testutil.Accumulator

View File

@ -93,13 +93,14 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
contents, err := ioutil.ReadFile(fName) contents, err := ioutil.ReadFile(fName)
if err != nil { if err != nil {
log.Printf("failed to read file '%s': %v", fName, err) log.Printf("E! failed to read file '%s': %v", fName, err)
continue
} }
v := strings.TrimSpace(string(contents)) v := strings.TrimSpace(string(contents))
fields[metricKey], err = strconv.ParseFloat(v, 64) fields[metricKey], err = strconv.ParseFloat(v, 64)
if err != nil { if err != nil {
log.Printf("failed to parse metric, expected number but "+ log.Printf("E! failed to parse metric, expected number but "+
" found '%s': %v", v, err) " found '%s': %v", v, err)
} }
} }

File diff suppressed because one or more lines are too long

View File

@ -103,6 +103,9 @@ based on the availability of per-cpu stats on your system.
- n_used_file_descriptors - n_used_file_descriptors
- n_cpus - n_cpus
- n_containers - n_containers
- n_containers_running
- n_containers_stopped
- n_containers_paused
- n_images - n_images
- n_goroutines - n_goroutines
- n_listener_events - n_listener_events
@ -153,6 +156,9 @@ based on the availability of per-cpu stats on your system.
> docker n_cpus=8i 1456926671065383978 > docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978 > docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978 > docker n_containers=7i 1456926671065383978
> docker n_containers_running=7i 1456926671065383978
> docker n_containers_stopped=3i 1456926671065383978
> docker n_containers_paused=0i 1456926671065383978
> docker n_images=152i 1456926671065383978 > docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978 > docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978 > docker n_listener_events=0i 1456926671065383978

View File

@ -25,8 +25,11 @@ type Docker struct {
Endpoint string Endpoint string
ContainerNames []string ContainerNames []string
Timeout internal.Duration Timeout internal.Duration
PerDevice bool `toml:"perdevice"`
Total bool `toml:"total"`
client DockerClient client DockerClient
engine_host string
} }
// DockerClient interface, useful for testing // DockerClient interface, useful for testing
@ -58,6 +61,13 @@ var sampleConfig = `
container_names = [] container_names = []
## Timeout for docker list, info, and stats commands ## Timeout for docker list, info, and stats commands
timeout = "5s" timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...) and
## network (eth0, eth1, ...) stats or not
perdevice = true
## Whether to report for each container total blkio and network stats or not
total = false
` `
// Description returns input description // Description returns input description
@ -116,7 +126,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
defer wg.Done() defer wg.Done()
err := d.gatherContainer(c, acc) err := d.gatherContainer(c, acc)
if err != nil { if err != nil {
log.Printf("Error gathering container %s stats: %s\n", log.Printf("E! Error gathering container %s stats: %s\n",
c.Names, err.Error()) c.Names, err.Error())
} }
}(container) }(container)
@ -138,11 +148,15 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
if err != nil { if err != nil {
return err return err
} }
d.engine_host = info.Name
fields := map[string]interface{}{ fields := map[string]interface{}{
"n_cpus": info.NCPU, "n_cpus": info.NCPU,
"n_used_file_descriptors": info.NFd, "n_used_file_descriptors": info.NFd,
"n_containers": info.Containers, "n_containers": info.Containers,
"n_containers_running": info.ContainersRunning,
"n_containers_stopped": info.ContainersStopped,
"n_containers_paused": info.ContainersPaused,
"n_images": info.Images, "n_images": info.Images,
"n_goroutines": info.NGoroutines, "n_goroutines": info.NGoroutines,
"n_listener_events": info.NEventsListener, "n_listener_events": info.NEventsListener,
@ -150,11 +164,11 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// Add metrics // Add metrics
acc.AddFields("docker", acc.AddFields("docker",
fields, fields,
nil, map[string]string{"engine_host": d.engine_host},
now) now)
acc.AddFields("docker", acc.AddFields("docker",
map[string]interface{}{"memory_total": info.MemTotal}, map[string]interface{}{"memory_total": info.MemTotal},
map[string]string{"unit": "bytes"}, map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now) now)
// Get storage metrics // Get storage metrics
for _, rawData := range info.DriverStatus { for _, rawData := range info.DriverStatus {
@ -168,7 +182,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// pool blocksize // pool blocksize
acc.AddFields("docker", acc.AddFields("docker",
map[string]interface{}{"pool_blocksize": value}, map[string]interface{}{"pool_blocksize": value},
map[string]string{"unit": "bytes"}, map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now) now)
} else if strings.HasPrefix(name, "data_space_") { } else if strings.HasPrefix(name, "data_space_") {
// data space // data space
@ -183,13 +197,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
if len(dataFields) > 0 { if len(dataFields) > 0 {
acc.AddFields("docker_data", acc.AddFields("docker_data",
dataFields, dataFields,
map[string]string{"unit": "bytes"}, map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now) now)
} }
if len(metadataFields) > 0 { if len(metadataFields) > 0 {
acc.AddFields("docker_metadata", acc.AddFields("docker_metadata",
metadataFields, metadataFields,
map[string]string{"unit": "bytes"}, map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now) now)
} }
return nil return nil
@ -216,6 +230,7 @@ func (d *Docker) gatherContainer(
imageVersion = imageParts[1] imageVersion = imageParts[1]
} }
tags := map[string]string{ tags := map[string]string{
"engine_host": d.engine_host,
"container_name": cname, "container_name": cname,
"container_image": imageName, "container_image": imageName,
"container_version": imageVersion, "container_version": imageVersion,
@ -246,7 +261,7 @@ func (d *Docker) gatherContainer(
tags[k] = label tags[k] = label
} }
gatherContainerStats(v, acc, tags, container.ID) gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)
return nil return nil
} }
@ -256,6 +271,8 @@ func gatherContainerStats(
acc telegraf.Accumulator, acc telegraf.Accumulator,
tags map[string]string, tags map[string]string,
id string, id string,
perDevice bool,
total bool,
) { ) {
now := stat.Read now := stat.Read
@ -323,6 +340,7 @@ func gatherContainerStats(
acc.AddFields("docker_container_cpu", fields, percputags, now) acc.AddFields("docker_container_cpu", fields, percputags, now)
} }
totalNetworkStatMap := make(map[string]interface{})
for network, netstats := range stat.Networks { for network, netstats := range stat.Networks {
netfields := map[string]interface{}{ netfields := map[string]interface{}{
"rx_dropped": netstats.RxDropped, "rx_dropped": netstats.RxDropped,
@ -336,12 +354,35 @@ func gatherContainerStats(
"container_id": id, "container_id": id,
} }
// Create a new network tag dictionary for the "network" tag // Create a new network tag dictionary for the "network" tag
nettags := copyTags(tags) if perDevice {
nettags["network"] = network nettags := copyTags(tags)
acc.AddFields("docker_container_net", netfields, nettags, now) nettags["network"] = network
acc.AddFields("docker_container_net", netfields, nettags, now)
}
if total {
for field, value := range netfields {
if field == "container_id" {
continue
}
_, ok := totalNetworkStatMap[field]
if ok {
totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + value.(uint64)
} else {
totalNetworkStatMap[field] = value
}
}
}
} }
gatherBlockIOMetrics(stat, acc, tags, now, id) // totalNetworkStatMap could be empty if container is running with --net=host.
if total && len(totalNetworkStatMap) != 0 {
nettags := copyTags(tags)
nettags["network"] = "total"
totalNetworkStatMap["container_id"] = id
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, now)
}
gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
} }
func calculateMemPercent(stat *types.StatsJSON) float64 { func calculateMemPercent(stat *types.StatsJSON) float64 {
@ -370,6 +411,8 @@ func gatherBlockIOMetrics(
tags map[string]string, tags map[string]string,
now time.Time, now time.Time,
id string, id string,
perDevice bool,
total bool,
) { ) {
blkioStats := stat.BlkioStats blkioStats := stat.BlkioStats
// Make a map of devices to their block io stats // Make a map of devices to their block io stats
@ -431,11 +474,33 @@ func gatherBlockIOMetrics(
deviceStatMap[device]["sectors_recursive"] = metric.Value deviceStatMap[device]["sectors_recursive"] = metric.Value
} }
totalStatMap := make(map[string]interface{})
for device, fields := range deviceStatMap { for device, fields := range deviceStatMap {
iotags := copyTags(tags)
iotags["device"] = device
fields["container_id"] = id fields["container_id"] = id
acc.AddFields("docker_container_blkio", fields, iotags, now) if perDevice {
iotags := copyTags(tags)
iotags["device"] = device
acc.AddFields("docker_container_blkio", fields, iotags, now)
}
if total {
for field, value := range fields {
if field == "container_id" {
continue
}
_, ok := totalStatMap[field]
if ok {
totalStatMap[field] = totalStatMap[field].(uint64) + value.(uint64)
} else {
totalStatMap[field] = value
}
}
}
}
if total {
totalStatMap["container_id"] = id
iotags := copyTags(tags)
iotags["device"] = "total"
acc.AddFields("docker_container_blkio", totalStatMap, iotags, now)
} }
} }
@ -480,7 +545,8 @@ func parseSize(sizeStr string) (int64, error) {
func init() { func init() {
inputs.Add("docker", func() telegraf.Input { inputs.Add("docker", func() telegraf.Input {
return &Docker{ return &Docker{
Timeout: internal.Duration{Duration: time.Second * 5}, PerDevice: true,
Timeout: internal.Duration{Duration: time.Second * 5},
} }
}) })
} }

View File

@ -24,7 +24,7 @@ func TestDockerGatherContainerStats(t *testing.T) {
"container_name": "redis", "container_name": "redis",
"container_image": "redis/image", "container_image": "redis/image",
} }
gatherContainerStats(stats, &acc, tags, "123456789") gatherContainerStats(stats, &acc, tags, "123456789", true, true)
// test docker_container_net measurement // test docker_container_net measurement
netfields := map[string]interface{}{ netfields := map[string]interface{}{
@ -42,6 +42,21 @@ func TestDockerGatherContainerStats(t *testing.T) {
nettags["network"] = "eth0" nettags["network"] = "eth0"
acc.AssertContainsTaggedFields(t, "docker_container_net", netfields, nettags) acc.AssertContainsTaggedFields(t, "docker_container_net", netfields, nettags)
netfields = map[string]interface{}{
"rx_dropped": uint64(6),
"rx_bytes": uint64(8),
"rx_errors": uint64(10),
"tx_packets": uint64(12),
"tx_dropped": uint64(6),
"rx_packets": uint64(8),
"tx_errors": uint64(10),
"tx_bytes": uint64(12),
"container_id": "123456789",
}
nettags = copyTags(tags)
nettags["network"] = "total"
acc.AssertContainsTaggedFields(t, "docker_container_net", netfields, nettags)
// test docker_blkio measurement // test docker_blkio measurement
blkiotags := copyTags(tags) blkiotags := copyTags(tags)
blkiotags["device"] = "6:0" blkiotags["device"] = "6:0"
@ -52,6 +67,15 @@ func TestDockerGatherContainerStats(t *testing.T) {
} }
acc.AssertContainsTaggedFields(t, "docker_container_blkio", blkiofields, blkiotags) acc.AssertContainsTaggedFields(t, "docker_container_blkio", blkiofields, blkiotags)
blkiotags = copyTags(tags)
blkiotags["device"] = "total"
blkiofields = map[string]interface{}{
"io_service_bytes_recursive_read": uint64(100),
"io_serviced_recursive_write": uint64(302),
"container_id": "123456789",
}
acc.AssertContainsTaggedFields(t, "docker_container_blkio", blkiofields, blkiotags)
// test docker_container_mem measurement // test docker_container_mem measurement
memfields := map[string]interface{}{ memfields := map[string]interface{}{
"max_usage": uint64(1001), "max_usage": uint64(1001),
@ -186,6 +210,17 @@ func testStats() *types.StatsJSON {
TxBytes: 4, TxBytes: 4,
} }
stats.Networks["eth1"] = types.NetworkStats{
RxDropped: 5,
RxBytes: 6,
RxErrors: 7,
TxPackets: 8,
TxDropped: 5,
RxPackets: 6,
TxErrors: 7,
TxBytes: 8,
}
sbr := types.BlkioStatEntry{ sbr := types.BlkioStatEntry{
Major: 6, Major: 6,
Minor: 0, Minor: 0,
@ -198,11 +233,19 @@ func testStats() *types.StatsJSON {
Op: "write", Op: "write",
Value: 101, Value: 101,
} }
sr2 := types.BlkioStatEntry{
Major: 6,
Minor: 1,
Op: "write",
Value: 201,
}
stats.BlkioStats.IoServiceBytesRecursive = append( stats.BlkioStats.IoServiceBytesRecursive = append(
stats.BlkioStats.IoServiceBytesRecursive, sbr) stats.BlkioStats.IoServiceBytesRecursive, sbr)
stats.BlkioStats.IoServicedRecursive = append( stats.BlkioStats.IoServicedRecursive = append(
stats.BlkioStats.IoServicedRecursive, sr) stats.BlkioStats.IoServicedRecursive, sr)
stats.BlkioStats.IoServicedRecursive = append(
stats.BlkioStats.IoServicedRecursive, sr2)
return stats return stats
} }
@ -213,6 +256,9 @@ type FakeDockerClient struct {
func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) { func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
env := types.Info{ env := types.Info{
Containers: 108, Containers: 108,
ContainersRunning: 98,
ContainersStopped: 6,
ContainersPaused: 3,
OomKillDisable: false, OomKillDisable: false,
SystemTime: "2016-02-24T00:55:09.15073105-05:00", SystemTime: "2016-02-24T00:55:09.15073105-05:00",
NEventsListener: 0, NEventsListener: 0,
@ -354,10 +400,13 @@ func TestDockerGatherInfo(t *testing.T) {
"n_cpus": int(4), "n_cpus": int(4),
"n_used_file_descriptors": int(19), "n_used_file_descriptors": int(19),
"n_containers": int(108), "n_containers": int(108),
"n_containers_running": int(98),
"n_containers_stopped": int(6),
"n_containers_paused": int(3),
"n_images": int(199), "n_images": int(199),
"n_goroutines": int(39), "n_goroutines": int(39),
}, },
map[string]string{}, map[string]string{"engine_host": "absol"},
) )
acc.AssertContainsTaggedFields(t, acc.AssertContainsTaggedFields(t,
@ -368,7 +417,8 @@ func TestDockerGatherInfo(t *testing.T) {
"available": int64(36530000000), "available": int64(36530000000),
}, },
map[string]string{ map[string]string{
"unit": "bytes", "unit": "bytes",
"engine_host": "absol",
}, },
) )
acc.AssertContainsTaggedFields(t, acc.AssertContainsTaggedFields(t,
@ -382,6 +432,7 @@ func TestDockerGatherInfo(t *testing.T) {
"container_image": "quay.io/coreos/etcd", "container_image": "quay.io/coreos/etcd",
"cpu": "cpu3", "cpu": "cpu3",
"container_version": "v2.2.2", "container_version": "v2.2.2",
"engine_host": "absol",
}, },
) )
acc.AssertContainsTaggedFields(t, acc.AssertContainsTaggedFields(t,
@ -424,6 +475,7 @@ func TestDockerGatherInfo(t *testing.T) {
"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", "container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
}, },
map[string]string{ map[string]string{
"engine_host": "absol",
"container_name": "etcd2", "container_name": "etcd2",
"container_image": "quay.io/coreos/etcd", "container_image": "quay.io/coreos/etcd",
"container_version": "v2.2.2", "container_version": "v2.2.2",

View File

@ -8,9 +8,18 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference
```
[[inputs.elasticsearch]]
## specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the elastic search server(s)
http_timeout = "5s"
## set local to false when you want to read the indices stats from all nodes
## within the cluster
local = true
- cluster_health = true
## set cluster_health to true when you want to also obtain cluster level stats
cluster_health = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"

View File

@ -62,6 +62,9 @@ const sampleConfig = `
## specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the elastic search server(s)
http_timeout = "5s"
## set local to false when you want to read the indices stats from all nodes
## within the cluster
local = true
@ -82,6 +85,7 @@ const sampleConfig = `
type Elasticsearch struct {
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
@ -92,7 +96,9 @@ type Elasticsearch struct {
// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
- return &Elasticsearch{}
return &Elasticsearch{
HttpTimeout: internal.Duration{Duration: time.Second * 5},
}
}
// SampleConfig returns sample configuration for this plugin.
@ -150,12 +156,12 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
return nil, err
}
tr := &http.Transport{
- ResponseHeaderTimeout: time.Duration(3 * time.Second),
ResponseHeaderTimeout: e.HttpTimeout.Duration,
TLSClientConfig: tlsCfg,
}
client := &http.Client{
Transport: tr,
- Timeout: time.Duration(4 * time.Second),
Timeout: e.HttpTimeout.Duration,
}
return client, nil

View File

@ -5,6 +5,7 @@ import (
"fmt"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
@ -114,9 +115,36 @@ func (c CommandRunner) Run(
}
}
out = removeCarriageReturns(out)
return out.Bytes(), nil
}
// removeCarriageReturns removes all carriage returns from the input if the
// OS is Windows. It does not return any errors.
func removeCarriageReturns(b bytes.Buffer) bytes.Buffer {
if runtime.GOOS == "windows" {
var buf bytes.Buffer
for {
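// ReadBytes returns everything up to and including the 0x0D delimiter;
// when er is nil a delimiter was found, so trim that final byte.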
byt, er := b.ReadBytes(0x0D)
end := len(byt)
if nil == er {
end -= 1
}
if nil != byt {
buf.Write(byt[:end])
} else {
break
}
if nil != er {
break
}
}
b = buf
}
return b
}
func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) { func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
defer wg.Done() defer wg.Done()

View File

@ -1,7 +1,9 @@
package exec
import (
"bytes"
"fmt"
"runtime"
"testing"
"github.com/influxdata/telegraf"
@ -46,6 +48,29 @@ cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
type CarriageReturnTest struct {
input []byte
output []byte
}
var crTests = []CarriageReturnTest{
{[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0d, 0x0a, 0x4c, 0x69,
0x6e, 0x65, 0x20, 0x32, 0x0d, 0x0a, 0x4c, 0x69, 0x6e, 0x65,
0x20, 0x33},
[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}},
{[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33},
[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}},
{[]byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c,
0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20,
0x6c, 0x69, 0x6e, 0x65},
[]byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c,
0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20,
0x6c, 0x69, 0x6e, 0x65}},
}
type runnerMock struct {
out []byte
err error
@ -217,3 +242,21 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) {
}
acc.AssertContainsFields(t, "metric", fields)
}
func TestRemoveCarriageReturns(t *testing.T) {
if runtime.GOOS == "windows" {
// Test that all carriage returns are removed
for _, test := range crTests {
b := bytes.NewBuffer(test.input)
out := removeCarriageReturns(*b)
assert.True(t, bytes.Equal(test.output, out.Bytes()))
}
} else {
// Test that the buffer is returned unaltered
for _, test := range crTests {
b := bytes.NewBuffer(test.input)
out := removeCarriageReturns(*b)
assert.True(t, bytes.Equal(test.input, out.Bytes()))
}
}
}

View File

@ -0,0 +1,22 @@
# Hddtemp Input Plugin
This plugin reads data from the hddtemp daemon.
## Requirements
Hddtemp must be installed and its daemon running.
## Configuration
```
[[inputs.hddtemp]]
## By default, telegraf gathers temperature data from all disks detected by
## hddtemp.
##
## Only collect temperatures from the selected disks.
##
## A * as the device name will return the temperature values of all disks.
##
# address = "127.0.0.1:7634"
# devices = ["sda", "*"]
```
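For reference, hddtemp speaks a simple pipe-delimited protocol over TCP, returning one five-field group per disk (e.g. `|/dev/sda|ST380011A|46|C|`). Below is a minimal sketch of querying the daemon directly with the `go-hddtemp` helper bundled with this plugin; the address is an assumption and must match your daemon's listen address:

```go
package main

import (
	"fmt"

	gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
)

func main() {
	// Fetch dials the daemon, reads until EOF and parses the
	// pipe-delimited response into one entry per disk.
	disks, err := gohddtemp.Fetch("127.0.0.1:7634")
	if err != nil {
		panic(err)
	}
	for _, d := range disks {
		fmt.Printf("%s (%s): %d %s\n", d.DeviceName, d.Model, d.Temperature, d.Unit)
	}
}
```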

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mendelson Gusmão
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,61 @@
package hddtemp
import (
"bytes"
"io"
"net"
"strconv"
"strings"
)
type disk struct {
DeviceName string
Model string
Temperature int32
Unit string
Status string
}
func Fetch(address string) ([]disk, error) {
var (
err error
conn net.Conn
buffer bytes.Buffer
disks []disk
)
if conn, err = net.Dial("tcp", address); err != nil {
return nil, err
}
if _, err = io.Copy(&buffer, conn); err != nil {
return nil, err
}
fields := strings.Split(buffer.String(), "|")
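// The daemon answers with one group of five pipe-delimited fields per
// disk, e.g. "|/dev/sda|ST380011A|46|C|", so fields[0] is empty and each
// disk occupies fields[offset+1] through fields[offset+4].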
for index := 0; index < len(fields)/5; index++ {
status := ""
offset := index * 5
device := fields[offset+1]
device = device[strings.LastIndex(device, "/")+1:]
temperatureField := fields[offset+3]
temperature, err := strconv.ParseInt(temperatureField, 10, 32)
if err != nil {
temperature = 0
status = temperatureField
}
disks = append(disks, disk{
DeviceName: device,
Model: fields[offset+2],
Temperature: int32(temperature),
Unit: fields[offset+4],
Status: status,
})
}
return disks, nil
}

View File

@ -0,0 +1,116 @@
package hddtemp
import (
"net"
"reflect"
"testing"
)
func TestFetch(t *testing.T) {
l := serve(t, []byte("|/dev/sda|foobar|36|C|"))
defer l.Close()
disks, err := Fetch(l.Addr().String())
if err != nil {
t.Error("expecting err to be nil")
}
expected := []disk{
{
DeviceName: "sda",
Model: "foobar",
Temperature: 36,
Unit: "C",
},
}
if !reflect.DeepEqual(expected, disks) {
t.Error("disks' slice is different from expected")
}
}
func TestFetchWrongAddress(t *testing.T) {
_, err := Fetch("127.0.0.1:1")
if err == nil {
t.Error("expecting err to be non-nil")
}
}
func TestFetchStatus(t *testing.T) {
l := serve(t, []byte("|/dev/sda|foobar|SLP|C|"))
defer l.Close()
disks, err := Fetch(l.Addr().String())
if err != nil {
t.Error("expecting err to be nil")
}
expected := []disk{
{
DeviceName: "sda",
Model: "foobar",
Temperature: 0,
Unit: "C",
Status: "SLP",
},
}
if !reflect.DeepEqual(expected, disks) {
t.Error("disks' slice is different from expected")
}
}
func TestFetchTwoDisks(t *testing.T) {
l := serve(t, []byte("|/dev/hda|ST380011A|46|C||/dev/hdd|ST340016A|SLP|*|"))
defer l.Close()
disks, err := Fetch(l.Addr().String())
if err != nil {
t.Error("expecting err to be nil")
}
expected := []disk{
{
DeviceName: "hda",
Model: "ST380011A",
Temperature: 46,
Unit: "C",
},
{
DeviceName: "hdd",
Model: "ST340016A",
Temperature: 0,
Unit: "*",
Status: "SLP",
},
}
if !reflect.DeepEqual(expected, disks) {
t.Error("disks' slice is different from expected")
}
}
func serve(t *testing.T, data []byte) net.Listener {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
go func(t *testing.T) {
conn, err := l.Accept()
if err != nil {
t.Fatal(err)
}
conn.Write(data)
conn.Close()
}(t)
return l
}

View File

@ -0,0 +1,74 @@
// +build linux
package hddtemp
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
)
const defaultAddress = "127.0.0.1:7634"
type HDDTemp struct {
Address string
Devices []string
}
func (_ *HDDTemp) Description() string {
return "Monitor disks' temperatures using hddtemp"
}
var hddtempSampleConfig = `
## By default, telegraf gathers temperature data from all disks detected by
## hddtemp.
##
## Only collect temperatures from the selected disks.
##
## A * as the device name will return the temperature values of all disks.
##
# address = "127.0.0.1:7634"
# devices = ["sda", "*"]
`
func (_ *HDDTemp) SampleConfig() string {
return hddtempSampleConfig
}
func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
disks, err := gohddtemp.Fetch(h.Address)
if err != nil {
return err
}
for _, disk := range disks {
for _, chosenDevice := range h.Devices {
if chosenDevice == "*" || chosenDevice == disk.DeviceName {
tags := map[string]string{
"device": disk.DeviceName,
"model": disk.Model,
"unit": disk.Unit,
"status": disk.Status,
}
fields := map[string]interface{}{
disk.DeviceName: disk.Temperature,
}
acc.AddFields("hddtemp", fields, tags)
}
}
}
return nil
}
func init() {
inputs.Add("hddtemp", func() telegraf.Input {
return &HDDTemp{
Address: defaultAddress,
Devices: []string{"*"},
}
})
}

View File

@ -0,0 +1,3 @@
// +build !linux
package hddtemp

View File

@ -0,0 +1,24 @@
# HTTP listener service input plugin
The HTTP listener is a service input plugin that listens for messages sent via HTTP POST.
The plugin expects messages in InfluxDB line protocol ONLY; other Telegraf input data formats are not supported.
The intent of the plugin is to allow Telegraf to serve as a proxy/router for the /write endpoint of the InfluxDB HTTP API.
When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.
See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
Example:
```
curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
```
### Configuration:
This is a sample configuration for the plugin.
```toml
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
```
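Assuming a listener running with the configuration above, a successful write is answered with `204 No Content`. A minimal Go client sketch (host and port are assumptions):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Post a single line-protocol point to the listener configured above.
	body := strings.NewReader("cpu_load_short,host=server01,region=us-west value=0.64\n")
	resp, err := http.Post("http://localhost:8186/write", "text/plain", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // "204 No Content" on success
}
```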

View File

@ -0,0 +1,165 @@
package http_listener
import (
"bufio"
"bytes"
"fmt"
"log"
"net"
"net/http"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener"
"github.com/influxdata/telegraf/plugins/parsers"
)
type HttpListener struct {
ServiceAddress string
ReadTimeout internal.Duration
WriteTimeout internal.Duration
sync.Mutex
wg sync.WaitGroup
listener *stoppableListener.StoppableListener
parser parsers.Parser
acc telegraf.Accumulator
}
const sampleConfig = `
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
`
func (t *HttpListener) SampleConfig() string {
return sampleConfig
}
func (t *HttpListener) Description() string {
return "Influx HTTP write listener"
}
func (t *HttpListener) Gather(_ telegraf.Accumulator) error {
return nil
}
func (t *HttpListener) SetParser(parser parsers.Parser) {
t.parser = parser
}
// Start starts the http listener service.
func (t *HttpListener) Start(acc telegraf.Accumulator) error {
t.Lock()
defer t.Unlock()
t.acc = acc
var rawListener, err = net.Listen("tcp", t.ServiceAddress)
if err != nil {
return err
}
t.listener, err = stoppableListener.New(rawListener)
if err != nil {
return err
}
go t.httpListen()
log.Printf("I! Started HTTP listener service on %s\n", t.ServiceAddress)
return nil
}
// Stop cleans up all resources
func (t *HttpListener) Stop() {
t.Lock()
defer t.Unlock()
t.listener.Stop()
t.listener.Close()
t.wg.Wait()
log.Println("I! Stopped HTTP listener service on ", t.ServiceAddress)
}
// httpListen listens for HTTP requests.
func (t *HttpListener) httpListen() error {
if t.ReadTimeout.Duration < time.Second {
t.ReadTimeout.Duration = time.Second * 10
}
if t.WriteTimeout.Duration < time.Second {
t.WriteTimeout.Duration = time.Second * 10
}
var server = http.Server{
Handler: t,
ReadTimeout: t.ReadTimeout.Duration,
WriteTimeout: t.WriteTimeout.Duration,
}
return server.Serve(t.listener)
}
func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
t.wg.Add(1)
defer t.wg.Done()
switch req.URL.Path {
case "/write":
var http400msg bytes.Buffer
var partial string
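// partial flips to a "partial write: " prefix once at least one line has
// been accepted, so a 400 response distinguishes partial from total failure.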
scanner := bufio.NewScanner(req.Body)
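// raise the maximum accepted line length to 128 KB
// (bufio.Scanner's default limit is 64 KB)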
scanner.Buffer([]byte(""), 128*1024)
for scanner.Scan() {
metrics, err := t.parser.Parse(scanner.Bytes())
if err == nil {
for _, m := range metrics {
t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
partial = "partial write: "
} else {
http400msg.WriteString(err.Error() + " ")
}
}
if err := scanner.Err(); err != nil {
http.Error(res, "Internal server error: "+err.Error(), http.StatusInternalServerError)
} else if http400msg.Len() > 0 {
res.Header().Set("Content-Type", "application/json")
res.Header().Set("X-Influxdb-Version", "1.0")
res.WriteHeader(http.StatusBadRequest)
res.Write([]byte(fmt.Sprintf(`{"error":"%s%s"}`, partial, http400msg.String())))
} else {
res.WriteHeader(http.StatusNoContent)
}
case "/query":
// Deliver a dummy response to the query endpoint, as some InfluxDB
// clients test endpoint availability with a query
res.Header().Set("Content-Type", "application/json")
res.Header().Set("X-Influxdb-Version", "1.0")
res.WriteHeader(http.StatusOK)
res.Write([]byte("{\"results\":[]}"))
case "/ping":
// respond to ping requests
res.WriteHeader(http.StatusNoContent)
default:
// Don't know how to respond to calls to other endpoints
http.NotFound(res, req)
}
}
func init() {
inputs.Add("http_listener", func() telegraf.Input {
return &HttpListener{}
})
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,10 @@
Copyright (c) 2014, Eric Urban
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,62 @@
package stoppableListener
import (
"errors"
"net"
"time"
)
type StoppableListener struct {
*net.TCPListener //Wrapped listener
stop chan int //Channel used only to indicate listener should shutdown
}
func New(l net.Listener) (*StoppableListener, error) {
tcpL, ok := l.(*net.TCPListener)
if !ok {
return nil, errors.New("Cannot wrap listener")
}
retval := &StoppableListener{}
retval.TCPListener = tcpL
retval.stop = make(chan int)
return retval, nil
}
var StoppedError = errors.New("Listener stopped")
func (sl *StoppableListener) Accept() (net.Conn, error) {
for {
//Wait up to one second for a new connection
sl.SetDeadline(time.Now().Add(time.Second))
newConn, err := sl.TCPListener.Accept()
//Check for the channel being closed
select {
case <-sl.stop:
return nil, StoppedError
default:
//If the channel is still open, continue as normal
}
if err != nil {
netErr, ok := err.(net.Error)
//If this is a timeout, then continue to wait for
//new connections
if ok && netErr.Timeout() && netErr.Temporary() {
continue
}
}
return newConn, err
}
}
func (sl *StoppableListener) Stop() {
close(sl.stop)
}
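A minimal usage sketch, mirroring what `HttpListener` above does: wrap a TCP listener, serve HTTP from it, then call `Stop` so that `Accept` returns `StoppedError` within a second (the address is an assumption).

```go
package main

import (
	"log"
	"net"
	"net/http"
	"time"

	"github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener"
)

func main() {
	raw, err := net.Listen("tcp", "127.0.0.1:8186")
	if err != nil {
		log.Fatal(err)
	}
	sl, err := stoppableListener.New(raw)
	if err != nil {
		log.Fatal(err)
	}
	done := make(chan error, 1)
	go func() { done <- http.Serve(sl, http.NotFoundHandler()) }()
	time.Sleep(2 * time.Second) // stand-in for real work
	sl.Stop()                   // closes the stop channel; Accept returns StoppedError
	log.Println(<-done)         // Serve has returned; safe to clean up
	sl.Close()                  // closes the underlying TCP listener
}
```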

View File

@ -2,8 +2,7 @@
The httpjson plugin can collect data from remote URLs which respond with JSON. Then it flattens JSON and finds all numeric values, treating them as floats.
- For example, if you have a service called _mycollector_, which has HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON
- plugin like this:
For example, if you have a service called _mycollector_, which has an HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON plugin like this:
```
[[inputs.httpjson]]
@ -15,12 +14,17 @@ plugin like this:
# HTTP method to use (case-sensitive)
method = "GET"
# Set response_timeout (default 5 seconds)
response_timeout = "5s"
```
`name` is used as a prefix for the measurements.
`method` specifies the HTTP method to use for requests.
`response_timeout` specifies the timeout to wait for a response.
You can also specify which keys from the server response should be considered tags:
```
@ -94,8 +98,7 @@ httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stat
# Example 2, Multiple Services:
- There is also the option to collect JSON from multiple services, here is an
- example doing that.
There is also the option to collect JSON from multiple services; here is an example doing that.
```
[[inputs.httpjson]]
View File

@ -16,13 +16,15 @@ import (
"github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers"
) )
// HttpJson struct
type HttpJson struct { type HttpJson struct {
Name string Name string
Servers []string Servers []string
Method string Method string
TagKeys []string TagKeys []string
Parameters map[string]string ResponseTimeout internal.Duration
Headers map[string]string Parameters map[string]string
Headers map[string]string
// Path to CA file // Path to CA file
SSLCA string `toml:"ssl_ca"` SSLCA string `toml:"ssl_ca"`
@ -79,6 +81,8 @@ var sampleConfig = `
"http://localhost:9999/stats/", "http://localhost:9999/stats/",
"http://localhost:9998/stats/", "http://localhost:9998/stats/",
] ]
## Set response_timeout (default 5 seconds)
response_timeout = "5s"
## HTTP method to use: GET or POST (case-sensitive) ## HTTP method to use: GET or POST (case-sensitive)
method = "GET" method = "GET"
@ -126,12 +130,12 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
return err return err
} }
tr := &http.Transport{ tr := &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second), ResponseHeaderTimeout: h.ResponseTimeout.Duration,
TLSClientConfig: tlsCfg, TLSClientConfig: tlsCfg,
} }
client := &http.Client{ client := &http.Client{
Transport: tr, Transport: tr,
Timeout: time.Duration(4 * time.Second), Timeout: h.ResponseTimeout.Duration,
} }
h.client.SetHTTPClient(client) h.client.SetHTTPClient(client)
} }
@ -291,6 +295,9 @@ func init() {
inputs.Add("httpjson", func() telegraf.Input { inputs.Add("httpjson", func() telegraf.Input {
return &HttpJson{ return &HttpJson{
client: &RealHTTPClient{}, client: &RealHTTPClient{},
ResponseTimeout: internal.Duration{
Duration: 5 * time.Second,
},
} }
}) })
} }

View File

@ -10,11 +10,16 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
) )
type InfluxDB struct { type InfluxDB struct {
URLs []string `toml:"urls"` URLs []string `toml:"urls"`
Timeout internal.Duration
client *http.Client
} }
func (*InfluxDB) Description() string { func (*InfluxDB) Description() string {
@ -32,6 +37,9 @@ func (*InfluxDB) SampleConfig() string {
urls = [ urls = [
"http://localhost:8086/debug/vars" "http://localhost:8086/debug/vars"
] ]
## http request & header timeout
timeout = "5s"
` `
} }
@ -39,6 +47,16 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
if len(i.URLs) == 0 { if len(i.URLs) == 0 {
i.URLs = []string{"http://localhost:8086/debug/vars"} i.URLs = []string{"http://localhost:8086/debug/vars"}
} }
if i.client == nil {
i.client = &http.Client{
Transport: &http.Transport{
ResponseHeaderTimeout: i.Timeout.Duration,
},
Timeout: i.Timeout.Duration,
}
}
errorChannel := make(chan error, len(i.URLs)) errorChannel := make(chan error, len(i.URLs))
var wg sync.WaitGroup var wg sync.WaitGroup
@ -104,15 +122,6 @@ type memstats struct {
GCCPUFraction float64 `json:"GCCPUFraction"` GCCPUFraction float64 `json:"GCCPUFraction"`
} }
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
// Gathers data from a particular URL // Gathers data from a particular URL
// Parameters: // Parameters:
// acc : The telegraf Accumulator to use // acc : The telegraf Accumulator to use
@ -127,7 +136,7 @@ func (i *InfluxDB) gatherURL(
shardCounter := 0 shardCounter := 0
now := time.Now() now := time.Now()
resp, err := client.Get(url) resp, err := i.client.Get(url)
if err != nil { if err != nil {
return err return err
} }
@ -210,9 +219,13 @@ func (i *InfluxDB) gatherURL(
continue continue
} }
if p.Tags == nil {
p.Tags = make(map[string]string)
}
// If the object was a point, but was not fully initialized, // If the object was a point, but was not fully initialized,
// ignore it and move on. // ignore it and move on.
if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 { if p.Name == "" || p.Values == nil || len(p.Values) == 0 {
continue continue
} }
@ -244,6 +257,8 @@ func (i *InfluxDB) gatherURL(
func init() { func init() {
inputs.Add("influxdb", func() telegraf.Input { inputs.Add("influxdb", func() telegraf.Input {
return &InfluxDB{} return &InfluxDB{
Timeout: internal.Duration{Duration: time.Second * 5},
}
}) })
} }

View File

@ -116,6 +116,31 @@ func TestInfluxDB(t *testing.T) {
}, map[string]string{})
}
func TestInfluxDB2(t *testing.T) {
fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/endpoint" {
_, _ = w.Write([]byte(influxReturn2))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
defer fakeInfluxServer.Close()
plugin := &influxdb.InfluxDB{
URLs: []string{fakeInfluxServer.URL + "/endpoint"},
}
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.Len(t, acc.Metrics, 34)
acc.AssertContainsTaggedFields(t, "influxdb",
map[string]interface{}{
"n_shards": 1,
}, map[string]string{})
}
func TestErrorHandling(t *testing.T) {
badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/endpoint" {
@ -241,3 +266,49 @@ const influxReturn = `
"tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": "default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}},
"write": {"name": "write", "tags": {}, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}}
}`
// InfluxDB 1.0+ with tags: null instead of tags: {}.
const influxReturn2 = `
{
"cluster": {"name": "cluster", "tags": null, "values": {}},
"cmdline": ["influxd"],
"cq": {"name": "cq", "tags": null, "values": {}},
"database:_internal": {"name": "database", "tags": {"database": "_internal"}, "values": {"numMeasurements": 8, "numSeries": 12}},
"database:udp": {"name": "database", "tags": {"database": "udp"}, "values": {"numMeasurements": 14, "numSeries": 38}},
"hh:/Users/csparr/.influxdb/hh": {"name": "hh", "tags": {"path": "/Users/csparr/.influxdb/hh"}, "values": {}},
"httpd::8086": {"name": "httpd", "tags": {"bind": ":8086"}, "values": {"req": 7, "reqActive": 1, "reqDurationNs": 4488799}},
"measurement:cpu_idle.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_idle"}, "values": {"numSeries": 1}},
"measurement:cpu_usage.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_usage"}, "values": {"numSeries": 1}},
"measurement:database._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "database"}, "values": {"numSeries": 2}},
"measurement:database.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "database"}, "values": {"numSeries": 2}},
"measurement:httpd.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "httpd"}, "values": {"numSeries": 1}},
"measurement:measurement.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "measurement"}, "values": {"numSeries": 22}},
"measurement:mem.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "mem"}, "values": {"numSeries": 1}},
"measurement:net.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "net"}, "values": {"numSeries": 1}},
"measurement:runtime._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "runtime"}, "values": {"numSeries": 1}},
"measurement:runtime.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "runtime"}, "values": {"numSeries": 1}},
"measurement:shard._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "shard"}, "values": {"numSeries": 2}},
"measurement:shard.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "shard"}, "values": {"numSeries": 1}},
"measurement:subscriber._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "subscriber"}, "values": {"numSeries": 1}},
"measurement:subscriber.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "subscriber"}, "values": {"numSeries": 1}},
"measurement:swap_used.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "swap_used"}, "values": {"numSeries": 1}},
"measurement:tsm1_cache._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}},
"measurement:tsm1_cache.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}},
"measurement:tsm1_wal._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}},
"measurement:tsm1_wal.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}},
"measurement:udp._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "udp"}, "values": {"numSeries": 1}},
"measurement:write._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "write"}, "values": {"numSeries": 1}},
"measurement:write.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "write"}, "values": {"numSeries": 1}},
"memstats": {"Alloc":17034016,"TotalAlloc":201739016,"Sys":38537464,"Lookups":77,"Mallocs":570251,"Frees":381008,"HeapAlloc":17034016,"HeapSys":33849344,"HeapIdle":15802368,"HeapInuse":18046976,"HeapReleased":3473408,"HeapObjects":189243,"StackInuse":753664,"StackSys":753664,"MSpanInuse":97440,"MSpanSys":114688,"MCacheInuse":4800,"MCacheSys":16384,"BuckHashSys":1461583,"GCSys":1112064,"OtherSys":1229737,"NextGC":20843042,"LastGC":1460434886475114239,"PauseTotalNs":5132914,"PauseNs":[195052,117751,139370,156933,263089,165249,713747,103904,122015,294408,213753,170864,175845,114221,121563,122409,113098,162219,229257,126726,250774,254235,117206,293588,144279,124306,127053,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"PauseEnd":[1460433856394860455,1460433856398162739,1460433856405888337,1460433856411784017,1460433856417924684,1460433856428385687,1460433856443782908,1460433856456522851,1460433857392743223,1460433866484394564,1460433866494076235,1460433896472438632,1460433957839825106,1460433976473440328,1460434016473413006,1460434096471892794,1460434126470792929,1460434246480428250,1460434366554468369,1460434396471249528,1460434456471205885,1460434476479487292,1460434536471435965,1460434616469784776,1460434736482078216,1460434856544251733,1460434886475114239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"NumGC":27,"GCCPUFraction":4.287178819113636e-05,"EnableGC":true,"DebugGC":false,"BySize":[{"Size":0,"Mallocs":0,"Frees":0},{"Size":8,"Mallocs":1031,"Frees":955},{"Size":16,"Mallocs":308485,"Frees":142064},{"Size":32,"Mallocs":64937,"Frees":54321},{"Size":48,"Mallocs":33012,"Frees":29754},{"Size":64,"Mallocs":20299,"Frees":18173},{"Size":80,"Mallocs":8186,"Frees":7597},{"Size":96,"Mallocs":9806,"Frees":8982},{"Size":112,"Mallocs":5671,"Frees":4850},{"Size":128,"Mallocs":2972,"Frees":2684},{"Size":144,"Mallocs":4106,"Frees":3719},{"Size":160,"Mallocs":1324,"Frees":911},{"Size":176,"Mallocs":2574,"Frees":2391},{"Size":192,"Mallocs":4053,"Frees":3863},{"Size":208,"Mallocs":442,"Frees":307},{"Size":224,"Mallocs":336,"Frees":172},{"Size":240,"Mallocs":143,"Frees":125},{"Size":256,"Mallocs":542,"Frees":497},{"Size":288,"Mallocs":15971,"Frees":14761},{"Size":320,"Mallocs":245,"Frees":30},{"Size":352,"Mallocs":1299,"Frees":1065},{"Size":384,"Mallocs":138,"Frees":2},{"Size":416,"Mallocs":54,"Frees":47},{"Size":448,"Mallocs":75,"Frees":29},{"Size":480,"Mallocs":6,"Frees":4},{"Size":512,"Mallocs":452,"Frees":422},{"Size":576,"Mallocs":486,"Frees":395},{"Size":640,"Mallocs":81,"Frees":67},{"Size":704,"Mallocs":421,"Frees":397},{"Size":768,"Mallocs":469,"Frees":468},{"Size":896,"Mallocs":1049,"Frees":1010},{"Size":1024,"Mallocs":1078,"Frees":960},{"Size":1152,"Mallocs":750,"Frees":498},{"Size":1280
,"Mallocs":84,"Frees":72},{"Size":1408,"Mallocs":218,"Frees":187},{"Size":1536,"Mallocs":73,"Frees":48},{"Size":1664,"Mallocs":43,"Frees":30},{"Size":2048,"Mallocs":153,"Frees":57},{"Size":2304,"Mallocs":41,"Frees":30},{"Size":2560,"Mallocs":18,"Frees":15},{"Size":2816,"Mallocs":164,"Frees":157},{"Size":3072,"Mallocs":0,"Frees":0},{"Size":3328,"Mallocs":13,"Frees":6},{"Size":4096,"Mallocs":101,"Frees":82},{"Size":4608,"Mallocs":32,"Frees":26},{"Size":5376,"Mallocs":165,"Frees":151},{"Size":6144,"Mallocs":15,"Frees":9},{"Size":6400,"Mallocs":1,"Frees":1},{"Size":6656,"Mallocs":1,"Frees":0},{"Size":6912,"Mallocs":0,"Frees":0},{"Size":8192,"Mallocs":13,"Frees":13},{"Size":8448,"Mallocs":0,"Frees":0},{"Size":8704,"Mallocs":1,"Frees":1},{"Size":9472,"Mallocs":6,"Frees":4},{"Size":10496,"Mallocs":0,"Frees":0},{"Size":12288,"Mallocs":41,"Frees":35},{"Size":13568,"Mallocs":0,"Frees":0},{"Size":14080,"Mallocs":0,"Frees":0},{"Size":16384,"Mallocs":4,"Frees":4},{"Size":16640,"Mallocs":0,"Frees":0},{"Size":17664,"Mallocs":0,"Frees":0}]},
"queryExecutor": {"name": "queryExecutor", "tags": null, "values": {}},
"shard:/Users/csparr/.influxdb/data/_internal/monitor/2:2": {"name": "shard", "tags": {"database": "_internal", "engine": "tsm1", "id": "2", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}},
"shard:/Users/csparr/.influxdb/data/udp/default/1:1": {"name": "shard", "tags": {"database": "udp", "engine": "tsm1", "id": "1", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"fieldsCreate": 61, "seriesCreate": 33, "writePointsOk": 3613, "writeReq": 110}},
"subscriber": {"name": "subscriber", "tags": null, "values": {"pointsWritten": 3613}},
"tsm1_cache:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_cache", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103932, "cachedBytes": 0, "diskBytes": 0, "memBytes": 40480, "snapshotCount": 0}},
"tsm1_cache:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_cache", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103029, "cachedBytes": 0, "diskBytes": 0, "memBytes": 2359472, "snapshotCount": 0}},
"tsm1_filestore:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_filestore", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}},
"tsm1_filestore:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_filestore", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {}},
"tsm1_wal:/Users/csparr/.influxdb/wal/_internal/monitor/2": {"name": "tsm1_wal", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/wal/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"currentSegmentDiskBytes": 0, "oldSegmentsDiskBytes": 69532}},
"tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": "default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}},
"write": {"name": "write", "tags": null, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}}
}`

View File

@ -0,0 +1,74 @@
# Iptables Plugin
The iptables plugin gathers packet and byte counters for rules within a set of tables and chains from the Linux iptables firewall.
Rules are identified through their associated comment; rules without a comment are ignored.
The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You have several options for granting telegraf permission to run iptables:
* Run telegraf as root. This is strongly discouraged.
* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is the simplest and recommended option.
* Configure sudo to grant telegraf permission to run iptables. This is the most restrictive option, but requires sudo to be configured.
### Using systemd capabilities
You may run `systemctl edit telegraf.service` and add the following:
```
[Service]
CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN
AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN
```
Since telegraf will fork a process to run iptables, `AmbientCapabilities` is required to transmit the capabilities bounding set to the forked process.
### Using sudo
You may edit your sudo configuration with the following:
```sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/iptables -nvL *
```
### Configuration:
```toml
# use sudo to run iptables
use_sudo = false
# defines the table to monitor:
table = "filter"
# defines the chains to monitor:
chains = [ "INPUT" ]
```
### Measurements & Fields:
- iptables
- pkts (integer, count)
- bytes (integer, bytes)
### Tags:
- All measurements have the following tags:
- table
- chain
- ruleid
The `ruleid` is the comment associated with the rule.
### Example Output:
```
$ iptables -nvL INPUT
Chain INPUT (policy DROP 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
100 1024 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:22 /* ssh */
42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */
```
```
$ ./telegraf -config telegraf.conf -input-filter iptables -test
iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455
iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455
```

View File

@ -0,0 +1,128 @@
// +build linux
package iptables
import (
"errors"
"os/exec"
"regexp"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Iptables is a telegraf plugin to gather packets and bytes throughput from Linux's iptables packet filter.
type Iptables struct {
UseSudo bool
Table string
Chains []string
lister chainLister
}
// Description returns a short description of the plugin.
func (ipt *Iptables) Description() string {
return "Gather packets and bytes throughput from iptables"
}
// SampleConfig returns sample configuration options.
func (ipt *Iptables) SampleConfig() string {
return `
## iptables requires root access on most systems.
## Setting 'use_sudo' to true will make use of sudo to run iptables.
## Users must configure sudo to allow the telegraf user to run iptables with no password.
## iptables can be restricted to the list command only: "iptables -nvL".
use_sudo = false
## defines the table to monitor:
table = "filter"
## defines the chains to monitor:
chains = [ "INPUT" ]
`
}
// Gather gathers iptables packets and bytes throughput from the configured tables and chains.
func (ipt *Iptables) Gather(acc telegraf.Accumulator) error {
if ipt.Table == "" || len(ipt.Chains) == 0 {
return nil
}
// best effort : we continue through the chains even if an error is encountered,
// but we keep track of the last error.
var err error
for _, chain := range ipt.Chains {
data, e := ipt.lister(ipt.Table, chain)
if e != nil {
err = e
continue
}
e = ipt.parseAndGather(data, acc)
if e != nil {
err = e
continue
}
}
return err
}
func (ipt *Iptables) chainList(table, chain string) (string, error) {
iptablePath, err := exec.LookPath("iptables")
if err != nil {
return "", err
}
var args []string
name := iptablePath
if ipt.UseSudo {
name = "sudo"
args = append(args, iptablePath)
}
args = append(args, "-nvL", chain, "-t", table, "-x")
c := exec.Command(name, args...)
out, err := c.Output()
return string(out), err
}
const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`)
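// Example: given the rule line
//   "  57  4520 ACCEPT  tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:22 /* ssh */"
// valuesRe captures pkts ("57") and bytes ("4520") in submatches 1 and 2,
// and the rule comment ("ssh") in submatch 4, which becomes the ruleid tag.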
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
lines := strings.Split(data, "\n")
if len(lines) < 3 {
return nil
}
mchain := chainNameRe.FindStringSubmatch(lines[0])
if mchain == nil {
return errParse
}
if !fieldsHeaderRe.MatchString(lines[1]) {
return errParse
}
for _, line := range lines[2:] {
mv := valuesRe.FindAllStringSubmatch(line, -1)
// best effort: if the line does not match or the rule has no comment, skip it
if len(mv) == 0 || len(mv[0]) != 5 || mv[0][4] == "" {
continue
}
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": mv[0][4]}
fields := make(map[string]interface{})
// since a parse error is already caught by the regexp,
// we never enter the error case here => no error check (but still need a test to cover the case)
fields["pkts"], _ = strconv.ParseUint(mv[0][1], 10, 64)
fields["bytes"], _ = strconv.ParseUint(mv[0][2], 10, 64)
acc.AddFields(measurement, fields, tags)
}
return nil
}
type chainLister func(table, chain string) (string, error)
func init() {
inputs.Add("iptables", func() telegraf.Input {
ipt := new(Iptables)
ipt.lister = ipt.chainList
return ipt
})
}

View File

@ -0,0 +1,3 @@
// +build !linux
package iptables

View File

@ -0,0 +1,206 @@
// +build linux
package iptables
import (
"errors"
"reflect"
"testing"
"github.com/influxdata/telegraf/testutil"
)
func TestIptables_Gather(t *testing.T) {
tests := []struct {
table string
chains []string
values []string
tags []map[string]string
fields [][]map[string]interface{}
err error
}{
{ // 1 - no configured table => no results
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 2 - no configured chains => no results
table: "filter",
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 3 - pkts and bytes are gathered as integers
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
`},
tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
},
},
{ // 4 - missing fields header => no results
table: "filter",
chains: []string{"INPUT"},
values: []string{`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)`},
},
{ // 5 - invalid chain header => error
table: "filter",
chains: []string{"INPUT"},
values: []string{
`INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
err: errParse,
},
{ // 6 - invalid fields header => error
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
err: errParse,
},
{ // 7 - invalid integer value => best effort, no error
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 8 - Multiple rows, multiple chains => no error
table: "filter",
chains: []string{"INPUT", "FORWARD"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
200 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foo */
`,
`Chain FORWARD (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
300 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* bar */
400 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
500 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
`,
},
tags: []map[string]string{
map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"},
map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"},
map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}},
{map[string]interface{}{"pkts": uint64(300), "bytes": uint64(4520)}},
{map[string]interface{}{"pkts": uint64(500), "bytes": uint64(4520)}},
},
},
{ // 9 - comments are used as ruleid if any
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22 /* foobar */
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80
`},
tags: []map[string]string{
map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
},
},
}
for i, tt := range tests {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
acc := new(testutil.Accumulator)
err := ipt.Gather(acc)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
}
continue
}
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
}
continue
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
}
continue
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
}
}
}
}
func TestIptables_Gather_listerError(t *testing.T) {
errFoo := errors.New("error foobar")
ipt := &Iptables{
Table: "nat",
Chains: []string{"foo", "bar"},
lister: func(table, chain string) (string, error) {
return "", errFoo
},
}
acc := new(testutil.Accumulator)
err := ipt.Gather(acc)
if !reflect.DeepEqual(err, errFoo) {
t.Errorf("Expected error %#v got\n%#v\n", errFoo, err)
}
}

View File

@ -52,6 +52,7 @@ type Jolokia struct {
const sampleConfig = `
## This is the context root used to compose the jolokia url
## NOTE that your jolokia security policy must allow for POST requests.
context = "/jolokia"
## This specifies the mode used
@ -104,7 +105,6 @@ func (j *Jolokia) Description() string {
}
func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
resp, err := j.jClient.MakeRequest(req)
if err != nil {
return nil, err

View File

@ -90,7 +90,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
case "newest": case "newest":
config.Offsets.Initial = sarama.OffsetNewest config.Offsets.Initial = sarama.OffsetNewest
default: default:
log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
k.Offset) k.Offset)
config.Offsets.Initial = sarama.OffsetOldest config.Offsets.Initial = sarama.OffsetOldest
} }
@ -115,7 +115,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
// Start the kafka message reader // Start the kafka message reader
go k.receiver() go k.receiver()
log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n", log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
k.ZookeeperPeers, k.Topics) k.ZookeeperPeers, k.Topics)
return nil return nil
} }
@ -128,11 +128,13 @@ func (k *Kafka) receiver() {
case <-k.done: case <-k.done:
return return
case err := <-k.errs: case err := <-k.errs:
log.Printf("Kafka Consumer Error: %s\n", err.Error()) if err != nil {
log.Printf("E! Kafka Consumer Error: %s\n", err)
}
case msg := <-k.in: case msg := <-k.in:
metrics, err := k.parser.Parse(msg.Value) metrics, err := k.parser.Parse(msg.Value)
if err != nil { if err != nil {
log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s", log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s",
string(msg.Value), err.Error()) string(msg.Value), err.Error())
} }
@ -156,7 +158,7 @@ func (k *Kafka) Stop() {
defer k.Unlock() defer k.Unlock()
close(k.done) close(k.done)
if err := k.Consumer.Close(); err != nil { if err := k.Consumer.Close(); err != nil {
log.Printf("Error closing kafka consumer: %s\n", err.Error()) log.Printf("E! Error closing kafka consumer: %s\n", err.Error())
} }
} }

View File

@ -43,7 +43,7 @@ func TestRunParser(t *testing.T) {
k.parser, _ = parsers.NewInfluxParser()
go k.receiver()
in <- saramaMsg(testMsg)
- time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
assert.Equal(t, acc.NFields(), 1)
}
@ -58,7 +58,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
k.parser, _ = parsers.NewInfluxParser()
go k.receiver()
in <- saramaMsg(invalidMsg)
- time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
assert.Equal(t, acc.NFields(), 0)
}
@ -73,7 +73,7 @@ func TestRunParserAndGather(t *testing.T) {
k.parser, _ = parsers.NewInfluxParser()
go k.receiver()
in <- saramaMsg(testMsg)
- time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
k.Gather(&acc)
@ -92,7 +92,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
go k.receiver()
in <- saramaMsg(testMsgGraphite)
- time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
k.Gather(&acc)
@ -111,7 +111,7 @@ func TestRunParserAndGatherJSON(t *testing.T) {
k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil)
go k.receiver()
in <- saramaMsg(testMsgJSON)
- time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
k.Gather(&acc)

View File

@ -14,17 +14,22 @@ regex patterns.
## /var/log/**.log -> recursively find all .log files in /var/log
## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
## /var/log/apache.log -> only tail the apache log file
- files = ["/var/log/influxdb/influxdb.log"]
files = ["/var/log/apache/access.log"]
## Read file from beginning.
from_beginning = false
## Parse logstash-style "grok" patterns:
- ## Telegraf builtin parsing patterns: https://goo.gl/dkay10
## Telegraf built-in parsing patterns: https://goo.gl/dkay10
[inputs.logparser.grok]
## This is a list of patterns to check the given log file(s) for.
## Note that adding patterns here increases processing time. The most
- ## efficient configuration is to have one file & pattern per logparser.
## efficient configuration is to have one pattern per logparser.
- patterns = ["%{INFLUXDB_HTTPD_LOG}"]
## Other common built-in patterns are:
## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
patterns = ["%{COMBINED_LOG_FORMAT}"]
## Name of the outputted measurement name.
measurement = "apache_access_log"
## Full path(s) to custom pattern files.
custom_pattern_files = []
## Custom patterns can also be defined here. Put one pattern per line.
@ -32,8 +37,6 @@ regex patterns.
'''
```
- > **Note:** The InfluxDB log pattern in the default configuration only works for Influx versions 1.0.0-beta1 or higher.
## Grok Parser
The grok parser uses a slightly modified version of logstash "grok" patterns,
@ -69,6 +72,7 @@ Timestamp modifiers can be used to convert captures to the timestamp of the
- tag (converts the field into a tag)
- drop (drops the field completely)
- Timestamp modifiers:
- ts (This will auto-learn the timestamp format)
- ts-ansic ("Mon Jan _2 15:04:05 2006")
- ts-unix ("Mon Jan _2 15:04:05 MST 2006")
- ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")

View File

@ -15,7 +15,7 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
) )
var timeFormats = map[string]string{ var timeLayouts = map[string]string{
"ts-ansic": "Mon Jan _2 15:04:05 2006", "ts-ansic": "Mon Jan _2 15:04:05 2006",
"ts-unix": "Mon Jan _2 15:04:05 MST 2006", "ts-unix": "Mon Jan _2 15:04:05 MST 2006",
"ts-ruby": "Mon Jan 02 15:04:05 -0700 2006", "ts-ruby": "Mon Jan 02 15:04:05 -0700 2006",
@ -27,27 +27,33 @@ var timeFormats = map[string]string{
"ts-rfc3339": "2006-01-02T15:04:05Z07:00", "ts-rfc3339": "2006-01-02T15:04:05Z07:00",
"ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00", "ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00",
"ts-httpd": "02/Jan/2006:15:04:05 -0700", "ts-httpd": "02/Jan/2006:15:04:05 -0700",
"ts-epoch": "EPOCH", // These three are not exactly "layouts", but they are special cases that
"ts-epochnano": "EPOCH_NANO", // will get handled in the ParseLine function.
"ts-epoch": "EPOCH",
"ts-epochnano": "EPOCH_NANO",
"ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts.
} }
const ( const (
INT = "int" INT = "int"
TAG = "tag" TAG = "tag"
FLOAT = "float" FLOAT = "float"
STRING = "string" STRING = "string"
DURATION = "duration" DURATION = "duration"
DROP = "drop" DROP = "drop"
EPOCH = "EPOCH"
EPOCH_NANO = "EPOCH_NANO"
GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP"
) )
var ( var (
// matches named captures that contain a type. // matches named captures that contain a modifier.
// ie, // ie,
// %{NUMBER:bytes:int} // %{NUMBER:bytes:int}
// %{IPORHOST:clientip:tag} // %{IPORHOST:clientip:tag}
// %{HTTPDATE:ts1:ts-http} // %{HTTPDATE:ts1:ts-http}
// %{HTTPDATE:ts2:ts-"02 Jan 06 15:04"} // %{HTTPDATE:ts2:ts-"02 Jan 06 15:04"}
typedRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`) modifierRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`)
// matches a plain pattern name. ie, %{NUMBER} // matches a plain pattern name. ie, %{NUMBER}
patternOnlyRe = regexp.MustCompile(`%{(\w+)}`) patternOnlyRe = regexp.MustCompile(`%{(\w+)}`)
) )
@ -87,6 +93,12 @@ type Parser struct {
// "RESPONSE_CODE": "%{NUMBER:rc:tag}" // "RESPONSE_CODE": "%{NUMBER:rc:tag}"
// } // }
patterns map[string]string patterns map[string]string
// foundTsLayouts is a slice of timestamp patterns that have been found
// in the log lines. This slice gets updated if the user uses the generic
// 'ts' modifier for timestamps. This slice is checked first for matches,
// so that previously-matched layouts get priority over all other timestamp
// layouts.
foundTsLayouts []string
g *grok.Grok g *grok.Grok
tsModder *tsModder tsModder *tsModder
@ -140,6 +152,7 @@ func (p *Parser) Compile() error {
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
var err error var err error
// values are the parsed fields from the log line
var values map[string]string var values map[string]string
// the matching pattern string // the matching pattern string
var patternName string var patternName string
@ -165,6 +178,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
continue continue
} }
// t is the modifier of the field
var t string var t string
// check if pattern has some modifiers // check if pattern has some modifiers
if types, ok := p.typeMap[patternName]; ok { if types, ok := p.typeMap[patternName]; ok {
@ -188,21 +202,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case INT: case INT:
iv, err := strconv.ParseInt(v, 10, 64) iv, err := strconv.ParseInt(v, 10, 64)
if err != nil { if err != nil {
log.Printf("ERROR parsing %s to int: %s", v, err) log.Printf("E! Error parsing %s to int: %s", v, err)
} else { } else {
fields[k] = iv fields[k] = iv
} }
case FLOAT: case FLOAT:
fv, err := strconv.ParseFloat(v, 64) fv, err := strconv.ParseFloat(v, 64)
if err != nil { if err != nil {
log.Printf("ERROR parsing %s to float: %s", v, err) log.Printf("E! Error parsing %s to float: %s", v, err)
} else { } else {
fields[k] = fv fields[k] = fv
} }
case DURATION: case DURATION:
d, err := time.ParseDuration(v) d, err := time.ParseDuration(v)
if err != nil { if err != nil {
log.Printf("ERROR parsing %s to duration: %s", v, err) log.Printf("E! Error parsing %s to duration: %s", v, err)
} else { } else {
fields[k] = int64(d) fields[k] = int64(d)
} }
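(The E!, D!, and I! prefixes introduced throughout this change are Telegraf 1.1's log-level markers, error, debug, and info respectively, consumed by its logging setup.)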
@ -210,20 +224,50 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
tags[k] = v tags[k] = v
case STRING: case STRING:
fields[k] = strings.Trim(v, `"`) fields[k] = strings.Trim(v, `"`)
case "EPOCH": case EPOCH:
iv, err := strconv.ParseInt(v, 10, 64) iv, err := strconv.ParseInt(v, 10, 64)
if err != nil { if err != nil {
log.Printf("ERROR parsing %s to int: %s", v, err) log.Printf("E! Error parsing %s to int: %s", v, err)
} else { } else {
timestamp = time.Unix(iv, 0) timestamp = time.Unix(iv, 0)
} }
case "EPOCH_NANO": case EPOCH_NANO:
iv, err := strconv.ParseInt(v, 10, 64) iv, err := strconv.ParseInt(v, 10, 64)
if err != nil { if err != nil {
log.Printf("ERROR parsing %s to int: %s", v, err) log.Printf("E! Error parsing %s to int: %s", v, err)
} else { } else {
timestamp = time.Unix(0, iv) timestamp = time.Unix(0, iv)
} }
case GENERIC_TIMESTAMP:
var foundTs bool
// first try timestamp layouts that we've already found
for _, layout := range p.foundTsLayouts {
ts, err := time.Parse(layout, v)
if err == nil {
timestamp = ts
foundTs = true
break
}
}
// if we haven't found a timestamp layout yet, try all timestamp
// layouts.
if !foundTs {
for _, layout := range timeLayouts {
ts, err := time.Parse(layout, v)
if err == nil {
timestamp = ts
foundTs = true
p.foundTsLayouts = append(p.foundTsLayouts, layout)
break
}
}
}
// if we still haven't found a timestamp layout, log it and we will
// just use time.Now()
if !foundTs {
log.Printf("E! Error parsing timestamp [%s], could not find any "+
"suitable time layouts.", v)
}
case DROP: case DROP:
// goodbye! // goodbye!
default: default:
@ -231,7 +275,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
if err == nil { if err == nil {
timestamp = ts timestamp = ts
} else { } else {
log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err) log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
} }
} }
} }
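The generic 'ts' handling above, where the first layout that matches is cached so later lines skip the full scan, can be illustrated standalone (a minimal sketch with two hypothetical layouts):

```go
package main

import (
	"fmt"
	"time"
)

// tryLayouts mirrors the GENERIC_TIMESTAMP logic: previously-matched
// layouts are tried first; on a miss, all known layouts are scanned and
// the first one that parses is remembered for subsequent lines.
func tryLayouts(v string, all []string, found *[]string) (time.Time, bool) {
	for _, layout := range *found {
		if t, err := time.Parse(layout, v); err == nil {
			return t, true
		}
	}
	for _, layout := range all {
		if t, err := time.Parse(layout, v); err == nil {
			*found = append(*found, layout)
			return t, true
		}
	}
	return time.Time{}, false
}

func main() {
	all := []string{time.RFC3339, "02/Jan/2006:15:04:05 -0700"}
	var found []string
	t1, _ := tryLayouts("09/Jun/2016:03:37:03 +0000", all, &found)
	t2, _ := tryLayouts("09/Jun/2016:03:37:04 +0000", all, &found) // cache hit
	fmt.Println(t1, t2, found)
}
```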
@ -267,7 +311,7 @@ func (p *Parser) compileCustomPatterns() error {
// check if pattern contains modifiers. Parse them out if it does. // check if pattern contains modifiers. Parse them out if it does.
for name, pattern := range p.patterns { for name, pattern := range p.patterns {
if typedRe.MatchString(pattern) { if modifierRe.MatchString(pattern) {
// this pattern has modifiers, so parse out the modifiers // this pattern has modifiers, so parse out the modifiers
pattern, err = p.parseTypedCaptures(name, pattern) pattern, err = p.parseTypedCaptures(name, pattern)
if err != nil { if err != nil {
@ -280,13 +324,13 @@ func (p *Parser) compileCustomPatterns() error {
return p.g.AddPatternsFromMap(p.patterns) return p.g.AddPatternsFromMap(p.patterns)
} }
// parseTypedCaptures parses the capture types, and then deletes the type from // parseTypedCaptures parses the capture modifiers, and then deletes the
// the line so that it is a valid "grok" pattern again. // modifier from the line so that it is a valid "grok" pattern again.
// ie, // ie,
// %{NUMBER:bytes:int} => %{NUMBER:bytes} (stores %{NUMBER}->bytes->int) // %{NUMBER:bytes:int} => %{NUMBER:bytes} (stores %{NUMBER}->bytes->int)
// %{IPORHOST:clientip:tag} => %{IPORHOST:clientip} (stores %{IPORHOST}->clientip->tag) // %{IPORHOST:clientip:tag} => %{IPORHOST:clientip} (stores %{IPORHOST}->clientip->tag)
func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) { func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) {
matches := typedRe.FindAllStringSubmatch(pattern, -1) matches := modifierRe.FindAllStringSubmatch(pattern, -1)
// grab the name of the capture pattern // grab the name of the capture pattern
patternName := "%{" + name + "}" patternName := "%{" + name + "}"
@ -298,16 +342,18 @@ func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) {
hasTimestamp := false hasTimestamp := false
for _, match := range matches { for _, match := range matches {
// regex capture 1 is the name of the capture // regex capture 1 is the name of the capture
// regex capture 2 is the type of the capture // regex capture 2 is the modifier of the capture
if strings.HasPrefix(match[2], "ts-") { if strings.HasPrefix(match[2], "ts") {
if hasTimestamp { if hasTimestamp {
return pattern, fmt.Errorf("logparser pattern compile error: "+ return pattern, fmt.Errorf("logparser pattern compile error: "+
"Each pattern is allowed only one named "+ "Each pattern is allowed only one named "+
"timestamp data type. pattern: %s", pattern) "timestamp data type. pattern: %s", pattern)
} }
if f, ok := timeFormats[match[2]]; ok { if layout, ok := timeLayouts[match[2]]; ok {
p.tsMap[patternName][match[1]] = f // built-in time format
p.tsMap[patternName][match[1]] = layout
} else { } else {
// custom time format
p.tsMap[patternName][match[1]] = strings.TrimSuffix(strings.TrimPrefix(match[2], `ts-"`), `"`) p.tsMap[patternName][match[1]] = strings.TrimSuffix(strings.TrimPrefix(match[2], `ts-"`), `"`)
} }
hasTimestamp = true hasTimestamp = true

View File

@ -38,32 +38,6 @@ func Benchmark_ParseLine_CombinedLogFormat(b *testing.B) {
benchM = m benchM = m
} }
func Benchmark_ParseLine_InfluxLog(b *testing.B) {
p := &Parser{
Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
}
p.Compile()
var m telegraf.Metric
for n := 0; n < b.N; n++ {
m, _ = p.ParseLine(`[httpd] 192.168.1.1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`)
}
benchM = m
}
func Benchmark_ParseLine_InfluxLog_NoMatch(b *testing.B) {
p := &Parser{
Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
}
p.Compile()
var m telegraf.Metric
for n := 0; n < b.N; n++ {
m, _ = p.ParseLine(`[retention] 2016/06/14 14:38:24 retention policy shard deletion check commencing`)
}
benchM = m
}
func Benchmark_ParseLine_CustomPattern(b *testing.B) { func Benchmark_ParseLine_CustomPattern(b *testing.B) {
p := &Parser{ p := &Parser{
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
@ -108,9 +82,9 @@ func TestMeasurementName(t *testing.T) {
assert.Equal(t, "my_web_log", m.Name()) assert.Equal(t, "my_web_log", m.Name())
} }
func TestBuiltinInfluxdbHttpd(t *testing.T) { func TestCustomInfluxdbHttpd(t *testing.T) {
p := &Parser{ p := &Parser{
Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"}, Patterns: []string{`\[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}`},
} }
assert.NoError(t, p.Compile()) assert.NoError(t, p.Compile())
@ -333,6 +307,55 @@ func TestParseEpochErrors(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
func TestParseGenericTimestamp(t *testing.T) {
p := &Parser{
Patterns: []string{`\[%{HTTPDATE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`},
}
assert.NoError(t, p.Compile())
metricA, err := p.ParseLine(`[09/Jun/2016:03:37:03 +0000] response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
assert.NoError(t, err)
assert.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
assert.Equal(t, map[string]string{}, metricA.Tags())
assert.Equal(t, time.Unix(1465443423, 0).UTC(), metricA.Time().UTC())
metricB, err := p.ParseLine(`[09/Jun/2016:03:37:04 +0000] response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricB)
assert.NoError(t, err)
assert.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricB.Fields())
assert.Equal(t, map[string]string{}, metricB.Tags())
assert.Equal(t, time.Unix(1465443424, 0).UTC(), metricB.Time().UTC())
}
func TestParseGenericTimestampNotFound(t *testing.T) {
p := &Parser{
Patterns: []string{`\[%{NOTSPACE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`},
}
assert.NoError(t, p.Compile())
metricA, err := p.ParseLine(`[foobar] response_time=20821 mymetric=10890.645`)
require.NotNil(t, metricA)
assert.NoError(t, err)
assert.Equal(t,
map[string]interface{}{
"response_time": int64(20821),
"metric": float64(10890.645),
},
metricA.Fields())
assert.Equal(t, map[string]string{}, metricA.Tags())
}
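Because a 'ts' capture that matches no known layout falls back to time.Now() after logging the error shown earlier, this test asserts only the parsed fields and tags, not the metric timestamp.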
func TestCompileFileAndParse(t *testing.T) { func TestCompileFileAndParse(t *testing.T) {
p := &Parser{ p := &Parser{
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},

View File

@ -55,15 +55,13 @@ EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE}
# Wider-ranging username matching vs. logstash built-in %{USER} # Wider-ranging username matching vs. logstash built-in %{USER}
NGUSERNAME [a-zA-Z\.\@\-\+_%]+ NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME} NGUSER %{NGUSERNAME}
# Wider-ranging client IP matching
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
## ##
## COMMON LOG PATTERNS ## COMMON LOG PATTERNS
## ##
# InfluxDB log patterns
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}
# apache & nginx logs, this is also known as the "common log format" # apache & nginx logs, this is also known as the "common log format"
# see https://en.wikipedia.org/wiki/Common_Log_Format # see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)

View File

@ -51,15 +51,13 @@ EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE}
# Wider-ranging username matching vs. logstash built-in %{USER} # Wider-ranging username matching vs. logstash built-in %{USER}
NGUSERNAME [a-zA-Z\.\@\-\+_%]+ NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME} NGUSER %{NGUSERNAME}
# Wider-ranging client IP matching
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
## ##
## COMMON LOG PATTERNS ## COMMON LOG PATTERNS
## ##
# InfluxDB log patterns
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}
# apache & nginx logs, this is also known as the "common log format" # apache & nginx logs, this is also known as the "common log format"
# see https://en.wikipedia.org/wiki/Common_Log_Format # see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)

View File

@ -45,7 +45,7 @@ const sampleConfig = `
## /var/log/**.log -> recursively find all .log files in /var/log ## /var/log/**.log -> recursively find all .log files in /var/log
## /var/log/*/*.log -> find all .log files with a parent dir in /var/log ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
## /var/log/apache.log -> only tail the apache log file ## /var/log/apache.log -> only tail the apache log file
files = ["/var/log/influxdb/influxdb.log"] files = ["/var/log/apache/access.log"]
## Read file from beginning. ## Read file from beginning.
from_beginning = false from_beginning = false
@ -58,9 +58,9 @@ const sampleConfig = `
## Other common built-in patterns are: ## Other common built-in patterns are:
## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
patterns = ["%{INFLUXDB_HTTPD_LOG}"] patterns = ["%{COMBINED_LOG_FORMAT}"]
## Name of the output measurement. ## Name of the output measurement.
measurement = "influxdb_log" measurement = "apache_access_log"
## Full path(s) to custom pattern files. ## Full path(s) to custom pattern files.
custom_pattern_files = [] custom_pattern_files = []
## Custom patterns can also be defined here. Put one pattern per line. ## Custom patterns can also be defined here. Put one pattern per line.
@ -134,7 +134,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
for _, filepath := range l.Files { for _, filepath := range l.Files {
g, err := globpath.Compile(filepath) g, err := globpath.Compile(filepath)
if err != nil { if err != nil {
log.Printf("ERROR Glob %s failed to compile, %s", filepath, err) log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
continue continue
} }
files := g.Match() files := g.Match()
@ -167,7 +167,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
var line *tail.Line var line *tail.Line
for line = range tailer.Lines { for line = range tailer.Lines {
if line.Err != nil { if line.Err != nil {
log.Printf("ERROR tailing file %s, Error: %s\n", log.Printf("E! Error tailing file %s, Error: %s\n",
tailer.Filename, line.Err) tailer.Filename, line.Err)
continue continue
} }
@ -216,7 +216,7 @@ func (l *LogParserPlugin) Stop() {
for _, t := range l.tailers { for _, t := range l.tailers {
err := t.Stop() err := t.Stop()
if err != nil { if err != nil {
log.Printf("ERROR stopping tail on file %s\n", t.Filename) log.Printf("E! Error stopping tail on file %s\n", t.Filename)
} }
t.Cleanup() t.Cleanup()
} }

View File

@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
req.URL.RawQuery = params.String() req.URL.RawQuery = params.String()
req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
if api.Debug { if api.Debug {
log.Printf("Request URL: %s", req.URL.String()) log.Printf("D! Request URL: %s", req.URL.String())
} }
resp, err := client.Do(req) resp, err := client.Do(req)
@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
return nil, err return nil, err
} }
if api.Debug { if api.Debug {
log.Printf("Response Body:%s", string(body)) log.Printf("D! Response Body:%s", string(body))
} }
if err = chimpErrorCheck(body); err != nil { if err = chimpErrorCheck(body); err != nil {

View File

@ -1,6 +1,6 @@
# Mesos Input Plugin # Mesos Input Plugin
This input plugin gathers metrics from Mesos (*currently only Mesos masters*). This input plugin gathers metrics from Mesos.
For more information, please check the [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page. For more information, please check the [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page.
### Configuration: ### Configuration:
@ -8,14 +8,38 @@ For more information, please check the [Mesos Observability Metrics](http://meso
```toml ```toml
# Telegraf plugin for gathering metrics from N Mesos masters # Telegraf plugin for gathering metrics from N Mesos masters
[[inputs.mesos]] [[inputs.mesos]]
# Timeout, in ms. ## Timeout, in ms.
timeout = 100 timeout = 100
# A list of Mesos masters, default value is localhost:5050. ## A list of Mesos masters.
masters = ["localhost:5050"] masters = ["localhost:5050"]
# Metrics groups to be collected, by default, all enabled. ## Master metrics groups to be collected, by default, all enabled.
master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] master_collections = [
"resources",
"master",
"system",
"agents",
"frameworks",
"tasks",
"messages",
"evqueue",
"registrar",
]
## A list of Mesos slaves, default is []
# slaves = []
## Slave metrics groups to be collected, by default, all enabled.
# slave_collections = [
# "resources",
# "agent",
# "system",
# "executors",
# "tasks",
# "messages",
# ]
``` ```
By default this plugin is not configured to gather metrics from Mesos. Since a Mesos cluster can be deployed in numerous ways, it does not provide default
values. Users need to specify the master/slave nodes this plugin will gather metrics from.
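If a listed address omits the port, the plugin appends the role's default (:5050 for masters, :5051 for slaves); see the address handling in the plugin code later in this diff.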
### Measurements & Fields: ### Measurements & Fields:
Mesos master metric groups Mesos master metric groups
@ -33,6 +57,12 @@ Mesos master metric groups
- master/disk_revocable_percent - master/disk_revocable_percent
- master/disk_revocable_total - master/disk_revocable_total
- master/disk_revocable_used - master/disk_revocable_used
- master/gpus_percent
- master/gpus_used
- master/gpus_total
- master/gpus_revocable_percent
- master/gpus_revocable_total
- master/gpus_revocable_used
- master/mem_percent - master/mem_percent
- master/mem_used - master/mem_used
- master/mem_total - master/mem_total
@ -136,17 +166,87 @@ Mesos master metric groups
- registrar/state_store_ms/p999 - registrar/state_store_ms/p999
- registrar/state_store_ms/p9999 - registrar/state_store_ms/p9999
Mesos slave metric groups
- resources
- slave/cpus_percent
- slave/cpus_used
- slave/cpus_total
- slave/cpus_revocable_percent
- slave/cpus_revocable_total
- slave/cpus_revocable_used
- slave/disk_percent
- slave/disk_used
- slave/disk_total
- slave/disk_revocable_percent
- slave/disk_revocable_total
- slave/disk_revocable_used
- slave/gpus_percent
- slave/gpus_used
- slave/gpus_total
- slave/gpus_revocable_percent
- slave/gpus_revocable_total
- slave/gpus_revocable_used
- slave/mem_percent
- slave/mem_used
- slave/mem_total
- slave/mem_revocable_percent
- slave/mem_revocable_total
- slave/mem_revocable_used
- agent
- slave/registered
- slave/uptime_secs
- system
- system/cpus_total
- system/load_15min
- system/load_5min
- system/load_1min
- system/mem_free_bytes
- system/mem_total_bytes
- executors
- containerizer/mesos/container_destroy_errors
- slave/container_launch_errors
- slave/executors_preempted
- slave/frameworks_active
- slave/executor_directory_max_allowed_age_secs
- slave/executors_registering
- slave/executors_running
- slave/executors_terminated
- slave/executors_terminating
- slave/recovery_errors
- tasks
- slave/tasks_failed
- slave/tasks_finished
- slave/tasks_killed
- slave/tasks_lost
- slave/tasks_running
- slave/tasks_staging
- slave/tasks_starting
- messages
- slave/invalid_framework_messages
- slave/invalid_status_updates
- slave/valid_framework_messages
- slave/valid_status_updates
### Tags: ### Tags:
- All measurements have the following tags: - All master/slave measurements have the following tags:
- server - server
- role (master/slave)
- All master measurements have the following extra tag:
- state (leader/standby)
### Example Output: ### Example Output:
``` ```
$ telegraf -config ~/mesos.conf -input-filter mesos -test $ telegraf -config ~/mesos.conf -input-filter mesos -test
* Plugin: mesos, Collection 1 * Plugin: mesos, Collection 1
mesos,server=172.17.8.101 allocator/event_queue_dispatches=0,master/cpus_percent=0, mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101
allocator/event_queue_dispatches=0,master/cpus_percent=0,
master/cpus_revocable_percent=0,master/cpus_revocable_total=0, master/cpus_revocable_percent=0,master/cpus_revocable_total=0,
master/cpus_revocable_used=0,master/cpus_total=2, master/cpus_revocable_used=0,master/cpus_total=2,
master/cpus_used=0,master/disk_percent=0,master/disk_revocable_percent=0, master/cpus_used=0,master/disk_percent=0,master/disk_revocable_percent=0,
@ -163,3 +263,4 @@ master/mem_revocable_used=0,master/mem_total=1002,
master/mem_used=0,master/messages_authenticate=0, master/mem_used=0,master/messages_authenticate=0,
master/messages_deactivate_framework=0 ... master/messages_deactivate_framework=0 ...
``` ```

View File

@ -17,33 +17,55 @@ import (
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
) )
type Role string
const (
MASTER Role = "master"
SLAVE = "slave"
)
type Mesos struct { type Mesos struct {
Timeout int Timeout int
Masters []string Masters []string
MasterCols []string `toml:"master_collections"` MasterCols []string `toml:"master_collections"`
Slaves []string
SlaveCols []string `toml:"slave_collections"`
//SlaveTasks bool
} }
var defaultMetrics = []string{ var allMetrics = map[Role][]string{
"resources", "master", "system", "slaves", "frameworks", MASTER: []string{"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"},
"tasks", "messages", "evqueue", "messages", "registrar", SLAVE: []string{"resources", "agent", "system", "executors", "tasks", "messages"},
} }
var sampleConfig = ` var sampleConfig = `
# Timeout, in ms. ## Timeout, in ms.
timeout = 100 timeout = 100
# A list of Mesos masters, default value is localhost:5050. ## A list of Mesos masters.
masters = ["localhost:5050"] masters = ["localhost:5050"]
# Metrics groups to be collected, by default, all enabled. ## Master metrics groups to be collected, by default, all enabled.
master_collections = [ master_collections = [
"resources", "resources",
"master", "master",
"system", "system",
"slaves", "agents",
"frameworks", "frameworks",
"tasks",
"messages", "messages",
"evqueue", "evqueue",
"registrar", "registrar",
] ]
## A list of Mesos slaves, default is []
# slaves = []
## Slave metrics groups to be collected, by default, all enabled.
# slave_collections = [
# "resources",
# "agent",
# "system",
# "executors",
# "tasks",
# "messages",
# ]
` `
// SampleConfig returns a sample configuration block // SampleConfig returns a sample configuration block
@ -56,26 +78,59 @@ func (m *Mesos) Description() string {
return "Telegraf plugin for gathering metrics from N Mesos masters" return "Telegraf plugin for gathering metrics from N Mesos masters"
} }
func (m *Mesos) SetDefaults() {
if len(m.MasterCols) == 0 {
m.MasterCols = allMetrics[MASTER]
}
if len(m.SlaveCols) == 0 {
m.SlaveCols = allMetrics[SLAVE]
}
if m.Timeout == 0 {
log.Println("I! [mesos] Missing timeout value, setting default value (100ms)")
m.Timeout = 100
}
}
// Gather() metrics from given list of Mesos Masters // Gather() metrics from given list of Mesos Masters
func (m *Mesos) Gather(acc telegraf.Accumulator) error { func (m *Mesos) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup var wg sync.WaitGroup
var errorChannel chan error var errorChannel chan error
if len(m.Masters) == 0 { m.SetDefaults()
m.Masters = []string{"localhost:5050"}
}
errorChannel = make(chan error, len(m.Masters)*2) errorChannel = make(chan error, len(m.Masters)+2*len(m.Slaves))
for _, v := range m.Masters { for _, v := range m.Masters {
wg.Add(1) wg.Add(1)
go func(c string) { go func(c string) {
errorChannel <- m.gatherMetrics(c, acc) errorChannel <- m.gatherMainMetrics(c, ":5050", MASTER, acc)
wg.Done() wg.Done()
return return
}(v) }(v)
} }
for _, v := range m.Slaves {
wg.Add(1)
go func(c string) {
errorChannel <- m.gatherMainMetrics(c, ":5051", SLAVE, acc)
wg.Done()
return
}(v)
// if !m.SlaveTasks {
// continue
// }
// wg.Add(1)
// go func(c string) {
// errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc)
// wg.Done()
// return
// }(v)
}
wg.Wait() wg.Wait()
close(errorChannel) close(errorChannel)
errorStrings := []string{} errorStrings := []string{}
@ -94,7 +149,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
} }
// metricsDiff() returns set names for removal // metricsDiff() returns set names for removal
func metricsDiff(w []string) []string { func metricsDiff(role Role, w []string) []string {
b := []string{} b := []string{}
s := make(map[string]bool) s := make(map[string]bool)
@ -106,7 +161,7 @@ func metricsDiff(w []string) []string {
s[v] = true s[v] = true
} }
for _, d := range defaultMetrics { for _, d := range allMetrics[role] {
if _, ok := s[d]; !ok { if _, ok := s[d]; !ok {
b = append(b, d) b = append(b, d)
} }
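In effect, metricsDiff computes the complement of the user's whitelist within the role's full group list. A condensed standalone sketch of the same set arithmetic:

```go
// complement returns the elements of all that are absent from keep.
func complement(all, keep []string) []string {
	kept := make(map[string]bool, len(keep))
	for _, k := range keep {
		kept[k] = true
	}
	var drop []string
	for _, g := range all {
		if !kept[g] {
			drop = append(drop, g)
		}
	}
	return drop
}
```

filterMetrics below then deletes every metric belonging to the returned groups.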
@ -116,156 +171,239 @@ func metricsDiff(w []string) []string {
} }
// masterBlocks serves as a kind of metrics registry, grouping them in sets func getMetrics(role Role, group string) []string {
func masterBlocks(g string) []string { func getMetrics(role Role, group string) []string {
var m map[string][]string var m map[string][]string
m = make(map[string][]string) m = make(map[string][]string)
m["resources"] = []string{ if role == MASTER {
"master/cpus_percent", m["resources"] = []string{
"master/cpus_used", "master/cpus_percent",
"master/cpus_total", "master/cpus_used",
"master/cpus_revocable_percent", "master/cpus_total",
"master/cpus_revocable_total", "master/cpus_revocable_percent",
"master/cpus_revocable_used", "master/cpus_revocable_total",
"master/disk_percent", "master/cpus_revocable_used",
"master/disk_used", "master/disk_percent",
"master/disk_total", "master/disk_used",
"master/disk_revocable_percent", "master/disk_total",
"master/disk_revocable_total", "master/disk_revocable_percent",
"master/disk_revocable_used", "master/disk_revocable_total",
"master/mem_percent", "master/disk_revocable_used",
"master/mem_used", "master/gpus_percent",
"master/mem_total", "master/gpus_used",
"master/mem_revocable_percent", "master/gpus_total",
"master/mem_revocable_total", "master/gpus_revocable_percent",
"master/mem_revocable_used", "master/gpus_revocable_total",
"master/gpus_revocable_used",
"master/mem_percent",
"master/mem_used",
"master/mem_total",
"master/mem_revocable_percent",
"master/mem_revocable_total",
"master/mem_revocable_used",
}
m["master"] = []string{
"master/elected",
"master/uptime_secs",
}
m["system"] = []string{
"system/cpus_total",
"system/load_15min",
"system/load_5min",
"system/load_1min",
"system/mem_free_bytes",
"system/mem_total_bytes",
}
m["agents"] = []string{
"master/slave_registrations",
"master/slave_removals",
"master/slave_reregistrations",
"master/slave_shutdowns_scheduled",
"master/slave_shutdowns_canceled",
"master/slave_shutdowns_completed",
"master/slaves_active",
"master/slaves_connected",
"master/slaves_disconnected",
"master/slaves_inactive",
}
m["frameworks"] = []string{
"master/frameworks_active",
"master/frameworks_connected",
"master/frameworks_disconnected",
"master/frameworks_inactive",
"master/outstanding_offers",
}
m["tasks"] = []string{
"master/tasks_error",
"master/tasks_failed",
"master/tasks_finished",
"master/tasks_killed",
"master/tasks_lost",
"master/tasks_running",
"master/tasks_staging",
"master/tasks_starting",
}
m["messages"] = []string{
"master/invalid_executor_to_framework_messages",
"master/invalid_framework_to_executor_messages",
"master/invalid_status_update_acknowledgements",
"master/invalid_status_updates",
"master/dropped_messages",
"master/messages_authenticate",
"master/messages_deactivate_framework",
"master/messages_decline_offers",
"master/messages_executor_to_framework",
"master/messages_exited_executor",
"master/messages_framework_to_executor",
"master/messages_kill_task",
"master/messages_launch_tasks",
"master/messages_reconcile_tasks",
"master/messages_register_framework",
"master/messages_register_slave",
"master/messages_reregister_framework",
"master/messages_reregister_slave",
"master/messages_resource_request",
"master/messages_revive_offers",
"master/messages_status_update",
"master/messages_status_update_acknowledgement",
"master/messages_unregister_framework",
"master/messages_unregister_slave",
"master/messages_update_slave",
"master/recovery_slave_removals",
"master/slave_removals/reason_registered",
"master/slave_removals/reason_unhealthy",
"master/slave_removals/reason_unregistered",
"master/valid_framework_to_executor_messages",
"master/valid_status_update_acknowledgements",
"master/valid_status_updates",
"master/task_lost/source_master/reason_invalid_offers",
"master/task_lost/source_master/reason_slave_removed",
"master/task_lost/source_slave/reason_executor_terminated",
"master/valid_executor_to_framework_messages",
}
m["evqueue"] = []string{
"master/event_queue_dispatches",
"master/event_queue_http_requests",
"master/event_queue_messages",
}
m["registrar"] = []string{
"registrar/state_fetch_ms",
"registrar/state_store_ms",
"registrar/state_store_ms/max",
"registrar/state_store_ms/min",
"registrar/state_store_ms/p50",
"registrar/state_store_ms/p90",
"registrar/state_store_ms/p95",
"registrar/state_store_ms/p99",
"registrar/state_store_ms/p999",
"registrar/state_store_ms/p9999",
}
} else if role == SLAVE {
m["resources"] = []string{
"slave/cpus_percent",
"slave/cpus_used",
"slave/cpus_total",
"slave/cpus_revocable_percent",
"slave/cpus_revocable_total",
"slave/cpus_revocable_used",
"slave/disk_percent",
"slave/disk_used",
"slave/disk_total",
"slave/disk_revocable_percent",
"slave/disk_revocable_total",
"slave/disk_revocable_used",
"slave/gpus_percent",
"slave/gpus_used",
"slave/gpus_total",
"slave/gpus_revocable_percent",
"slave/gpus_revocable_total",
"slave/gpus_revocable_used",
"slave/mem_percent",
"slave/mem_used",
"slave/mem_total",
"slave/mem_revocable_percent",
"slave/mem_revocable_total",
"slave/mem_revocable_used",
}
m["agent"] = []string{
"slave/registered",
"slave/uptime_secs",
}
m["system"] = []string{
"system/cpus_total",
"system/load_15min",
"system/load_5min",
"system/load_1min",
"system/mem_free_bytes",
"system/mem_total_bytes",
}
m["executors"] = []string{
"containerizer/mesos/container_destroy_errors",
"slave/container_launch_errors",
"slave/executors_preempted",
"slave/frameworks_active",
"slave/executor_directory_max_allowed_age_secs",
"slave/executors_registering",
"slave/executors_running",
"slave/executors_terminated",
"slave/executors_terminating",
"slave/recovery_errors",
}
m["tasks"] = []string{
"slave/tasks_failed",
"slave/tasks_finished",
"slave/tasks_killed",
"slave/tasks_lost",
"slave/tasks_running",
"slave/tasks_staging",
"slave/tasks_starting",
}
m["messages"] = []string{
"slave/invalid_framework_messages",
"slave/invalid_status_updates",
"slave/valid_framework_messages",
"slave/valid_status_updates",
}
} }
m["master"] = []string{ ret, ok := m[group]
"master/elected",
"master/uptime_secs",
}
m["system"] = []string{
"system/cpus_total",
"system/load_15min",
"system/load_5min",
"system/load_1min",
"system/mem_free_bytes",
"system/mem_total_bytes",
}
m["slaves"] = []string{
"master/slave_registrations",
"master/slave_removals",
"master/slave_reregistrations",
"master/slave_shutdowns_scheduled",
"master/slave_shutdowns_canceled",
"master/slave_shutdowns_completed",
"master/slaves_active",
"master/slaves_connected",
"master/slaves_disconnected",
"master/slaves_inactive",
}
m["frameworks"] = []string{
"master/frameworks_active",
"master/frameworks_connected",
"master/frameworks_disconnected",
"master/frameworks_inactive",
"master/outstanding_offers",
}
m["tasks"] = []string{
"master/tasks_error",
"master/tasks_failed",
"master/tasks_finished",
"master/tasks_killed",
"master/tasks_lost",
"master/tasks_running",
"master/tasks_staging",
"master/tasks_starting",
}
m["messages"] = []string{
"master/invalid_executor_to_framework_messages",
"master/invalid_framework_to_executor_messages",
"master/invalid_status_update_acknowledgements",
"master/invalid_status_updates",
"master/dropped_messages",
"master/messages_authenticate",
"master/messages_deactivate_framework",
"master/messages_decline_offers",
"master/messages_executor_to_framework",
"master/messages_exited_executor",
"master/messages_framework_to_executor",
"master/messages_kill_task",
"master/messages_launch_tasks",
"master/messages_reconcile_tasks",
"master/messages_register_framework",
"master/messages_register_slave",
"master/messages_reregister_framework",
"master/messages_reregister_slave",
"master/messages_resource_request",
"master/messages_revive_offers",
"master/messages_status_update",
"master/messages_status_update_acknowledgement",
"master/messages_unregister_framework",
"master/messages_unregister_slave",
"master/messages_update_slave",
"master/recovery_slave_removals",
"master/slave_removals/reason_registered",
"master/slave_removals/reason_unhealthy",
"master/slave_removals/reason_unregistered",
"master/valid_framework_to_executor_messages",
"master/valid_status_update_acknowledgements",
"master/valid_status_updates",
"master/task_lost/source_master/reason_invalid_offers",
"master/task_lost/source_master/reason_slave_removed",
"master/task_lost/source_slave/reason_executor_terminated",
"master/valid_executor_to_framework_messages",
}
m["evqueue"] = []string{
"master/event_queue_dispatches",
"master/event_queue_http_requests",
"master/event_queue_messages",
}
m["registrar"] = []string{
"registrar/state_fetch_ms",
"registrar/state_store_ms",
"registrar/state_store_ms/max",
"registrar/state_store_ms/min",
"registrar/state_store_ms/p50",
"registrar/state_store_ms/p90",
"registrar/state_store_ms/p95",
"registrar/state_store_ms/p99",
"registrar/state_store_ms/p999",
"registrar/state_store_ms/p9999",
}
ret, ok := m[g]
if !ok { if !ok {
log.Println("[mesos] Unkown metrics group: ", g) log.Printf("I! [mesos] Unkown %s metrics group: %s\n", role, group)
return []string{} return []string{}
} }
return ret return ret
} }
// removeGroup(), remove unwanted sets func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) {
func (m *Mesos) removeGroup(j *map[string]interface{}) {
var ok bool var ok bool
var selectedMetrics []string
b := metricsDiff(m.MasterCols) if role == MASTER {
selectedMetrics = m.MasterCols
} else if role == SLAVE {
selectedMetrics = m.SlaveCols
}
for _, k := range b { for _, k := range metricsDiff(role, selectedMetrics) {
for _, v := range masterBlocks(k) { for _, v := range getMetrics(role, k) {
if _, ok = (*j)[v]; ok { if _, ok = (*metrics)[v]; ok {
delete((*j), v) delete((*metrics), v)
} }
} }
} }
@ -280,23 +418,76 @@ var client = &http.Client{
Timeout: time.Duration(4 * time.Second), Timeout: time.Duration(4 * time.Second),
} }
// This should not belong to the object // TaskStats struct for JSON API output /monitor/statistics
func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { type TaskStats struct {
var jsonOut map[string]interface{} ExecutorID string `json:"executor_id"`
FrameworkID string `json:"framework_id"`
Statistics map[string]interface{} `json:"statistics"`
}
host, _, err := net.SplitHostPort(a) func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc telegraf.Accumulator) error {
var metrics []TaskStats
host, _, err := net.SplitHostPort(address)
if err != nil { if err != nil {
host = a host = address
a = a + ":5050" address = address + defaultPort
} }
tags := map[string]string{ tags := map[string]string{
"server": host, "server": host,
} }
if m.Timeout == 0 { ts := strconv.Itoa(m.Timeout) + "ms"
log.Println("[mesos] Missing timeout value, setting default value (100ms)")
m.Timeout = 100 resp, err := client.Get("http://" + address + "/monitor/statistics?timeout=" + ts)
if err != nil {
return err
}
data, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return err
}
if err = json.Unmarshal([]byte(data), &metrics); err != nil {
return errors.New("Error decoding JSON response")
}
for _, task := range metrics {
tags["framework_id"] = task.FrameworkID
jf := jsonparser.JSONFlattener{}
err = jf.FlattenJSON("", task.Statistics)
if err != nil {
return err
}
timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0)
jf.Fields["executor_id"] = task.ExecutorID
acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp)
}
return nil
}
// This should not belong to the object
func (m *Mesos) gatherMainMetrics(a string, defaultPort string, role Role, acc telegraf.Accumulator) error {
var jsonOut map[string]interface{}
host, _, err := net.SplitHostPort(a)
if err != nil {
host = a
a = a + defaultPort
}
tags := map[string]string{
"server": host,
"role": string(role),
} }
ts := strconv.Itoa(m.Timeout) + "ms" ts := strconv.Itoa(m.Timeout) + "ms"
@ -317,7 +508,7 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error {
return errors.New("Error decoding JSON response") return errors.New("Error decoding JSON response")
} }
m.removeGroup(&jsonOut) m.filterMetrics(role, &jsonOut)
jf := jsonparser.JSONFlattener{} jf := jsonparser.JSONFlattener{}
@ -327,6 +518,14 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error {
return err return err
} }
if role == MASTER {
if jf.Fields["master/elected"] != 0.0 {
tags["state"] = "leader"
} else {
tags["state"] = "standby"
}
}
acc.AddFields("mesos", jf.Fields, tags) acc.AddFields("mesos", jf.Fields, tags)
return nil return nil

View File

@ -2,6 +2,7 @@ package mesos
import ( import (
"encoding/json" "encoding/json"
"fmt"
"math/rand" "math/rand"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -11,61 +12,265 @@ import (
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
) )
var mesosMetrics map[string]interface{} var masterMetrics map[string]interface{}
var ts *httptest.Server var masterTestServer *httptest.Server
var slaveMetrics map[string]interface{}
// var slaveTaskMetrics map[string]interface{}
var slaveTestServer *httptest.Server
func randUUID() string {
b := make([]byte, 16)
rand.Read(b)
return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
func generateMetrics() { func generateMetrics() {
mesosMetrics = make(map[string]interface{}) masterMetrics = make(map[string]interface{})
metricNames := []string{"master/cpus_percent", "master/cpus_used", "master/cpus_total", metricNames := []string{
"master/cpus_revocable_percent", "master/cpus_revocable_total", "master/cpus_revocable_used", // resources
"master/disk_percent", "master/disk_used", "master/disk_total", "master/disk_revocable_percent", "master/cpus_percent",
"master/disk_revocable_total", "master/disk_revocable_used", "master/mem_percent", "master/cpus_used",
"master/mem_used", "master/mem_total", "master/mem_revocable_percent", "master/mem_revocable_total", "master/cpus_total",
"master/mem_revocable_used", "master/elected", "master/uptime_secs", "system/cpus_total", "master/cpus_revocable_percent",
"system/load_15min", "system/load_5min", "system/load_1min", "system/mem_free_bytes", "master/cpus_revocable_total",
"system/mem_total_bytes", "master/slave_registrations", "master/slave_removals", "master/cpus_revocable_used",
"master/slave_reregistrations", "master/slave_shutdowns_scheduled", "master/slave_shutdowns_canceled", "master/disk_percent",
"master/slave_shutdowns_completed", "master/slaves_active", "master/slaves_connected", "master/disk_used",
"master/slaves_disconnected", "master/slaves_inactive", "master/frameworks_active", "master/disk_total",
"master/frameworks_connected", "master/frameworks_disconnected", "master/frameworks_inactive", "master/disk_revocable_percent",
"master/outstanding_offers", "master/tasks_error", "master/tasks_failed", "master/tasks_finished", "master/disk_revocable_total",
"master/tasks_killed", "master/tasks_lost", "master/tasks_running", "master/tasks_staging", "master/disk_revocable_used",
"master/tasks_starting", "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", "master/gpus_percent",
"master/invalid_status_update_acknowledgements", "master/invalid_status_updates", "master/gpus_used",
"master/dropped_messages", "master/messages_authenticate", "master/messages_deactivate_framework", "master/gpus_total",
"master/messages_decline_offers", "master/messages_executor_to_framework", "master/messages_exited_executor", "master/gpus_revocable_percent",
"master/messages_framework_to_executor", "master/messages_kill_task", "master/messages_launch_tasks", "master/gpus_revocable_total",
"master/messages_reconcile_tasks", "master/messages_register_framework", "master/messages_register_slave", "master/gpus_revocable_used",
"master/messages_reregister_framework", "master/messages_reregister_slave", "master/messages_resource_request", "master/mem_percent",
"master/messages_revive_offers", "master/messages_status_update", "master/messages_status_update_acknowledgement", "master/mem_used",
"master/messages_unregister_framework", "master/messages_unregister_slave", "master/messages_update_slave", "master/mem_total",
"master/recovery_slave_removals", "master/slave_removals/reason_registered", "master/slave_removals/reason_unhealthy", "master/mem_revocable_percent",
"master/slave_removals/reason_unregistered", "master/valid_framework_to_executor_messages", "master/valid_status_update_acknowledgements", "master/mem_revocable_total",
"master/valid_status_updates", "master/task_lost/source_master/reason_invalid_offers", "master/mem_revocable_used",
"master/task_lost/source_master/reason_slave_removed", "master/task_lost/source_slave/reason_executor_terminated", // master
"master/valid_executor_to_framework_messages", "master/event_queue_dispatches", "master/elected",
"master/event_queue_http_requests", "master/event_queue_messages", "registrar/state_fetch_ms", "master/uptime_secs",
"registrar/state_store_ms", "registrar/state_store_ms/max", "registrar/state_store_ms/min", // system
"registrar/state_store_ms/p50", "registrar/state_store_ms/p90", "registrar/state_store_ms/p95", "system/cpus_total",
"registrar/state_store_ms/p99", "registrar/state_store_ms/p999", "registrar/state_store_ms/p9999"} "system/load_15min",
"system/load_5min",
"system/load_1min",
"system/mem_free_bytes",
"system/mem_total_bytes",
// agents
"master/slave_registrations",
"master/slave_removals",
"master/slave_reregistrations",
"master/slave_shutdowns_scheduled",
"master/slave_shutdowns_canceled",
"master/slave_shutdowns_completed",
"master/slaves_active",
"master/slaves_connected",
"master/slaves_disconnected",
"master/slaves_inactive",
// frameworks
"master/frameworks_active",
"master/frameworks_connected",
"master/frameworks_disconnected",
"master/frameworks_inactive",
"master/outstanding_offers",
// tasks
"master/tasks_error",
"master/tasks_failed",
"master/tasks_finished",
"master/tasks_killed",
"master/tasks_lost",
"master/tasks_running",
"master/tasks_staging",
"master/tasks_starting",
// messages
"master/invalid_executor_to_framework_messages",
"master/invalid_framework_to_executor_messages",
"master/invalid_status_update_acknowledgements",
"master/invalid_status_updates",
"master/dropped_messages",
"master/messages_authenticate",
"master/messages_deactivate_framework",
"master/messages_decline_offers",
"master/messages_executor_to_framework",
"master/messages_exited_executor",
"master/messages_framework_to_executor",
"master/messages_kill_task",
"master/messages_launch_tasks",
"master/messages_reconcile_tasks",
"master/messages_register_framework",
"master/messages_register_slave",
"master/messages_reregister_framework",
"master/messages_reregister_slave",
"master/messages_resource_request",
"master/messages_revive_offers",
"master/messages_status_update",
"master/messages_status_update_acknowledgement",
"master/messages_unregister_framework",
"master/messages_unregister_slave",
"master/messages_update_slave",
"master/recovery_slave_removals",
"master/slave_removals/reason_registered",
"master/slave_removals/reason_unhealthy",
"master/slave_removals/reason_unregistered",
"master/valid_framework_to_executor_messages",
"master/valid_status_update_acknowledgements",
"master/valid_status_updates",
"master/task_lost/source_master/reason_invalid_offers",
"master/task_lost/source_master/reason_slave_removed",
"master/task_lost/source_slave/reason_executor_terminated",
"master/valid_executor_to_framework_messages",
// evqueue
"master/event_queue_dispatches",
"master/event_queue_http_requests",
"master/event_queue_messages",
// registrar
"registrar/state_fetch_ms",
"registrar/state_store_ms",
"registrar/state_store_ms/max",
"registrar/state_store_ms/min",
"registrar/state_store_ms/p50",
"registrar/state_store_ms/p90",
"registrar/state_store_ms/p95",
"registrar/state_store_ms/p99",
"registrar/state_store_ms/p999",
"registrar/state_store_ms/p9999",
}
for _, k := range metricNames { for _, k := range metricNames {
mesosMetrics[k] = rand.Float64() masterMetrics[k] = rand.Float64()
} }
slaveMetrics = make(map[string]interface{})
metricNames = []string{
// resources
"slave/cpus_percent",
"slave/cpus_used",
"slave/cpus_total",
"slave/cpus_revocable_percent",
"slave/cpus_revocable_total",
"slave/cpus_revocable_used",
"slave/disk_percent",
"slave/disk_used",
"slave/disk_total",
"slave/disk_revocable_percent",
"slave/disk_revocable_total",
"slave/disk_revocable_used",
"slave/gpus_percent",
"slave/gpus_used",
"slave/gpus_total",
"slave/gpus_revocable_percent",
"slave/gpus_revocable_total",
"slave/gpus_revocable_used",
"slave/mem_percent",
"slave/mem_used",
"slave/mem_total",
"slave/mem_revocable_percent",
"slave/mem_revocable_total",
"slave/mem_revocable_used",
// agent
"slave/registered",
"slave/uptime_secs",
// system
"system/cpus_total",
"system/load_15min",
"system/load_5min",
"system/load_1min",
"system/mem_free_bytes",
"system/mem_total_bytes",
// executors
"containerizer/mesos/container_destroy_errors",
"slave/container_launch_errors",
"slave/executors_preempted",
"slave/frameworks_active",
"slave/executor_directory_max_allowed_age_secs",
"slave/executors_registering",
"slave/executors_running",
"slave/executors_terminated",
"slave/executors_terminating",
"slave/recovery_errors",
// tasks
"slave/tasks_failed",
"slave/tasks_finished",
"slave/tasks_killed",
"slave/tasks_lost",
"slave/tasks_running",
"slave/tasks_staging",
"slave/tasks_starting",
// messages
"slave/invalid_framework_messages",
"slave/invalid_status_updates",
"slave/valid_framework_messages",
"slave/valid_status_updates",
}
for _, k := range metricNames {
slaveMetrics[k] = rand.Float64()
}
// slaveTaskMetrics = map[string]interface{}{
// "executor_id": fmt.Sprintf("task_name.%s", randUUID()),
// "executor_name": "Some task description",
// "framework_id": randUUID(),
// "source": fmt.Sprintf("task_source.%s", randUUID()),
// "statistics": map[string]interface{}{
// "cpus_limit": rand.Float64(),
// "cpus_system_time_secs": rand.Float64(),
// "cpus_user_time_secs": rand.Float64(),
// "mem_anon_bytes": float64(rand.Int63()),
// "mem_cache_bytes": float64(rand.Int63()),
// "mem_critical_pressure_counter": float64(rand.Int63()),
// "mem_file_bytes": float64(rand.Int63()),
// "mem_limit_bytes": float64(rand.Int63()),
// "mem_low_pressure_counter": float64(rand.Int63()),
// "mem_mapped_file_bytes": float64(rand.Int63()),
// "mem_medium_pressure_counter": float64(rand.Int63()),
// "mem_rss_bytes": float64(rand.Int63()),
// "mem_swap_bytes": float64(rand.Int63()),
// "mem_total_bytes": float64(rand.Int63()),
// "mem_total_memsw_bytes": float64(rand.Int63()),
// "mem_unevictable_bytes": float64(rand.Int63()),
// "timestamp": rand.Float64(),
// },
// }
} }
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
generateMetrics() generateMetrics()
r := http.NewServeMux()
r.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { masterRouter := http.NewServeMux()
masterRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(mesosMetrics) json.NewEncoder(w).Encode(masterMetrics)
}) })
ts = httptest.NewServer(r) masterTestServer = httptest.NewServer(masterRouter)
slaveRouter := http.NewServeMux()
slaveRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(slaveMetrics)
})
// slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) {
// w.WriteHeader(http.StatusOK)
// w.Header().Set("Content-Type", "application/json")
// json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics})
// })
slaveTestServer = httptest.NewServer(slaveRouter)
rc := m.Run() rc := m.Run()
ts.Close()
masterTestServer.Close()
slaveTestServer.Close()
os.Exit(rc) os.Exit(rc)
} }
@ -73,7 +278,7 @@ func TestMesosMaster(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
m := Mesos{ m := Mesos{
Masters: []string{ts.Listener.Addr().String()}, Masters: []string{masterTestServer.Listener.Addr().String()},
Timeout: 10, Timeout: 10,
} }
@ -83,34 +288,91 @@ func TestMesosMaster(t *testing.T) {
t.Errorf(err.Error()) t.Errorf(err.Error())
} }
acc.AssertContainsFields(t, "mesos", mesosMetrics) acc.AssertContainsFields(t, "mesos", masterMetrics)
} }
func TestRemoveGroup(t *testing.T) { func TestMasterFilter(t *testing.T) {
generateMetrics()
m := Mesos{ m := Mesos{
MasterCols: []string{ MasterCols: []string{
"resources", "master", "registrar", "resources", "master", "registrar",
}, },
} }
b := []string{ b := []string{
"system", "slaves", "frameworks", "system", "agents", "frameworks",
"messages", "evqueue", "messages", "evqueue", "tasks",
} }
m.removeGroup(&mesosMetrics) m.filterMetrics(MASTER, &masterMetrics)
for _, v := range b { for _, v := range b {
for _, x := range masterBlocks(v) { for _, x := range getMetrics(MASTER, v) {
if _, ok := mesosMetrics[x]; ok { if _, ok := masterMetrics[x]; ok {
t.Errorf("Found key %s, it should be gone.", x) t.Errorf("Found key %s, it should be gone.", x)
} }
} }
} }
for _, v := range m.MasterCols { for _, v := range m.MasterCols {
for _, x := range masterBlocks(v) { for _, x := range getMetrics(MASTER, v) {
if _, ok := mesosMetrics[x]; !ok { if _, ok := masterMetrics[x]; !ok {
t.Errorf("Didn't find key %s, it should present.", x)
}
}
}
}
func TestMesosSlave(t *testing.T) {
var acc testutil.Accumulator
m := Mesos{
Masters: []string{},
Slaves: []string{slaveTestServer.Listener.Addr().String()},
// SlaveTasks: true,
Timeout: 10,
}
err := m.Gather(&acc)
if err != nil {
t.Errorf(err.Error())
}
acc.AssertContainsFields(t, "mesos", slaveMetrics)
// expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1)
// for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) {
// expectedFields[k] = v
// }
// expectedFields["executor_id"] = slaveTaskMetrics["executor_id"]
// acc.AssertContainsTaggedFields(
// t,
// "mesos_tasks",
// expectedFields,
// map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)})
}
func TestSlaveFilter(t *testing.T) {
m := Mesos{
SlaveCols: []string{
"resources", "agent", "tasks",
},
}
b := []string{
"system", "executors", "messages",
}
m.filterMetrics(SLAVE, &slaveMetrics)
for _, v := range b {
for _, x := range getMetrics(SLAVE, v) {
if _, ok := slaveMetrics[x]; ok {
t.Errorf("Found key %s, it should be gone.", x)
}
}
}
for _, v := range m.SlaveCols {
for _, x := range getMetrics(SLAVE, v) {
if _, ok := slaveMetrics[x]; !ok {
t.Errorf("Didn't find key %s, it should present.", x) t.Errorf("Didn't find key %s, it should present.", x)
} }
} }

View File

@ -6,10 +6,22 @@ import (
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
) )
// MockPlugin struct should be named the same as the Plugin
type MockPlugin struct { type MockPlugin struct {
mock.Mock mock.Mock
} }
// Description will appear directly above the plugin definition in the config file
func (m *MockPlugin) Description() string {
return `This is an example plugin`
}
// SampleConfig will populate the sample configuration portion of the plugin's configuration
func (m *MockPlugin) SampleConfig() string {
return ` sampleVar = 'foo'`
}
// Gather defines what data the plugin will gather.
func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error {
ret := m.Called(_a0) ret := m.Called(_a0)
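A minimal sketch of driving this mock from a test with testify's expectation API (the test name is illustrative; assumes the usual testing and testutil imports, and that Gather returns the mocked error):

```go
func TestMockPluginGather(t *testing.T) {
	m := &MockPlugin{}
	var acc testutil.Accumulator

	// Expect exactly one Gather call with our accumulator, returning no error.
	m.On("Gather", &acc).Return(nil)

	if err := m.Gather(&acc); err != nil {
		t.Fatal(err)
	}
	m.AssertExpectations(t)
}
```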

View File

@ -26,14 +26,28 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
s.Session.SetMode(mgo.Eventual, true)
s.Session.SetSocketTimeout(0)
result_server := &ServerStatus{}
err := s.Session.DB("admin").Run(bson.D{
{
Name: "serverStatus",
Value: 1,
},
{
Name: "recordStats",
Value: 0,
},
}, result_server)
if err != nil {
return err
}
result_repl := &ReplSetStatus{}
err = s.Session.DB("admin").Run(bson.D{
{
Name: "replSetGetStatus",
Value: 1,
},
}, result_repl)
if err != nil {
log.Println("E! Not gathering replica set status, member not in replica set (" + err.Error() + ")")
}
jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
@ -48,13 +62,18 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
names := []string{}
names, err = s.Session.DatabaseNames()
if err != nil {
log.Println("E! Error getting database names (" + err.Error() + ")")
}
for _, db_name := range names {
db_stat_line := &DbStatsData{}
err = s.Session.DB(db_name).Run(bson.D{
{
Name: "dbStats",
Value: 1,
},
}, db_stat_line)
if err != nil {
log.Println("E! Error getting db stats from " + db_name + "(" + err.Error() + ")")
}
db := &Db{
Name: db_name,

View File

@ -514,7 +514,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
}
if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
}

View File

@ -133,7 +133,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
return nil
}
func (m *MQTTConsumer) onConnect(c mqtt.Client) {
log.Printf("I! MQTT Client Connected")
if !m.PersistentSession || !m.started {
topics := make(map[string]byte)
for _, topic := range m.Topics {
@ -142,7 +142,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
subscribeToken := c.SubscribeMultiple(topics, m.recvMessage)
subscribeToken.Wait()
if subscribeToken.Error() != nil {
log.Printf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
strings.Join(m.Topics[:], ","), subscribeToken.Error())
}
m.started = true
@ -151,7 +151,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
}
func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) {
log.Printf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())
return
}
@ -166,7 +166,7 @@ func (m *MQTTConsumer) receiver() {
topic := msg.Topic()
metrics, err := m.parser.Parse(msg.Payload())
if err != nil {
log.Printf("E! MQTT Parse Error\nmessage: %s\nerror: %s",
string(msg.Payload()), err.Error())
}
} }

View File

@ -313,6 +313,10 @@ var mappings = []*mapping{
onServer: "wsrep_", onServer: "wsrep_",
inExport: "wsrep_", inExport: "wsrep_",
}, },
{
onServer: "Uptime_",
inExport: "uptime_",
},
} }
var ( var (
@ -1376,6 +1380,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
&rowsAffected, &rowsSent, &rowsExamined, &rowsAffected, &rowsSent, &rowsExamined,
&tmpTables, &tmpDiskTables, &tmpTables, &tmpDiskTables,
&sortMergePasses, &sortRows, &sortMergePasses, &sortRows,
&noIndexUsed,
) )
if err != nil { if err != nil {
@ -1477,19 +1482,23 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula
tags["schema"] = tableSchema tags["schema"] = tableSchema
tags["table"] = tableName tags["table"] = tableName
acc.Add(newNamespace("info_schema", "table_rows"), tableRows, tags) acc.AddFields(newNamespace("info_schema", "table_rows"),
map[string]interface{}{"value": tableRows}, tags)
dlTags := copyTags(tags) dlTags := copyTags(tags)
dlTags["component"] = "data_length" dlTags["component"] = "data_length"
acc.Add(newNamespace("info_schema", "table_size", "data_length"), dataLength, dlTags) acc.AddFields(newNamespace("info_schema", "table_size", "data_length"),
map[string]interface{}{"value": dataLength}, dlTags)
ilTags := copyTags(tags) ilTags := copyTags(tags)
ilTags["component"] = "index_length" ilTags["component"] = "index_length"
acc.Add(newNamespace("info_schema", "table_size", "index_length"), indexLength, ilTags) acc.AddFields(newNamespace("info_schema", "table_size", "index_length"),
map[string]interface{}{"value": indexLength}, ilTags)
dfTags := copyTags(tags) dfTags := copyTags(tags)
dfTags["component"] = "data_free" dfTags["component"] = "data_free"
acc.Add(newNamespace("info_schema", "table_size", "data_free"), dataFree, dfTags) acc.AddFields(newNamespace("info_schema", "table_size", "data_free"),
map[string]interface{}{"value": dataFree}, dfTags)
versionTags := copyTags(tags) versionTags := copyTags(tags)
versionTags["type"] = tableType versionTags["type"] = tableType
@ -1497,7 +1506,8 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula
versionTags["row_format"] = rowFormat versionTags["row_format"] = rowFormat
versionTags["create_options"] = createOptions versionTags["create_options"] = createOptions
acc.Add(newNamespace("info_schema", "table_version"), version, versionTags) acc.AddFields(newNamespace("info_schema", "table_version"),
map[string]interface{}{"value": version}, versionTags)
} }
} }
return nil return nil
@ -1510,7 +1520,7 @@ func parseValue(value sql.RawBytes) (float64, bool) {
} }
if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 { if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 {
return 0, false return 0, true
} }
n, err := strconv.ParseFloat(string(value), 64) n, err := strconv.ParseFloat(string(value), 64)
return n, err == nil return n, err == nil

View File

@ -119,7 +119,7 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
// Start the message reader
go n.receiver()
log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n",
n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup)
return nil
@ -134,11 +134,11 @@ func (n *natsConsumer) receiver() {
case <-n.done:
return
case err := <-n.errs:
log.Printf("E! error reading from %s\n", err.Error())
case msg := <-n.in:
metrics, err := n.parser.Parse(msg.Data)
if err != nil {
log.Printf("E! subject: %s, error: %s", msg.Subject, err.Error())
}
for _, metric := range metrics {
@ -157,7 +157,7 @@ func (n *natsConsumer) clean() {
for _, sub := range n.Subs {
if err := sub.Unsubscribe(); err != nil {
log.Printf("E! Error unsubscribing from subject %s in queue %s: %s\n",
sub.Subject, sub.Queue, err.Error())
}
}

View File

@ -62,7 +62,7 @@ func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error {
metrics, err := n.parser.Parse(message.Body)
if err != nil {
log.Printf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())
return nil
}
for _, metric := range metrics {

View File

@ -119,7 +119,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
// Get integer metrics from output
for key, index := range intI {
if index == -1 || index >= len(fields) {
continue
}
if fields[index] == "-" {
@ -132,7 +132,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "h"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index])
continue
}
// seconds in an hour
@ -141,7 +141,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "d"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index])
continue
}
// seconds in a day
@ -150,7 +150,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
case strings.HasSuffix(when, "m"):
m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index])
continue
}
// seconds in a minute
@ -161,7 +161,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.Atoi(fields[index])
if err != nil {
log.Printf("E! Error ntpq: parsing int: %s", fields[index])
continue
}
mFields[key] = int64(m)
@ -169,7 +169,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
// get float metrics from output
for key, index := range floatI {
if index == -1 || index >= len(fields) {
continue
}
if fields[index] == "-" {
@ -178,7 +178,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
m, err := strconv.ParseFloat(fields[index], 64)
if err != nil {
log.Printf("E! Error ntpq: parsing float: %s", fields[index])
continue
}
mFields[key] = m

View File

@ -41,6 +41,35 @@ func TestSingleNTPQ(t *testing.T) {
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestMissingJitterField(t *testing.T) {
tt := tester{
ret: []byte(missingJitterField),
err: nil,
}
n := &NTPQ{
runQ: tt.runqTest,
}
acc := testutil.Accumulator{}
assert.NoError(t, n.Gather(&acc))
fields := map[string]interface{}{
"when": int64(101),
"poll": int64(256),
"reach": int64(37),
"delay": float64(51.016),
"offset": float64(233.010),
}
tags := map[string]string{
"remote": "uschi5-ntp-002.",
"state_prefix": "*",
"refid": "10.177.80.46",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestBadIntNTPQ(t *testing.T) {
tt := tester{
ret: []byte(badIntParseNTPQ),
@ -381,6 +410,11 @@ var singleNTPQ = ` remote refid st t when poll reach delay
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
`
var missingJitterField = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010
`
var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter
==============================================================================
*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462

View File

@ -0,0 +1,36 @@
# Ping input plugin
This input plugin measures the round-trip time of ping requests.
## Windows:
### Configuration:
```
## urls to ping
urls = ["www.google.com"] # required
## number of pings to send per collection (ping -n <COUNT>)
count = 4 # required
## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
Timeout = 0
```
### Measurements & Fields:
- packets_transmitted ( from ping output )
- reply_received ( incremented only on a valid metric from an echo reply; e.g. a 'Destination net unreachable' reply will increment packets_received but not reply_received )
- packets_received ( from ping output )
- percent_reply_loss ( computed from packets_transmitted and reply_received; see the sketch after this list )
- percent_packets_loss ( computed from packets_transmitted and packets_received )
- errors ( when the host cannot be found or wrong parameters are passed to the application )
- response time
- average_response_ms ( from ping output )
- minimum_response_ms ( from ping output )
- maximum_response_ms ( from ping output )
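The two percentage fields are derived from the raw counters exactly as in the plugin's Gather code; here is a minimal standalone sketch of that arithmetic (the counter values are illustrative):

```go
package main

import "fmt"

func main() {
	// Illustrative counters: 4 echo requests sent, 4 packets counted by ping,
	// but only 3 of them were valid echo replies.
	trans, packetsReceived, replyReceived := 4, 4, 3
	percentPacketLoss := float64(trans-packetsReceived) / float64(trans) * 100.0
	percentReplyLoss := float64(trans-replyReceived) / float64(trans) * 100.0
	fmt.Println(percentPacketLoss, percentReplyLoss) // 0 25
}
```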
### Tags:
- server
### Example Output:
```
* Plugin: ping, Collection 1
ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000
```

View File

@ -1,3 +1,223 @@
// +build windows
package ping
import (
"errors"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"os/exec"
"regexp"
"strconv"
"strings"
"sync"
"time"
)
// HostPinger is a function that runs the "ping" function using a list of
// passed arguments. This can be easily switched with a mocked ping function
// for unit test purposes (see ping_test.go)
type HostPinger func(timeout float64, args ...string) (string, error)
type Ping struct {
// Number of pings to send (ping -c <COUNT>)
Count int
// Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
Timeout float64
// URLs to ping
Urls []string
// host ping function
pingHost HostPinger
}
func (s *Ping) Description() string {
return "Ping given url(s) and return statistics"
}
const sampleConfig = `
## urls to ping
urls = ["www.google.com"] # required
## number of pings to send per collection (ping -n <COUNT>)
count = 4 # required
## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
Timeout = 0
`
func (s *Ping) SampleConfig() string {
return sampleConfig
}
func hostPinger(timeout float64, args ...string) (string, error) {
bin, err := exec.LookPath("ping")
if err != nil {
return "", err
}
c := exec.Command(bin, args...)
out, err := internal.CombinedOutputTimeout(c,
time.Second*time.Duration(timeout+1))
return string(out), err
}
// processPingOutput takes in a string output from the ping command
// It is based on the linux implementation but uses regexes for multilanguage support (this shouldn't affect performance).
// It returns (<transmitted packets>, <received reply>, <received packet>, <average response>, <min response>, <max response>)
func processPingOutput(out string) (int, int, int, int, int, int, error) {
// Find the lines containing 3 numbers, excluding reply lines.
var stats, aproxs []string = nil, nil
err := errors.New("Fatal error processing ping output")
stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`)
aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
ttlLine := regexp.MustCompile(`TTL=\d+`)
lines := strings.Split(out, "\n")
var receivedReply int = 0
for _, line := range lines {
if ttlLine.MatchString(line) {
receivedReply++
} else {
if stats == nil {
stats = stat.FindStringSubmatch(line)
}
if stats != nil && aproxs == nil {
aproxs = aprox.FindStringSubmatch(line)
}
}
}
// stats data should contain 4 members: entireExpression + ( Send, Receive, Lost )
if len(stats) != 4 {
return 0, 0, 0, 0, 0, 0, err
}
trans, err := strconv.Atoi(stats[1])
if err != nil {
return 0, 0, 0, 0, 0, 0, err
}
receivedPacket, err := strconv.Atoi(stats[2])
if err != nil {
return 0, 0, 0, 0, 0, 0, err
}
// aproxs data should contain 4 members: entireExpression + ( min, max, avg )
if len(aproxs) != 4 {
return trans, receivedReply, receivedPacket, 0, 0, 0, err
}
min, err := strconv.Atoi(aproxs[1])
if err != nil {
return trans, receivedReply, receivedPacket, 0, 0, 0, err
}
max, err := strconv.Atoi(aproxs[2])
if err != nil {
return trans, receivedReply, receivedPacket, 0, 0, 0, err
}
avg, err := strconv.Atoi(aproxs[3])
if err != nil {
return 0, 0, 0, 0, 0, 0, err
}
return trans, receivedReply, receivedPacket, avg, min, max, err
}
func (p *Ping) timeout() float64 {
// According to MSDN, the default ping timeout on Windows is 4 seconds.
// Add one second to allow for the interval between pings.
if p.Timeout > 0 {
return p.Timeout + 1
}
return 4 + 1
}
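To make the arithmetic concrete, a small standalone sketch (the count and timeout values are illustrative, not from the plugin): with the default `Timeout = 0` and four pings, each ping is allotted 4 + 1 seconds and the total budget used by Gather comes to 20 seconds per URL.

```go
package main

import "fmt"

func main() {
	// Mirrors timeout() above with Timeout == 0: MSDN default (4 s) + 1 s interval.
	perPing := 4.0 + 1.0
	// Mirrors totalTimeout in Gather below, assuming Count == 4.
	total := perPing * 4
	fmt.Println(perPing, total) // 5 20
}
```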
// args returns the arguments for the 'ping' executable
func (p *Ping) args(url string) []string {
args := []string{"-n", strconv.Itoa(p.Count)}
if p.Timeout > 0 {
args = append(args, "-w", strconv.FormatFloat(p.Timeout*1000, 'f', 0, 64))
}
args = append(args, url)
return args
}
func (p *Ping) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
errorChannel := make(chan error, len(p.Urls)*2)
var pendingError error = nil
// Spin off a go routine for each url to ping
for _, url := range p.Urls {
wg.Add(1)
go func(u string) {
defer wg.Done()
args := p.args(u)
totalTimeout := p.timeout() * float64(p.Count)
out, err := p.pingHost(totalTimeout, args...)
// the ping command returns a non-zero exit code when there is no response from the host,
// even though the command itself executed successfully
if err != nil {
// Combine go err + stderr output
pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error())
}
tags := map[string]string{"url": u}
trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out)
if err != nil {
// fatal error
if pendingError != nil {
errorChannel <- pendingError
}
errorChannel <- err
fields := map[string]interface{}{
"errors": 100.0,
}
acc.AddFields("ping", fields, tags)
return
}
// Calculate packet loss percentage
lossReply := float64(trans-recReply) / float64(trans) * 100.0
lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0
fields := map[string]interface{}{
"packets_transmitted": trans,
"reply_received": recReply,
"packets_received": receivePacket,
"percent_packet_loss": lossPackets,
"percent_reply_loss": lossReply,
}
if avg > 0 {
fields["average_response_ms"] = avg
}
if min > 0 {
fields["minimum_response_ms"] = min
}
if max > 0 {
fields["maximum_response_ms"] = max
}
acc.AddFields("ping", fields, tags)
}(url)
}
wg.Wait()
close(errorChannel)
// Get all errors and return them as one giant error
errorStrings := []string{}
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
if len(errorStrings) == 0 {
return nil
}
return errors.New(strings.Join(errorStrings, "\n"))
}
func init() {
inputs.Add("ping", func() telegraf.Input {
return &Ping{pingHost: hostPinger}
})
}

View File

@ -0,0 +1,328 @@
// +build windows
package ping
import (
"errors"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"testing"
)
// Windows ping output format (should support multiple languages?)
var winPLPingOutput = `
Badanie 8.8.8.8 z 32 bajtami danych:
Odpowiedz z 8.8.8.8: bajtow=32 czas=49ms TTL=43
Odpowiedz z 8.8.8.8: bajtow=32 czas=46ms TTL=43
Odpowiedz z 8.8.8.8: bajtow=32 czas=48ms TTL=43
Odpowiedz z 8.8.8.8: bajtow=32 czas=57ms TTL=43
Statystyka badania ping dla 8.8.8.8:
Pakiety: Wyslane = 4, Odebrane = 4, Utracone = 0
(0% straty),
Szacunkowy czas bladzenia pakietww w millisekundach:
Minimum = 46 ms, Maksimum = 57 ms, Czas sredni = 50 ms
`
// Windows ping output format (should support multiple languages?)
var winENPingOutput = `
Pinging 8.8.8.8 with 32 bytes of data:
Reply from 8.8.8.8: bytes=32 time=52ms TTL=43
Reply from 8.8.8.8: bytes=32 time=50ms TTL=43
Reply from 8.8.8.8: bytes=32 time=50ms TTL=43
Reply from 8.8.8.8: bytes=32 time=51ms TTL=43
Ping statistics for 8.8.8.8:
Packets: Sent = 4, Received = 4, Lost = 0 (0% loss),
Approximate round trip times in milli-seconds:
Minimum = 50ms, Maximum = 52ms, Average = 50ms
`
func TestHost(t *testing.T) {
trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput)
assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, recReply, "4 replies were received")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 46, min, "Min 46")
assert.Equal(t, 57, max, "max 57")
trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput)
assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, recReply, "4 replies were received")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 50, min, "Min 50")
assert.Equal(t, 52, max, "Max 52")
}
func mockHostPinger(timeout float64, args ...string) (string, error) {
return winENPingOutput, nil
}
// Test that Gather function works on a normal ping
func TestPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com", "www.reddit.com"},
pingHost: mockHostPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 4,
"reply_received": 4,
"percent_packet_loss": 0.0,
"percent_reply_loss": 0.0,
"average_response_ms": 50,
"minimum_response_ms": 50,
"maximum_response_ms": 52,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
tags = map[string]string{"url": "www.reddit.com"}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}
var errorPingOutput = `
Badanie nask.pl [195.187.242.157] z 32 bajtami danych:
Upłynął limit czasu żądania.
Upłynął limit czasu żądania.
Upłynął limit czasu żądania.
Upłynął limit czasu żądania.
Statystyka badania ping dla 195.187.242.157:
Pakiety: Wysłane = 4, Odebrane = 0, Utracone = 4
(100% straty),
`
func mockErrorHostPinger(timeout float64, args ...string) (string, error) {
return errorPingOutput, errors.New("No packets received")
}
// Test that Gather works on a ping with no transmitted packets, even though the
// command returns an error
func TestBadPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.amazon.com"},
pingHost: mockErrorHostPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.amazon.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 0,
"reply_received": 0,
"percent_packet_loss": 100.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}
var lossyPingOutput = `
Badanie thecodinglove.com [66.6.44.4] z 9800 bajtami danych:
Upłynął limit czasu żądania.
Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48
Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48
Odpowiedź z 66.6.44.4: bajtów=9800 czas=118ms TTL=48
Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48
Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48
Upłynął limit czasu żądania.
Odpowiedź z 66.6.44.4: bajtów=9800 czas=119ms TTL=48
Odpowiedź z 66.6.44.4: bajtów=9800 czas=116ms TTL=48
Statystyka badania ping dla 66.6.44.4:
Pakiety: Wysłane = 9, Odebrane = 7, Utracone = 2
(22% straty),
Szacunkowy czas błądzenia pakietów w millisekundach:
Minimum = 114 ms, Maksimum = 119 ms, Czas średni = 115 ms
`
func mockLossyHostPinger(timeout float64, args ...string) (string, error) {
return lossyPingOutput, nil
}
// Test that Gather works on a ping with lossy packets
func TestLossyPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com"},
pingHost: mockLossyHostPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 9,
"packets_received": 7,
"reply_received": 7,
"percent_packet_loss": 22.22222222222222,
"percent_reply_loss": 22.22222222222222,
"average_response_ms": 115,
"minimum_response_ms": 114,
"maximum_response_ms": 119,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}
// Fatal ping output (invalid argument)
var fatalPingOutput = `
Bad option -d.
Usage: ping [-t] [-a] [-n count] [-l size] [-f] [-i TTL] [-v TOS]
[-r count] [-s count] [[-j host-list] | [-k host-list]]
[-w timeout] [-R] [-S srcaddr] [-4] [-6] target_name
Options:
-t Ping the specified host until stopped.
To see statistics and continue - type Control-Break;
To stop - type Control-C.
-a Resolve addresses to hostnames.
-n count Number of echo requests to send.
-l size Send buffer size.
-f Set Don't Fragment flag in packet (IPv4-only).
-i TTL Time To Live.
-v TOS Type Of Service (IPv4-only. This setting has been deprecated
and has no effect on the type of service field in the IP Header).
-r count Record route for count hops (IPv4-only).
-s count Timestamp for count hops (IPv4-only).
-j host-list Loose source route along host-list (IPv4-only).
-k host-list Strict source route along host-list (IPv4-only).
-w timeout Timeout in milliseconds to wait for each reply.
-R Use routing header to test reverse route also (IPv6-only).
-S srcaddr Source address to use.
-4 Force using IPv4.
-6 Force using IPv6.
`
func mockFatalHostPinger(timeout float64, args ...string) (string, error) {
return fatalPingOutput, errors.New("So very bad")
}
// Test that a fatal ping command does not gather any statistics.
func TestFatalPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.amazon.com"},
pingHost: mockFatalHostPinger,
}
p.Gather(&acc)
assert.True(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should have an errors field")
assert.False(t, acc.HasIntField("ping", "packets_transmitted"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "packets_received"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
var UnreachablePingOutput = `
Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
Request timed out.
Request timed out.
Reply from 194.204.175.50: Destination net unreachable.
Request timed out.
Ping statistics for 8.8.8.8:
Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
`
func mockUnreachableHostPinger(timeout float64, args ...string) (string, error) {
return UnreachablePingOutput, errors.New("So very bad")
}
//Reply from 185.28.251.217: TTL expired in transit.
// In case of a 'Destination net unreachable' reply, the ping app counts a received packet,
// which is not what we need: it does not contain a valid metric, so treat it as lost.
func TestUnreachablePingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com"},
pingHost: mockUnreachableHostPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 1,
"reply_received": 0,
"percent_packet_loss": 75.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
var TTLExpiredPingOutput = `
Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
Request timed out.
Request timed out.
Reply from 185.28.251.217: TTL expired in transit.
Request timed out.
Ping statistics for 8.8.8.8:
Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
`
func mockTTLExpiredPinger(timeout float64, args ...string) (string, error) {
return TTLExpiredPingOutput, errors.New("So very bad")
}
// In case of a 'TTL expired in transit' reply, the ping app counts a received packet,
// which is not what we need: it does not contain a valid metric, so treat it as lost.
func TestTTLExpiredPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com"},
pingHost: mockTTLExpiredPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 1,
"reply_received": 0,
"percent_packet_loss": 75.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}

View File

@ -17,6 +17,7 @@ import (
type Postgresql struct {
Address string
Databases []string
IgnoredDatabases []string
OrderedColumns []string
AllColumns []string
sanitizedAddress string
@ -40,8 +41,12 @@ var sampleConfig = `
##
address = "host=localhost user=postgres sslmode=disable"
## A list of databases to explicitly ignore. If not specified, metrics for all
## databases are gathered. Do NOT use with the 'databases' option.
# ignored_databases = ["postgres", "template0", "template1"]
## A list of databases to pull metrics about. If not specified, metrics for all
## databases are gathered. Do NOT use with the 'ignored_databases' option.
# databases = ["app_production", "testing"]
`
@ -73,8 +78,11 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
defer db.Close()
if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 {
query = `SELECT * FROM pg_stat_database`
} else if len(p.IgnoredDatabases) != 0 {
query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname NOT IN ('%s')`,
strings.Join(p.IgnoredDatabases, "','"))
} else {
query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`,
strings.Join(p.Databases, "','"))
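For illustration, the `NOT IN` branch above expands as follows; the database names in this standalone sketch are hypothetical config values, not from this diff:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical config: ignored_databases = ["template0", "template1"]
	ignored := []string{"template0", "template1"}
	query := fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname NOT IN ('%s')`,
		strings.Join(ignored, "','"))
	fmt.Println(query)
	// SELECT * FROM pg_stat_database WHERE datname NOT IN ('template0','template1')
}
```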

View File

@ -150,3 +150,75 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
assert.False(t, acc.HasMeasurement(col))
}
}
func TestPostgresqlDatabaseWhitelistTest(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := &Postgresql{
Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
testutil.GetLocalHost()),
Databases: []string{"template0"},
}
var acc testutil.Accumulator
err := p.Gather(&acc)
require.NoError(t, err)
var foundTemplate0 = false
var foundTemplate1 = false
for _, pnt := range acc.Metrics {
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template0" {
foundTemplate0 = true
}
}
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template1" {
foundTemplate1 = true
}
}
}
assert.True(t, foundTemplate0)
assert.False(t, foundTemplate1)
}
func TestPostgresqlDatabaseBlacklistTest(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := &Postgresql{
Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
testutil.GetLocalHost()),
IgnoredDatabases: []string{"template0"},
}
var acc testutil.Accumulator
err := p.Gather(&acc)
require.NoError(t, err)
var foundTemplate0 = false
var foundTemplate1 = false
for _, pnt := range acc.Metrics {
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template0" {
foundTemplate0 = true
}
}
if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "template1" {
foundTemplate1 = true
}
}
}
assert.False(t, foundTemplate0)
assert.True(t, foundTemplate1)
}

View File

@ -266,29 +266,31 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula
tags := map[string]string{}
tags["server"] = tagAddress
tags["db"] = dbname.String()
fields := make(map[string]interface{})
COLUMN:
for col, val := range columnMap {
log.Printf("D! postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
_, ignore := ignoredColumns[col]
if ignore || *val == nil {
continue
}
for _, tag := range p.AdditionalTags {
if col != tag {
continue
}
switch v := (*val).(type) {
case []byte:
tags[col] = string(v)
case int64:
tags[col] = fmt.Sprintf("%d", v)
}
continue COLUMN
}
if v, ok := (*val).([]byte); ok {
fields[col] = string(v)
} else {
fields[col] = *val
}
}
acc.AddFields(meas_name, fields, tags)
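The labeled `continue COLUMN` above hands a column off as a tag and skips the field assignment for that iteration. A minimal standalone sketch of the pattern (the column and tag names here are illustrative):

```go
package main

import "fmt"

func main() {
	tagNames := []string{"db", "server"}
COLUMN:
	for _, col := range []string{"db", "xact_commit", "server"} {
		for _, tag := range tagNames {
			if col == tag {
				fmt.Println("tag:", col)
				continue COLUMN // skip the field branch below
			}
		}
		fmt.Println("field:", col)
	}
}
```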

Some files were not shown because too many files have changed in this diff