diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 19bb38765..2e838a8e4 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,5 +1,5 @@
 ### Required for all PRs:
-- [ ] CHANGELOG.md updated
+- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
 - [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
 - [ ] README.md updated (if adding a new plugin)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dda3ba750..f2aaeba55 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,19 +1,81 @@
-## v1.0 [unreleased]
+## v1.1 [unreleased]
+
+### Release Notes
+
+- On systemd, Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
+On most systems, the logs will be directed to the systemd journal and can be
+accessed with `journalctl -u telegraf.service`. Consult the systemd journal
+documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf#L70)
+available in 1.1, which will allow users to easily configure telegraf to
+continue sending logs to /var/log/telegraf/telegraf.log.
 
 ### Features
 
-- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
+- [#1732](https://github.com/influxdata/telegraf/pull/1732): Telegraf systemd service, log to journal.
+- [#1782](https://github.com/influxdata/telegraf/pull/1782): Allow numeric and non-string values for tag_keys.
+- [#1694](https://github.com/influxdata/telegraf/pull/1694): Add Gauge and Counter metric types.
+- [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carriage returns from exec plugin output on Windows.
+- [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout.
+- [#1607](https://github.com/influxdata/telegraf/pull/1607): Massage metric names in Instrumental output plugin.
+- [#1572](https://github.com/influxdata/telegraf/pull/1572): mesos improvements.
+- [#1513](https://github.com/influxdata/telegraf/issues/1513): Add Ceph Cluster Performance Statistics.
+- [#1650](https://github.com/influxdata/telegraf/issues/1650): Ability to configure response_timeout in httpjson input.
+- [#1685](https://github.com/influxdata/telegraf/issues/1685): Add additional redis metrics.
+- [#1539](https://github.com/influxdata/telegraf/pull/1539): Added capability to send metrics through the HTTP API for OpenTSDB.
+- [#1471](https://github.com/influxdata/telegraf/pull/1471): iptables input plugin.
+- [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin.
+- [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname to each docker measurement.
+- [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin.
+- [#1407](https://github.com/influxdata/telegraf/pull/1407): HTTP service listener input plugin.
+- [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql.
+- [#1791](https://github.com/influxdata/telegraf/pull/1791): Add Docker container state metrics to Docker input plugin output.
+- [#1755](https://github.com/influxdata/telegraf/issues/1755): Add support to SNMP for IP & MAC address conversion.
+- [#1729](https://github.com/influxdata/telegraf/issues/1729): Add support to SNMP for OID index suffixes.
+- [#1813](https://github.com/influxdata/telegraf/pull/1813): Change default arguments for SNMP plugin.
+- [#1686](https://github.com/influxdata/telegraf/pull/1686): Mesos input plugin: very high-cardinality mesos-task metrics removed.
+- [#1838](https://github.com/influxdata/telegraf/pull/1838): Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
 
 ### Bugfixes
 
-- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures.
-- [#1477](https://github.com/influxdata/telegraf/issues/1477): nstat: fix inaccurate config panic.
-- [#1481](https://github.com/influxdata/telegraf/issues/1481): jolokia: fix handling multiple multi-dimensional attributes.
+- [#1746](https://github.com/influxdata/telegraf/issues/1746): Fix handling of non-string values for JSON keys listed in tag_keys.
+- [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2.
+- [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing.
+- [#1716](https://github.com/influxdata/telegraf/issues/1716): Fix sensors plugin `strconv.ParseFloat: parsing "": invalid syntax` error.
+- [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic.
+- [#1764](https://github.com/influxdata/telegraf/issues/1764): Fix kafka consumer panic when a nil error is returned down the errs channel.
+- [#1768](https://github.com/influxdata/telegraf/pull/1768): Speed up statsd parsing.
+- [#1751](https://github.com/influxdata/telegraf/issues/1751): Fix powerdns integer parse error handling.
+- [#1752](https://github.com/influxdata/telegraf/issues/1752): Fix varnish plugin defaults not being used.
+- [#1517](https://github.com/influxdata/telegraf/issues/1517): Fix Windows glob paths.
+- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on Windows.
+- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
+- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver: fix issue when a case-sensitive collation is activated.
+- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with large payloads.
+- [#1833](https://github.com/influxdata/telegraf/issues/1833): Fix translating SNMP fields not in MIB.
+- [#1835](https://github.com/influxdata/telegraf/issues/1835): Fix SNMP emitting empty fields.
+- [#1854](https://github.com/influxdata/telegraf/pull/1853): SQL Server waitstats truncation bug.
 
-## v1.0 beta 3 [2016-07-18]
+## v1.0.1 [2016-09-26]
+
+### Bugfixes
+
+- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes.
+- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
+- [#1773](https://github.com/influxdata/telegraf/issues/1773): Add configurable timeout to influxdb input plugin.
+- [#1785](https://github.com/influxdata/telegraf/pull/1785): Fix statsd no default value panic.
+
+## v1.0 [2016-09-08]
 
 ### Release Notes
 
+**Breaking Change**: The SNMP plugin is being deprecated in its current form.
+There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
+which fixes many of the issues and confusion
+of its predecessor.
+If you want to continue using the deprecated SNMP plugin, you will need to
+change your config file from `[[inputs.snmp]]` to `[[inputs.snmp_legacy]]`.
+The configuration of the new SNMP plugin is _not_
+backwards-compatible.
+
 **Breaking Change**: Aerospike main server node measurements have been renamed
 aerospike_node. Aerospike namespace measurements have been renamed to
 aerospike_namespace. They will also now be tagged with the node_name
@@ -44,8 +106,19 @@ should now look like:
     path = "/"
 ```
 
+- Telegraf now supports running as an official Windows service,
+which can be installed via
+`> C:\Program Files\Telegraf\telegraf.exe --service install`
+
+- `flush_jitter` behavior has been changed. The random jitter will now be
+evaluated at every flush interval, rather than once at startup. This makes it
+consistent with the behavior of `collection_jitter`.
+
 ### Features
 
+- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
+- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
+- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
 - [#1503](https://github.com/influxdata/telegraf/pull/1503): Add tls support for certs to RabbitMQ input plugin
 - [#1289](https://github.com/influxdata/telegraf/pull/1289): webhooks input plugin. Thanks @francois2metz and @cduez!
 - [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar webhook plugin.
@@ -59,10 +132,41 @@ should now look like:
 - [#1500](https://github.com/influxdata/telegraf/pull/1500): Aerospike plugin refactored to use official client lib.
 - [#1434](https://github.com/influxdata/telegraf/pull/1434): Add measurement name arg to logparser plugin.
 - [#1479](https://github.com/influxdata/telegraf/pull/1479): logparser: change resp_code from a field to a tag.
-- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
+- [#1411](https://github.com/influxdata/telegraf/pull/1411): Implement support for fetching hddtemp data.
+- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
+- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
+- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine.
+- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
+- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL.
+- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
+- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
+- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
+- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
+- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
+- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
+- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser. +- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren! +- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats. +- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration. +- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified +- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second. +- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified +- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument. +- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin. +- [#1543](https://github.com/influxdata/telegraf/pull/1543): Official Windows service. +- [#1414](https://github.com/influxdata/telegraf/pull/1414): Forking sensors command to remove C package dependency. +- [#1389](https://github.com/influxdata/telegraf/pull/1389): Add a new SNMP plugin. ### Bugfixes +- [#1619](https://github.com/influxdata/telegraf/issues/1619): Fix `make windows` build target +- [#1519](https://github.com/influxdata/telegraf/pull/1519): Fix error race conditions and partial failures. +- [#1477](https://github.com/influxdata/telegraf/issues/1477): nstat: fix inaccurate config panic. +- [#1481](https://github.com/influxdata/telegraf/issues/1481): jolokia: fix handling multiple multi-dimensional attributes. +- [#1430](https://github.com/influxdata/telegraf/issues/1430): Fix prometheus character sanitizing. Sanitize more win_perf_counters characters. +- [#1534](https://github.com/influxdata/telegraf/pull/1534): Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does). +- [#1379](https://github.com/influxdata/telegraf/issues/1379): Fix covering Amazon Linux for post remove flow. +- [#1584](https://github.com/influxdata/telegraf/issues/1584): procstat missing fields: read/write bytes & count - [#1472](https://github.com/influxdata/telegraf/pull/1472): diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality. - [#1426](https://github.com/influxdata/telegraf/pull/1426): nil metrics panic fix. - [#1384](https://github.com/influxdata/telegraf/pull/1384): Fix datarace in apache input plugin. @@ -81,19 +185,6 @@ should now look like: - [#1418](https://github.com/influxdata/telegraf/issues/1418): logparser: error and exit on file permissions/missing errors. - [#1499](https://github.com/influxdata/telegraf/pull/1499): Make the user able to specify full path for HAproxy stats - [#1521](https://github.com/influxdata/telegraf/pull/1521): Fix Redis url, an extra "tcp://" was added. - -## v1.0 beta 2 [2016-06-21] - -### Features - -- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric. -- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection. 
-- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine -- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns. -- [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL - -### Bugfixes - - [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary. - [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection. - [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string. @@ -101,50 +192,6 @@ should now look like: - [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "". - [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character. - [#1396](https://github.com/influxdata/telegraf/pull/1396): Prometheus input plugin now supports x509 certs authentication - -## v1.0 beta 1 [2016-06-07] - -### Release Notes - -- `flush_jitter` behavior has been changed. The random jitter will now be -evaluated at every flush interval, rather than once at startup. This makes it -consistent with the behavior of `collection_jitter`. - -- All AWS plugins now utilize a standard mechanism for evaluating credentials. -This allows all AWS plugins to support environment variables, shared credential -files & profiles, and role assumptions. See the specific plugin README for -details. - -- The AWS CloudWatch input plugin can now declare a wildcard value for a metric -dimension. This causes the plugin to read all metrics that contain the specified -dimension key regardless of value. This is used to export collections of metrics -without having to know the dimension values ahead of time. - -- The AWS CloudWatch input plugin can now be configured with the `cache_ttl` -attribute. This configures the TTL of the internal metric cache. This is useful -in conjunction with wildcard dimension values as it will control the amount of -time before a new metric is included by the plugin. - -### Features - -- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input pluging. -- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash -- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy! -- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm! -- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman! -- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin. -- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser. -- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren! -- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats. -- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration. 
-- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified -- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second. -- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified -- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument. -- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin. - -### Bugfixes - - [#1252](https://github.com/influxdata/telegraf/pull/1252) & [#1279](https://github.com/influxdata/telegraf/pull/1279): Fix systemd service. Thanks @zbindenren & @PierreF! - [#1221](https://github.com/influxdata/telegraf/pull/1221): Fix influxdb n_shards counter. - [#1258](https://github.com/influxdata/telegraf/pull/1258): Fix potential kernel plugin integer parse error. @@ -154,6 +201,12 @@ time before a new metric is included by the plugin. - [#1316](https://github.com/influxdata/telegraf/pull/1316): Removed leaked "database" tag on redis metrics. Thanks @PierreF! - [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory. - [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function. +- [#1586](https://github.com/influxdata/telegraf/pull/1586): Remove IF NOT EXISTS from influxdb output database creation. +- [#1600](https://github.com/influxdata/telegraf/issues/1600): Fix quoting with text values in postgresql_extensible plugin. +- [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic. +- [#1634](https://github.com/influxdata/telegraf/issues/1634): Fix ntpq panic when field is missing. +- [#1637](https://github.com/influxdata/telegraf/issues/1637): Sanitize graphite output field names. +- [#1695](https://github.com/influxdata/telegraf/pull/1695): Fix MySQL plugin not sending 0 value fields. ## v0.13.1 [2016-05-24] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f02f109fd..ec7a35363 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,6 +11,8 @@ Output plugins READMEs are less structured, but any information you can provide on how the data will look is appreciated. See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) for a good example. +1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin. +1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you. ## GoDoc @@ -30,7 +32,7 @@ Assuming you can already build the project, run these in the telegraf directory: 1. `go get github.com/sparrc/gdm` 1. `gdm restore` -1. `gdm save` +1. 
`GOOS=linux gdm save`
 
 ## Input Plugins
 
@@ -82,9 +84,9 @@ func (s *Simple) SampleConfig() string {
 
 func (s *Simple) Gather(acc telegraf.Accumulator) error {
 	if s.Ok {
-		acc.Add("state", "pretty good", nil)
+		acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
 	} else {
-		acc.Add("state", "not great", nil)
+		acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
 	}
 
 	return nil
@@ -95,6 +97,13 @@ func init() {
 }
 ```
 
+## Adding Typed Metrics
+
+In addition to the `AddFields` function, the accumulator also supports
+`AddGauge` and `AddCounter` functions. These functions are for adding _typed_
+metrics. Metric types are ignored for the InfluxDB output, but can be used
+for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
+
 ## Input Plugins Accepting Arbitrary Data Formats
 
 Some input plugins (such as
diff --git a/Godeps b/Godeps
index 5caa6a9e2..76dc1673e 100644
--- a/Godeps
+++ b/Godeps
@@ -1,6 +1,6 @@
 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
 github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
-github.com/aerospike/aerospike-client-go 45863b7fd8640dc12f7fdd397104d97e1986f25a
+github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
 github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
@@ -29,14 +29,17 @@ github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
 github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
+github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
+github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
 github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
-github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
-github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
+github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
+github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
 github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
 github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
@@ -44,9 +47,8 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil 586bb697f3ec9f8ec08ffefe18f521a64534037c
-github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
-github.com/sparrc/aerospike-client-go d4bb42d2c2d39dae68e054116f4538af189e05d5
+github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
+github.com/soniah/gosnmp 
3fe3beb30fa9700988893c56a63b1df8e1b68c26 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2 diff --git a/Godeps_windows b/Godeps_windows index cc3077fd4..067c98c1c 100644 --- a/Godeps_windows +++ b/Godeps_windows @@ -1,59 +1,12 @@ -github.com/Microsoft/go-winio 9f57cbbcbcb41dea496528872a4f0e37a4f7ae98 -github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 -github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc +github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 -github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 -github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857 -github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4 -github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 -github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1 -github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2 -github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 -github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc -github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d -github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537 -github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb -github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444 -github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 -github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 -github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86 -github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4 -github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee -github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 -github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 -github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 -github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a -github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e -github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 -github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da -github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 -github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 -github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 -github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 -github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 -github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 -github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd -github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 -github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b -github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 -github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa -github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 -github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 -github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 -github.com/prometheus/procfs 
406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 -github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42 -github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5 -github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d -github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 -github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c -github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866 -github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 -github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 -golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172 -golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34 -gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef -gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 -gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886 -gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 +github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7 +github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88 +github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad +golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8 +github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2 +github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 +gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb +gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8 diff --git a/Makefile b/Makefile index ee96e10bd..3b4ecfa1d 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,6 @@ VERSION := $(shell sh -c 'git describe --always --tags') +BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD') +COMMIT := $(shell sh -c 'git rev-parse HEAD') ifdef GOBIN PATH := $(GOBIN):$(PATH) else @@ -13,17 +15,18 @@ windows: prepare-windows build-windows # Only run the build (no dependency grabbing) build: - go install -ldflags "-X main.version=$(VERSION)" ./... + go install -ldflags \ + "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./... 
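+# Note: the -X linker flags used by the build targets can only set string
+# variables that already exist in the target package; they assume
+# cmd/telegraf declares package-level `version`, `commit`, and `branch`
+# variables in package main (commit/branch presumably added in this PR).
+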
build-windows: - go build -o telegraf.exe -ldflags \ - "-X main.version=$(VERSION)" \ + GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \ + "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \ ./cmd/telegraf/telegraf.go build-for-docker: CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \ - "-s -X main.version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go + "-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \ + ./cmd/telegraf/telegraf.go # run package script package: @@ -37,10 +40,12 @@ prepare: # Use the windows godeps file to prepare dependencies prepare-windows: go get github.com/sparrc/gdm + gdm restore gdm restore -f Godeps_windows # Run all docker containers necessary for unit tests docker-run: + docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0 docker run --name kafka \ -e ADVERTISED_HOST=localhost \ -e ADVERTISED_PORT=9092 \ @@ -51,29 +56,28 @@ docker-run: docker run --name postgres -p "5432:5432" -d postgres docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management docker run --name redis -p "6379:6379" -d redis - docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt docker run --name riemann -p "5555:5555" -d blalor/riemann - docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim + docker run --name nats -p "4222:4222" -d nats # Run docker containers necessary for CircleCI unit tests docker-run-circle: + docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0 docker run --name kafka \ -e ADVERTISED_HOST=localhost \ -e ADVERTISED_PORT=9092 \ -p "2181:2181" -p "9092:9092" \ -d spotify/kafka - docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt docker run --name riemann -p "5555:5555" -d blalor/riemann - docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim + docker run --name nats -p "4222:4222" -d nats # Kill all docker containers, ignore errors docker-kill: - -docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp - -docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp + -docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats + -docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats # Run full unit tests using docker containers (includes setup and teardown) test: vet docker-kill docker-run diff --git a/README.md b/README.md index aa8d9e039..737a3fe07 100644 --- a/README.md +++ b/README.md @@ -20,12 +20,12 @@ new plugins. 
### Linux deb and rpm Packages: Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_amd64.deb -* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.x86_64.rpm +* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb +* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm Latest (arm): -* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_armhf.deb -* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.armhf.rpm +* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb +* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm ##### Package Instructions: @@ -46,14 +46,14 @@ to use this repo to install & update telegraf. ### Linux tarballs: Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_amd64.tar.gz -* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_i386.tar.gz -* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_armhf.tar.gz +* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz +* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz +* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz ### FreeBSD tarball: Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_freebsd_amd64.tar.gz +* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz ### Ansible Role: @@ -69,7 +69,7 @@ brew install telegraf ### Windows Binaries (EXPERIMENTAL) Latest: -* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_windows_amd64.zip +* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip ### From Source: @@ -156,10 +156,12 @@ Currently implemented sources: * [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) * [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat) * [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy) +* [hddtemp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hddtemp) * [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response) * [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin) * [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb) * [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor) +* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables) * [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia) * [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs) * [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2) @@ -187,8 +189,9 @@ Currently implemented sources: * [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis) * [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb) * [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak) -* [sensors ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source) +* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) * 
[snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp) +* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy) * [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft) * [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy) * [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish) @@ -210,18 +213,21 @@ Currently implemented sources: Telegraf can also collect metrics via the following service plugins: +* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener) +* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer) +* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer) +* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer) +* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer) +* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser) * [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd) * [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail) -* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener) * [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener) -* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer) -* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer) -* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer) +* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener) * [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks) + * [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack) * [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github) * [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill) * [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar) -* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer) We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API. @@ -241,6 +247,7 @@ want to add support for another service or third-party API. 
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka) * [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato) * [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt) +* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats) * [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq) * [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) * [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client) diff --git a/accumulator.go b/accumulator.go index 15c5485f8..bb6e4dc85 100644 --- a/accumulator.go +++ b/accumulator.go @@ -2,20 +2,35 @@ package telegraf import "time" +// Accumulator is an interface for "accumulating" metrics from input plugin(s). +// The metrics are sent down a channel shared between all input plugins and then +// flushed on the configured flush_interval. type Accumulator interface { + // AddFields adds a metric to the accumulator with the given measurement + // name, fields, and tags (and timestamp). If a timestamp is not provided, + // then the accumulator sets it to "now". // Create a point with a value, decorating it with tags // NOTE: tags is expected to be owned by the caller, don't mutate // it after passing to Add. - Add(measurement string, - value interface{}, - tags map[string]string, - t ...time.Time) - AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) + // AddGauge is the same as AddFields, but will add the metric as a "Gauge" type + AddGauge(measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time) + + // AddCounter is the same as AddFields, but will add the metric as a "Counter" type + AddCounter(measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time) + + AddError(err error) + Debug() bool SetDebug(enabled bool) diff --git a/agent/accumulator.go b/agent/accumulator.go index 504731720..752e2b91f 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "math" + "sync/atomic" "time" "github.com/influxdata/telegraf" @@ -11,7 +12,7 @@ import ( ) func NewAccumulator( - inputConfig *internal_models.InputConfig, + inputConfig *models.InputConfig, metrics chan telegraf.Metric, ) *accumulator { acc := accumulator{} @@ -30,27 +31,11 @@ type accumulator struct { // print every point added to the accumulator trace bool - inputConfig *internal_models.InputConfig - - prefix string + inputConfig *models.InputConfig precision time.Duration -} -func (ac *accumulator) Add( - measurement string, - value interface{}, - tags map[string]string, - t ...time.Time, -) { - fields := make(map[string]interface{}) - fields["value"] = value - - if !ac.inputConfig.Filter.ShouldNamePass(measurement) { - return - } - - ac.AddFields(measurement, fields, tags, t...) 
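+	// errCount tracks how many errors have been passed to AddError. It is
+	// incremented atomically (see AddError below) and checked by
+	// (*Agent).Test so a test run fails fast when an input reports errors.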
+ errCount uint64 } func (ac *accumulator) AddFields( @@ -59,16 +44,47 @@ func (ac *accumulator) AddFields( tags map[string]string, t ...time.Time, ) { + if m := ac.makeMetric(measurement, fields, tags, telegraf.Untyped, t...); m != nil { + ac.metrics <- m + } +} + +func (ac *accumulator) AddGauge( + measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time, +) { + if m := ac.makeMetric(measurement, fields, tags, telegraf.Gauge, t...); m != nil { + ac.metrics <- m + } +} + +func (ac *accumulator) AddCounter( + measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time, +) { + if m := ac.makeMetric(measurement, fields, tags, telegraf.Counter, t...); m != nil { + ac.metrics <- m + } +} + +// makeMetric either returns a metric, or returns nil if the metric doesn't +// need to be created (because of filtering, an error, etc.) +func (ac *accumulator) makeMetric( + measurement string, + fields map[string]interface{}, + tags map[string]string, + mType telegraf.ValueType, + t ...time.Time, +) telegraf.Metric { if len(fields) == 0 || len(measurement) == 0 { - return + return nil } - - if !ac.inputConfig.Filter.ShouldNamePass(measurement) { - return - } - - if !ac.inputConfig.Filter.ShouldTagsPass(tags) { - return + if tags == nil { + tags = make(map[string]string) } // Override measurement name if set @@ -83,9 +99,6 @@ func (ac *accumulator) AddFields( measurement = measurement + ac.inputConfig.MeasurementSuffix } - if tags == nil { - tags = make(map[string]string) - } // Apply plugin-wide tags if set for k, v := range ac.inputConfig.Tags { if _, ok := tags[k]; !ok { @@ -98,44 +111,37 @@ func (ac *accumulator) AddFields( tags[k] = v } } - ac.inputConfig.Filter.FilterTags(tags) - result := make(map[string]interface{}) + // Apply the metric filter(s) + if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok { + return nil + } + for k, v := range fields { - // Filter out any filtered fields - if ac.inputConfig != nil { - if !ac.inputConfig.Filter.ShouldFieldsPass(k) { - continue - } - } - // Validate uint64 and float64 fields switch val := v.(type) { case uint64: // InfluxDB does not support writing uint64 if val < uint64(9223372036854775808) { - result[k] = int64(val) + fields[k] = int64(val) } else { - result[k] = int64(9223372036854775807) + fields[k] = int64(9223372036854775807) } continue case float64: // NaNs are invalid values in influxdb, skip measurement if math.IsNaN(val) || math.IsInf(val, 0) { if ac.debug { - log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+ + log.Printf("I! Measurement [%s] field [%s] has a NaN or Inf "+ "field, skipping", measurement, k) } + delete(fields, k) continue } } - result[k] = v - } - fields = nil - if len(result) == 0 { - return + fields[k] = v } var timestamp time.Time @@ -146,19 +152,37 @@ func (ac *accumulator) AddFields( } timestamp = timestamp.Round(ac.precision) - if ac.prefix != "" { - measurement = ac.prefix + measurement + var m telegraf.Metric + var err error + switch mType { + case telegraf.Counter: + m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp) + case telegraf.Gauge: + m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp) + default: + m, err = telegraf.NewMetric(measurement, tags, fields, timestamp) + } + if err != nil { + log.Printf("E! 
Error adding point [%s]: %s\n", measurement, err.Error()) + return nil } - m, err := telegraf.NewMetric(measurement, tags, result, timestamp) - if err != nil { - log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) - return - } if ac.trace { fmt.Println("> " + m.String()) } - ac.metrics <- m + + return m +} + +// AddError passes a runtime error to the accumulator. +// The error will be tagged with the plugin name and written to the log. +func (ac *accumulator) AddError(err error) { + if err == nil { + return + } + atomic.AddUint64(&ac.errCount, 1) + //TODO suppress/throttle consecutive duplicate errors? + log.Printf("E! Error in input [%s]: %s", ac.inputConfig.Name, err) } func (ac *accumulator) Debug() bool { diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 9bf681192..ef5a34ec9 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -1,8 +1,11 @@ package agent import ( + "bytes" "fmt" + "log" "math" + "os" "testing" "time" @@ -10,6 +13,7 @@ import ( "github.com/influxdata/telegraf/internal/models" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAdd(t *testing.T) { @@ -17,11 +21,17 @@ func TestAdd(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} - a.Add("acctest", float64(101), map[string]string{}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -38,17 +48,93 @@ func TestAdd(t *testing.T) { actual) } +func TestAddGauge(t *testing.T) { + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &models.InputConfig{} + + a.AddGauge("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddGauge("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddGauge("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) + + testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest value=101") + assert.Equal(t, testm.Type(), telegraf.Gauge) + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test value=101") + assert.Equal(t, testm.Type(), telegraf.Gauge) + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), + actual) + assert.Equal(t, testm.Type(), telegraf.Gauge) +} + +func TestAddCounter(t *testing.T) { + a := accumulator{} + now := time.Now() + a.metrics = make(chan telegraf.Metric, 10) + defer close(a.metrics) + a.inputConfig = &models.InputConfig{} + + a.AddCounter("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddCounter("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddCounter("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) + + 
testm := <-a.metrics + actual := testm.String() + assert.Contains(t, actual, "acctest value=101") + assert.Equal(t, testm.Type(), telegraf.Counter) + + testm = <-a.metrics + actual = testm.String() + assert.Contains(t, actual, "acctest,acc=test value=101") + assert.Equal(t, testm.Type(), telegraf.Counter) + + testm = <-a.metrics + actual = testm.String() + assert.Equal(t, + fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), + actual) + assert.Equal(t, testm.Type(), telegraf.Counter) +} + func TestAddNoPrecisionWithInterval(t *testing.T) { a := accumulator{} now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC) a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} a.SetPrecision(0, time.Second) - a.Add("acctest", float64(101), map[string]string{}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -70,12 +156,18 @@ func TestAddNoIntervalWithPrecision(t *testing.T) { now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC) a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} a.SetPrecision(time.Second, time.Millisecond) - a.Add("acctest", float64(101), map[string]string{}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -97,13 +189,19 @@ func TestAddDisablePrecision(t *testing.T) { now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC) a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} a.SetPrecision(time.Second, time.Millisecond) a.DisablePrecision() - a.Add("acctest", float64(101), map[string]string{}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -125,10 +223,12 @@ func TestDifferentPrecisions(t *testing.T) { now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC) a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} a.SetPrecision(0, time.Second) - 
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() assert.Equal(t, @@ -136,7 +236,9 @@ func TestDifferentPrecisions(t *testing.T) { actual) a.SetPrecision(0, time.Millisecond) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm = <-a.metrics actual = testm.String() assert.Equal(t, @@ -144,7 +246,9 @@ func TestDifferentPrecisions(t *testing.T) { actual) a.SetPrecision(0, time.Microsecond) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm = <-a.metrics actual = testm.String() assert.Equal(t, @@ -152,7 +256,9 @@ func TestDifferentPrecisions(t *testing.T) { actual) a.SetPrecision(0, time.Nanosecond) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm = <-a.metrics actual = testm.String() assert.Equal(t, @@ -166,11 +272,17 @@ func TestAddDefaultTags(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} - a.Add("acctest", float64(101), map[string]string{}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -192,7 +304,7 @@ func TestAddFields(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} fields := map[string]interface{}{ "usage": float64(99), @@ -225,7 +337,7 @@ func TestAddInfFields(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} fields := map[string]interface{}{ "usage": inf, @@ -253,7 +365,7 @@ func TestAddNaNFields(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} fields := map[string]interface{}{ "usage": nan, @@ -277,7 +389,7 @@ func TestAddUint64Fields(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} fields := map[string]interface{}{ "usage": uint64(99), @@ -306,7 +418,7 @@ func TestAddUint64Overflow(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} fields := map[string]interface{}{ "usage": uint64(9223372036854775808), @@ 
-336,11 +448,17 @@ func TestAddInts(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} - a.Add("acctest", int(101), map[string]string{}) - a.Add("acctest", int32(101), map[string]string{"acc": "test"}) - a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": int(101)}, + map[string]string{}) + a.AddFields("acctest", + map[string]interface{}{"value": int32(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": int64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -363,10 +481,14 @@ func TestAddFloats(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} - a.Add("acctest", float32(101), map[string]string{"acc": "test"}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float32(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -385,10 +507,14 @@ func TestAddStrings(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} - a.Add("acctest", "test", map[string]string{"acc": "test"}) - a.Add("acctest", "foo", map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": "test"}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": "foo"}, + map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -407,10 +533,12 @@ func TestAddBools(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - a.inputConfig = &internal_models.InputConfig{} + a.inputConfig = &models.InputConfig{} - a.Add("acctest", true, map[string]string{"acc": "test"}) - a.Add("acctest", false, map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": true}, map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": false}, map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -429,16 +557,22 @@ func TestAccFilterTags(t *testing.T) { now := time.Now() a.metrics = make(chan telegraf.Metric, 10) defer close(a.metrics) - filter := internal_models.Filter{ + filter := models.Filter{ TagExclude: []string{"acc"}, } - assert.NoError(t, filter.CompileFilter()) - a.inputConfig = &internal_models.InputConfig{} + assert.NoError(t, filter.Compile()) + a.inputConfig = &models.InputConfig{} a.inputConfig.Filter = filter - a.Add("acctest", float64(101), map[string]string{}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}) - a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + map[string]string{"acc": "test"}) + a.AddFields("acctest", + map[string]interface{}{"value": float64(101)}, + 
map[string]string{"acc": "test"}, now) testm := <-a.metrics actual := testm.String() @@ -454,3 +588,27 @@ func TestAccFilterTags(t *testing.T) { fmt.Sprintf("acctest value=101 %d", now.UnixNano()), actual) } + +func TestAccAddError(t *testing.T) { + errBuf := bytes.NewBuffer(nil) + log.SetOutput(errBuf) + defer log.SetOutput(os.Stderr) + + a := accumulator{} + a.inputConfig = &models.InputConfig{} + a.inputConfig.Name = "mock_plugin" + + a.AddError(fmt.Errorf("foo")) + a.AddError(fmt.Errorf("bar")) + a.AddError(fmt.Errorf("baz")) + + errs := bytes.Split(errBuf.Bytes(), []byte{'\n'}) + assert.EqualValues(t, 3, a.errCount) + require.Len(t, errs, 4) // 4 because of trailing newline + assert.Contains(t, string(errs[0]), "mock_plugin") + assert.Contains(t, string(errs[0]), "foo") + assert.Contains(t, string(errs[1]), "mock_plugin") + assert.Contains(t, string(errs[1]), "bar") + assert.Contains(t, string(errs[2]), "mock_plugin") + assert.Contains(t, string(errs[2]), "baz") +} diff --git a/agent/agent.go b/agent/agent.go index ae520b89e..8fef8ca41 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -49,18 +49,16 @@ func (a *Agent) Connect() error { switch ot := o.Output.(type) { case telegraf.ServiceOutput: if err := ot.Start(); err != nil { - log.Printf("Service for output %s failed to start, exiting\n%s\n", + log.Printf("E! Service for output %s failed to start, exiting\n%s\n", o.Name, err.Error()) return err } } - if a.Config.Agent.Debug { - log.Printf("Attempting connection to output: %s\n", o.Name) - } + log.Printf("D! Attempting connection to output: %s\n", o.Name) err := o.Output.Connect() if err != nil { - log.Printf("Failed to connect to output %s, retrying in 15s, "+ + log.Printf("E! Failed to connect to output %s, retrying in 15s, "+ "error was '%s' \n", o.Name, err) time.Sleep(15 * time.Second) err = o.Output.Connect() @@ -68,9 +66,7 @@ func (a *Agent) Connect() error { return err } } - if a.Config.Agent.Debug { - log.Printf("Successfully connected to output: %s\n", o.Name) - } + log.Printf("D! Successfully connected to output: %s\n", o.Name) } return nil } @@ -88,13 +84,13 @@ func (a *Agent) Close() error { return err } -func panicRecover(input *internal_models.RunningInput) { +func panicRecover(input *models.RunningInput) { if err := recover(); err != nil { trace := make([]byte, 2048) runtime.Stack(trace, true) - log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n", + log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n", input.Name, err, trace) - log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " + + log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " + "stack trace, configuration, and OS information: " + "https://github.com/influxdata/telegraf/issues/new") } @@ -104,7 +100,7 @@ func panicRecover(input *internal_models.RunningInput) { // reporting interval. func (a *Agent) gatherer( shutdown chan struct{}, - input *internal_models.RunningInput, + input *models.RunningInput, interval time.Duration, metricC chan telegraf.Metric, ) error { @@ -117,7 +113,6 @@ func (a *Agent) gatherer( var outerr error acc := NewAccumulator(input.Config, metricC) - acc.SetDebug(a.Config.Agent.Debug) acc.SetPrecision(a.Config.Agent.Precision.Duration, a.Config.Agent.Interval.Duration) acc.setDefaultTags(a.Config.Tags) @@ -131,10 +126,8 @@ func (a *Agent) gatherer( if outerr != nil { return outerr } - if a.Config.Agent.Debug { - log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n", - input.Name, interval, elapsed) - } + log.Printf("D! 
Input [%s] gathered metrics, (%s interval) in %s\n", + input.Name, interval, elapsed) select { case <-shutdown: @@ -152,7 +145,7 @@ func (a *Agent) gatherer( // over. func gatherWithTimeout( shutdown chan struct{}, - input *internal_models.RunningInput, + input *models.RunningInput, acc *accumulator, timeout time.Duration, ) { @@ -167,11 +160,11 @@ func gatherWithTimeout( select { case err := <-done: if err != nil { - log.Printf("ERROR in input [%s]: %s", input.Name, err) + log.Printf("E! ERROR in input [%s]: %s", input.Name, err) } return case <-ticker.C: - log.Printf("ERROR: input [%s] took longer to collect than "+ + log.Printf("E! ERROR: input [%s] took longer to collect than "+ "collection interval (%s)", input.Name, timeout) continue @@ -215,6 +208,9 @@ func (a *Agent) Test() error { if err := input.Input.Gather(acc); err != nil { return err } + if acc.errCount > 0 { + return fmt.Errorf("Errors encountered during processing") + } // Special instructions for some inputs. cpu, for example, needs to be // run twice in order to return cpu usage percentages. @@ -237,11 +233,11 @@ func (a *Agent) flush() { wg.Add(len(a.Config.Outputs)) for _, o := range a.Config.Outputs { - go func(output *internal_models.RunningOutput) { + go func(output *models.RunningOutput) { defer wg.Done() err := output.Write() if err != nil { - log.Printf("Error writing to output [%s]: %s\n", + log.Printf("E! Error writing to output [%s]: %s\n", output.Name, err.Error()) } }(o) @@ -261,7 +257,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er for { select { case <-shutdown: - log.Println("Hang on, flushing any cached metrics before shutdown") + log.Println("I! Hang on, flushing any cached metrics before shutdown") a.flush() return nil case <-ticker.C: @@ -299,9 +295,9 @@ func copyMetric(m telegraf.Metric) telegraf.Metric { func (a *Agent) Run(shutdown chan struct{}) error { var wg sync.WaitGroup - log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+ + log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ "Flush Interval:%s \n", - a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet, + a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) // channel shared between all input threads for accumulating metrics @@ -312,13 +308,12 @@ func (a *Agent) Run(shutdown chan struct{}) error { switch p := input.Input.(type) { case telegraf.ServiceInput: acc := NewAccumulator(input.Config, metricC) - acc.SetDebug(a.Config.Agent.Debug) // Service input plugins should set their own precision of their // metrics. acc.DisablePrecision() acc.setDefaultTags(a.Config.Tags) if err := p.Start(acc); err != nil { - log.Printf("Service for input %s failed to start, exiting\n%s\n", + log.Printf("E! Service for input %s failed to start, exiting\n%s\n", input.Name, err.Error()) return err } @@ -336,7 +331,7 @@ func (a *Agent) Run(shutdown chan struct{}) error { go func() { defer wg.Done() if err := a.flusher(shutdown, metricC); err != nil { - log.Printf("Flusher routine failed, exiting: %s\n", err.Error()) + log.Printf("E! 
Flusher routine failed, exiting: %s\n", err.Error()) close(shutdown) } }() @@ -348,10 +343,10 @@ if input.Config.Interval != 0 { interval = input.Config.Interval } - go func(in *internal_models.RunningInput, interv time.Duration) { + go func(in *models.RunningInput, interv time.Duration) { defer wg.Done() if err := a.gatherer(shutdown, in, interv, metricC); err != nil { - log.Printf(err.Error()) + log.Printf("E! " + err.Error()) } }(input, interval) } diff --git a/circle.yml b/circle.yml index 7a269f29f..4d5ede725 100644 --- a/circle.yml +++ b/circle.yml @@ -4,9 +4,9 @@ machine: post: - sudo service zookeeper stop - go version - - go version | grep 1.6.2 || sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz + - go version | grep 1.7.1 || sudo rm -rf /usr/local/go + - wget https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.7.1.linux-amd64.tar.gz - go version dependencies: diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 6681ad073..dd8d8431b 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -6,19 +6,23 @@ import ( "log" "os" "os/signal" + "runtime" "strings" "syscall" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/internal/config" + "github.com/influxdata/telegraf/logger" "github.com/influxdata/telegraf/plugins/inputs" _ "github.com/influxdata/telegraf/plugins/inputs/all" "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" + + "github.com/kardianos/service" ) var fDebug = flag.Bool("debug", false, - "show metrics as they're generated to stdout") + "turn on debug logging") var fQuiet = flag.Bool("quiet", false, "run in quiet mode") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") @@ -39,12 +43,8 @@ var fOutputList = flag.Bool("output-list", false, "print available output plugins.") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") -var fInputFiltersLegacy = flag.String("filter", "", - "filter the inputs to enable, separator is :") -var fOutputFiltersLegacy = flag.String("outputfilter", "", - "filter the outputs to enable, separator is :") -var fConfigDirectoryLegacy = flag.String("configdirectory", "", - "directory containing additional *.conf files") +var fService = flag.String("service", "", + "operate on the service") // Telegraf version, populated by the linker. // ie, -ldflags "-X main.version=`git describe --always --tags`" @@ -54,6 +54,16 @@ var ( branch string ) +func init() { + // If commit or branch are not set, make that clear. + if commit == "" { + commit = "unknown" + } + if branch == "" { + branch = "unknown" + } +} + const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics. Usage: @@ -74,6 +84,7 @@ The flags are: -debug print metrics as they're generated to stdout -quiet run in quiet mode -version print the version to stdout + -service Control the service, ie, 'telegraf -service install' (windows only) In addition to the -config flag, telegraf will also load the config file from an environment variable or default location.
Precedence is: @@ -100,7 +111,19 @@ Examples: telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb ` -func main() { +var stop chan struct{} + +var srvc service.Service + +type program struct{} + +func reloadLoop(stop chan struct{}, s service.Service) { + defer func() { + if service.Interactive() { + os.Exit(0) + } + return + }() reload := make(chan bool, 1) reload <- true for <-reload { @@ -110,24 +133,11 @@ func main() { args := flag.Args() var inputFilters []string - if *fInputFiltersLegacy != "" { - fmt.Printf("WARNING '--filter' flag is deprecated, please use" + - " '--input-filter'") - inputFilter := strings.TrimSpace(*fInputFiltersLegacy) - inputFilters = strings.Split(":"+inputFilter+":", ":") - } if *fInputFilters != "" { inputFilter := strings.TrimSpace(*fInputFilters) inputFilters = strings.Split(":"+inputFilter+":", ":") } - var outputFilters []string - if *fOutputFiltersLegacy != "" { - fmt.Printf("WARNING '--outputfilter' flag is deprecated, please use" + - " '--output-filter'") - outputFilter := strings.TrimSpace(*fOutputFiltersLegacy) - outputFilters = strings.Split(":"+outputFilter+":", ":") - } if *fOutputFilters != "" { outputFilter := strings.TrimSpace(*fOutputFilters) outputFilters = strings.Split(":"+outputFilter+":", ":") @@ -136,8 +146,7 @@ func main() { if len(args) > 0 { switch args[0] { case "version": - v := fmt.Sprintf("Telegraf - version %s", version) - fmt.Println(v) + fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit) return case "config": config.PrintSampleConfig(inputFilters, outputFilters) @@ -145,34 +154,27 @@ func main() { } } - if *fOutputList { + // switch for flags which just do something and exit immediately + switch { + case *fOutputList: fmt.Println("Available Output Plugins:") for k, _ := range outputs.Outputs { fmt.Printf(" %s\n", k) } return - } - - if *fInputList { + case *fInputList: fmt.Println("Available Input Plugins:") for k, _ := range inputs.Inputs { fmt.Printf(" %s\n", k) } return - } - - if *fVersion { - v := fmt.Sprintf("Telegraf - version %s", version) - fmt.Println(v) + case *fVersion: + fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit) return - } - - if *fSampleConfig { + case *fSampleConfig: config.PrintSampleConfig(inputFilters, outputFilters) return - } - - if *fUsage != "" { + case *fUsage != "": if err := config.PrintInputConfig(*fUsage); err != nil { if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { log.Fatalf("%s and %s", err, err2) @@ -191,15 +193,6 @@ func main() { os.Exit(1) } - if *fConfigDirectoryLegacy != "" { - fmt.Printf("WARNING '--configdirectory' flag is deprecated, please use" + - " '--config-directory'") - err = c.LoadDirectory(*fConfigDirectoryLegacy) - if err != nil { - log.Fatal(err) - } - } - if *fConfigDirectory != "" { err = c.LoadDirectory(*fConfigDirectory) if err != nil { @@ -218,13 +211,12 @@ func main() { log.Fatal(err) } - if *fDebug { - ag.Config.Agent.Debug = true - } - - if *fQuiet { - ag.Config.Agent.Quiet = true - } + // Setup logging + logger.SetupLogging( + ag.Config.Agent.Debug || *fDebug, + ag.Config.Agent.Quiet || *fQuiet, + ag.Config.Agent.Logfile, + ) if *fTest { err = ag.Test() @@ -243,22 +235,26 @@ func main() { signals := make(chan os.Signal) signal.Notify(signals, os.Interrupt, syscall.SIGHUP) go func() { - sig := <-signals - if sig == os.Interrupt { - close(shutdown) - } - if sig == syscall.SIGHUP { - log.Printf("Reloading Telegraf config\n") - <-reload - reload <- true + select { + case sig := <-signals: + if sig == 
os.Interrupt { + close(shutdown) + } + if sig == syscall.SIGHUP { + log.Printf("I! Reloading Telegraf config\n") + <-reload + reload <- true + close(shutdown) + } + case <-stop: close(shutdown) } }() - log.Printf("Starting Telegraf (version %s)\n", version) - log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " ")) - log.Printf("Tags enabled: %s", c.ListTags()) + log.Printf("I! Starting Telegraf (version %s)\n", version) + log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) + log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) + log.Printf("I! Tags enabled: %s", c.ListTags()) if *fPidfile != "" { f, err := os.Create(*fPidfile) @@ -279,3 +275,55 @@ func usageExit(rc int) { fmt.Println(usage) os.Exit(rc) } + +func (p *program) Start(s service.Service) error { + srvc = s + go p.run() + return nil +} +func (p *program) run() { + stop = make(chan struct{}) + reloadLoop(stop, srvc) +} +func (p *program) Stop(s service.Service) error { + close(stop) + return nil +} + +func main() { + flag.Parse() + if runtime.GOOS == "windows" { + svcConfig := &service.Config{ + Name: "telegraf", + DisplayName: "Telegraf Data Collector Service", + Description: "Collects data using a series of plugins and publishes it to " + + "another series of plugins.", + Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"}, + } + + prg := &program{} + s, err := service.New(prg, svcConfig) + if err != nil { + log.Fatal(err) + } + // Handle the -service flag here to prevent any issues with tooling that + // may not have an interactive session, e.g. installing from Ansible. + if *fService != "" { + if *fConfig != "" { + (*svcConfig).Arguments = []string{"-config", *fConfig} + } + err := service.Control(s, *fService) + if err != nil { + log.Fatal(err) + } + } else { + err = s.Run() + if err != nil { + log.Println("E! " + err.Error()) + } + } + } else { + stop = make(chan struct{}) + reloadLoop(stop, nil) + } +} diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index a01178919..46f044ab7 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -86,6 +86,10 @@ as it is more efficient to filter out tags at the ingestion point. * **taginclude**: taginclude is the inverse of tagexclude. It will only include the tag keys in the final measurement. +**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of +the plugin definition, otherwise subsequent plugin config options will be +interpreted as part of the tagpass/tagdrop map. + ## Input Configuration Some configuration options are configurable per input: @@ -129,6 +133,10 @@ fields which begin with `time_`. #### Input Config: tagpass and tagdrop +**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of +the plugin definition, otherwise subsequent plugin config options will be +interpreted as part of the tagpass/tagdrop map.
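To make the ordering rule concrete, here is a minimal illustrative sketch (not part of this patch; the `mem` input and the `interval` option are only placeholders). Any option written below the tagpass table would be swallowed into it:

```toml
[[inputs.mem]]
  # plugin options must come first ...
  interval = "10s"

  # ... and the tagpass/tagdrop tables must come last; an option placed
  # below this table would be parsed as part of the tagpass map.
  [inputs.mem.tagpass]
    host = ["web-*"]
```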
+ ```toml [[inputs.cpu]] percpu = true diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index d448872f6..5553fda70 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -16,6 +16,7 @@ - github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE) - github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE) - github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE) +- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib) - github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md) - github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE) diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index 679a41527..66046cdbb 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -1,36 +1,39 @@ # Running Telegraf as a Windows Service -If you have tried to install Go binaries as Windows Services with the **sc.exe** -tool you may have seen that the service errors and stops running after a while. +Telegraf natively supports running as a Windows Service. Outlined below are +the general steps to set it up. -**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a -[number of scenarios](http://nssm.cc/scenarios) including running Go binaries -that were not specifically designed to run only in Windows platforms. +1. Obtain the telegraf windows distribution +2. Create the directory `C:\Program Files\Telegraf` (if you install in a different + location simply specify the `-config` parameter with the desired location) +3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf` +4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap file paths containing spaces in double quotes ""): -## NSSM Installation via Chocolatey + ``` + > C:\"Program Files"\Telegraf\telegraf.exe --service install + ``` -You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/) -with these commands +5. Edit the configuration file to meet your needs +6. To check that it works, run: -```powershell -iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1')) -choco install -y nssm -``` + ``` + > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test + ``` -## Installing Telegraf as a Windows Service with NSSM +7. To start collecting data, run: -You can download the latest Telegraf Windows binaries (still Experimental at -the moment) from [the Telegraf Github repo](https://github.com/influxdata/telegraf). + ``` + > net start telegraf + ``` -Then you can create a C:\telegraf folder, unzip the binary there and modify the -**telegraf.conf** sample to allocate the metrics you want to send to **InfluxDB**. +## Other supported operations -Once you have NSSM installed in your system, the process is quite straightforward.
-You only need to type this command in your Windows shell +Telegraf can manage its own service through the --service flag: -```powershell -nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.config -``` + +| Command | Effect | +|------------------------------------|-------------------------------| +| `telegraf.exe --service install` | Install telegraf as a service | +| `telegraf.exe --service uninstall` | Remove the telegraf service | +| `telegraf.exe --service start` | Start the telegraf service | +| `telegraf.exe --service stop` | Stop the telegraf service | -And now your service will be installed in Windows and you will be able to start and -stop it gracefully \ No newline at end of file diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 10e949302..2ad0bcbae 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -30,12 +30,15 @@ ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ## Telegraf will send metrics to outputs in batches of at - ## most metric_batch_size metrics. + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## output, and will flush this buffer on a successful write. Oldest metrics ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -55,12 +58,17 @@ ## By default, precision will be set to the same timestamp order as the ## collection interval, with the maximum being 1s. ## Precision will NOT be used for service inputs, such as logparser and statsd. - ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns". + ## Valid values are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Run telegraf in debug mode + + ## Logging configuration: + ## Run telegraf with debug log messages. debug = false - ## Run telegraf in quiet mode + ## Run telegraf in quiet mode (error log messages only). quiet = false + ## Specify the log file name. The empty string means to log to stdout. + logfile = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do not set the "host" tag in the telegraf agent. @@ -83,7 +91,7 @@ ## Retention policy to write to. Empty string writes to the default rp. retention_policy = "" - ## Write consistency (clusters only), can be: "any", "one", "quorom", "all" + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all" write_consistency = "any" ## Write timeout (for the InfluxDB client), formatted as a string. @@ -197,7 +205,7 @@ # # Configuration for Graphite server to send metrics to # [[outputs.graphite]] # ## TCP endpoint for your graphite instance. -# ## If multiple endpoints are configured, the output will be load balanced. +# ## If multiple endpoints are configured, output will be load balanced. # ## Only one of the endpoints will be written to with each iteration. # servers = ["localhost:2003"] # ## Prefix metrics name @@ -321,14 +329,13 @@ # api_token = "my-secret-token" # required. # ## Debug # # debug = false -# ## Tag Field to populate source attribute (optional) -# ## This is typically the _hostname_ from which the metric was obtained. -# source_tag = "host" # ## Connection timeout.
# # timeout = "5s" -# ## Output Name Template (same as graphite buckets) +# ## Output source Template (same as graphite buckets) # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite -# template = "host.tags.measurement.field" +# ## This template is used in librato's source (not the metric's name) +# template = "host" +# # # Configuration for MQTT server to send metrics to @@ -358,6 +365,30 @@ # data_format = "influx" +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# ## Optional credentials +# # username = "" +# # password = "" +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + # # Send telegraf measurements to NSQD # [[outputs.nsq]] # ## Location of nsqd instance listening on TCP @@ -377,13 +408,18 @@ # ## prefix for metrics keys # prefix = "my.specific.prefix." # -# ## Telnet Mode ## -# ## DNS name of the OpenTSDB server in telnet mode +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the HTTP API. # host = "opentsdb.example.com" # -# ## Port of the OpenTSDB server in telnet mode +# ## Port of the OpenTSDB server # port = 4242 # +# ## Number of data points to send to OpenTSDB in HTTP requests. +# ## Not used with telnet API. +# httpBatchSize = 50 +# # ## Debug true - Prints OpenTSDB communication # debug = false @@ -415,8 +451,8 @@ percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## Comment this line if you want the raw CPU time metrics - fielddrop = ["time_*"] + ## If true, collect raw CPU time metrics. + collect_cpu_time = false # Read metrics about disk usage by mount point @@ -436,8 +472,8 @@ ## disk partitions. ## Setting devices will restrict the stats to the specified devices. # devices = ["sda", "sdb"] - ## Uncomment the following line if you do not need disk serial numbers. - # skip_serial_number = true + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false # Get kernel statistics from /proc/stat @@ -465,7 +501,7 @@ # no configuration -# # Read stats from an aerospike server +# # Read stats from aerospike server(s) # [[inputs.aerospike]] # ## Aerospike servers to connect to (with port) # ## This plugin will query all namespaces the aerospike @@ -512,6 +548,10 @@ # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. # [[inputs.ceph]] +# ## This is the recommended interval to poll.
Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# # ## All configuration values are optional, defaults are shown below # # ## location of ceph binary @@ -526,6 +566,18 @@ # # ## suffix used to identify socket files # socket_suffix = "asok" +# +# ## Ceph user to authenticate as +# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands +# gather_cluster_stats = true # # Read specific statistics per cgroup @@ -578,6 +630,11 @@ # ## Metric Statistic Namespace (required) # namespace = 'AWS/ELB' # +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 10. Optional - default value is 10. +# ratelimit = 10 +# # ## Metrics to Pull (optional) # ## Defaults to all Metrics in Namespace if nothing is provided # ## Refreshes Namespace available metrics every 1h @@ -666,6 +723,13 @@ # container_names = [] # ## Timeout for docker list, info, and stats commands # timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...) and +# ## network (eth0, eth1, ...) stats or not +# perdevice = true +# ## Whether to report for each container total blkio and network stats or not +# total = false +# # # Read statistics from one or many dovecot servers @@ -688,6 +752,9 @@ # ## specify a list of one or more Elasticsearch servers # servers = ["http://localhost:9200"] # +# ## Timeout for HTTP requests to the Elasticsearch server(s) +# http_timeout = "5s" +# # ## set local to false when you want to read the indices stats from all nodes # ## within the cluster # local = true @@ -782,9 +849,11 @@ # [[inputs.haproxy]] # ## An array of addresses to gather stats about. Specify an ip or hostname # ## with optional port. ie localhost, 10.10.3.33:1936, etc. -# -# ## If no servers are specified, then default to 127.0.0.1:1936 -# servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] +# ## Make sure you specify the complete path to the stats endpoint +# ## ie 10.10.3.33:1936/haproxy?stats +# # +# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] # ## Or you can also use local socket # ## servers = ["socket:/run/haproxy/admin.sock"] @@ -828,6 +897,8 @@ # "http://localhost:9999/stats/", # "http://localhost:9998/stats/", # ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" # # ## HTTP method to use: GET or POST (case-sensitive) # method = "GET" @@ -867,6 +938,9 @@ # urls = [ # "http://localhost:8086/debug/vars" # ] +# +# ## http request & header timeout +# timeout = "5s" # # Read metrics from one or many bare metal servers @@ -882,6 +956,7 @@ # # Read JMX metrics through Jolokia # [[inputs.jolokia]] # ## This is the context root used to compose the jolokia url +# ## NOTE that your jolokia security policy must allow for POST requests. # context = "/jolokia" # # ## This specifies the mode used @@ -970,21 +1045,33 @@ # # Telegraf plugin for gathering metrics from N Mesos masters # [[inputs.mesos]] -# # Timeout, in ms. +# ## Timeout, in ms. # timeout = 100 -# # A list of Mesos masters, default value is localhost:5050. +# ## A list of Mesos masters.
# masters = ["localhost:5050"] -# # Metrics groups to be collected, by default, all enabled. +# ## Master metrics groups to be collected, by default, all enabled. # master_collections = [ # "resources", # "master", # "system", -# "slaves", +# "agents", # "frameworks", +# "tasks", # "messages", # "evqueue", # "registrar", # ] +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] # # Read metrics from one or many MongoDB servers @@ -995,6 +1082,7 @@ # ## mongodb://10.10.3.33:18832, # ## 10.0.0.1:10000, etc. # servers = ["127.0.0.1:27017"] +# gather_perdb_stats = false # # Read metrics from one or many mysql servers @@ -1101,9 +1189,9 @@ # ## file paths for proc files. If empty default paths will be used: # ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 # ## These can also be overridden with env variables, see README. -# proc_net_netstat = "" -# proc_net_snmp = "" -# proc_net_snmp6 = "" +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" # ## dump metrics with 0 values too # dump_zeros = true @@ -1185,8 +1273,12 @@ # ## # address = "host=localhost user=postgres sslmode=disable" # +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# # ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. # # databases = ["app_production", "testing"] @@ -1305,6 +1397,13 @@ # # username = "guest" # # password = "guest" # +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# # ## A list of nodes to pull metrics about. If not specified, metrics for # ## all nodes are gathered. # # nodes = ["rabbit@node1", "rabbit@node2"] @@ -1323,6 +1422,7 @@ # ## e.g. # ## tcp://localhost:6379 # ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock # ## # ## If no servers are specified, then localhost is used as the host. # ## If no port is specified, 6379 is used @@ -1345,8 +1445,67 @@ # servers = ["http://localhost:8098"] -# # Reads oids value from one or many snmp agents +# # Retrieves SNMP values from remote agents # [[inputs.snmp]] +# agents = [ "127.0.0.1:161" ] +# ## Timeout for each SNMP query. +# timeout = "5s" +# ## Number of retries to attempt within timeout. +# retries = 3 +# ## SNMP version, values can be 1, 2, or 3 +# version = 2 +# +# ## SNMP community string.
+# community = "public" +# +# ## The GETBULK max-repetitions parameter +# max_repetitions = 10 +# +# ## SNMPv3 auth parameters +# #sec_name = "myuser" +# #auth_protocol = "md5" # Values: "MD5", "SHA", "" +# #auth_password = "pass" +# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" +# #context_name = "" +# #priv_protocol = "" # Values: "DES", "AES", "" +# #priv_password = "" +# +# ## measurement name +# name = "system" +# [[inputs.snmp.field]] +# name = "hostname" +# oid = ".1.0.0.1.1" +# [[inputs.snmp.field]] +# name = "uptime" +# oid = ".1.0.0.1.2" +# [[inputs.snmp.field]] +# name = "load" +# oid = ".1.0.0.1.3" +# [[inputs.snmp.field]] +# oid = "HOST-RESOURCES-MIB::hrMemorySize" +# +# [[inputs.snmp.table]] +# ## measurement name +# name = "remote_servers" +# inherit_tags = [ "hostname" ] +# [[inputs.snmp.table.field]] +# name = "server" +# oid = ".1.0.0.0.1.0" +# is_tag = true +# [[inputs.snmp.table.field]] +# name = "connections" +# oid = ".1.0.0.0.1.1" +# [[inputs.snmp.table.field]] +# name = "latency" +# oid = ".1.0.0.0.1.2" +# +# [[inputs.snmp.table]] +# ## auto populate table's fields using the MIB +# oid = "HOST-RESOURCES-MIB::hrNetworkTable" + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] # ## Use 'oids.txt' file to translate oids to names # ## To generate 'oids.txt' you need to run: # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt @@ -1517,6 +1676,16 @@ # SERVICE INPUT PLUGINS # ############################################################################### +# # Influx HTTP write listener +# [[inputs.http_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## timeouts +# read_timeout = "10s" +# write_timeout = "10s" + + # # Read metrics from Kafka topic(s) # [[inputs.kafka_consumer]] # ## topic(s) to consume @@ -1545,7 +1714,7 @@ # ## /var/log/**.log -> recursively find all .log files in /var/log # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log # ## /var/log/apache.log -> only tail the apache log file -# files = ["/var/log/influxdb/influxdb.log"] +# files = ["/var/log/apache/access.log"] # ## Read file from beginning. # from_beginning = false # @@ -1558,7 +1727,9 @@ # ## Other common built-in patterns are: # ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) # ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) -# patterns = ["%{INFLUXDB_HTTPD_LOG}"] +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# ## Name of the outputted measurement. +# measurement = "apache_access_log" # ## Full path(s) to custom pattern files. # custom_pattern_files = [] # ## Custom patterns can also be defined here. Put one pattern per line. @@ -1622,6 +1793,21 @@ # data_format = "influx" +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# ## A string representing the NSQD TCP Endpoint +# server = "localhost:4150" +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + # # Statsd Server # [[inputs.statsd]] # ## Address and port to host UDP listener on @@ -1722,9 +1908,15 @@ # ## Address and port to host Webhook listener on # service_address = ":1619" # +# [inputs.webhooks.filestack] +# path = "/filestack" +# # [inputs.webhooks.github] # path = "/github" # +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# # [inputs.webhooks.rollbar] # path = "/rollbar" diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 9ce067c39..4825d715a 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -42,10 +42,14 @@ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" + ## Logging configuration: ## Run telegraf in debug mode debug = false ## Run telegraf in quiet mode quiet = false + ## Specify the log file name. The empty string means to log to stdout. + logfile = "/Program Files/Telegraf/telegraf.log" + ## Override default hostname, if empty use os.Hostname() hostname = "" @@ -85,7 +89,7 @@ # Windows Performance Counters plugin. # These are the recommended method of monitoring system metrics on windows, # as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, -# which utilizes a lot of system resources. +# which utilizes more system resources. # # See more configuration examples at: # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters @@ -95,70 +99,104 @@ # Processor usage, alternative to native, reports on a per core. ObjectName = "Processor" Instances = ["*"] - Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"] + Counters = [ + "% Idle Time", + "% Interrupt Time", + "% Privileged Time", + "% User Time", + "% Processor Time", + ] Measurement = "win_cpu" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false [[inputs.win_perf_counters.object]] # Disk times and queues ObjectName = "LogicalDisk" Instances = ["*"] - Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"] + Counters = [ + "% Idle Time", + "% Disk Time","% Disk Read Time", + "% Disk Write Time", + "% User Time", + "Current Disk Queue Length", + ] Measurement = "win_disk" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false [[inputs.win_perf_counters.object]] ObjectName = "System" - Counters = ["Context Switches/sec","System Calls/sec"] + Counters = [ + "Context Switches/sec", + "System Calls/sec", + ] Instances = ["------"] Measurement = "win_system" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, such as from the Memory object. + # Example query where the Instance portion must be removed to get data back, + # such as from the Memory object.
ObjectName = "Memory" - Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"] - Instances = ["------"] # Use 6 x - to remove the Instance bit from the query. + Counters = [ + "Available Bytes", + "Cache Faults/sec", + "Demand Zero Faults/sec", + "Page Faults/sec", + "Pages/sec", + "Transition Faults/sec", + "Pool Nonpaged Bytes", + "Pool Paged Bytes", + ] + # Use 6 x - to remove the Instance bit from the query. + Instances = ["------"] Measurement = "win_mem" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false # Windows system plugins using WMI (disabled by default, using # win_perf_counters over WMI is recommended) -# Read metrics about cpu usage -#[[inputs.cpu]] - ## Whether to report per-cpu stats or not - #percpu = true - ## Whether to report total system cpu stats or not - #totalcpu = true - ## Comment this line if you want the raw CPU time metrics - #fielddrop = ["time_*"] +# # Read metrics about cpu usage +# [[inputs.cpu]] +# ## Whether to report per-cpu stats or not +# percpu = true +# ## Whether to report total system cpu stats or not +# totalcpu = true +# ## Comment this line if you want the raw CPU time metrics +# fielddrop = ["time_*"] -# Read metrics about disk usage by mount point -#[[inputs.disk]] - ## By default, telegraf gather stats for all mountpoints. - ## Setting mountpoints will restrict the stats to the specified mountpoints. - ## mount_points=["/"] - ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually - ## present on /run, /var/run, /dev/shm or /dev). - #ignore_fs = ["tmpfs", "devtmpfs"] +# # Read metrics about disk usage by mount point +# [[inputs.disk]] +# ## By default, telegraf gather stats for all mountpoints. +# ## Setting mountpoints will restrict the stats to the specified mountpoints. +# ## mount_points=["/"] +# +# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually +# ## present on /run, /var/run, /dev/shm or /dev). +# # ignore_fs = ["tmpfs", "devtmpfs"] -# Read metrics about disk IO by device -#[[inputs.diskio]] - ## By default, telegraf will gather stats for all devices including - ## disk partitions. - ## Setting devices will restrict the stats to the specified devices. - ## devices = ["sda", "sdb"] - ## Uncomment the following line if you do not need disk serial numbers. - ## skip_serial_number = true -# Read metrics about memory usage -#[[inputs.mem]] - # no configuration +# # Read metrics about disk IO by device +# [[inputs.diskio]] +# ## By default, telegraf will gather stats for all devices including +# ## disk partitions. +# ## Setting devices will restrict the stats to the specified devices. +# ## devices = ["sda", "sdb"] +# ## Uncomment the following line if you do not need disk serial numbers. 
+# ## skip_serial_number = true -# Read metrics about swap memory usage -#[[inputs.swap]] - # no configuration + +# # Read metrics about memory usage +# [[inputs.mem]] +# # no configuration + + +# # Read metrics about swap memory usage +# [[inputs.swap]] +# # no configuration diff --git a/filter/filter.go b/filter/filter.go index 85eed17ac..9a28c2627 100644 --- a/filter/filter.go +++ b/filter/filter.go @@ -10,16 +10,16 @@ type Filter interface { Match(string) bool } -// CompileFilter takes a list of string filters and returns a Filter interface +// Compile takes a list of string filters and returns a Filter interface // for matching a given string against the filter list. The filter list // supports glob matching too, ie: // -// f, _ := CompileFilter([]string{"cpu", "mem", "net*"}) +// f, _ := Compile([]string{"cpu", "mem", "net*"}) // f.Match("cpu") // true // f.Match("network") // true // f.Match("memory") // false // -func CompileFilter(filters []string) (Filter, error) { +func Compile(filters []string) (Filter, error) { // return if there is nothing to compile if len(filters) == 0 { return nil, nil diff --git a/filter/filter_test.go b/filter/filter_test.go index 85072e2ac..2f52e036a 100644 --- a/filter/filter_test.go +++ b/filter/filter_test.go @@ -6,30 +6,30 @@ import ( "github.com/stretchr/testify/assert" ) -func TestCompileFilter(t *testing.T) { - f, err := CompileFilter([]string{}) +func TestCompile(t *testing.T) { + f, err := Compile([]string{}) assert.NoError(t, err) assert.Nil(t, f) - f, err = CompileFilter([]string{"cpu"}) + f, err = Compile([]string{"cpu"}) assert.NoError(t, err) assert.True(t, f.Match("cpu")) assert.False(t, f.Match("cpu0")) assert.False(t, f.Match("mem")) - f, err = CompileFilter([]string{"cpu*"}) + f, err = Compile([]string{"cpu*"}) assert.NoError(t, err) assert.True(t, f.Match("cpu")) assert.True(t, f.Match("cpu0")) assert.False(t, f.Match("mem")) - f, err = CompileFilter([]string{"cpu", "mem"}) + f, err = Compile([]string{"cpu", "mem"}) assert.NoError(t, err) assert.True(t, f.Match("cpu")) assert.False(t, f.Match("cpu0")) assert.True(t, f.Match("mem")) - f, err = CompileFilter([]string{"cpu", "mem", "net*"}) + f, err = Compile([]string{"cpu", "mem", "net*"}) assert.NoError(t, err) assert.True(t, f.Match("cpu")) assert.False(t, f.Match("cpu0")) @@ -40,7 +40,7 @@ func TestCompileFilter(t *testing.T) { var benchbool bool func BenchmarkFilterSingleNoGlobFalse(b *testing.B) { - f, _ := CompileFilter([]string{"cpu"}) + f, _ := Compile([]string{"cpu"}) var tmp bool for n := 0; n < b.N; n++ { tmp = f.Match("network") @@ -49,7 +49,7 @@ func BenchmarkFilterSingleNoGlobFalse(b *testing.B) { } func BenchmarkFilterSingleNoGlobTrue(b *testing.B) { - f, _ := CompileFilter([]string{"cpu"}) + f, _ := Compile([]string{"cpu"}) var tmp bool for n := 0; n < b.N; n++ { tmp = f.Match("cpu") @@ -58,7 +58,7 @@ func BenchmarkFilterSingleNoGlobTrue(b *testing.B) { } func BenchmarkFilter(b *testing.B) { - f, _ := CompileFilter([]string{"cpu", "mem", "net*"}) + f, _ := Compile([]string{"cpu", "mem", "net*"}) var tmp bool for n := 0; n < b.N; n++ { tmp = f.Match("network") @@ -67,7 +67,7 @@ func BenchmarkFilter(b *testing.B) { } func BenchmarkFilterNoGlob(b *testing.B) { - f, _ := CompileFilter([]string{"cpu", "mem", "net"}) + f, _ := Compile([]string{"cpu", "mem", "net"}) var tmp bool for n := 0; n < b.N; n++ { tmp = f.Match("net") @@ -76,7 +76,7 @@ func BenchmarkFilterNoGlob(b *testing.B) { } func BenchmarkFilter2(b *testing.B) { - f, _ := CompileFilter([]string{"aa", "bb", "c", 
"ad", "ar", "at", "aq", + f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq", "aw", "az", "axxx", "ab", "cpu", "mem", "net*"}) var tmp bool for n := 0; n < b.N; n++ { @@ -86,7 +86,7 @@ func BenchmarkFilter2(b *testing.B) { } func BenchmarkFilter2NoGlob(b *testing.B) { - f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq", + f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq", "aw", "az", "axxx", "ab", "cpu", "mem", "net"}) var tmp bool for n := 0; n < b.N; n++ { diff --git a/internal/config/config.go b/internal/config/config.go index 8f7821624..b76c9b520 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "regexp" + "runtime" "sort" "strings" "time" @@ -47,8 +48,8 @@ type Config struct { OutputFilters []string Agent *AgentConfig - Inputs []*internal_models.RunningInput - Outputs []*internal_models.RunningOutput + Inputs []*models.RunningInput + Outputs []*models.RunningOutput } func NewConfig() *Config { @@ -61,8 +62,8 @@ func NewConfig() *Config { }, Tags: make(map[string]string), - Inputs: make([]*internal_models.RunningInput, 0), - Outputs: make([]*internal_models.RunningOutput, 0), + Inputs: make([]*models.RunningInput, 0), + Outputs: make([]*models.RunningOutput, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } @@ -124,6 +125,9 @@ type AgentConfig struct { // Debug is the option for running in debug mode Debug bool + // Logfile specifies the file to send logs to + Logfile string + // Quiet is the option for running in quiet mode Quiet bool Hostname string @@ -139,7 +143,7 @@ func (c *Config) InputNames() []string { return name } -// Outputs returns a list of strings of the configured inputs. +// Outputs returns a list of strings of the configured outputs. func (c *Config) OutputNames() []string { var name []string for _, output := range c.Outputs { @@ -194,12 +198,15 @@ var header = `# Telegraf Configuration ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ## Telegraf will send metrics to outputs in batches of at - ## most metric_batch_size metrics. + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## output, and will flush this buffer on a successful write. Oldest metrics ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -219,12 +226,17 @@ var header = `# Telegraf Configuration ## By default, precision will be set to the same timestamp order as the ## collection interval, with the maximum being 1s. ## Precision will NOT be used for service inputs, such as logparser and statsd. - ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns". + ## Valid values are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Run telegraf in debug mode + + ## Logging configuration: + ## Run telegraf with debug log messages. debug = false - ## Run telegraf in quiet mode + ## Run telegraf in quiet mode (error log messages only). quiet = false + ## Specify the log file name. The empty string means to log to stdout. 
+ logfile = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do not set the "host" tag in the telegraf agent. @@ -403,24 +415,21 @@ func PrintOutputConfig(name string) error { } func (c *Config) LoadDirectory(path string) error { - directoryEntries, err := ioutil.ReadDir(path) - if err != nil { - return err - } - for _, entry := range directoryEntries { - if entry.IsDir() { - continue + walkfn := func(thispath string, info os.FileInfo, _ error) error { + if info.IsDir() { + return nil } - name := entry.Name() + name := info.Name() if len(name) < 6 || name[len(name)-5:] != ".conf" { - continue + return nil } - err := c.LoadConfig(filepath.Join(path, name)) + err := c.LoadConfig(thispath) if err != nil { return err } + return nil } - return nil + return filepath.Walk(path, walkfn) } // Try to find a default config file at these locations (in order): @@ -432,9 +441,12 @@ func getDefaultConfigPath() (string, error) { envfile := os.Getenv("TELEGRAF_CONFIG_PATH") homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf") etcfile := "/etc/telegraf/telegraf.conf" + if runtime.GOOS == "windows" { + etcfile = `C:\Program Files\Telegraf\telegraf.conf` + } for _, path := range []string{envfile, homefile, etcfile} { if _, err := os.Stat(path); err == nil { - log.Printf("Using config file: %s", path) + log.Printf("I! Using config file: %s", path) return path, nil } } @@ -465,7 +477,7 @@ func (c *Config) LoadConfig(path string) error { return fmt.Errorf("%s: invalid configuration", path) } if err = config.UnmarshalTable(subTable, c.Tags); err != nil { - log.Printf("Could not parse [global_tags] config\n") + log.Printf("E! Could not parse [global_tags] config\n") return fmt.Errorf("Error parsing %s, %s", path, err) } } @@ -478,7 +490,7 @@ func (c *Config) LoadConfig(path string) error { return fmt.Errorf("%s: invalid configuration", path) } if err = config.UnmarshalTable(subTable, c.Agent); err != nil { - log.Printf("Could not parse [agent] config\n") + log.Printf("E! 
Could not parse [agent] config\n") return fmt.Errorf("Error parsing %s, %s", path, err) } } @@ -598,7 +610,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return err } - ro := internal_models.NewRunningOutput(name, output, outputConfig, + ro := models.NewRunningOutput(name, output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Outputs = append(c.Outputs, ro) return nil @@ -639,7 +651,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { return err } - rp := &internal_models.RunningInput{ + rp := &models.RunningInput{ Name: name, Input: input, Config: pluginConfig, @@ -650,10 +662,10 @@ func (c *Config) addInput(name string, table *ast.Table) error { // buildFilter builds a Filter // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to -// be inserted into the internal_models.OutputConfig/internal_models.InputConfig +// be inserted into the models.OutputConfig/models.InputConfig // to be used for glob filtering on tags and measurements -func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { - f := internal_models.Filter{} +func buildFilter(tbl *ast.Table) (models.Filter, error) { + f := models.Filter{} if node, ok := tbl.Fields["namepass"]; ok { if kv, ok := node.(*ast.KeyValue); ok { @@ -661,7 +673,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { f.NamePass = append(f.NamePass, str.Value) - f.IsActive = true } } } @@ -674,7 +685,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { f.NameDrop = append(f.NameDrop, str.Value) - f.IsActive = true } } } @@ -689,7 +699,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { f.FieldPass = append(f.FieldPass, str.Value) - f.IsActive = true } } } @@ -705,7 +714,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { f.FieldDrop = append(f.FieldDrop, str.Value) - f.IsActive = true } } } @@ -717,7 +725,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { if subtbl, ok := node.(*ast.Table); ok { for name, val := range subtbl.Fields { if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &internal_models.TagFilter{Name: name} + tagfilter := &models.TagFilter{Name: name} if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { @@ -726,7 +734,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { } } f.TagPass = append(f.TagPass, *tagfilter) - f.IsActive = true } } } @@ -736,7 +743,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { if subtbl, ok := node.(*ast.Table); ok { for name, val := range subtbl.Fields { if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &internal_models.TagFilter{Name: name} + tagfilter := &models.TagFilter{Name: name} if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { @@ -745,7 +752,6 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { } } f.TagDrop = append(f.TagDrop, *tagfilter) - f.IsActive = true } } } @@ -774,7 +780,7 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { } } } - if err := f.CompileFilter(); err != nil { + if err := f.Compile(); err != nil { return f, err } @@ -793,9 
+799,9 @@ func buildFilter(tbl *ast.Table) (internal_models.Filter, error) { // buildInput parses input specific items from the ast.Table, // builds the filter and returns a -// internal_models.InputConfig to be inserted into internal_models.RunningInput -func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) { - cp := &internal_models.InputConfig{Name: name} +// models.InputConfig to be inserted into models.RunningInput +func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { + cp := &models.InputConfig{Name: name} if node, ok := tbl.Fields["interval"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { @@ -837,7 +843,7 @@ func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, erro if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil { - log.Printf("Could not parse tags for input %s\n", name) + log.Printf("E! Could not parse tags for input %s\n", name) } } } @@ -969,14 +975,14 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error // buildOutput parses output specific items from the ast.Table, // builds the filter and returns an -// internal_models.OutputConfig to be inserted into internal_models.RunningInput +// models.OutputConfig to be inserted into models.RunningOutput // Note: error exists in the return for future calls that might require error -func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) { +func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { filter, err := buildFilter(tbl) if err != nil { return nil, err } - oc := &internal_models.OutputConfig{ + oc := &models.OutputConfig{ Name: name, Filter: filter, } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 1659cd6ec..3498d815d 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -26,27 +26,26 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached.Servers = []string{"192.168.1.1"} - filter := internal_models.Filter{ + filter := models.Filter{ NameDrop: []string{"metricname2"}, NamePass: []string{"metricname1"}, FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, - TagDrop: []internal_models.TagFilter{ - internal_models.TagFilter{ + TagDrop: []models.TagFilter{ + models.TagFilter{ Name: "badtag", Filter: []string{"othertag"}, }, }, - TagPass: []internal_models.TagFilter{ - internal_models.TagFilter{ + TagPass: []models.TagFilter{ + models.TagFilter{ Name: "goodtag", Filter: []string{"mytag"}, }, }, - IsActive: true, } - assert.NoError(t, filter.CompileFilter()) - mConfig := &internal_models.InputConfig{ + assert.NoError(t, filter.Compile()) + mConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 10 * time.Second, @@ -66,27 +65,26 @@ func TestConfig_LoadSingleInput(t *testing.T) { memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached.Servers = []string{"localhost"} - filter := internal_models.Filter{ + filter := models.Filter{ NameDrop: []string{"metricname2"}, NamePass: []string{"metricname1"}, FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, - TagDrop: []internal_models.TagFilter{ - internal_models.TagFilter{ + TagDrop: []models.TagFilter{ + models.TagFilter{ Name: "badtag", Filter: []string{"othertag"}, }, },
- TagPass: []internal_models.TagFilter{ - internal_models.TagFilter{ + TagPass: []models.TagFilter{ + models.TagFilter{ Name: "goodtag", Filter: []string{"mytag"}, }, }, - IsActive: true, } - assert.NoError(t, filter.CompileFilter()) - mConfig := &internal_models.InputConfig{ + assert.NoError(t, filter.Compile()) + mConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 5 * time.Second, @@ -113,27 +111,26 @@ func TestConfig_LoadDirectory(t *testing.T) { memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached.Servers = []string{"localhost"} - filter := internal_models.Filter{ + filter := models.Filter{ NameDrop: []string{"metricname2"}, NamePass: []string{"metricname1"}, FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, - TagDrop: []internal_models.TagFilter{ - internal_models.TagFilter{ + TagDrop: []models.TagFilter{ + models.TagFilter{ Name: "badtag", Filter: []string{"othertag"}, }, }, - TagPass: []internal_models.TagFilter{ - internal_models.TagFilter{ + TagPass: []models.TagFilter{ + models.TagFilter{ Name: "goodtag", Filter: []string{"mytag"}, }, }, - IsActive: true, } - assert.NoError(t, filter.CompileFilter()) - mConfig := &internal_models.InputConfig{ + assert.NoError(t, filter.Compile()) + mConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 5 * time.Second, @@ -150,7 +147,7 @@ func TestConfig_LoadDirectory(t *testing.T) { assert.NoError(t, err) ex.SetParser(p) ex.Command = "/usr/bin/myothercollector --foo=bar" - eConfig := &internal_models.InputConfig{ + eConfig := &models.InputConfig{ Name: "exec", MeasurementSuffix: "_myothercollector", } @@ -169,7 +166,7 @@ func TestConfig_LoadDirectory(t *testing.T) { pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) pstat.PidFile = "/var/run/grafana-server.pid" - pConfig := &internal_models.InputConfig{Name: "procstat"} + pConfig := &models.InputConfig{Name: "procstat"} pConfig.Tags = make(map[string]string) assert.Equal(t, pstat, c.Inputs[3].Input, diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index 6755e69b2..22ae92721 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -12,21 +12,23 @@ import ( var sepStr = fmt.Sprintf("%v", string(os.PathSeparator)) type GlobPath struct { - path string - hasMeta bool - g glob.Glob - root string + path string + hasMeta bool + hasSuperMeta bool + g glob.Glob + root string } func Compile(path string) (*GlobPath, error) { out := GlobPath{ - hasMeta: hasMeta(path), - path: path, + hasMeta: hasMeta(path), + hasSuperMeta: hasSuperMeta(path), + path: path, } // if there are no glob meta characters in the path, don't bother compiling // a glob object or finding the root directory. (see short-circuit in Match) - if !out.hasMeta { + if !out.hasMeta || !out.hasSuperMeta { return &out, nil } @@ -48,6 +50,17 @@ func (g *GlobPath) Match() map[string]os.FileInfo { } return out } + if !g.hasSuperMeta { + out := make(map[string]os.FileInfo) + files, _ := filepath.Glob(g.path) + for _, file := range files { + info, err := os.Stat(file) + if !os.IsNotExist(err) { + out[file] = info + } + } + return out + } return walkFilePath(g.root, g.g) } @@ -96,3 +109,8 @@ func findRootDir(path string) string { func hasMeta(path string) bool { return strings.IndexAny(path, "*?[") >= 0 } + +// hasSuperMeta reports whether path contains any super magic glob characters (**). 
+func hasSuperMeta(path string) bool { + return strings.Index(path, "**") >= 0 +} diff --git a/internal/internal.go b/internal/internal.go index 58a1200e0..664a1d13b 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -198,7 +198,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { return err case <-timer.C: if err := c.Process.Kill(); err != nil { - log.Printf("FATAL error killing process: %s", err) + log.Printf("E! FATAL error killing process: %s", err) return err } // wait for the command to return after killing it diff --git a/internal/internal_test.go b/internal/internal_test.go index 213e94d3d..c18991c2d 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -118,7 +118,7 @@ func TestRandomSleep(t *testing.T) { s = time.Now() RandomSleep(time.Millisecond*50, make(chan struct{})) elapsed = time.Since(s) - assert.True(t, elapsed < time.Millisecond*50) + assert.True(t, elapsed < time.Millisecond*100) // test that shutdown is respected s = time.Now() diff --git a/internal/models/filter.go b/internal/models/filter.go index ac24ec667..b87c59501 100644 --- a/internal/models/filter.go +++ b/internal/models/filter.go @@ -1,9 +1,8 @@ -package internal_models +package models import ( "fmt" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" ) @@ -34,47 +33,59 @@ type Filter struct { TagInclude []string tagInclude filter.Filter - IsActive bool + isActive bool } // Compile all Filter lists into filter.Filter objects. -func (f *Filter) CompileFilter() error { +func (f *Filter) Compile() error { + if len(f.NameDrop) == 0 && + len(f.NamePass) == 0 && + len(f.FieldDrop) == 0 && + len(f.FieldPass) == 0 && + len(f.TagInclude) == 0 && + len(f.TagExclude) == 0 && + len(f.TagPass) == 0 && + len(f.TagDrop) == 0 { + return nil + } + + f.isActive = true var err error - f.nameDrop, err = filter.CompileFilter(f.NameDrop) + f.nameDrop, err = filter.Compile(f.NameDrop) if err != nil { return fmt.Errorf("Error compiling 'namedrop', %s", err) } - f.namePass, err = filter.CompileFilter(f.NamePass) + f.namePass, err = filter.Compile(f.NamePass) if err != nil { return fmt.Errorf("Error compiling 'namepass', %s", err) } - f.fieldDrop, err = filter.CompileFilter(f.FieldDrop) + f.fieldDrop, err = filter.Compile(f.FieldDrop) if err != nil { return fmt.Errorf("Error compiling 'fielddrop', %s", err) } - f.fieldPass, err = filter.CompileFilter(f.FieldPass) + f.fieldPass, err = filter.Compile(f.FieldPass) if err != nil { return fmt.Errorf("Error compiling 'fieldpass', %s", err) } - f.tagExclude, err = filter.CompileFilter(f.TagExclude) + f.tagExclude, err = filter.Compile(f.TagExclude) if err != nil { return fmt.Errorf("Error compiling 'tagexclude', %s", err) } - f.tagInclude, err = filter.CompileFilter(f.TagInclude) + f.tagInclude, err = filter.Compile(f.TagInclude) if err != nil { return fmt.Errorf("Error compiling 'taginclude', %s", err) } for i, _ := range f.TagDrop { - f.TagDrop[i].filter, err = filter.CompileFilter(f.TagDrop[i].Filter) + f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter) if err != nil { return fmt.Errorf("Error compiling 'tagdrop', %s", err) } } for i, _ := range f.TagPass { - f.TagPass[i].filter, err = filter.CompileFilter(f.TagPass[i].Filter) + f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter) if err != nil { return fmt.Errorf("Error compiling 'tagpass', %s", err) } @@ -82,16 +93,52 @@ func (f *Filter) CompileFilter() error { return nil } -func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool { - if 
f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) { +// Apply applies the filter to the given measurement name, fields map, and +// tags map. It will return false if the metric should be "filtered out", and +// true if the metric should "pass". +// It will modify tags in-place if they need to be deleted. +func (f *Filter) Apply( + measurement string, + fields map[string]interface{}, + tags map[string]string, +) bool { + if !f.isActive { return true } - return false + + // check if the measurement name should pass + if !f.shouldNamePass(measurement) { + return false + } + + // check if the tags should pass + if !f.shouldTagsPass(tags) { + return false + } + + // filter fields + for fieldkey, _ := range fields { + if !f.shouldFieldPass(fieldkey) { + delete(fields, fieldkey) + } + } + if len(fields) == 0 { + return false + } + + // filter tags + f.filterTags(tags) + + return true } -// ShouldFieldsPass returns true if the metric should pass, false if should drop +func (f *Filter) IsActive() bool { + return f.isActive +} + +// shouldNamePass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters -func (f *Filter) ShouldNamePass(key string) bool { +func (f *Filter) shouldNamePass(key string) bool { if f.namePass != nil { if f.namePass.Match(key) { return true @@ -107,9 +154,9 @@ func (f *Filter) ShouldNamePass(key string) bool { return true } -// ShouldFieldsPass returns true if the metric should pass, false if should drop +// shouldFieldPass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters -func (f *Filter) ShouldFieldsPass(key string) bool { +func (f *Filter) shouldFieldPass(key string) bool { if f.fieldPass != nil { if f.fieldPass.Match(key) { return true @@ -125,9 +172,9 @@ func (f *Filter) ShouldFieldsPass(key string) bool { return true } -// ShouldTagsPass returns true if the metric should pass, false if should drop +// shouldTagsPass returns true if the metric should pass, false if should drop // based on the tagdrop/tagpass filter parameters -func (f *Filter) ShouldTagsPass(tags map[string]string) bool { +func (f *Filter) shouldTagsPass(tags map[string]string) bool { if f.TagPass != nil { for _, pat := range f.TagPass { if pat.filter == nil { @@ -161,7 +208,7 @@ func (f *Filter) ShouldTagsPass(tags map[string]string) bool { // Apply TagInclude and TagExclude filters. // modifies the tags map in-place. 
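The refactor above folds the exported ShouldMetricPass/ShouldFieldsPass/FilterTags trio into a single Apply call that prunes fields and tags in place, with Compile activating the filter automatically once any pass/drop/include/exclude list is configured. A minimal sketch of how a caller might exercise the new surface; this is illustrative only, and assumes it lives inside the telegraf tree, since the internal/models package cannot be imported from outside the module:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/internal/models"
)

func main() {
	f := models.Filter{
		NamePass:  []string{"cpu*"},
		FieldDrop: []string{"usage_guest*"},
	}
	// Compile marks the filter active only when at least one
	// pass/drop/include/exclude list is configured.
	if err := f.Compile(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.IsActive()) // true

	fields := map[string]interface{}{
		"usage_idle":  float64(99),
		"usage_guest": float64(1),
	}
	tags := map[string]string{"cpu": "cpu-total"}

	// Apply reports whether the metric survives at all, deleting
	// filtered-out fields and tags from the maps as a side effect.
	if f.Apply("cpu", fields, tags) {
		fmt.Println(fields) // map[usage_idle:99]
	}
}
```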
-func (f *Filter) FilterTags(tags map[string]string) { +func (f *Filter) filterTags(tags map[string]string) { if f.tagInclude != nil { for k, _ := range tags { if !f.tagInclude.Match(k) { diff --git a/internal/models/filter_test.go b/internal/models/filter_test.go index 454f10c45..95b63e30a 100644 --- a/internal/models/filter_test.go +++ b/internal/models/filter_test.go @@ -1,14 +1,64 @@ -package internal_models +package models import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestFilter_ApplyEmpty(t *testing.T) { + f := Filter{} + require.NoError(t, f.Compile()) + assert.False(t, f.IsActive()) + + assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{})) +} + +func TestFilter_ApplyTagsDontPass(t *testing.T) { + filters := []TagFilter{ + TagFilter{ + Name: "cpu", + Filter: []string{"cpu-*"}, + }, + } + f := Filter{ + TagDrop: filters, + } + require.NoError(t, f.Compile()) + require.NoError(t, f.Compile()) + assert.True(t, f.IsActive()) + + assert.False(t, f.Apply("m", + map[string]interface{}{"value": int64(1)}, + map[string]string{"cpu": "cpu-total"})) +} + +func TestFilter_ApplyDeleteFields(t *testing.T) { + f := Filter{ + FieldDrop: []string{"value"}, + } + require.NoError(t, f.Compile()) + require.NoError(t, f.Compile()) + assert.True(t, f.IsActive()) + + fields := map[string]interface{}{"value": int64(1), "value2": int64(2)} + assert.True(t, f.Apply("m", fields, nil)) + assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields) +} + +func TestFilter_ApplyDeleteAllFields(t *testing.T) { + f := Filter{ + FieldDrop: []string{"value*"}, + } + require.NoError(t, f.Compile()) + require.NoError(t, f.Compile()) + assert.True(t, f.IsActive()) + + fields := map[string]interface{}{"value": int64(1), "value2": int64(2)} + assert.False(t, f.Apply("m", fields, nil)) +} + func TestFilter_Empty(t *testing.T) { f := Filter{} @@ -23,7 +73,7 @@ func TestFilter_Empty(t *testing.T) { } for _, measurement := range measurements { - if !f.ShouldFieldsPass(measurement) { + if !f.shouldFieldPass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } @@ -33,7 +83,7 @@ func TestFilter_NamePass(t *testing.T) { f := Filter{ NamePass: []string{"foo*", "cpu_usage_idle"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) passes := []string{ "foo", @@ -51,13 +101,13 @@ func TestFilter_NamePass(t *testing.T) { } for _, measurement := range passes { - if !f.ShouldNamePass(measurement) { + if !f.shouldNamePass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } for _, measurement := range drops { - if f.ShouldNamePass(measurement) { + if f.shouldNamePass(measurement) { t.Errorf("Expected measurement %s to drop", measurement) } } @@ -67,7 +117,7 @@ func TestFilter_NameDrop(t *testing.T) { f := Filter{ NameDrop: []string{"foo*", "cpu_usage_idle"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) drops := []string{ "foo", @@ -85,13 +135,13 @@ func TestFilter_NameDrop(t *testing.T) { } for _, measurement := range passes { - if !f.ShouldNamePass(measurement) { + if !f.shouldNamePass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } for _, measurement := range drops { - if f.ShouldNamePass(measurement) { + if f.shouldNamePass(measurement) { t.Errorf("Expected measurement %s to drop", measurement) } } @@ -101,7 +151,7 @@ func TestFilter_FieldPass(t *testing.T) { f := 
Filter{ FieldPass: []string{"foo*", "cpu_usage_idle"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) passes := []string{ "foo", @@ -119,13 +169,13 @@ func TestFilter_FieldPass(t *testing.T) { } for _, measurement := range passes { - if !f.ShouldFieldsPass(measurement) { + if !f.shouldFieldPass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } for _, measurement := range drops { - if f.ShouldFieldsPass(measurement) { + if f.shouldFieldPass(measurement) { t.Errorf("Expected measurement %s to drop", measurement) } } @@ -135,7 +185,7 @@ func TestFilter_FieldDrop(t *testing.T) { f := Filter{ FieldDrop: []string{"foo*", "cpu_usage_idle"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) drops := []string{ "foo", @@ -153,13 +203,13 @@ func TestFilter_FieldDrop(t *testing.T) { } for _, measurement := range passes { - if !f.ShouldFieldsPass(measurement) { + if !f.shouldFieldPass(measurement) { t.Errorf("Expected measurement %s to pass", measurement) } } for _, measurement := range drops { - if f.ShouldFieldsPass(measurement) { + if f.shouldFieldPass(measurement) { t.Errorf("Expected measurement %s to drop", measurement) } } @@ -178,7 +228,7 @@ func TestFilter_TagPass(t *testing.T) { f := Filter{ TagPass: filters, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) passes := []map[string]string{ {"cpu": "cpu-total"}, @@ -197,13 +247,13 @@ func TestFilter_TagPass(t *testing.T) { } for _, tags := range passes { - if !f.ShouldTagsPass(tags) { + if !f.shouldTagsPass(tags) { t.Errorf("Expected tags %v to pass", tags) } } for _, tags := range drops { - if f.ShouldTagsPass(tags) { + if f.shouldTagsPass(tags) { t.Errorf("Expected tags %v to drop", tags) } } @@ -222,7 +272,7 @@ func TestFilter_TagDrop(t *testing.T) { f := Filter{ TagDrop: filters, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) drops := []map[string]string{ {"cpu": "cpu-total"}, @@ -241,30 +291,18 @@ func TestFilter_TagDrop(t *testing.T) { } for _, tags := range passes { - if !f.ShouldTagsPass(tags) { + if !f.shouldTagsPass(tags) { t.Errorf("Expected tags %v to pass", tags) } } for _, tags := range drops { - if f.ShouldTagsPass(tags) { + if f.shouldTagsPass(tags) { t.Errorf("Expected tags %v to drop", tags) } } } -func TestFilter_ShouldMetricsPass(t *testing.T) { - m := testutil.TestMetric(1, "testmetric") - f := Filter{ - NameDrop: []string{"foobar"}, - } - require.NoError(t, f.CompileFilter()) - require.True(t, f.ShouldMetricPass(m)) - - m = testutil.TestMetric(1, "foobar") - require.False(t, f.ShouldMetricPass(m)) -} - func TestFilter_FilterTagsNoMatches(t *testing.T) { pretags := map[string]string{ "host": "localhost", @@ -273,9 +311,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) { f := Filter{ TagExclude: []string{"nomatch"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) - f.FilterTags(pretags) + f.filterTags(pretags) assert.Equal(t, map[string]string{ "host": "localhost", "mytag": "foobar", @@ -284,9 +322,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) { f = Filter{ TagInclude: []string{"nomatch"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) - f.FilterTags(pretags) + f.filterTags(pretags) assert.Equal(t, map[string]string{}, pretags) } @@ -298,9 +336,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) { f := Filter{ TagExclude: []string{"ho*"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) - 
f.FilterTags(pretags) + f.filterTags(pretags) assert.Equal(t, map[string]string{ "mytag": "foobar", }, pretags) @@ -312,9 +350,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) { f = Filter{ TagInclude: []string{"my*"}, } - require.NoError(t, f.CompileFilter()) + require.NoError(t, f.Compile()) - f.FilterTags(pretags) + f.filterTags(pretags) assert.Equal(t, map[string]string{ "mytag": "foobar", }, pretags) diff --git a/internal/models/running_input.go b/internal/models/running_input.go index cffaf336c..445c5ee96 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -1,4 +1,4 @@ -package internal_models +package models import ( "time" diff --git a/internal/models/running_output.go b/internal/models/running_output.go index 42025912c..aa94178f7 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -1,4 +1,4 @@ -package internal_models +package models import ( "log" @@ -57,21 +57,17 @@ func NewRunningOutput( // AddMetric adds a metric to the output. This function can also write cached // points if FlushBufferWhenFull is true. func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { - if ro.Config.Filter.IsActive { - if !ro.Config.Filter.ShouldMetricPass(metric) { - return - } - } - // Filter any tagexclude/taginclude parameters before adding metric - if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 { + if ro.Config.Filter.IsActive() { // In order to filter out tags, we need to create a new metric, since // metrics are immutable once created. + name := metric.Name() tags := metric.Tags() fields := metric.Fields() t := metric.Time() - name := metric.Name() - ro.Config.Filter.FilterTags(tags) + if ok := ro.Config.Filter.Apply(name, fields, tags); !ok { + return + } // error is not possible if creating from another metric, so ignore. metric, _ = telegraf.NewMetric(name, tags, fields, t) } @@ -89,7 +85,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { // Write writes all cached points to this output. func (ro *RunningOutput) Write() error { if !ro.Quiet { - log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+ + log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+ "Total gathered metrics: %d. Total dropped metrics: %d.", ro.Name, ro.failMetrics.Len()+ro.metrics.Len(), @@ -146,7 +142,7 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error { elapsed := time.Since(start) if err == nil { if !ro.Quiet { - log.Printf("Output [%s] wrote batch of %d metrics in %s\n", + log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n", ro.Name, len(metrics), elapsed) } } diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go index d9238c5a4..a42d6fc7e 100644 --- a/internal/models/running_output_test.go +++ b/internal/models/running_output_test.go @@ -1,4 +1,4 @@ -package internal_models +package models import ( "fmt" @@ -31,9 +31,7 @@ var next5 = []telegraf.Metric{ // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &perfOutput{} @@ -49,9 +47,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) { // Benchmark adding metrics. func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &perfOutput{} @@ -69,9 +65,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { // Benchmark adding metrics. 
func BenchmarkRunningOutputAddFailWrites(b *testing.B) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &perfOutput{} @@ -88,11 +82,10 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { func TestRunningOutput_DropFilter(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - IsActive: true, NameDrop: []string{"metric1", "metric2"}, }, } - assert.NoError(t, conf.Filter.CompileFilter()) + assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} ro := NewRunningOutput("test", m, conf, 1000, 10000) @@ -114,11 +107,10 @@ func TestRunningOutput_DropFilter(t *testing.T) { func TestRunningOutput_PassFilter(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - IsActive: true, NameDrop: []string{"metric1000", "foo*"}, }, } - assert.NoError(t, conf.Filter.CompileFilter()) + assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} ro := NewRunningOutput("test", m, conf, 1000, 10000) @@ -140,11 +132,11 @@ func TestRunningOutput_PassFilter(t *testing.T) { func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - IsActive: true, + TagInclude: []string{"nothing*"}, }, } - assert.NoError(t, conf.Filter.CompileFilter()) + assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} ro := NewRunningOutput("test", m, conf, 1000, 10000) @@ -162,11 +154,11 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { func TestRunningOutput_TagExcludeMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - IsActive: true, + TagExclude: []string{"tag*"}, }, } - assert.NoError(t, conf.Filter.CompileFilter()) + assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} ro := NewRunningOutput("test", m, conf, 1000, 10000) @@ -184,11 +176,11 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) { func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - IsActive: true, + TagExclude: []string{"nothing*"}, }, } - assert.NoError(t, conf.Filter.CompileFilter()) + assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} ro := NewRunningOutput("test", m, conf, 1000, 10000) @@ -206,11 +198,11 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { func TestRunningOutput_TagIncludeMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - IsActive: true, + TagInclude: []string{"tag*"}, }, } - assert.NoError(t, conf.Filter.CompileFilter()) + assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} ro := NewRunningOutput("test", m, conf, 1000, 10000) @@ -227,9 +219,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) { // Test that we can write metrics with simple default setup. func TestRunningOutputDefault(t *testing.T) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &mockOutput{} @@ -252,9 +242,7 @@ func TestRunningOutputDefault(t *testing.T) { // FlushBufferWhenFull is set. func TestRunningOutputFlushWhenFull(t *testing.T) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &mockOutput{} @@ -283,9 +271,7 @@ func TestRunningOutputFlushWhenFull(t *testing.T) { // FlushBufferWhenFull is set, twice. 
func TestRunningOutputMultiFlushWhenFull(t *testing.T) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &mockOutput{} @@ -304,9 +290,7 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) { func TestRunningOutputWriteFail(t *testing.T) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &mockOutput{} @@ -339,9 +323,7 @@ func TestRunningOutputWriteFail(t *testing.T) { // Verify that the order of points is preserved during a write failure. func TestRunningOutputWriteFailOrder(t *testing.T) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &mockOutput{} @@ -379,9 +361,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { // Verify that the order of points is preserved during many write failures. func TestRunningOutputWriteFailOrder2(t *testing.T) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &mockOutput{} @@ -452,9 +432,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { // func TestRunningOutputWriteFailOrder3(t *testing.T) { conf := &OutputConfig{ - Filter: Filter{ - IsActive: false, - }, + Filter: Filter{}, } m := &mockOutput{} diff --git a/logger/logger.go b/logger/logger.go new file mode 100644 index 000000000..fabaabf39 --- /dev/null +++ b/logger/logger.go @@ -0,0 +1,58 @@ +package logger + +import ( + "io" + "log" + "os" + + "github.com/influxdata/wlog" +) + +// newTelegrafWriter returns a logging-wrapped writer. +func newTelegrafWriter(w io.Writer) io.Writer { + return &telegrafLog{ + writer: wlog.NewWriter(w), + } +} + +type telegrafLog struct { + writer io.Writer +} + +func (t *telegrafLog) Write(p []byte) (n int, err error) { + return t.writer.Write(p) +} + +// SetupLogging configures the logging output. +// debug will set the log level to DEBUG +// quiet will set the log level to ERROR +// logfile will direct the logging output to a file. Empty string is +// interpreted as stdout. If there is an error opening the file the +// logger will fallback to stdout. +func SetupLogging(debug, quiet bool, logfile string) { + if debug { + wlog.SetLevel(wlog.DEBUG) + } + if quiet { + wlog.SetLevel(wlog.ERROR) + } + + var oFile *os.File + if logfile != "" { + if _, err := os.Stat(logfile); os.IsNotExist(err) { + if oFile, err = os.Create(logfile); err != nil { + log.Printf("E! Unable to create %s (%s), using stdout", logfile, err) + oFile = os.Stdout + } + } else { + if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil { + log.Printf("E! Unable to append to %s (%s), using stdout", logfile, err) + oFile = os.Stdout + } + } + } else { + oFile = os.Stdout + } + + log.SetOutput(newTelegrafWriter(oFile)) +} diff --git a/metric.go b/metric.go index 0d186784a..937603cdc 100644 --- a/metric.go +++ b/metric.go @@ -6,6 +6,17 @@ import ( "github.com/influxdata/influxdb/client/v2" ) +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + Counter + Gauge + Untyped +) + type Metric interface { // Name returns the measurement name of the metric Name() string @@ -16,6 +27,9 @@ type Metric interface { // Time return the timestamp for the metric Time() time.Time + // Type returns the metric type. 
Can be either telegraf.Gauge or telegraf.Counter + Type() ValueType + // UnixNano returns the unix nano time of the metric UnixNano() int64 @@ -35,12 +49,11 @@ type Metric interface { // metric is a wrapper of the influxdb client.Point struct type metric struct { pt *client.Point + + mType ValueType } -// NewMetric returns a metric with the given timestamp. If a timestamp is not -// given, then data is sent to the database without a timestamp, in which case -// the server will assign local time upon reception. NOTE: it is recommended to -// send data with a timestamp. +// NewMetric returns an untyped metric. func NewMetric( name string, tags map[string]string, @@ -52,7 +65,46 @@ func NewMetric( return nil, err } return &metric{ - pt: pt, + pt: pt, + mType: Untyped, + }, nil +} + +// NewGaugeMetric returns a gauge metric. +// Gauge metrics should be used when the metric is can arbitrarily go up and +// down. ie, temperature, memory usage, cpu usage, etc. +func NewGaugeMetric( + name string, + tags map[string]string, + fields map[string]interface{}, + t time.Time, +) (Metric, error) { + pt, err := client.NewPoint(name, tags, fields, t) + if err != nil { + return nil, err + } + return &metric{ + pt: pt, + mType: Gauge, + }, nil +} + +// NewCounterMetric returns a Counter metric. +// Counter metrics should be used when the metric being created is an +// always-increasing counter. ie, net bytes received, requests served, errors, etc. +func NewCounterMetric( + name string, + tags map[string]string, + fields map[string]interface{}, + t time.Time, +) (Metric, error) { + pt, err := client.NewPoint(name, tags, fields, t) + if err != nil { + return nil, err + } + return &metric{ + pt: pt, + mType: Counter, }, nil } @@ -68,6 +120,10 @@ func (m *metric) Time() time.Time { return m.pt.Time() } +func (m *metric) Type() ValueType { + return m.mType +} + func (m *metric) UnixNano() int64 { return m.pt.UnixNano() } diff --git a/metric_test.go b/metric_test.go index 4182c9cc1..ebc392140 100644 --- a/metric_test.go +++ b/metric_test.go @@ -23,6 +23,51 @@ func TestNewMetric(t *testing.T) { m, err := NewMetric("cpu", tags, fields, now) assert.NoError(t, err) + assert.Equal(t, Untyped, m.Type()) + assert.Equal(t, tags, m.Tags()) + assert.Equal(t, fields, m.Fields()) + assert.Equal(t, "cpu", m.Name()) + assert.Equal(t, now, m.Time()) + assert.Equal(t, now.UnixNano(), m.UnixNano()) +} + +func TestNewGaugeMetric(t *testing.T) { + now := time.Now() + + tags := map[string]string{ + "host": "localhost", + "datacenter": "us-east-1", + } + fields := map[string]interface{}{ + "usage_idle": float64(99), + "usage_busy": float64(1), + } + m, err := NewGaugeMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + assert.Equal(t, Gauge, m.Type()) + assert.Equal(t, tags, m.Tags()) + assert.Equal(t, fields, m.Fields()) + assert.Equal(t, "cpu", m.Name()) + assert.Equal(t, now, m.Time()) + assert.Equal(t, now.UnixNano(), m.UnixNano()) +} + +func TestNewCounterMetric(t *testing.T) { + now := time.Now() + + tags := map[string]string{ + "host": "localhost", + "datacenter": "us-east-1", + } + fields := map[string]interface{}{ + "usage_idle": float64(99), + "usage_busy": float64(1), + } + m, err := NewCounterMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + assert.Equal(t, Counter, m.Type()) assert.Equal(t, tags, m.Tags()) assert.Equal(t, fields, m.Fields()) assert.Equal(t, "cpu", m.Name()) diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index 6bebf1e88..d6fcfdb91 100644 --- 
a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -27,6 +27,14 @@ The example plugin gathers metrics about example things - tag2 - measurement2 has the following tags: - tag3 + +### Sample Queries: + +These are some useful queries (to generate dashboards or other) to run against data from this plugin: + +``` +SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag +``` ### Example Output: diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index eb608723e..10f7fcd40 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -1,6 +1,8 @@ package aerospike import ( + "errors" + "log" "net" "strconv" "strings" @@ -11,7 +13,7 @@ import ( "github.com/influxdata/telegraf/internal/errchan" "github.com/influxdata/telegraf/plugins/inputs" - as "github.com/sparrc/aerospike-client-go" + as "github.com/aerospike/aerospike-client-go" ) type Aerospike struct { @@ -82,7 +84,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro return err } for k, v := range stats { - fields[strings.Replace(k, "-", "_", -1)] = parseValue(v) + val, err := parseValue(v) + if err == nil { + fields[strings.Replace(k, "-", "_", -1)] = val + } else { + log.Printf("I! skipping aerospike field %v with int64 overflow", k) + } } acc.AddFields("aerospike_node", fields, tags, time.Now()) @@ -110,7 +117,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro if len(parts) < 2 { continue } - nFields[strings.Replace(parts[0], "-", "_", -1)] = parseValue(parts[1]) + val, err := parseValue(parts[1]) + if err == nil { + nFields[strings.Replace(parts[0], "-", "_", -1)] = val + } else { + log.Printf("I! 
skipping aerospike field %v with int64 overflow", parts[0]) + } } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) } @@ -118,13 +130,16 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro return nil } -func parseValue(v string) interface{} { +func parseValue(v string) (interface{}, error) { if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { - return parsed + return parsed, nil + } else if _, err := strconv.ParseUint(v, 10, 64); err == nil { + // int64 overflow, yet valid uint64 + return nil, errors.New("Number is too large") } else if parsed, err := strconv.ParseBool(v); err == nil { - return parsed + return parsed, nil } else { - return v + return v, nil } } diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index 8463432f5..b20af1657 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -10,7 +10,7 @@ import ( func TestAerospikeStatistics(t *testing.T) { if testing.Short() { - t.Skip("Skipping integration test in short mode") + t.Skip("Skipping aerospike integration tests.") } a := &Aerospike{ @@ -29,7 +29,7 @@ func TestAerospikeStatistics(t *testing.T) { func TestAerospikeStatisticsPartialErr(t *testing.T) { if testing.Short() { - t.Skip("Skipping integration test in short mode") + t.Skip("Skipping aerospike integration tests.") } a := &Aerospike{ @@ -48,3 +48,20 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) { assert.True(t, acc.HasMeasurement("aerospike_namespace")) assert.True(t, acc.HasIntField("aerospike_node", "batch_error")) } + +func TestAerospikeParseValue(t *testing.T) { + // uint64 with value bigger than int64 max + val, err := parseValue("18446744041841121751") + assert.Nil(t, val) + assert.Error(t, err) + + // int values + val, err = parseValue("42") + assert.NoError(t, err) + assert.Equal(t, val, int64(42), "must be parsed as int") + + // string values + val, err = parseValue("BB977942A2CA502") + assert.NoError(t, err) + assert.Equal(t, val, `BB977942A2CA502`, "must be left as string") +} diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 529a13bae..058b230d8 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -22,10 +22,13 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/filestat" _ "github.com/influxdata/telegraf/plugins/inputs/graylog" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" + _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" + _ "github.com/influxdata/telegraf/plugins/inputs/http_listener" _ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" + _ "github.com/influxdata/telegraf/plugins/inputs/iptables" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" @@ -60,6 +63,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" _ "github.com/influxdata/telegraf/plugins/inputs/snmp" + _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/statsd" _ "github.com/influxdata/telegraf/plugins/inputs/sysstat" diff --git 
a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index e7edf7153..dc4bb2b72 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -274,7 +274,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { m = newCassandraMetric(serverTokens["host"], metric, acc) } else { // unsupported metric type - log.Printf("Unsupported Cassandra metric [%s], skipping", + log.Printf("I! Unsupported Cassandra metric [%s], skipping", metric) continue } diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index ab358daaa..49ae09e73 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -2,7 +2,9 @@ Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. -The plugin works by scanning the configured SocketDir for OSD and MON socket files. When it finds +*Admin Socket Stats* + +This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump** The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are @@ -27,11 +29,26 @@ Would be parsed into the following metrics, all of which would be tagged with co - refresh_latency.sum: 5378.794002000 +*Cluster Stats* + +This gatherer works by invoking ceph commands against the cluster thus only requires the ceph client, valid +ceph configuration and an access key to function (the ceph_config and ceph_user configuration variables work +in conjunction to specify these prerequisites). It may be run on any server you wish which has access to +the cluster. The currently supported commands are: + +* ceph status +* ceph df +* ceph osd pool stats + ### Configuration: ``` # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. [[inputs.ceph]] + ## This is the recommended interval to poll. Too frequent and you will lose + ## data points due to timeouts during rebalancing and recovery + interval = '1m' + ## All configuration values are optional, defaults are shown below ## location of ceph binary @@ -46,15 +63,86 @@ Would be parsed into the following metrics, all of which would be tagged with co ## suffix used to identify socket files socket_suffix = "asok" + + ## Ceph user to authenticate as, ceph will search for the corresponding keyring + ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the + ## client section of ceph.conf for example: + ## + ## [client.telegraf] + ## keyring = /etc/ceph/client.telegraf.keyring + ## + ## Consult the ceph documentation for more detail on keyring generation. + ceph_user = "client.admin" + + ## Ceph configuration to use to locate the cluster + ceph_config = "/etc/ceph/ceph.conf" + + ## Whether to gather statistics via the admin socket + gather_admin_socket_stats = true + + ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config + ## to be specified + gather_cluster_stats = true ``` ### Measurements & Fields: +*Admin Socket Stats* + All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go. 
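Before the Cluster Stats field listings below, a note on where the dotted admin-socket field names come from: the perf dump JSON is flattened recursively, one collection per top-level key (the plugin's actual implementation is the flatten function in ceph.go). A rough, self-contained sketch of that idea, reusing the refresh_latency numbers from the example above; the top-level collection key "paxos" is an assumption for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten emits dotted field names, so {"refresh_latency": {"sum": ...}}
// becomes refresh_latency.sum.
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch val := v.(type) {
	case map[string]interface{}:
		for k, sub := range val {
			key := k
			if prefix != "" {
				key = prefix + "." + k
			}
			flatten(key, sub, out)
		}
	case float64:
		out[prefix] = val
	default:
		// non-numeric leaves are ignored in this sketch
	}
}

func main() {
	// "paxos" is a hypothetical collection key for illustration
	dump := `{"paxos": {"refresh": 9363435,
		"refresh_latency": {"avgcount": 9363435, "sum": 5378.794002}}}`
	var tree map[string]interface{}
	if err := json.Unmarshal([]byte(dump), &tree); err != nil {
		panic(err)
	}
	// each top-level key becomes the collection tag on the ceph
	// measurement; everything beneath it is flattened into fields
	for collection, sub := range tree {
		fields := make(map[string]float64)
		flatten("", sub, fields)
		fmt.Println(collection, fields)
	}
}
```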
+*Cluster Stats*
+
+* ceph\_osdmap
+  * epoch (float)
+  * full (boolean)
+  * nearfull (boolean)
+  * num\_in\_osds (float)
+  * num\_osds (float)
+  * num\_remapped\_pgs (float)
+  * num\_up\_osds (float)
+
+* ceph\_pgmap
+  * bytes\_avail (float)
+  * bytes\_total (float)
+  * bytes\_used (float)
+  * data\_bytes (float)
+  * num\_pgs (float)
+  * op\_per\_sec (float)
+  * read\_bytes\_sec (float)
+  * version (float)
+  * write\_bytes\_sec (float)
+  * recovering\_bytes\_per\_sec (float)
+  * recovering\_keys\_per\_sec (float)
+  * recovering\_objects\_per\_sec (float)
+
+* ceph\_pgmap\_state
+  * state name e.g. active+clean (float)
+
+* ceph\_usage
+  * total\_avail\_bytes (float)
+  * total\_bytes (float)
+  * total\_used\_bytes (float)
+
+* ceph\_pool\_usage
+  * bytes\_used (float)
+  * kb\_used (float)
+  * max\_avail (float)
+  * objects (float)
+
+* ceph\_pool\_stats
+  * op\_per\_sec (float)
+  * read\_bytes\_sec (float)
+  * write\_bytes\_sec (float)
+  * recovering\_object\_per\_sec (float)
+  * recovering\_bytes\_per\_sec (float)
+  * recovering\_keys\_per\_sec (float)
 
 ### Tags:
 
+*Admin Socket Stats*
+
 All measurements will have the following tags:
 
 - type: either 'osd' or 'mon' to indicate which type of node was queried
 
@@ -96,9 +184,21 @@ All measurements will have the following tags:
 
 - throttle-osd_client_bytes
 - throttle-osd_client_messages
 
+*Cluster Stats*
+
+* ceph\_pg\_state has the following tags:
+  * state (state for which the value applies e.g. active+clean, active+remapped+backfill)
+* ceph\_pool\_usage has the following tags:
+  * id
+  * name
+* ceph\_pool\_stats has the following tags:
+  * id
+  * name
 
 ### Example Output:
 
+*Admin Socket Stats*
+
 telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
 * Plugin: ceph, Collection 1
@@ -107,3 +207,16 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
 > ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
 > ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
 
+
+*Cluster Stats*
+
+
+> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
+> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
+> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
+> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
+> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
+> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
+> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
+> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
+
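To make the ceph_pgmap_state shape above concrete (one field per placement-group state), here is a minimal, hypothetical decode of the pgs_by_state array returned by `ceph status --format json`; the plugin's real version is decodeStatusPgmapState in ceph.go below:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// status is a trimmed-down view of the pgmap section of
// `ceph status --format json`.
type status struct {
	PGMap struct {
		PGsByState []struct {
			StateName string  `json:"state_name"`
			Count     float64 `json:"count"`
		} `json:"pgs_by_state"`
	} `json:"pgmap"`
}

func main() {
	raw := `{"pgmap": {"pgs_by_state": [{"state_name": "active+clean", "count": 22952}]}}`
	var s status
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		panic(err)
	}
	// each state name becomes a field on the ceph_pgmap_state measurement
	fields := make(map[string]interface{})
	for _, st := range s.PGMap.PGsByState {
		fields[st.StateName] = st.Count
	}
	fmt.Println(fields) // map[active+clean:22952]
}
```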
diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index d8ebf5017..9f0a6ac78 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -23,33 +23,15 @@ const ( ) type Ceph struct { - CephBinary string - OsdPrefix string - MonPrefix string - SocketDir string - SocketSuffix string -} - -func (c *Ceph) setDefaults() { - if c.CephBinary == "" { - c.CephBinary = "/usr/bin/ceph" - } - - if c.OsdPrefix == "" { - c.OsdPrefix = osdPrefix - } - - if c.MonPrefix == "" { - c.MonPrefix = monPrefix - } - - if c.SocketDir == "" { - c.SocketDir = "/var/run/ceph" - } - - if c.SocketSuffix == "" { - c.SocketSuffix = sockSuffix - } + CephBinary string + OsdPrefix string + MonPrefix string + SocketDir string + SocketSuffix string + CephUser string + CephConfig string + GatherAdminSocketStats bool + GatherClusterStats bool } func (c *Ceph) Description() string { @@ -57,6 +39,10 @@ func (c *Ceph) Description() string { } var sampleConfig = ` + ## This is the recommended interval to poll. Too frequent and you will lose + ## data points due to timeouts during rebalancing and recovery + interval = '1m' + ## All configuration values are optional, defaults are shown below ## location of ceph binary @@ -71,6 +57,18 @@ var sampleConfig = ` ## suffix used to identify socket files socket_suffix = "asok" + + ## Ceph user to authenticate as + ceph_user = "client.admin" + + ## Ceph configuration to use to locate the cluster + ceph_config = "/etc/ceph/ceph.conf" + + ## Whether to gather statistics via the admin socket + gather_admin_socket_stats = true + + ## Whether to gather statistics via ceph commands + gather_cluster_stats = true ` func (c *Ceph) SampleConfig() string { @@ -78,7 +76,22 @@ func (c *Ceph) SampleConfig() string { } func (c *Ceph) Gather(acc telegraf.Accumulator) error { - c.setDefaults() + if c.GatherAdminSocketStats { + if err := c.gatherAdminSocketStats(acc); err != nil { + return err + } + } + + if c.GatherClusterStats { + if err := c.gatherClusterStats(acc); err != nil { + return err + } + } + + return nil +} + +func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { sockets, err := findSockets(c) if err != nil { return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err) @@ -87,12 +100,12 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error { for _, s := range sockets { dump, err := perfDump(c.CephBinary, s) if err != nil { - log.Printf("error reading from socket '%s': %v", s.socket, err) + log.Printf("E! error reading from socket '%s': %v", s.socket, err) continue } data, err := parseDump(dump) if err != nil { - log.Printf("error parsing dump from socket '%s': %v", s.socket, err) + log.Printf("E! 
error parsing dump from socket '%s': %v", s.socket, err) continue } for tag, metrics := range *data { @@ -104,8 +117,46 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error { return nil } +func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error { + jobs := []struct { + command string + parser func(telegraf.Accumulator, string) error + }{ + {"status", decodeStatus}, + {"df", decodeDf}, + {"osd pool stats", decodeOsdPoolStats}, + } + + // For each job, execute against the cluster, parse and accumulate the data points + for _, job := range jobs { + output, err := c.exec(job.command) + if err != nil { + return fmt.Errorf("error executing command: %v", err) + } + err = job.parser(acc, output) + if err != nil { + return fmt.Errorf("error parsing output: %v", err) + } + } + + return nil +} + func init() { - inputs.Add(measurement, func() telegraf.Input { return &Ceph{} }) + c := Ceph{ + CephBinary: "/usr/bin/ceph", + OsdPrefix: osdPrefix, + MonPrefix: monPrefix, + SocketDir: "/var/run/ceph", + SocketSuffix: sockSuffix, + CephUser: "client.admin", + CephConfig: "/etc/ceph/ceph.conf", + GatherAdminSocketStats: true, + GatherClusterStats: false, + } + + inputs.Add(measurement, func() telegraf.Input { return &c }) + } var perfDump = func(binary string, socket *socket) (string, error) { @@ -242,8 +293,197 @@ func flatten(data interface{}) []*metric { } } default: - log.Printf("Ignoring unexpected type '%T' for value %v", val, val) + log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val) } return metrics } + +func (c *Ceph) exec(command string) (string, error) { + cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"} + cmdArgs = append(cmdArgs, strings.Split(command, " ")...) + + cmd := exec.Command(c.CephBinary, cmdArgs...) + + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", fmt.Errorf("error running ceph %v: %s", command, err) + } + + output := out.String() + + // Ceph doesn't sanitize its output, and may return invalid JSON. Patch this + // up for them, as having some inaccurate data is better than none. 
+ output = strings.Replace(output, "-inf", "0", -1) + output = strings.Replace(output, "inf", "0", -1) + + return output, nil +} + +func decodeStatus(acc telegraf.Accumulator, input string) error { + data := make(map[string]interface{}) + err := json.Unmarshal([]byte(input), &data) + if err != nil { + return fmt.Errorf("failed to parse json: '%s': %v", input, err) + } + + err = decodeStatusOsdmap(acc, data) + if err != nil { + return err + } + + err = decodeStatusPgmap(acc, data) + if err != nil { + return err + } + + err = decodeStatusPgmapState(acc, data) + if err != nil { + return err + } + + return nil +} + +func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error { + osdmap, ok := data["osdmap"].(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement) + } + fields, ok := osdmap["osdmap"].(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement) + } + acc.AddFields("ceph_osdmap", fields, map[string]string{}) + return nil +} + +func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error { + pgmap, ok := data["pgmap"].(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement) + } + fields := make(map[string]interface{}) + for key, value := range pgmap { + switch value.(type) { + case float64: + fields[key] = value + } + } + acc.AddFields("ceph_pgmap", fields, map[string]string{}) + return nil +} + +func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error { + pgmap, ok := data["pgmap"].(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement) + } + fields := make(map[string]interface{}) + for key, value := range pgmap { + switch value.(type) { + case []interface{}: + if key != "pgs_by_state" { + continue + } + for _, state := range value.([]interface{}) { + state_map, ok := state.(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode pg state", measurement) + } + state_name, ok := state_map["state_name"].(string) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement) + } + state_count, ok := state_map["count"].(float64) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement) + } + fields[state_name] = state_count + } + } + } + acc.AddFields("ceph_pgmap_state", fields, map[string]string{}) + return nil +} + +func decodeDf(acc telegraf.Accumulator, input string) error { + data := make(map[string]interface{}) + err := json.Unmarshal([]byte(input), &data) + if err != nil { + return fmt.Errorf("failed to parse json: '%s': %v", input, err) + } + + // ceph.usage: records global utilization and number of objects + stats_fields, ok := data["stats"].(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode df stats", measurement) + } + acc.AddFields("ceph_usage", stats_fields, map[string]string{}) + + // ceph.pool.usage: records per pool utilization and number of objects + pools, ok := data["pools"].([]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode df pools", measurement) + } + + for _, pool := range pools { + pool_map, ok := pool.(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode df pool", measurement) + } + pool_name, ok := pool_map["name"].(string) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode df pool 
name", measurement) + } + fields, ok := pool_map["stats"].(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement) + } + tags := map[string]string{ + "name": pool_name, + } + acc.AddFields("ceph_pool_usage", fields, tags) + } + + return nil +} + +func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error { + data := make([]map[string]interface{}, 0) + err := json.Unmarshal([]byte(input), &data) + if err != nil { + return fmt.Errorf("failed to parse json: '%s': %v", input, err) + } + + // ceph.pool.stats: records pre pool IO and recovery throughput + for _, pool := range data { + pool_name, ok := pool["pool_name"].(string) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement) + } + // Note: the 'recovery' object looks broken (in hammer), so it's omitted + objects := []string{ + "client_io_rate", + "recovery_rate", + } + fields := make(map[string]interface{}) + for _, object := range objects { + perfdata, ok := pool[object].(map[string]interface{}) + if !ok { + return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement) + } + for key, value := range perfdata { + fields[key] = value + } + } + tags := map[string]string{ + "name": pool_name, + } + acc.AddFields("ceph_pool_stats", fields, tags) + } + + return nil +} diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index ce96943be..f7b17ece3 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -65,12 +65,17 @@ func TestFindSockets(t *testing.T) { assert.NoError(t, err) }() c := &Ceph{ - CephBinary: "foo", - SocketDir: tmpdir, + CephBinary: "foo", + OsdPrefix: "ceph-osd", + MonPrefix: "ceph-mon", + SocketDir: tmpdir, + SocketSuffix: "asok", + CephUser: "client.admin", + CephConfig: "/etc/ceph/ceph.conf", + GatherAdminSocketStats: true, + GatherClusterStats: false, } - c.setDefaults() - for _, st := range sockTestParams { createTestFiles(tmpdir, st) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index df62e62bc..4430e48fd 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -34,6 +34,11 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## Metric Statistic Namespace (required) namespace = 'AWS/ELB' + ## Maximum requests per second. Note that the global default AWS rate limit is + ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 10. Optional - default value is 10. + ratelimit = 10 + ## Metrics to Pull (optional) ## Defaults to all Metrics in Namespace if nothing is provided ## Refreshes Namespace available metrics every 1h diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index f3019eb4b..ebc4147d8 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -33,6 +33,7 @@ type ( Namespace string `toml:"namespace"` Metrics []*Metric `toml:"metrics"` CacheTTL internal.Duration `toml:"cache_ttl"` + RateLimit int `toml:"ratelimit"` client cloudwatchClient metricCache *MetricCache } @@ -96,6 +97,11 @@ func (c *CloudWatch) SampleConfig() string { ## Metric Statistic Namespace (required) namespace = 'AWS/ELB' + ## Maximum requests per second. Note that the global default AWS rate limit is + ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 10. Optional - default value is 10. 
+ ratelimit = 10 + ## Metrics to Pull (optional) ## Defaults to all Metrics in Namespace if nothing is provided ## Refreshes Namespace available metrics every 1h @@ -175,7 +181,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { // limit concurrency or we can easily exhaust user connection limit // see cloudwatch API request limits: // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html - lmtr := limiter.NewRateLimiter(10, time.Second) + lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second) defer lmtr.Stop() var wg sync.WaitGroup wg.Add(len(metrics)) @@ -195,7 +201,8 @@ func init() { inputs.Add("cloudwatch", func() telegraf.Input { ttl, _ := time.ParseDuration("1hr") return &CloudWatch{ - CacheTTL: internal.Duration{Duration: ttl}, + CacheTTL: internal.Duration{Duration: ttl}, + RateLimit: 10, } }) } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 8f8a3ad0b..73fca9253 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -58,6 +58,7 @@ func TestGather(t *testing.T) { Namespace: "AWS/ELB", Delay: internalDuration, Period: internalDuration, + RateLimit: 10, } var acc testutil.Accumulator diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 68bf8adba..841aedb54 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -93,13 +93,14 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error { contents, err := ioutil.ReadFile(fName) if err != nil { - log.Printf("failed to read file '%s': %v", fName, err) + log.Printf("E! failed to read file '%s': %v", fName, err) + continue } v := strings.TrimSpace(string(contents)) fields[metricKey], err = strconv.ParseFloat(v, 64) if err != nil { - log.Printf("failed to parse metric, expected number but "+ + log.Printf("E! 
failed to parse metric, expected number but "+ " found '%s': %v", v, err) } } diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index 8fda04d41..222ecbf4e 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -2,9 +2,11 @@ package couchbase import ( "encoding/json" - couchbase "github.com/couchbase/go-couchbase" - "github.com/influxdata/telegraf/testutil" "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/couchbase/go-couchbase" ) func TestGatherServer(t *testing.T) { @@ -13,13 +15,9 @@ func TestGatherServer(t *testing.T) { t.Fatal("parse poolsDefaultResponse", err) } - var bucket couchbase.Bucket - if err := json.Unmarshal([]byte(bucketResponse), &bucket); err != nil { + if err := json.Unmarshal([]byte(bucketResponse), &pool.BucketMap); err != nil { t.Fatal("parse bucketResponse", err) } - pool.BucketMap = map[string]couchbase.Bucket{ - bucket.Name: bucket, - } var cb Couchbase var acc testutil.Accumulator cb.gatherServer("mycluster", &acc, &pool) @@ -40,11 +38,10 @@ func TestGatherServer(t *testing.T) { "mem_used": 202156957464.0, }, map[string]string{"cluster": "mycluster", "bucket": "blastro-df"}) - } // From `/pools/default` on a real cluster const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. 
Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":1401
70150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remote
Clusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` // From `/pools/default/buckets/blastro-df` on a real cluster -const bucketResponse string = 
`{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.1
5.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0
,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,
4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],
[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}` +const bucketResponse string = `{"blastro-df": {"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate
":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,
"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0
],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[
6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}}` diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 52f9a7adb..5e8910677 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -103,6 +103,9 @@ based on the availability of per-cpu stats on your system. - n_used_file_descriptors - n_cpus - n_containers + - n_containers_running + - n_containers_stopped + - n_containers_paused - n_images - n_goroutines - n_listener_events @@ -153,6 +156,9 @@ based on the availability of per-cpu stats on your system. > docker n_cpus=8i 1456926671065383978 > docker n_used_file_descriptors=15i 1456926671065383978 > docker n_containers=7i 1456926671065383978 +> docker n_containers_running=7i 1456926671065383978 +> docker n_containers_stopped=3i 1456926671065383978 +> docker n_containers_paused=0i 1456926671065383978 > docker n_images=152i 1456926671065383978 > docker n_goroutines=36i 1456926671065383978 > docker n_listener_events=0i 1456926671065383978 diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index dfd768c1a..e2c488dc8 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -25,8 +25,11 @@ type Docker struct { Endpoint string ContainerNames []string Timeout internal.Duration + PerDevice bool `toml:"perdevice"` + Total bool `toml:"total"` - client DockerClient + client DockerClient + engine_host string } // DockerClient interface, useful for testing @@ -58,6 +61,13 @@ var sampleConfig = ` container_names = [] ## Timeout for docker list, info, and stats commands timeout = "5s" + + ## Whether to report for each container per-device blkio (8:0, 8:1...) and + ## network (eth0, eth1, ...) stats or not + perdevice = true + ## Whether to report for each container total blkio and network stats or not + total = false + ` // Description returns input description @@ -116,7 +126,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { defer wg.Done() err := d.gatherContainer(c, acc) if err != nil { - log.Printf("Error gathering container %s stats: %s\n", + log.Printf("E! 
Error gathering container %s stats: %s\n", c.Names, err.Error()) } }(container) @@ -138,11 +148,15 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { if err != nil { return err } + d.engine_host = info.Name fields := map[string]interface{}{ "n_cpus": info.NCPU, "n_used_file_descriptors": info.NFd, "n_containers": info.Containers, + "n_containers_running": info.ContainersRunning, + "n_containers_stopped": info.ContainersStopped, + "n_containers_paused": info.ContainersPaused, "n_images": info.Images, "n_goroutines": info.NGoroutines, "n_listener_events": info.NEventsListener, @@ -150,11 +164,11 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { // Add metrics acc.AddFields("docker", fields, - nil, + map[string]string{"engine_host": d.engine_host}, now) acc.AddFields("docker", map[string]interface{}{"memory_total": info.MemTotal}, - map[string]string{"unit": "bytes"}, + map[string]string{"unit": "bytes", "engine_host": d.engine_host}, now) // Get storage metrics for _, rawData := range info.DriverStatus { @@ -168,7 +182,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { // pool blocksize acc.AddFields("docker", map[string]interface{}{"pool_blocksize": value}, - map[string]string{"unit": "bytes"}, + map[string]string{"unit": "bytes", "engine_host": d.engine_host}, now) } else if strings.HasPrefix(name, "data_space_") { // data space @@ -183,13 +197,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { if len(dataFields) > 0 { acc.AddFields("docker_data", dataFields, - map[string]string{"unit": "bytes"}, + map[string]string{"unit": "bytes", "engine_host": d.engine_host}, now) } if len(metadataFields) > 0 { acc.AddFields("docker_metadata", metadataFields, - map[string]string{"unit": "bytes"}, + map[string]string{"unit": "bytes", "engine_host": d.engine_host}, now) } return nil @@ -216,6 +230,7 @@ func (d *Docker) gatherContainer( imageVersion = imageParts[1] } tags := map[string]string{ + "engine_host": d.engine_host, "container_name": cname, "container_image": imageName, "container_version": imageVersion, @@ -246,7 +261,7 @@ func (d *Docker) gatherContainer( tags[k] = label } - gatherContainerStats(v, acc, tags, container.ID) + gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total) return nil } @@ -256,6 +271,8 @@ func gatherContainerStats( acc telegraf.Accumulator, tags map[string]string, id string, + perDevice bool, + total bool, ) { now := stat.Read @@ -323,6 +340,7 @@ func gatherContainerStats( acc.AddFields("docker_container_cpu", fields, percputags, now) } + totalNetworkStatMap := make(map[string]interface{}) for network, netstats := range stat.Networks { netfields := map[string]interface{}{ "rx_dropped": netstats.RxDropped, @@ -336,12 +354,35 @@ func gatherContainerStats( "container_id": id, } // Create a new network tag dictionary for the "network" tag - nettags := copyTags(tags) - nettags["network"] = network - acc.AddFields("docker_container_net", netfields, nettags, now) + if perDevice { + nettags := copyTags(tags) + nettags["network"] = network + acc.AddFields("docker_container_net", netfields, nettags, now) + } + if total { + for field, value := range netfields { + if field == "container_id" { + continue + } + _, ok := totalNetworkStatMap[field] + if ok { + totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + value.(uint64) + } else { + totalNetworkStatMap[field] = value + } + } + } } - gatherBlockIOMetrics(stat, acc, tags, now, id) + // totalNetworkStatMap could be empty if container is 
running with --net=host. + if total && len(totalNetworkStatMap) != 0 { + nettags := copyTags(tags) + nettags["network"] = "total" + totalNetworkStatMap["container_id"] = id + acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, now) + } + + gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total) } func calculateMemPercent(stat *types.StatsJSON) float64 { @@ -370,6 +411,8 @@ func gatherBlockIOMetrics( tags map[string]string, now time.Time, id string, + perDevice bool, + total bool, ) { blkioStats := stat.BlkioStats // Make a map of devices to their block io stats @@ -431,11 +474,33 @@ func gatherBlockIOMetrics( deviceStatMap[device]["sectors_recursive"] = metric.Value } + totalStatMap := make(map[string]interface{}) for device, fields := range deviceStatMap { - iotags := copyTags(tags) - iotags["device"] = device fields["container_id"] = id - acc.AddFields("docker_container_blkio", fields, iotags, now) + if perDevice { + iotags := copyTags(tags) + iotags["device"] = device + acc.AddFields("docker_container_blkio", fields, iotags, now) + } + if total { + for field, value := range fields { + if field == "container_id" { + continue + } + _, ok := totalStatMap[field] + if ok { + totalStatMap[field] = totalStatMap[field].(uint64) + value.(uint64) + } else { + totalStatMap[field] = value + } + } + } + } + if total { + totalStatMap["container_id"] = id + iotags := copyTags(tags) + iotags["device"] = "total" + acc.AddFields("docker_container_blkio", totalStatMap, iotags, now) } } @@ -480,7 +545,8 @@ func parseSize(sizeStr string) (int64, error) { func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{ - Timeout: internal.Duration{Duration: time.Second * 5}, + PerDevice: true, + Timeout: internal.Duration{Duration: time.Second * 5}, } }) } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index b1c76f5af..21960a4d8 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -24,7 +24,7 @@ func TestDockerGatherContainerStats(t *testing.T) { "container_name": "redis", "container_image": "redis/image", } - gatherContainerStats(stats, &acc, tags, "123456789") + gatherContainerStats(stats, &acc, tags, "123456789", true, true) // test docker_container_net measurement netfields := map[string]interface{}{ @@ -42,6 +42,21 @@ func TestDockerGatherContainerStats(t *testing.T) { nettags["network"] = "eth0" acc.AssertContainsTaggedFields(t, "docker_container_net", netfields, nettags) + netfields = map[string]interface{}{ + "rx_dropped": uint64(6), + "rx_bytes": uint64(8), + "rx_errors": uint64(10), + "tx_packets": uint64(12), + "tx_dropped": uint64(6), + "rx_packets": uint64(8), + "tx_errors": uint64(10), + "tx_bytes": uint64(12), + "container_id": "123456789", + } + nettags = copyTags(tags) + nettags["network"] = "total" + acc.AssertContainsTaggedFields(t, "docker_container_net", netfields, nettags) + // test docker_blkio measurement blkiotags := copyTags(tags) blkiotags["device"] = "6:0" @@ -52,6 +67,15 @@ func TestDockerGatherContainerStats(t *testing.T) { } acc.AssertContainsTaggedFields(t, "docker_container_blkio", blkiofields, blkiotags) + blkiotags = copyTags(tags) + blkiotags["device"] = "total" + blkiofields = map[string]interface{}{ + "io_service_bytes_recursive_read": uint64(100), + "io_serviced_recursive_write": uint64(302), + "container_id": "123456789", + } + acc.AssertContainsTaggedFields(t, "docker_container_blkio", blkiofields, blkiotags) + // test docker_container_mem 
measurement memfields := map[string]interface{}{ "max_usage": uint64(1001), @@ -186,6 +210,17 @@ func testStats() *types.StatsJSON { TxBytes: 4, } + stats.Networks["eth1"] = types.NetworkStats{ + RxDropped: 5, + RxBytes: 6, + RxErrors: 7, + TxPackets: 8, + TxDropped: 5, + RxPackets: 6, + TxErrors: 7, + TxBytes: 8, + } + sbr := types.BlkioStatEntry{ Major: 6, Minor: 0, @@ -198,11 +233,19 @@ func testStats() *types.StatsJSON { Op: "write", Value: 101, } + sr2 := types.BlkioStatEntry{ + Major: 6, + Minor: 1, + Op: "write", + Value: 201, + } stats.BlkioStats.IoServiceBytesRecursive = append( stats.BlkioStats.IoServiceBytesRecursive, sbr) stats.BlkioStats.IoServicedRecursive = append( stats.BlkioStats.IoServicedRecursive, sr) + stats.BlkioStats.IoServicedRecursive = append( + stats.BlkioStats.IoServicedRecursive, sr2) return stats } @@ -213,6 +256,9 @@ type FakeDockerClient struct { func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) { env := types.Info{ Containers: 108, + ContainersRunning: 98, + ContainersStopped: 6, + ContainersPaused: 3, OomKillDisable: false, SystemTime: "2016-02-24T00:55:09.15073105-05:00", NEventsListener: 0, @@ -354,10 +400,13 @@ func TestDockerGatherInfo(t *testing.T) { "n_cpus": int(4), "n_used_file_descriptors": int(19), "n_containers": int(108), + "n_containers_running": int(98), + "n_containers_stopped": int(6), + "n_containers_paused": int(3), "n_images": int(199), "n_goroutines": int(39), }, - map[string]string{}, + map[string]string{"engine_host": "absol"}, ) acc.AssertContainsTaggedFields(t, @@ -368,7 +417,8 @@ func TestDockerGatherInfo(t *testing.T) { "available": int64(36530000000), }, map[string]string{ - "unit": "bytes", + "unit": "bytes", + "engine_host": "absol", }, ) acc.AssertContainsTaggedFields(t, @@ -382,6 +432,7 @@ func TestDockerGatherInfo(t *testing.T) { "container_image": "quay.io/coreos/etcd", "cpu": "cpu3", "container_version": "v2.2.2", + "engine_host": "absol", }, ) acc.AssertContainsTaggedFields(t, @@ -424,6 +475,7 @@ func TestDockerGatherInfo(t *testing.T) { "container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", }, map[string]string{ + "engine_host": "absol", "container_name": "etcd2", "container_image": "quay.io/coreos/etcd", "container_version": "v2.2.2", diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 526bc3f39..2cf6f4d77 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -8,9 +8,18 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference ``` [[inputs.elasticsearch]] + ## specify a list of one or more Elasticsearch servers servers = ["http://localhost:9200"] + + ## Timeout for HTTP requests to the elastic search server(s) + http_timeout = "5s" + + ## set local to false when you want to read the indices stats from all nodes + ## within the cluster local = true - cluster_health = true + + ## set cluster_health to true when you want to also obtain cluster level stats + cluster_health = false ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index ef0a4c199..896e03f2e 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -62,6 +62,9 @@ const sampleConfig = ` ## specify a list of one or more Elasticsearch servers servers = ["http://localhost:9200"] + ## Timeout for HTTP requests to the elastic 
search server(s) + http_timeout = "5s" + ## set local to false when you want to read the indices stats from all nodes ## within the cluster local = true @@ -82,6 +85,7 @@ const sampleConfig = ` type Elasticsearch struct { Local bool Servers []string + HttpTimeout internal.Duration ClusterHealth bool SSLCA string `toml:"ssl_ca"` // Path to CA file SSLCert string `toml:"ssl_cert"` // Path to host cert file @@ -92,7 +96,9 @@ type Elasticsearch struct { // NewElasticsearch return a new instance of Elasticsearch func NewElasticsearch() *Elasticsearch { - return &Elasticsearch{} + return &Elasticsearch{ + HttpTimeout: internal.Duration{Duration: time.Second * 5}, + } } // SampleConfig returns sample configuration for this plugin. @@ -150,12 +156,12 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) { return nil, err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: e.HttpTimeout.Duration, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: e.HttpTimeout.Duration, } return client, nil diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 060a4f308..f2fc60e5c 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -5,6 +5,7 @@ import ( "fmt" "os/exec" "path/filepath" + "runtime" "strings" "sync" "syscall" @@ -114,9 +115,36 @@ func (c CommandRunner) Run( } } + out = removeCarriageReturns(out) return out.Bytes(), nil } +// removeCarriageReturns removes all carriage returns from the input if the +// OS is Windows. It does not return any errors. +func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { + if runtime.GOOS == "windows" { + var buf bytes.Buffer + for { + byt, er := b.ReadBytes(0x0D) + end := len(byt) + if nil == er { + end -= 1 + } + if nil != byt { + buf.Write(byt[:end]) + } else { + break + } + if nil != er { + break + } + } + b = buf + } + return b + +} + func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) { defer wg.Done() diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index cd9c9eaef..ac527a12f 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -1,7 +1,9 @@ package exec import ( + "bytes" "fmt" + "runtime" "testing" "github.com/influxdata/telegraf" @@ -46,6 +48,29 @@ cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 ` +type CarriageReturnTest struct { + input []byte + output []byte +} + +var crTests = []CarriageReturnTest{ + {[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0d, 0x0a, 0x4c, 0x69, + 0x6e, 0x65, 0x20, 0x32, 0x0d, 0x0a, 0x4c, 0x69, 0x6e, 0x65, + 0x20, 0x33}, + []byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e, + 0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}}, + {[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e, + 0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}, + []byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e, + 0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}}, + {[]byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c, + 0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20, + 0x6c, 0x69, 0x6e, 0x65}, + []byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c, + 0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20, + 0x6c, 0x69, 0x6e, 0x65}}, +} + type runnerMock struct { out 
[]byte err error @@ -217,3 +242,21 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) { } acc.AssertContainsFields(t, "metric", fields) } + +func TestRemoveCarriageReturns(t *testing.T) { + if runtime.GOOS == "windows" { + // Test that all carriage returns are removed + for _, test := range crTests { + b := bytes.NewBuffer(test.input) + out := removeCarriageReturns(*b) + assert.True(t, bytes.Equal(test.output, out.Bytes())) + } + } else { + // Test that the buffer is returned unaltered + for _, test := range crTests { + b := bytes.NewBuffer(test.input) + out := removeCarriageReturns(*b) + assert.True(t, bytes.Equal(test.input, out.Bytes())) + } + } +} diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md new file mode 100644 index 000000000..d87ae625d --- /dev/null +++ b/plugins/inputs/hddtemp/README.md @@ -0,0 +1,22 @@ +# Hddtemp Input Plugin + +This plugin reads data from hddtemp daemon + +## Requirements + +Hddtemp should be installed and its daemon running + +## Configuration + +``` +[[inputs.hddtemp]] +## By default, telegraf gathers temps data from all disks detected by the +## hddtemp. +## +## Only collect temps from the selected disks. +## +## A * as the device name will return the temperature values of all disks. +## +# address = "127.0.0.1:7634" +# devices = ["sda", "*"] +``` diff --git a/plugins/inputs/hddtemp/go-hddtemp/LICENSE b/plugins/inputs/hddtemp/go-hddtemp/LICENSE new file mode 100644 index 000000000..d5aed19c6 --- /dev/null +++ b/plugins/inputs/hddtemp/go-hddtemp/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mendelson Gusmão + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
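As context for the `Fetch` helper added next: the hddtemp daemon replies over TCP with one pipe-delimited record per disk, five fields each (e.g. `|/dev/sda|ST380011A|46|C|`), and the parser below walks the reply in strides of five. A minimal standalone sketch of driving the package, assuming a daemon listening on hddtemp's default port 7634:

```go
package main

import (
	"fmt"
	"log"

	// Import path matches the go-hddtemp package added in this change.
	hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
)

func main() {
	// Fetch dials the daemon, reads the entire pipe-delimited reply,
	// and returns one entry per detected disk.
	disks, err := hddtemp.Fetch("127.0.0.1:7634")
	if err != nil {
		log.Fatalf("querying hddtemp: %v", err)
	}
	for _, d := range disks {
		// Status is non-empty (e.g. "SLP") when the daemon reported a
		// state instead of a numeric temperature for the disk.
		fmt.Printf("%s (%s): %d %s %s\n",
			d.DeviceName, d.Model, d.Temperature, d.Unit, d.Status)
	}
}
```

This same five-field stride is what the plugin's `Gather` method relies on when it turns each disk into tags plus a temperature field.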
diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go
new file mode 100644
index 000000000..d7d650b79
--- /dev/null
+++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp.go
@@ -0,0 +1,61 @@
+package hddtemp
+
+import (
+	"bytes"
+	"io"
+	"net"
+	"strconv"
+	"strings"
+)
+
+type disk struct {
+	DeviceName  string
+	Model       string
+	Temperature int32
+	Unit        string
+	Status      string
+}
+
+func Fetch(address string) ([]disk, error) {
+	var (
+		err    error
+		conn   net.Conn
+		buffer bytes.Buffer
+		disks  []disk
+	)
+
+	if conn, err = net.Dial("tcp", address); err != nil {
+		return nil, err
+	}
+
+	if _, err = io.Copy(&buffer, conn); err != nil {
+		return nil, err
+	}
+
+	fields := strings.Split(buffer.String(), "|")
+
+	for index := 0; index < len(fields)/5; index++ {
+		status := ""
+		offset := index * 5
+		device := fields[offset+1]
+		device = device[strings.LastIndex(device, "/")+1:]
+
+		temperatureField := fields[offset+3]
+		temperature, err := strconv.ParseInt(temperatureField, 10, 32)
+
+		if err != nil {
+			temperature = 0
+			status = temperatureField
+		}
+
+		disks = append(disks, disk{
+			DeviceName:  device,
+			Model:       fields[offset+2],
+			Temperature: int32(temperature),
+			Unit:        fields[offset+4],
+			Status:      status,
+		})
+	}
+
+	return disks, nil
+}
diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go
new file mode 100644
index 000000000..858e91a90
--- /dev/null
+++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go
@@ -0,0 +1,116 @@
+package hddtemp
+
+import (
+	"net"
+	"reflect"
+	"testing"
+)
+
+func TestFetch(t *testing.T) {
+	l := serve(t, []byte("|/dev/sda|foobar|36|C|"))
+	defer l.Close()
+
+	disks, err := Fetch(l.Addr().String())
+
+	if err != nil {
+		t.Error("expecting err to be nil")
+	}
+
+	expected := []disk{
+		{
+			DeviceName:  "sda",
+			Model:       "foobar",
+			Temperature: 36,
+			Unit:        "C",
+		},
+	}
+
+	if !reflect.DeepEqual(expected, disks) {
+		t.Error("disks' slice is different from expected")
+	}
+}
+
+func TestFetchWrongAddress(t *testing.T) {
+	_, err := Fetch("127.0.0.1:1")
+
+	if err == nil {
+		t.Error("expecting err to be non-nil")
+	}
+}
+
+func TestFetchStatus(t *testing.T) {
+	l := serve(t, []byte("|/dev/sda|foobar|SLP|C|"))
+	defer l.Close()
+
+	disks, err := Fetch(l.Addr().String())
+
+	if err != nil {
+		t.Error("expecting err to be nil")
+	}
+
+	expected := []disk{
+		{
+			DeviceName:  "sda",
+			Model:       "foobar",
+			Temperature: 0,
+			Unit:        "C",
+			Status:      "SLP",
+		},
+	}
+
+	if !reflect.DeepEqual(expected, disks) {
+		t.Error("disks' slice is different from expected")
+	}
+}
+
+func TestFetchTwoDisks(t *testing.T) {
+	l := serve(t, []byte("|/dev/hda|ST380011A|46|C||/dev/hdd|ST340016A|SLP|*|"))
+	defer l.Close()
+
+	disks, err := Fetch(l.Addr().String())
+
+	if err != nil {
+		t.Error("expecting err to be nil")
+	}
+
+	expected := []disk{
+		{
+			DeviceName:  "hda",
+			Model:       "ST380011A",
+			Temperature: 46,
+			Unit:        "C",
+		},
+		{
+			DeviceName:  "hdd",
+			Model:       "ST340016A",
+			Temperature: 0,
+			Unit:        "*",
+			Status:      "SLP",
+		},
+	}
+
+	if !reflect.DeepEqual(expected, disks) {
+		t.Error("disks' slice is different from expected")
+	}
+}
+
+func serve(t *testing.T, data []byte) net.Listener {
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	go func(t *testing.T) {
+		conn, err := l.Accept()
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		conn.Write(data)
+		conn.Close()
+	}(t)
+
+	return l
+}
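As the tests above document, the daemon's wire format is a single pipe-delimited dump with five fields per disk: an empty leading field, the device path, the model, the temperature (or a status code such as SLP for a sleeping drive), and the unit. A minimal usage sketch for the package, assuming a local hddtemp daemon on the default 127.0.0.1:7634:

```go
package main

import (
	"fmt"
	"log"

	gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
)

func main() {
	// Fetch dials the daemon, reads the whole pipe-delimited dump, and
	// returns one entry per group of five fields.
	disks, err := gohddtemp.Fetch("127.0.0.1:7634")
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range disks {
		// e.g. {DeviceName:sda Model:foobar Temperature:36 Unit:C Status:}
		fmt.Printf("%+v\n", d)
	}
}
```

Note that Fetch returns a slice of the unexported disk type, so external callers can range over the result but cannot name the element type themselves.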
diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go
new file mode 100644
index 000000000..c1e01c3c6
--- /dev/null
+++ b/plugins/inputs/hddtemp/hddtemp.go
@@ -0,0 +1,74 @@
+// +build linux
+
+package hddtemp
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
+)
+
+const defaultAddress = "127.0.0.1:7634"
+
+type HDDTemp struct {
+	Address string
+	Devices []string
+}
+
+func (_ *HDDTemp) Description() string {
+	return "Monitor disks' temperatures using hddtemp"
+}
+
+var hddtempSampleConfig = `
+  ## By default, telegraf gathers temperature data from all disks detected
+  ## by hddtemp.
+  ##
+  ## Only collect temps from the selected disks.
+  ##
+  ## A * as the device name will return the temperature values of all disks.
+  ##
+  # address = "127.0.0.1:7634"
+  # devices = ["sda", "*"]
+`
+
+func (_ *HDDTemp) SampleConfig() string {
+	return hddtempSampleConfig
+}
+
+func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
+	disks, err := gohddtemp.Fetch(h.Address)
+
+	if err != nil {
+		return err
+	}
+
+	for _, disk := range disks {
+		for _, chosenDevice := range h.Devices {
+			if chosenDevice == "*" || chosenDevice == disk.DeviceName {
+				tags := map[string]string{
+					"device": disk.DeviceName,
+					"model":  disk.Model,
+					"unit":   disk.Unit,
+					"status": disk.Status,
+				}
+
+				fields := map[string]interface{}{
+					disk.DeviceName: disk.Temperature,
+				}
+
+				acc.AddFields("hddtemp", fields, tags)
+			}
+		}
+	}
+
+	return nil
+}
+
+func init() {
+	inputs.Add("hddtemp", func() telegraf.Input {
+		return &HDDTemp{
+			Address: defaultAddress,
+			Devices: []string{"*"},
+		}
+	})
+}
diff --git a/plugins/inputs/hddtemp/hddtemp_nocompile.go b/plugins/inputs/hddtemp/hddtemp_nocompile.go
new file mode 100644
index 000000000..0c5801670
--- /dev/null
+++ b/plugins/inputs/hddtemp/hddtemp_nocompile.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package hddtemp
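One detail of the Gather implementation above worth calling out: device, model, unit, and status become tags, while the single field is keyed by the device name itself, so every disk contributes a differently named field to the hddtemp measurement. A hand-derived illustration of the maps built for the |/dev/sda|foobar|36|C| fixture from the tests (assumed values, not part of the diff):

```go
package main

import "fmt"

func main() {
	tags := map[string]string{
		"device": "sda",
		"model":  "foobar",
		"unit":   "C",
		"status": "", // empty unless the daemon reports e.g. "SLP"
	}
	// The field name is the device name, not a fixed key such as "temperature".
	fields := map[string]interface{}{"sda": int32(36)}
	fmt.Println("hddtemp", tags, fields)
}
```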
diff --git a/plugins/inputs/http_listener/README.md b/plugins/inputs/http_listener/README.md
new file mode 100644
index 000000000..9643f6a2e
--- /dev/null
+++ b/plugins/inputs/http_listener/README.md
@@ -0,0 +1,24 @@
+# HTTP listener service input plugin
+
+The HTTP listener is a service input plugin that listens for messages sent via HTTP POST.
+The plugin expects messages in the InfluxDB line-protocol ONLY; other Telegraf input data formats are not supported.
+The intent of the plugin is to allow Telegraf to serve as a proxy/router for the /write endpoint of the InfluxDB HTTP API.
+When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}`, but they are not relayed. The output configuration of the Telegraf instance that ultimately submits data to InfluxDB determines the destination database.
+
+See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
+
+Example:
+```
+curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
+```
+
+### Configuration:
+
+This is a sample configuration for the plugin.
+
+```toml
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+```
diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go
new file mode 100644
index 000000000..2eeee8e75
--- /dev/null
+++ b/plugins/inputs/http_listener/http_listener.go
@@ -0,0 +1,165 @@
+package http_listener
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"log"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener"
+	"github.com/influxdata/telegraf/plugins/parsers"
+)
+
+type HttpListener struct {
+	ServiceAddress string
+	ReadTimeout    internal.Duration
+	WriteTimeout   internal.Duration
+
+	sync.Mutex
+	wg sync.WaitGroup
+
+	listener *stoppableListener.StoppableListener
+
+	parser parsers.Parser
+	acc    telegraf.Accumulator
+}
+
+const sampleConfig = `
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+`
+
+func (t *HttpListener) SampleConfig() string {
+	return sampleConfig
+}
+
+func (t *HttpListener) Description() string {
+	return "Influx HTTP write listener"
+}
+
+func (t *HttpListener) Gather(_ telegraf.Accumulator) error {
+	return nil
+}
+
+func (t *HttpListener) SetParser(parser parsers.Parser) {
+	t.parser = parser
+}
+
+// Start starts the http listener service.
+func (t *HttpListener) Start(acc telegraf.Accumulator) error {
+	t.Lock()
+	defer t.Unlock()
+
+	t.acc = acc
+
+	var rawListener, err = net.Listen("tcp", t.ServiceAddress)
+	if err != nil {
+		return err
+	}
+	t.listener, err = stoppableListener.New(rawListener)
+	if err != nil {
+		return err
+	}
+
+	go t.httpListen()
+
+	log.Printf("I! Started HTTP listener service on %s\n", t.ServiceAddress)
+
+	return nil
+}
+
+// Stop cleans up all resources
+func (t *HttpListener) Stop() {
+	t.Lock()
+	defer t.Unlock()
+
+	t.listener.Stop()
+	t.listener.Close()
+
+	t.wg.Wait()
+
+	log.Println("I! Stopped HTTP listener service on ", t.ServiceAddress)
+}
+
+// httpListen listens for HTTP requests.
+func (t *HttpListener) httpListen() error {
+	if t.ReadTimeout.Duration < time.Second {
+		t.ReadTimeout.Duration = time.Second * 10
+	}
+	if t.WriteTimeout.Duration < time.Second {
+		t.WriteTimeout.Duration = time.Second * 10
+	}
+
+	var server = http.Server{
+		Handler:      t,
+		ReadTimeout:  t.ReadTimeout.Duration,
+		WriteTimeout: t.WriteTimeout.Duration,
+	}
+
+	return server.Serve(t.listener)
+}
+
+func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
+	t.wg.Add(1)
+	defer t.wg.Done()
+
+	switch req.URL.Path {
+	case "/write":
+		var http400msg bytes.Buffer
+		var partial string
+		scanner := bufio.NewScanner(req.Body)
+		scanner.Buffer([]byte(""), 128*1024)
+		for scanner.Scan() {
+			metrics, err := t.parser.Parse(scanner.Bytes())
+			if err == nil {
+				for _, m := range metrics {
+					t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
+				}
+				partial = "partial write: "
+			} else {
+				http400msg.WriteString(err.Error() + " ")
+			}
+		}
+
+		if err := scanner.Err(); err != nil {
+			http.Error(res, "Internal server error: "+err.Error(), http.StatusInternalServerError)
+		} else if http400msg.Len() > 0 {
+			res.Header().Set("Content-Type", "application/json")
+			res.Header().Set("X-Influxdb-Version", "1.0")
+			res.WriteHeader(http.StatusBadRequest)
+			res.Write([]byte(fmt.Sprintf(`{"error":"%s%s"}`, partial, http400msg.String())))
+		} else {
+			res.WriteHeader(http.StatusNoContent)
+		}
+	case "/query":
+		// Deliver a dummy response to the query endpoint, as some InfluxDB
+		// clients test endpoint availability with a query
+		res.Header().Set("Content-Type", "application/json")
+		res.Header().Set("X-Influxdb-Version", "1.0")
+		res.WriteHeader(http.StatusOK)
+		res.Write([]byte("{\"results\":[]}"))
+	case "/ping":
+		// respond to ping requests
+		res.WriteHeader(http.StatusNoContent)
+	default:
+		// Don't know how to respond to calls to other endpoints
+		http.NotFound(res, req)
+	}
+}
+
+func init() {
+	inputs.Add("http_listener", func() telegraf.Input {
+		return &HttpListener{}
+	})
+}
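The /write handler above scans the request body line by line (accepting lines up to 128 KB), accumulates parse errors into one 400 response, and prepends "partial write: " when at least one line was accepted; an empty or fully parsed body yields 204 No Content, mirroring InfluxDB's /write endpoint. A minimal client sketch, assuming the listener runs on the sample address :8186 (the db query parameter is accepted but never read by the handler):

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(
		"cpu_load_short,host=server01 value=0.64 1434055562000000000\n")
	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "204 No Content" on success
}
```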
diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go
new file mode 100644
index 000000000..ed04cf860
--- /dev/null
+++ b/plugins/inputs/http_listener/http_listener_test.go
@@ -0,0 +1,195 @@
+package http_listener
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/testutil"
+
+	"bytes"
+	"github.com/stretchr/testify/require"
+	"net/http"
+)
+
+const (
+	testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
+
+	testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
+cpu_load_short,host=server03 value=12.0 1422568543702900257
+cpu_load_short,host=server04 value=12.0 1422568543702900257
+cpu_load_short,host=server05 value=12.0 1422568543702900257
+cpu_load_short,host=server06 value=12.0 1422568543702900257
+`
+	badMsg = "blahblahblah: 42\n"
+
+	emptyMsg = ""
+)
+
+func newTestHttpListener() *HttpListener {
+	listener := &HttpListener{
+		ServiceAddress: ":8186",
+	}
+	return listener
+}
+
+func TestWriteHTTP(t *testing.T) {
+	listener := newTestHttpListener()
+	parser, _ := parsers.NewInfluxParser()
+	listener.SetParser(parser)
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	// post single message to listener
+	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsg)))
+	require.NoError(t, err)
+	require.EqualValues(t, 204, resp.StatusCode)
+
+	time.Sleep(time.Millisecond * 15)
+	acc.AssertContainsTaggedFields(t, "cpu_load_short",
+		map[string]interface{}{"value": float64(12)},
+		map[string]string{"host": "server01"},
+	)
+
+	// post multiple message to listener
+	resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs)))
+	require.NoError(t, err)
+	require.EqualValues(t, 204, resp.StatusCode)
+
+	time.Sleep(time.Millisecond * 15)
+	hostTags := []string{"server02", "server03",
+		"server04", "server05", "server06"}
+	for _, hostTag := range hostTags {
+		acc.AssertContainsTaggedFields(t, "cpu_load_short",
+			map[string]interface{}{"value": float64(12)},
+			map[string]string{"host": hostTag},
+		)
+	}
+
+	// Post a gigantic metric to the listener:
+	resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric)))
+	require.NoError(t, err)
+	require.EqualValues(t, 204, resp.StatusCode)
+
+	time.Sleep(time.Millisecond * 15)
+	acc.AssertContainsTaggedFields(t, "cpu_load_short",
+		map[string]interface{}{"value": float64(12)},
+		map[string]string{"host": "server01"},
+	)
+}
+
+// writes 25,000 metrics to the listener with 10 different writers
+func TestWriteHTTPHighTraffic(t *testing.T) {
+	listener := &HttpListener{ServiceAddress: ":8286"}
+	parser, _ := parsers.NewInfluxParser()
+	listener.SetParser(parser)
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	// post many messages to listener
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			for i := 0; i < 500; i++ {
+				resp, err := http.Post("http://localhost:8286/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs)))
+				require.NoError(t, err)
+				require.EqualValues(t, 204, resp.StatusCode)
+			}
+			wg.Done()
+		}()
+	}
+
+	wg.Wait()
+	time.Sleep(time.Millisecond * 50)
+	listener.Gather(acc)
+
+	require.Equal(t, int64(25000), int64(acc.NMetrics()))
+}
+
+func TestReceive404ForInvalidEndpoint(t *testing.T) {
+	listener := newTestHttpListener()
+	listener.parser, _ = parsers.NewInfluxParser()
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	// post single message to listener
+	resp, err := http.Post("http://localhost:8186/foobar", "", bytes.NewBuffer([]byte(testMsg)))
+	require.NoError(t, err)
+	require.EqualValues(t, 404, resp.StatusCode)
+}
+
+func TestWriteHTTPInvalid(t *testing.T) {
+	time.Sleep(time.Millisecond * 250)
+
+	listener := newTestHttpListener()
+	listener.parser, _ = parsers.NewInfluxParser()
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	// post single message to listener
+	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(badMsg)))
+	require.NoError(t, err)
+	require.EqualValues(t, 400, resp.StatusCode)
+}
+
+func TestWriteHTTPEmpty(t *testing.T) {
+	time.Sleep(time.Millisecond * 250)
+
+	listener := newTestHttpListener()
+	listener.parser, _ = parsers.NewInfluxParser()
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	// post single message to listener
+	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(emptyMsg)))
+	require.NoError(t, err)
+	require.EqualValues(t, 204,
resp.StatusCode) +} + +func TestQueryAndPingHTTP(t *testing.T) { + time.Sleep(time.Millisecond * 250) + + listener := newTestHttpListener() + listener.parser, _ = parsers.NewInfluxParser() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + time.Sleep(time.Millisecond * 25) + + // post query to listener + resp, err := http.Post("http://localhost:8186/query?db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22", "", nil) + require.NoError(t, err) + require.EqualValues(t, 200, resp.StatusCode) + + // post ping to listener + resp, err = http.Post("http://localhost:8186/ping", "", nil) + require.NoError(t, err) + require.EqualValues(t, 204, resp.StatusCode) +} + +const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,late
st_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_pa
rtial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss
=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pub
sub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cp
u_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_op
s_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histle
n=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory
=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,m
aster_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total
_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients
=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,r
db_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_c
pu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keys
pace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog
_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_mem
ory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset
=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_process
ed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_sla
ves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_
last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i +` diff --git a/plugins/inputs/http_listener/stoppableListener/LICENSE b/plugins/inputs/http_listener/stoppableListener/LICENSE new file mode 100644 index 000000000..eb0782451 --- /dev/null +++ b/plugins/inputs/http_listener/stoppableListener/LICENSE @@ -0,0 +1,10 @@ +Copyright (c) 2014, Eric Urban +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
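The next file adds the vendored `stoppableListener` package that the LICENSE above covers. To make its Accept/Stop protocol concrete, here is a minimal usage sketch; it assumes only the import path added in this diff plus the Go standard library, and is illustrative rather than code from this PR:

```go
package main

import (
	"log"
	"net"
	"net/http"
	"time"

	// Vendored package added in the diff that follows.
	"github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener"
)

func main() {
	// "tcp" yields a *net.TCPListener, which is the concrete type
	// stoppableListener.New expects to wrap.
	raw, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	sl, err := stoppableListener.New(raw)
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})}

	// Stop closes the internal channel; Accept observes that within its
	// one-second deadline loop and returns StoppedError, unblocking Serve.
	go func() {
		time.Sleep(100 * time.Millisecond)
		sl.Stop()
	}()

	if err := srv.Serve(sl); err == stoppableListener.StoppedError {
		log.Println("listener stopped cleanly")
	} else {
		log.Println("serve ended:", err)
	}
}
```

Note the design choice: Stop never interrupts a blocked Accept directly; the one-second accept deadline bounds how long a shutdown can take.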
diff --git a/plugins/inputs/http_listener/stoppableListener/listener.go b/plugins/inputs/http_listener/stoppableListener/listener.go new file mode 100644 index 000000000..69a9f33cc --- /dev/null +++ b/plugins/inputs/http_listener/stoppableListener/listener.go @@ -0,0 +1,62 @@ +package stoppableListener + +import ( + "errors" + "net" + "time" +) + +type StoppableListener struct { + *net.TCPListener //Wrapped listener + stop chan int //Channel used only to indicate listener should shutdown +} + +func New(l net.Listener) (*StoppableListener, error) { + tcpL, ok := l.(*net.TCPListener) + + if !ok { + return nil, errors.New("Cannot wrap listener") + } + + retval := &StoppableListener{} + retval.TCPListener = tcpL + retval.stop = make(chan int) + + return retval, nil +} + +var StoppedError = errors.New("Listener stopped") + +func (sl *StoppableListener) Accept() (net.Conn, error) { + + for { + //Wait up to one second for a new connection + sl.SetDeadline(time.Now().Add(time.Second)) + + newConn, err := sl.TCPListener.Accept() + + //Check for the channel being closed + select { + case <-sl.stop: + return nil, StoppedError + default: + //If the channel is still open, continue as normal + } + + if err != nil { + netErr, ok := err.(net.Error) + + //If this is a timeout, then continue to wait for + //new connections + if ok && netErr.Timeout() && netErr.Temporary() { + continue + } + } + + return newConn, err + } +} + +func (sl *StoppableListener) Stop() { + close(sl.stop) +} diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index 707b256df..81680e6ec 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -2,8 +2,7 @@ The httpjson plugin can collect data from remote URLs which respond with JSON. Then it flattens JSON and finds all numeric values, treating them as floats. -For example, if you have a service called _mycollector_, which has HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON -plugin like this: +For example, if you have a service called _mycollector_, which has an HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON plugin like this: ``` [[inputs.httpjson]] @@ -15,12 +14,17 @@ plugin like this: # HTTP method to use (case-sensitive) method = "GET" + + # Set response_timeout (default 5 seconds) + response_timeout = "5s" ``` `name` is used as a prefix for the measurements. `method` specifies HTTP method to use for requests. +`response_timeout` specifies the timeout to wait for the response. + You can also specify which keys from server response should be considered tags: ``` @@ -94,8 +98,7 @@ httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stat # Example 2, Multiple Services: -There is also the option to collect JSON from multiple services, here is an -example doing that. +There is also the option to collect JSON from multiple services; here is an example of doing that.
``` [[inputs.httpjson]] diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index 6fe4da1e5..89bfccf77 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -16,13 +16,15 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" ) +// HttpJson is the configuration struct for the httpjson input plugin type HttpJson struct { - Name string - Servers []string - Method string - TagKeys []string - Parameters map[string]string - Headers map[string]string + Name string + Servers []string + Method string + TagKeys []string + ResponseTimeout internal.Duration + Parameters map[string]string + Headers map[string]string // Path to CA file SSLCA string `toml:"ssl_ca"` @@ -79,6 +81,8 @@ var sampleConfig = ` "http://localhost:9999/stats/", "http://localhost:9998/stats/", ] + ## Set response_timeout (default 5 seconds) + response_timeout = "5s" ## HTTP method to use: GET or POST (case-sensitive) method = "GET" @@ -126,12 +130,12 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: h.ResponseTimeout.Duration, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: h.ResponseTimeout.Duration, } h.client.SetHTTPClient(client) } @@ -291,6 +295,9 @@ func init() { inputs.Add("httpjson", func() telegraf.Input { return &HttpJson{ client: &RealHTTPClient{}, + ResponseTimeout: internal.Duration{ + Duration: 5 * time.Second, + }, } }) } diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index 974a1b9e7..bb11cfee4 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -10,11 +10,16 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) type InfluxDB struct { URLs []string `toml:"urls"` + + Timeout internal.Duration + + client *http.Client } func (*InfluxDB) Description() string { @@ -32,6 +37,9 @@ func (*InfluxDB) SampleConfig() string { urls = [ "http://localhost:8086/debug/vars" ] + + ## http request & header timeout + timeout = "5s" ` } @@ -39,6 +47,16 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { if len(i.URLs) == 0 { i.URLs = []string{"http://localhost:8086/debug/vars"} } + + if i.client == nil { + i.client = &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: i.Timeout.Duration, + }, + Timeout: i.Timeout.Duration, + } + } + errorChannel := make(chan error, len(i.URLs)) var wg sync.WaitGroup @@ -104,15 +122,6 @@ type memstats struct { GCCPUFraction float64 `json:"GCCPUFraction"` } -var tr = &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), -} - -var client = &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), -} - // Gathers data from a particular URL // Parameters: // acc : The telegraf Accumulator to use @@ -127,7 +136,7 @@ func (i *InfluxDB) gatherURL( shardCounter := 0 now := time.Now() - resp, err := client.Get(url) + resp, err := i.client.Get(url) if err != nil { return err } @@ -210,9 +219,13 @@ func (i *InfluxDB) gatherURL( continue } + if p.Tags == nil { + p.Tags = make(map[string]string) + } + // If the object was a point, but was not fully initialized, // ignore it and move on.
- if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 { + if p.Name == "" || p.Values == nil || len(p.Values) == 0 { continue } @@ -244,6 +257,8 @@ func (i *InfluxDB) gatherURL( func init() { inputs.Add("influxdb", func() telegraf.Input { - return &InfluxDB{} + return &InfluxDB{ + Timeout: internal.Duration{Duration: time.Second * 5}, + } }) } diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 517661e4a..09707a548 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -116,6 +116,31 @@ func TestInfluxDB(t *testing.T) { }, map[string]string{}) } +func TestInfluxDB2(t *testing.T) { + fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/endpoint" { + _, _ = w.Write([]byte(influxReturn2)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer fakeInfluxServer.Close() + + plugin := &influxdb.InfluxDB{ + URLs: []string{fakeInfluxServer.URL + "/endpoint"}, + } + + var acc testutil.Accumulator + require.NoError(t, plugin.Gather(&acc)) + + require.Len(t, acc.Metrics, 34) + + acc.AssertContainsTaggedFields(t, "influxdb", + map[string]interface{}{ + "n_shards": 1, + }, map[string]string{}) +} + func TestErrorHandling(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { @@ -241,3 +266,49 @@ const influxReturn = ` "tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": "default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}}, "write": {"name": "write", "tags": {}, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}} }` + +// InfluxDB 1.0+ with tags: null instead of tags: {}.
+const influxReturn2 = ` +{ +"cluster": {"name": "cluster", "tags": null, "values": {}}, +"cmdline": ["influxd"], +"cq": {"name": "cq", "tags": null, "values": {}}, +"database:_internal": {"name": "database", "tags": {"database": "_internal"}, "values": {"numMeasurements": 8, "numSeries": 12}}, +"database:udp": {"name": "database", "tags": {"database": "udp"}, "values": {"numMeasurements": 14, "numSeries": 38}}, +"hh:/Users/csparr/.influxdb/hh": {"name": "hh", "tags": {"path": "/Users/csparr/.influxdb/hh"}, "values": {}}, +"httpd::8086": {"name": "httpd", "tags": {"bind": ":8086"}, "values": {"req": 7, "reqActive": 1, "reqDurationNs": 4488799}}, +"measurement:cpu_idle.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_idle"}, "values": {"numSeries": 1}}, +"measurement:cpu_usage.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_usage"}, "values": {"numSeries": 1}}, +"measurement:database._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "database"}, "values": {"numSeries": 2}}, +"measurement:database.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "database"}, "values": {"numSeries": 2}}, +"measurement:httpd.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "httpd"}, "values": {"numSeries": 1}}, +"measurement:measurement.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "measurement"}, "values": {"numSeries": 22}}, +"measurement:mem.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "mem"}, "values": {"numSeries": 1}}, +"measurement:net.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "net"}, "values": {"numSeries": 1}}, +"measurement:runtime._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "runtime"}, "values": {"numSeries": 1}}, +"measurement:runtime.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "runtime"}, "values": {"numSeries": 1}}, +"measurement:shard._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "shard"}, "values": {"numSeries": 2}}, +"measurement:shard.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "shard"}, "values": {"numSeries": 1}}, +"measurement:subscriber._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "subscriber"}, "values": {"numSeries": 1}}, +"measurement:subscriber.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "subscriber"}, "values": {"numSeries": 1}}, +"measurement:swap_used.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "swap_used"}, "values": {"numSeries": 1}}, +"measurement:tsm1_cache._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}}, +"measurement:tsm1_cache.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}}, +"measurement:tsm1_wal._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}}, +"measurement:tsm1_wal.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}}, +"measurement:udp._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "udp"}, "values": {"numSeries": 1}}, +"measurement:write._internal": {"name": "measurement", "tags": {"database": 
"_internal", "measurement": "write"}, "values": {"numSeries": 1}}, +"measurement:write.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "write"}, "values": {"numSeries": 1}}, +"memstats": {"Alloc":17034016,"TotalAlloc":201739016,"Sys":38537464,"Lookups":77,"Mallocs":570251,"Frees":381008,"HeapAlloc":17034016,"HeapSys":33849344,"HeapIdle":15802368,"HeapInuse":18046976,"HeapReleased":3473408,"HeapObjects":189243,"StackInuse":753664,"StackSys":753664,"MSpanInuse":97440,"MSpanSys":114688,"MCacheInuse":4800,"MCacheSys":16384,"BuckHashSys":1461583,"GCSys":1112064,"OtherSys":1229737,"NextGC":20843042,"LastGC":1460434886475114239,"PauseTotalNs":5132914,"PauseNs":[195052,117751,139370,156933,263089,165249,713747,103904,122015,294408,213753,170864,175845,114221,121563,122409,113098,162219,229257,126726,250774,254235,117206,293588,144279,124306,127053,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"PauseEnd":[1460433856394860455,1460433856398162739,1460433856405888337,1460433856411784017,1460433856417924684,1460433856428385687,1460433856443782908,1460433856456522851,1460433857392743223,1460433866484394564,1460433866494076235,1460433896472438632,1460433957839825106,1460433976473440328,1460434016473413006,1460434096471892794,1460434126470792929,1460434246480428250,1460434366554468369,1460434396471249528,1460434456471205885,1460434476479487292,1460434536471435965,1460434616469784776,1460434736482078216,1460434856544251733,1460434886475114239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"NumGC":27,"GCCPUFraction":4.287178819113636e-05,"EnableGC":true,"DebugGC":false,"BySize":[{"Size":0,"Mallocs":0,"Frees":0},{"Size":8,"Mallocs":1031,"Frees":955},{"Size":16,"Mallocs":308485,"Frees":142064},{"Size":32,"Mallocs":64937,"Frees":54321},{"Size":48,"Mallocs":33012,"Frees":29754},{"Size":64,"Mallocs":20299,"Frees":18173},{"Size":80,"Mallocs":8186,"Frees":7597},{"Size":96,"Mallocs":9806,"Frees":8982},{"Size":112,"Mallocs":5671,"Frees":4850},{"Size":128,"Mallocs":2972,"Frees":2684},{"Size":144,"Mallocs":4106,"Frees":3719},{"Size":160,"Mallocs":1324,"Frees":911},{"Size":176,"Mallocs":2574,"Frees":2391},{"Size":192,"Mallocs":4053,"Frees":3863},{"Size":208,"Mallocs":442,"Frees":307},{"Size":224,"Mallocs":336,"Frees":172},{"Size":240,"Mallocs":143,"Frees":125},{"Size":256,"Mallocs":542,"Frees":497},{"Size":288,"Mallocs":15971,"Frees":14761},{"Size":320,"Mallocs":245,"Frees":30},{"Size":352,"Mallocs":1299,"Frees":1065},{"Size":384,"Mallocs":138,"Frees":2},{"Size":416,"Mallocs":54,"Frees":47},{"Size":448,"Mallocs":75,"Frees":29},{"Size":480,"Mallocs":6,"Frees":4},{"Size":512,"Mallocs":452,"Frees":422},{"Size":576,"Mallocs":486,"Frees":395},{"Size":640,"Mallocs":81,"Frees":67},{"Size":704,
"Mallocs":421,"Frees":397},{"Size":768,"Mallocs":469,"Frees":468},{"Size":896,"Mallocs":1049,"Frees":1010},{"Size":1024,"Mallocs":1078,"Frees":960},{"Size":1152,"Mallocs":750,"Frees":498},{"Size":1280,"Mallocs":84,"Frees":72},{"Size":1408,"Mallocs":218,"Frees":187},{"Size":1536,"Mallocs":73,"Frees":48},{"Size":1664,"Mallocs":43,"Frees":30},{"Size":2048,"Mallocs":153,"Frees":57},{"Size":2304,"Mallocs":41,"Frees":30},{"Size":2560,"Mallocs":18,"Frees":15},{"Size":2816,"Mallocs":164,"Frees":157},{"Size":3072,"Mallocs":0,"Frees":0},{"Size":3328,"Mallocs":13,"Frees":6},{"Size":4096,"Mallocs":101,"Frees":82},{"Size":4608,"Mallocs":32,"Frees":26},{"Size":5376,"Mallocs":165,"Frees":151},{"Size":6144,"Mallocs":15,"Frees":9},{"Size":6400,"Mallocs":1,"Frees":1},{"Size":6656,"Mallocs":1,"Frees":0},{"Size":6912,"Mallocs":0,"Frees":0},{"Size":8192,"Mallocs":13,"Frees":13},{"Size":8448,"Mallocs":0,"Frees":0},{"Size":8704,"Mallocs":1,"Frees":1},{"Size":9472,"Mallocs":6,"Frees":4},{"Size":10496,"Mallocs":0,"Frees":0},{"Size":12288,"Mallocs":41,"Frees":35},{"Size":13568,"Mallocs":0,"Frees":0},{"Size":14080,"Mallocs":0,"Frees":0},{"Size":16384,"Mallocs":4,"Frees":4},{"Size":16640,"Mallocs":0,"Frees":0},{"Size":17664,"Mallocs":0,"Frees":0}]}, +"queryExecutor": {"name": "queryExecutor", "tags": null, "values": {}}, +"shard:/Users/csparr/.influxdb/data/_internal/monitor/2:2": {"name": "shard", "tags": {"database": "_internal", "engine": "tsm1", "id": "2", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}}, +"shard:/Users/csparr/.influxdb/data/udp/default/1:1": {"name": "shard", "tags": {"database": "udp", "engine": "tsm1", "id": "1", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"fieldsCreate": 61, "seriesCreate": 33, "writePointsOk": 3613, "writeReq": 110}}, +"subscriber": {"name": "subscriber", "tags": null, "values": {"pointsWritten": 3613}}, +"tsm1_cache:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_cache", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103932, "cachedBytes": 0, "diskBytes": 0, "memBytes": 40480, "snapshotCount": 0}}, +"tsm1_cache:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_cache", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103029, "cachedBytes": 0, "diskBytes": 0, "memBytes": 2359472, "snapshotCount": 0}}, +"tsm1_filestore:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_filestore", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}}, +"tsm1_filestore:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_filestore", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {}}, +"tsm1_wal:/Users/csparr/.influxdb/wal/_internal/monitor/2": {"name": "tsm1_wal", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/wal/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"currentSegmentDiskBytes": 0, "oldSegmentsDiskBytes": 69532}}, +"tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": 
"default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}}, +"write": {"name": "write", "tags": null, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}} +}` diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md new file mode 100644 index 000000000..f5ebd4780 --- /dev/null +++ b/plugins/inputs/iptables/README.md @@ -0,0 +1,74 @@ +# Iptables Plugin + +The iptables plugin gathers packets and bytes counters for rules within a set of table and chain from the Linux's iptables firewall. + +Rules are identified through associated comment. Rules without comment are ignored. + +The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You have several options to grant telegraf to run iptables: + +* Run telegraf as root. This is strongly discouraged. +* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is the simplest and recommended option. +* Configure sudo to grant telegraf to run iptables. This is the most restrictive option, but require sudo setup. + +### Using systemd capabilities + +You may run `systemctl edit telegraf.service` and add the following: + +``` +[Service] +CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN +AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN +``` + +Since telegraf will fork a process to run iptables, `AmbientCapabilities` is required to transmit the capabilities bounding set to the forked process. + +### Using sudo + +You may edit your sudo configuration with the following: + +```sudo +telegraf ALL=(root) NOPASSWD: /usr/bin/iptables -nvL * +``` + +### Configuration: + +```toml + # use sudo to run iptables + use_sudo = false + # defines the table to monitor: + table = "filter" + # defines the chains to monitor: + chains = [ "INPUT" ] +``` + +### Measurements & Fields: + + +- iptables + - pkts (integer, count) + - bytes (integer, bytes) + +### Tags: + +- All measurements have the following tags: + - table + - chain + - ruleid + +The `ruleid` is the comment associated to the rule. + +### Example Output: + +``` +$ iptables -nvL INPUT +Chain INPUT (policy DROP 0 packets, 0 bytes) +pkts bytes target prot opt in out source destination +100 1024 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:22 /* ssh */ + 42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */ +``` + +``` +$ ./telegraf -config telegraf.conf -input-filter iptables -test +iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455 +iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455 +``` diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go new file mode 100644 index 000000000..4ceb45230 --- /dev/null +++ b/plugins/inputs/iptables/iptables.go @@ -0,0 +1,128 @@ +// +build linux + +package iptables + +import ( + "errors" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Iptables is a telegraf plugin to gather packets and bytes throughput from Linux's iptables packet filter. +type Iptables struct { + UseSudo bool + Table string + Chains []string + lister chainLister +} + +// Description returns a short description of the plugin. +func (ipt *Iptables) Description() string { + return "Gather packets and bytes throughput from iptables" +} + +// SampleConfig returns sample configuration options. 
+func (ipt *Iptables) SampleConfig() string { + return ` + ## iptables requires root access on most systems. + ## Setting 'use_sudo' to true will make use of sudo to run iptables. + ## Users must configure sudo to allow the telegraf user to run iptables with no password. + ## iptables can be restricted to only the list command "iptables -nvL" + use_sudo = false + ## defines the table to monitor: + table = "filter" + ## defines the chains to monitor: + chains = [ "INPUT" ] +` +} + +// Gather gathers iptables packet and byte throughput from the configured tables and chains. +func (ipt *Iptables) Gather(acc telegraf.Accumulator) error { + if ipt.Table == "" || len(ipt.Chains) == 0 { + return nil + } + // best effort: we continue through the chains even if an error is encountered, + // but we keep track of the last error. + var err error + for _, chain := range ipt.Chains { + data, e := ipt.lister(ipt.Table, chain) + if e != nil { + err = e + continue + } + e = ipt.parseAndGather(data, acc) + if e != nil { + err = e + continue + } + } + return err +} + +func (ipt *Iptables) chainList(table, chain string) (string, error) { + iptablePath, err := exec.LookPath("iptables") + if err != nil { + return "", err + } + var args []string + name := iptablePath + if ipt.UseSudo { + name = "sudo" + args = append(args, iptablePath) + } + args = append(args, "-nvL", chain, "-t", table, "-x") + c := exec.Command(name, args...) + out, err := c.Output() + return string(out), err +} + +const measurement = "iptables" + +var errParse = errors.New("Cannot parse iptables list information") +var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`) +var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`) +var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`) + +func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error { + lines := strings.Split(data, "\n") + if len(lines) < 3 { + return nil + } + mchain := chainNameRe.FindStringSubmatch(lines[0]) + if mchain == nil { + return errParse + } + if !fieldsHeaderRe.MatchString(lines[1]) { + return errParse + } + for _, line := range lines[2:] { + mv := valuesRe.FindAllStringSubmatch(line, -1) + // best effort: if the line does not match or the rule is not commented, forget about it + if len(mv) == 0 || len(mv[0]) != 5 || mv[0][4] == "" { + continue + } + tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": mv[0][4]} + fields := make(map[string]interface{}) + // since a parse error is already caught by the regexp, + // we never enter the error case here => no error check (but still need a test to cover the case) + fields["pkts"], _ = strconv.ParseUint(mv[0][1], 10, 64) + fields["bytes"], _ = strconv.ParseUint(mv[0][2], 10, 64) + acc.AddFields(measurement, fields, tags) + } + return nil +} + +type chainLister func(table, chain string) (string, error) + +func init() { + inputs.Add("iptables", func() telegraf.Input { + ipt := new(Iptables) + ipt.lister = ipt.chainList + return ipt + }) +} diff --git a/plugins/inputs/iptables/iptables_nocompile.go b/plugins/inputs/iptables/iptables_nocompile.go new file mode 100644 index 000000000..f71b4208e --- /dev/null +++ b/plugins/inputs/iptables/iptables_nocompile.go @@ -0,0 +1,3 @@ +// +build !linux + +package iptables diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go new file mode 100644 index 000000000..bd8a2a726 --- /dev/null +++ b/plugins/inputs/iptables/iptables_test.go @@ -0,0 +1,206 @@ +// +build linux + +package 
iptables + +import ( + "errors" + "reflect" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +func TestIptables_Gather(t *testing.T) { + tests := []struct { + table string + chains []string + values []string + tags []map[string]string + fields [][]map[string]interface{} + err error + }{ + { // 1 - no configured table => no results + values: []string{ + `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 + `}, + }, + { // 2 - no configured chains => no results + table: "filter", + values: []string{ + `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 + `}, + }, + { // 3 - pkts and bytes are gathered as integers + table: "filter", + chains: []string{"INPUT"}, + values: []string{ + `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */ + `}, + tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}}, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, + }, + }, + { // 4 - missing fields header => no results + table: "filter", + chains: []string{"INPUT"}, + values: []string{`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)`}, + }, + { // 5 - invalid chain header => error + table: "filter", + chains: []string{"INPUT"}, + values: []string{ + `INPUT (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 + `}, + err: errParse, + }, + { // 6 - invalid fields header => error + table: "filter", + chains: []string{"INPUT"}, + values: []string{ + `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) + + 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 + `}, + err: errParse, + }, + { // 7 - invalid integer value => best effort, no error + table: "filter", + chains: []string{"INPUT"}, + values: []string{ + `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 + `}, + }, + { // 8 - Multiple rows, multiple chains => no error + table: "filter", + chains: []string{"INPUT", "FORWARD"}, + values: []string{ + `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + 100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 + 200 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foo */ + `, + `Chain FORWARD (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + 300 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* bar */ + 400 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 + 500 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */ + `, + }, + tags: []map[string]string{ + map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"}, + map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"}, + map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"}, + }, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}}, + {map[string]interface{}{"pkts": uint64(300), "bytes": uint64(4520)}}, + {map[string]interface{}{"pkts": uint64(500), "bytes": uint64(4520)}}, + }, + }, + { // 9 - comments are used as ruleid if any + table: "filter", + chains: 
[]string{"INPUT"}, + values: []string{ + `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) + pkts bytes target prot opt in out source destination + 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22 /* foobar */ + 100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 + `}, + tags: []map[string]string{ + map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}, + }, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, + }, + }, + } + + for i, tt := range tests { + i++ + ipt := &Iptables{ + Table: tt.table, + Chains: tt.chains, + lister: func(table, chain string) (string, error) { + if len(tt.values) > 0 { + v := tt.values[0] + tt.values = tt.values[1:] + return v, nil + } + return "", nil + }, + } + acc := new(testutil.Accumulator) + err := ipt.Gather(acc) + if !reflect.DeepEqual(tt.err, err) { + t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err) + } + if tt.table == "" { + n := acc.NFields() + if n != 0 { + t.Errorf("%d: expected 0 fields if empty table got %d", i, n) + } + continue + } + if len(tt.chains) == 0 { + n := acc.NFields() + if n != 0 { + t.Errorf("%d: expected 0 fields if empty chains got %d", i, n) + } + continue + } + if len(tt.tags) == 0 { + n := acc.NFields() + if n != 0 { + t.Errorf("%d: expected 0 values got %d", i, n) + } + continue + } + n := 0 + for j, tags := range tt.tags { + for k, fields := range tt.fields[j] { + if len(acc.Metrics) < n+1 { + t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics)) + break + } + m := acc.Metrics[n] + if !reflect.DeepEqual(m.Measurement, measurement) { + t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement) + } + if !reflect.DeepEqual(m.Tags, tags) { + t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags) + } + if !reflect.DeepEqual(m.Fields, fields) { + t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields) + } + n++ + } + } + } +} + +func TestIptables_Gather_listerError(t *testing.T) { + errFoo := errors.New("error foobar") + ipt := &Iptables{ + Table: "nat", + Chains: []string{"foo", "bar"}, + lister: func(table, chain string) (string, error) { + return "", errFoo + }, + } + acc := new(testutil.Accumulator) + err := ipt.Gather(acc) + if !reflect.DeepEqual(err, errFoo) { + t.Errorf("Expected error %#v got\n%#v\n", errFoo, err) + } +} diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 53bb65fd0..2cc0d6422 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -52,6 +52,7 @@ type Jolokia struct { const sampleConfig = ` ## This is the context root used to compose the jolokia url + ## NOTE that your jolokia security policy must allow for POST requests. 
context = "/jolokia" ## This specifies the mode used @@ -104,7 +105,6 @@ func (j *Jolokia) Description() string { } func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) { - resp, err := j.jClient.MakeRequest(req) if err != nil { return nil, err diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 5600d82a4..52117759d 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -90,7 +90,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { case "newest": config.Offsets.Initial = sarama.OffsetNewest default: - log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", + log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", k.Offset) config.Offsets.Initial = sarama.OffsetOldest } @@ -115,7 +115,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { // Start the kafka message reader go k.receiver() - log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n", + log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n", k.ZookeeperPeers, k.Topics) return nil } @@ -128,11 +128,13 @@ func (k *Kafka) receiver() { case <-k.done: return case err := <-k.errs: - log.Printf("Kafka Consumer Error: %s\n", err.Error()) + if err != nil { + log.Printf("E! Kafka Consumer Error: %s\n", err) + } case msg := <-k.in: metrics, err := k.parser.Parse(msg.Value) if err != nil { - log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s", + log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s", string(msg.Value), err.Error()) } @@ -156,7 +158,7 @@ func (k *Kafka) Stop() { defer k.Unlock() close(k.done) if err := k.Consumer.Close(); err != nil { - log.Printf("Error closing kafka consumer: %s\n", err.Error()) + log.Printf("E! 
Error closing kafka consumer: %s\n", err.Error()) } } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index e631f6708..609dc6a37 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -43,7 +43,7 @@ func TestRunParser(t *testing.T) { k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(testMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 5) assert.Equal(t, acc.NFields(), 1) } @@ -58,7 +58,7 @@ func TestRunParserInvalidMsg(t *testing.T) { k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(invalidMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 5) assert.Equal(t, acc.NFields(), 0) } @@ -73,7 +73,7 @@ func TestRunParserAndGather(t *testing.T) { k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(testMsg) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 5) k.Gather(&acc) @@ -92,7 +92,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) { k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) go k.receiver() in <- saramaMsg(testMsgGraphite) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 5) k.Gather(&acc) @@ -111,7 +111,7 @@ func TestRunParserAndGatherJSON(t *testing.T) { k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) go k.receiver() in <- saramaMsg(testMsgJSON) - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond * 5) k.Gather(&acc) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 64e8909f5..1affcd811 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -14,17 +14,22 @@ regex patterns. ## /var/log/**.log -> recursively find all .log files in /var/log ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log ## /var/log/apache.log -> only tail the apache log file - files = ["/var/log/influxdb/influxdb.log"] + files = ["/var/log/apache/access.log"] ## Read file from beginning. from_beginning = false ## Parse logstash-style "grok" patterns: - ## Telegraf builtin parsing patterns: https://goo.gl/dkay10 + ## Telegraf built-in parsing patterns: https://goo.gl/dkay10 [inputs.logparser.grok] ## This is a list of patterns to check the given log file(s) for. ## Note that adding patterns here increases processing time. The most - ## efficient configuration is to have one file & pattern per logparser. - patterns = ["%{INFLUXDB_HTTPD_LOG}"] + ## efficient configuration is to have one pattern per logparser. + ## Other common built-in patterns are: + ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) + ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) + patterns = ["%{COMBINED_LOG_FORMAT}"] + ## Name of the outputted measurement name. + measurement = "apache_access_log" ## Full path(s) to custom pattern files. custom_pattern_files = [] ## Custom patterns can also be defined here. Put one pattern per line. @@ -32,8 +37,6 @@ regex patterns. ''' ``` -> **Note:** The InfluxDB log pattern in the default configuration only works for Influx versions 1.0.0-beta1 or higher. 
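Before the grok parser internals below, here is a minimal sketch of driving the parser directly, mirroring the exported API exercised in grok_test.go later in this diff. The pattern and log line are illustrative only, and the example assumes this branch's `plugins/inputs/logparser/grok` package:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/plugins/inputs/logparser/grok"
)

func main() {
	// One pattern per parser is the most efficient configuration.
	// The generic "ts" modifier (new in this patch) tries every known
	// timestamp layout and caches whichever layout matches first.
	p := &grok.Parser{
		Patterns: []string{`\[%{HTTPDATE:ts:ts}\] %{NUMBER:bytes:int}`},
	}
	if err := p.Compile(); err != nil {
		log.Fatal(err)
	}
	m, err := p.ParseLine(`[09/Jun/2016:03:37:03 +0000] 4520`)
	if err != nil || m == nil {
		log.Fatal("no match: ", err)
	}
	// Prints the typed fields and the timestamp parsed from the line.
	fmt.Println(m.Fields(), m.Time())
}
```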
- ## Grok Parser The grok parser uses a slightly modified version of logstash "grok" patterns, @@ -69,6 +72,7 @@ Timestamp modifiers can be used to convert captures to the timestamp of the - tag (converts the field into a tag) - drop (drops the field completely) - Timestamp modifiers: + - ts (This will auto-learn the timestamp format) - ts-ansic ("Mon Jan _2 15:04:05 2006") - ts-unix ("Mon Jan _2 15:04:05 MST 2006") - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") diff --git a/plugins/inputs/logparser/grok/grok.go b/plugins/inputs/logparser/grok/grok.go index d8691d7b9..b2cabe642 100644 --- a/plugins/inputs/logparser/grok/grok.go +++ b/plugins/inputs/logparser/grok/grok.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" ) -var timeFormats = map[string]string{ +var timeLayouts = map[string]string{ "ts-ansic": "Mon Jan _2 15:04:05 2006", "ts-unix": "Mon Jan _2 15:04:05 MST 2006", "ts-ruby": "Mon Jan 02 15:04:05 -0700 2006", @@ -27,27 +27,33 @@ var timeFormats = map[string]string{ "ts-rfc3339": "2006-01-02T15:04:05Z07:00", "ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00", "ts-httpd": "02/Jan/2006:15:04:05 -0700", - "ts-epoch": "EPOCH", - "ts-epochnano": "EPOCH_NANO", + // These three are not exactly "layouts", but they are special cases that + // will get handled in the ParseLine function. + "ts-epoch": "EPOCH", + "ts-epochnano": "EPOCH_NANO", + "ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts. } const ( - INT = "int" - TAG = "tag" - FLOAT = "float" - STRING = "string" - DURATION = "duration" - DROP = "drop" + INT = "int" + TAG = "tag" + FLOAT = "float" + STRING = "string" + DURATION = "duration" + DROP = "drop" + EPOCH = "EPOCH" + EPOCH_NANO = "EPOCH_NANO" + GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP" ) var ( - // matches named captures that contain a type. + // matches named captures that contain a modifier. // ie, // %{NUMBER:bytes:int} // %{IPORHOST:clientip:tag} // %{HTTPDATE:ts1:ts-http} // %{HTTPDATE:ts2:ts-"02 Jan 06 15:04"} - typedRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`) + modifierRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`) // matches a plain pattern name. ie, %{NUMBER} patternOnlyRe = regexp.MustCompile(`%{(\w+)}`) ) @@ -87,6 +93,12 @@ type Parser struct { // "RESPONSE_CODE": "%{NUMBER:rc:tag}" // } patterns map[string]string + // foundTsLayouts is a slice of timestamp patterns that have been found + // in the log lines. This slice gets updated if the user uses the generic + // 'ts' modifier for timestamps. This slice is checked first for matches, + // so that previously-matched layouts get priority over all other timestamp + // layouts. + foundTsLayouts []string g *grok.Grok tsModder *tsModder @@ -140,6 +152,7 @@ func (p *Parser) Compile() error { func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { var err error + // values are the parsed fields from the log line var values map[string]string // the matching pattern string var patternName string @@ -165,6 +178,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { continue } + // t is the modifier of the field var t string // check if pattern has some modifiers if types, ok := p.typeMap[patternName]; ok { @@ -188,21 +202,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case INT: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("ERROR parsing %s to int: %s", v, err) + log.Printf("E! 
Error parsing %s to int: %s", v, err) } else { fields[k] = iv } case FLOAT: fv, err := strconv.ParseFloat(v, 64) if err != nil { - log.Printf("ERROR parsing %s to float: %s", v, err) + log.Printf("E! Error parsing %s to float: %s", v, err) } else { fields[k] = fv } case DURATION: d, err := time.ParseDuration(v) if err != nil { - log.Printf("ERROR parsing %s to duration: %s", v, err) + log.Printf("E! Error parsing %s to duration: %s", v, err) } else { fields[k] = int64(d) } @@ -210,20 +224,50 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { tags[k] = v case STRING: fields[k] = strings.Trim(v, `"`) - case "EPOCH": + case EPOCH: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("ERROR parsing %s to int: %s", v, err) + log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(iv, 0) } - case "EPOCH_NANO": + case EPOCH_NANO: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("ERROR parsing %s to int: %s", v, err) + log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, iv) } + case GENERIC_TIMESTAMP: + var foundTs bool + // first try timestamp layouts that we've already found + for _, layout := range p.foundTsLayouts { + ts, err := time.Parse(layout, v) + if err == nil { + timestamp = ts + foundTs = true + break + } + } + // if we haven't found a timestamp layout yet, try all timestamp + // layouts. + if !foundTs { + for _, layout := range timeLayouts { + ts, err := time.Parse(layout, v) + if err == nil { + timestamp = ts + foundTs = true + p.foundTsLayouts = append(p.foundTsLayouts, layout) + break + } + } + } + // if we still haven't found a timestamp layout, log it and we will + // just use time.Now() + if !foundTs { + log.Printf("E! Error parsing timestamp [%s], could not find any "+ + "suitable time layouts.", v) + } case DROP: // goodbye! default: @@ -231,7 +275,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { if err == nil { timestamp = ts } else { - log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err) + log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err) } } } @@ -267,7 +311,7 @@ func (p *Parser) compileCustomPatterns() error { // check if pattern contains modifiers. Parse them out if it does. for name, pattern := range p.patterns { - if typedRe.MatchString(pattern) { + if modifierRe.MatchString(pattern) { // this pattern has modifiers, so parse out the modifiers pattern, err = p.parseTypedCaptures(name, pattern) if err != nil { @@ -280,13 +324,13 @@ func (p *Parser) compileCustomPatterns() error { return p.g.AddPatternsFromMap(p.patterns) } -// parseTypedCaptures parses the capture types, and then deletes the type from -// the line so that it is a valid "grok" pattern again. +// parseTypedCaptures parses the capture modifiers, and then deletes the +// modifier from the line so that it is a valid "grok" pattern again. 
// ie, // %{NUMBER:bytes:int} => %{NUMBER:bytes} (stores %{NUMBER}->bytes->int) // %{IPORHOST:clientip:tag} => %{IPORHOST:clientip} (stores %{IPORHOST}->clientip->tag) func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) { - matches := typedRe.FindAllStringSubmatch(pattern, -1) + matches := modifierRe.FindAllStringSubmatch(pattern, -1) // grab the name of the capture pattern patternName := "%{" + name + "}" @@ -298,16 +342,18 @@ func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) { hasTimestamp := false for _, match := range matches { // regex capture 1 is the name of the capture - // regex capture 2 is the type of the capture - if strings.HasPrefix(match[2], "ts-") { + // regex capture 2 is the modifier of the capture + if strings.HasPrefix(match[2], "ts") { if hasTimestamp { return pattern, fmt.Errorf("logparser pattern compile error: "+ "Each pattern is allowed only one named "+ "timestamp data type. pattern: %s", pattern) } - if f, ok := timeFormats[match[2]]; ok { - p.tsMap[patternName][match[1]] = f + if layout, ok := timeLayouts[match[2]]; ok { + // built-in time format + p.tsMap[patternName][match[1]] = layout } else { + // custom time format p.tsMap[patternName][match[1]] = strings.TrimSuffix(strings.TrimPrefix(match[2], `ts-"`), `"`) } hasTimestamp = true diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go index 295f32609..bc8d980f2 100644 --- a/plugins/inputs/logparser/grok/grok_test.go +++ b/plugins/inputs/logparser/grok/grok_test.go @@ -38,32 +38,6 @@ func Benchmark_ParseLine_CombinedLogFormat(b *testing.B) { benchM = m } -func Benchmark_ParseLine_InfluxLog(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"}, - } - p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`[httpd] 192.168.1.1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`) - } - benchM = m -} - -func Benchmark_ParseLine_InfluxLog_NoMatch(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"}, - } - p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`[retention] 2016/06/14 14:38:24 retention policy shard deletion check commencing`) - } - benchM = m -} - func Benchmark_ParseLine_CustomPattern(b *testing.B) { p := &Parser{ Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, @@ -108,9 +82,9 @@ func TestMeasurementName(t *testing.T) { assert.Equal(t, "my_web_log", m.Name()) } -func TestBuiltinInfluxdbHttpd(t *testing.T) { +func TestCustomInfluxdbHttpd(t *testing.T) { p := &Parser{ - Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"}, + Patterns: []string{`\[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}`}, } assert.NoError(t, p.Compile()) @@ -333,6 +307,55 @@ func TestParseEpochErrors(t *testing.T) { assert.NoError(t, err) } +func TestParseGenericTimestamp(t *testing.T) { + p := &Parser{ + Patterns: []string{`\[%{HTTPDATE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`}, + } + assert.NoError(t, p.Compile()) + + metricA, err := p.ParseLine(`[09/Jun/2016:03:37:03 +0000] response_time=20821 mymetric=10890.645`) + require.NotNil(t, metricA) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "response_time": int64(20821), + "metric": float64(10890.645), + }, + metricA.Fields()) + assert.Equal(t, 
map[string]string{}, metricA.Tags()) + assert.Equal(t, time.Unix(1465443423, 0).UTC(), metricA.Time().UTC()) + + metricB, err := p.ParseLine(`[09/Jun/2016:03:37:04 +0000] response_time=20821 mymetric=10890.645`) + require.NotNil(t, metricB) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "response_time": int64(20821), + "metric": float64(10890.645), + }, + metricB.Fields()) + assert.Equal(t, map[string]string{}, metricB.Tags()) + assert.Equal(t, time.Unix(1465443424, 0).UTC(), metricB.Time().UTC()) +} + +func TestParseGenericTimestampNotFound(t *testing.T) { + p := &Parser{ + Patterns: []string{`\[%{NOTSPACE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`}, + } + assert.NoError(t, p.Compile()) + + metricA, err := p.ParseLine(`[foobar] response_time=20821 mymetric=10890.645`) + require.NotNil(t, metricA) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "response_time": int64(20821), + "metric": float64(10890.645), + }, + metricA.Fields()) + assert.Equal(t, map[string]string{}, metricA.Tags()) +} + func TestCompileFileAndParse(t *testing.T) { p := &Parser{ Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, diff --git a/plugins/inputs/logparser/grok/influx_patterns.go b/plugins/inputs/logparser/grok/influx_patterns.go index 53be0e20d..ff9d60ebf 100644 --- a/plugins/inputs/logparser/grok/influx_patterns.go +++ b/plugins/inputs/logparser/grok/influx_patterns.go @@ -55,15 +55,13 @@ EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} # Wider-ranging username matching vs. logstash built-in %{USER} NGUSERNAME [a-zA-Z\.\@\-\+_%]+ NGUSER %{NGUSERNAME} +# Wider-ranging client IP matching +CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) ## ## COMMON LOG PATTERNS ## -# InfluxDB log patterns -CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) -INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int} - # apache & nginx logs, this is also known as the "common log format" # see https://en.wikipedia.org/wiki/Common_Log_Format COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) diff --git a/plugins/inputs/logparser/grok/patterns/influx-patterns b/plugins/inputs/logparser/grok/patterns/influx-patterns index 1db74a17a..6f4d81f89 100644 --- a/plugins/inputs/logparser/grok/patterns/influx-patterns +++ b/plugins/inputs/logparser/grok/patterns/influx-patterns @@ -51,15 +51,13 @@ EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} # Wider-ranging username matching vs. 
logstash built-in %{USER} NGUSERNAME [a-zA-Z\.\@\-\+_%]+ NGUSER %{NGUSERNAME} +# Wider-ranging client IP matching +CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) ## ## COMMON LOG PATTERNS ## -# InfluxDB log patterns -CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) -INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int} - # apache & nginx logs, this is also known as the "common log format" # see https://en.wikipedia.org/wiki/Common_Log_Format COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 6b29ea031..0778a8a6d 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -45,7 +45,7 @@ const sampleConfig = ` ## /var/log/**.log -> recursively find all .log files in /var/log ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log ## /var/log/apache.log -> only tail the apache log file - files = ["/var/log/influxdb/influxdb.log"] + files = ["/var/log/apache/access.log"] ## Read file from beginning. from_beginning = false @@ -58,9 +58,9 @@ const sampleConfig = ` ## Other common built-in patterns are: ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) - patterns = ["%{INFLUXDB_HTTPD_LOG}"] + patterns = ["%{COMBINED_LOG_FORMAT}"] ## Name of the outputted measurement name. - measurement = "influxdb_log" + measurement = "apache_access_log" ## Full path(s) to custom pattern files. custom_pattern_files = [] ## Custom patterns can also be defined here. Put one pattern per line. @@ -134,7 +134,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { for _, filepath := range l.Files { g, err := globpath.Compile(filepath) if err != nil { - log.Printf("ERROR Glob %s failed to compile, %s", filepath, err) + log.Printf("E! Error Glob %s failed to compile, %s", filepath, err) continue } files := g.Match() @@ -167,7 +167,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { if line.Err != nil { - log.Printf("ERROR tailing file %s, Error: %s\n", + log.Printf("E! Error tailing file %s, Error: %s\n", tailer.Filename, line.Err) continue } @@ -216,7 +216,7 @@ func (l *LogParserPlugin) Stop() { for _, t := range l.tailers { err := t.Stop() if err != nil { - log.Printf("ERROR stopping tail on file %s\n", t.Filename) + log.Printf("E! Error stopping tail on file %s\n", t.Filename) } t.Cleanup() } diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 75c9a30d7..db0004ce2 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") if api.Debug { - log.Printf("Request URL: %s", req.URL.String()) + log.Printf("D! Request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { return nil, err } if api.Debug { - log.Printf("Response Body:%s", string(body)) + log.Printf("D! 
Response Body:%s", string(body))
 	}
 
 	if err = chimpErrorCheck(body); err != nil {
diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md
index 20a6dd244..575396585 100644
--- a/plugins/inputs/mesos/README.md
+++ b/plugins/inputs/mesos/README.md
@@ -1,6 +1,6 @@
 # Mesos Input Plugin
 
-This input plugin gathers metrics from Mesos (*currently only Mesos masters*).
+This input plugin gathers metrics from Mesos.
 For more information, please check the [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page.
 
 ### Configuration:
@@ -8,14 +8,38 @@ For more information, please check the [Mesos Observability Metrics](http://meso
 ```toml
 # Telegraf plugin for gathering metrics from N Mesos masters
 [[inputs.mesos]]
-  # Timeout, in ms.
+  ## Timeout, in ms.
   timeout = 100
-  # A list of Mesos masters, default value is localhost:5050.
+  ## A list of Mesos masters.
   masters = ["localhost:5050"]
-  # Metrics groups to be collected, by default, all enabled.
-  master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]
+  ## Master metrics groups to be collected, by default, all enabled.
+  master_collections = [
+    "resources",
+    "master",
+    "system",
+    "agents",
+    "frameworks",
+    "tasks",
+    "messages",
+    "evqueue",
+    "registrar",
+  ]
+  ## A list of Mesos slaves, default is []
+  # slaves = []
+  ## Slave metrics groups to be collected, by default, all enabled.
+  # slave_collections = [
+  #   "resources",
+  #   "agent",
+  #   "system",
+  #   "executors",
+  #   "tasks",
+  #   "messages",
+  # ]
 ```
+By default this plugin does not gather metrics from any Mesos nodes. Because a Mesos
+cluster can be deployed in numerous ways, no defaults are assumed; you must explicitly
+list the master and/or slave nodes this plugin should gather metrics from.
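To make that concrete: the node lists live directly on the plugin struct, with no built-in defaults. A minimal sketch of wiring it up the way mesos_test.go does (the addresses here are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/inputs/mesos"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	m := mesos.Mesos{
		// Hypothetical addresses; when a port is omitted the plugin
		// appends the defaults (:5050 for masters, :5051 for slaves).
		Masters: []string{"10.0.0.1:5050"},
		Slaves:  []string{"10.0.0.2:5051"},
		Timeout: 100, // in ms; SetDefaults() also falls back to 100
	}

	var acc testutil.Accumulator
	if err := m.Gather(&acc); err != nil {
		fmt.Println("gather error:", err)
	}
	fmt.Println(acc.NFields(), "fields gathered")
}
```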
+
 
 ### Measurements & Fields:
 
 Mesos master metric groups
   - master/disk_revocable_percent
   - master/disk_revocable_total
   - master/disk_revocable_used
+  - master/gpus_percent
+  - master/gpus_used
+  - master/gpus_total
+  - master/gpus_revocable_percent
+  - master/gpus_revocable_total
+  - master/gpus_revocable_used
   - master/mem_percent
   - master/mem_used
   - master/mem_total
@@ -136,17 +166,87 @@ Mesos master metric groups
   - registrar/state_store_ms/p999
   - registrar/state_store_ms/p9999
 
+Mesos slave metric groups
+- resources
+  - slave/cpus_percent
+  - slave/cpus_used
+  - slave/cpus_total
+  - slave/cpus_revocable_percent
+  - slave/cpus_revocable_total
+  - slave/cpus_revocable_used
+  - slave/disk_percent
+  - slave/disk_used
+  - slave/disk_total
+  - slave/disk_revocable_percent
+  - slave/disk_revocable_total
+  - slave/disk_revocable_used
+  - slave/gpus_percent
+  - slave/gpus_used
+  - slave/gpus_total
+  - slave/gpus_revocable_percent
+  - slave/gpus_revocable_total
+  - slave/gpus_revocable_used
+  - slave/mem_percent
+  - slave/mem_used
+  - slave/mem_total
+  - slave/mem_revocable_percent
+  - slave/mem_revocable_total
+  - slave/mem_revocable_used
+
+- agent
+  - slave/registered
+  - slave/uptime_secs
+
+- system
+  - system/cpus_total
+  - system/load_15min
+  - system/load_5min
+  - system/load_1min
+  - system/mem_free_bytes
+  - system/mem_total_bytes
+
+- executors
+  - containerizer/mesos/container_destroy_errors
+  - slave/container_launch_errors
+  - slave/executors_preempted
+  - slave/frameworks_active
+  - slave/executor_directory_max_allowed_age_secs
+  - slave/executors_registering
+  - slave/executors_running
+  - slave/executors_terminated
+  - slave/executors_terminating
+  - slave/recovery_errors
+
+- tasks
+  - slave/tasks_failed
+  - slave/tasks_finished
+  - slave/tasks_killed
+  - slave/tasks_lost
+  - slave/tasks_running
+  - slave/tasks_staging
+  - slave/tasks_starting
+
+- messages
+  - slave/invalid_framework_messages
+  - slave/invalid_status_updates
+  - slave/valid_framework_messages
+  - slave/valid_status_updates
+
 ### Tags:
 
-- All measurements have the following tags:
+- All master/slave measurements have the following tags:
     - server
+    - role (master/slave)
+
+- All master measurements have the extra tags:
+    - state (leader/standby)
 
 ### Example Output:
-
 ```
 $ telegraf -config ~/mesos.conf -input-filter mesos -test
 * Plugin: mesos, Collection 1
-mesos,server=172.17.8.101 allocator/event_queue_dispatches=0,master/cpus_percent=0,
+mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101
+allocator/event_queue_dispatches=0,master/cpus_percent=0,
 master/cpus_revocable_percent=0,master/cpus_revocable_total=0,
 master/cpus_revocable_used=0,master/cpus_total=2,
 master/cpus_used=0,master/disk_percent=0,master/disk_revocable_percent=0,
@@ -163,3 +263,4 @@ master/mem_revocable_used=0,master/mem_total=1002,
 master/mem_used=0,master/messages_authenticate=0,
 master/messages_deactivate_framework=0
 ...
``` + diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index b096a20d9..e6c68bd7d 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -17,33 +17,55 @@ import ( jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" ) +type Role string + +const ( + MASTER Role = "master" + SLAVE = "slave" +) + type Mesos struct { Timeout int Masters []string MasterCols []string `toml:"master_collections"` + Slaves []string + SlaveCols []string `toml:"slave_collections"` + //SlaveTasks bool } -var defaultMetrics = []string{ - "resources", "master", "system", "slaves", "frameworks", - "tasks", "messages", "evqueue", "messages", "registrar", +var allMetrics = map[Role][]string{ + MASTER: []string{"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"}, + SLAVE: []string{"resources", "agent", "system", "executors", "tasks", "messages"}, } var sampleConfig = ` - # Timeout, in ms. + ## Timeout, in ms. timeout = 100 - # A list of Mesos masters, default value is localhost:5050. + ## A list of Mesos masters. masters = ["localhost:5050"] - # Metrics groups to be collected, by default, all enabled. + ## Master metrics groups to be collected, by default, all enabled. master_collections = [ "resources", "master", "system", - "slaves", + "agents", "frameworks", + "tasks", "messages", "evqueue", "registrar", ] + ## A list of Mesos slaves, default is [] + # slaves = [] + ## Slave metrics groups to be collected, by default, all enabled. + # slave_collections = [ + # "resources", + # "agent", + # "system", + # "executors", + # "tasks", + # "messages", + # ] ` // SampleConfig returns a sample configuration block @@ -56,26 +78,59 @@ func (m *Mesos) Description() string { return "Telegraf plugin for gathering metrics from N Mesos masters" } +func (m *Mesos) SetDefaults() { + if len(m.MasterCols) == 0 { + m.MasterCols = allMetrics[MASTER] + } + + if len(m.SlaveCols) == 0 { + m.SlaveCols = allMetrics[SLAVE] + } + + if m.Timeout == 0 { + log.Println("I! 
[mesos] Missing timeout value, setting default value (100ms)") + m.Timeout = 100 + } +} + // Gather() metrics from given list of Mesos Masters func (m *Mesos) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup var errorChannel chan error - if len(m.Masters) == 0 { - m.Masters = []string{"localhost:5050"} - } + m.SetDefaults() - errorChannel = make(chan error, len(m.Masters)*2) + errorChannel = make(chan error, len(m.Masters)+2*len(m.Slaves)) for _, v := range m.Masters { wg.Add(1) go func(c string) { - errorChannel <- m.gatherMetrics(c, acc) + errorChannel <- m.gatherMainMetrics(c, ":5050", MASTER, acc) wg.Done() return }(v) } + for _, v := range m.Slaves { + wg.Add(1) + go func(c string) { + errorChannel <- m.gatherMainMetrics(c, ":5051", SLAVE, acc) + wg.Done() + return + }(v) + + // if !m.SlaveTasks { + // continue + // } + + // wg.Add(1) + // go func(c string) { + // errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc) + // wg.Done() + // return + // }(v) + } + wg.Wait() close(errorChannel) errorStrings := []string{} @@ -94,7 +149,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { } // metricsDiff() returns set names for removal -func metricsDiff(w []string) []string { +func metricsDiff(role Role, w []string) []string { b := []string{} s := make(map[string]bool) @@ -106,7 +161,7 @@ func metricsDiff(w []string) []string { s[v] = true } - for _, d := range defaultMetrics { + for _, d := range allMetrics[role] { if _, ok := s[d]; !ok { b = append(b, d) } @@ -116,156 +171,239 @@ func metricsDiff(w []string) []string { } // masterBlocks serves as kind of metrics registry groupping them in sets -func masterBlocks(g string) []string { +func getMetrics(role Role, group string) []string { var m map[string][]string m = make(map[string][]string) - m["resources"] = []string{ - "master/cpus_percent", - "master/cpus_used", - "master/cpus_total", - "master/cpus_revocable_percent", - "master/cpus_revocable_total", - "master/cpus_revocable_used", - "master/disk_percent", - "master/disk_used", - "master/disk_total", - "master/disk_revocable_percent", - "master/disk_revocable_total", - "master/disk_revocable_used", - "master/mem_percent", - "master/mem_used", - "master/mem_total", - "master/mem_revocable_percent", - "master/mem_revocable_total", - "master/mem_revocable_used", + if role == MASTER { + m["resources"] = []string{ + "master/cpus_percent", + "master/cpus_used", + "master/cpus_total", + "master/cpus_revocable_percent", + "master/cpus_revocable_total", + "master/cpus_revocable_used", + "master/disk_percent", + "master/disk_used", + "master/disk_total", + "master/disk_revocable_percent", + "master/disk_revocable_total", + "master/disk_revocable_used", + "master/gpus_percent", + "master/gpus_used", + "master/gpus_total", + "master/gpus_revocable_percent", + "master/gpus_revocable_total", + "master/gpus_revocable_used", + "master/mem_percent", + "master/mem_used", + "master/mem_total", + "master/mem_revocable_percent", + "master/mem_revocable_total", + "master/mem_revocable_used", + } + + m["master"] = []string{ + "master/elected", + "master/uptime_secs", + } + + m["system"] = []string{ + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + } + + m["agents"] = []string{ + "master/slave_registrations", + "master/slave_removals", + "master/slave_reregistrations", + "master/slave_shutdowns_scheduled", + "master/slave_shutdowns_canceled", + "master/slave_shutdowns_completed", + 
"master/slaves_active", + "master/slaves_connected", + "master/slaves_disconnected", + "master/slaves_inactive", + } + + m["frameworks"] = []string{ + "master/frameworks_active", + "master/frameworks_connected", + "master/frameworks_disconnected", + "master/frameworks_inactive", + "master/outstanding_offers", + } + + m["tasks"] = []string{ + "master/tasks_error", + "master/tasks_failed", + "master/tasks_finished", + "master/tasks_killed", + "master/tasks_lost", + "master/tasks_running", + "master/tasks_staging", + "master/tasks_starting", + } + + m["messages"] = []string{ + "master/invalid_executor_to_framework_messages", + "master/invalid_framework_to_executor_messages", + "master/invalid_status_update_acknowledgements", + "master/invalid_status_updates", + "master/dropped_messages", + "master/messages_authenticate", + "master/messages_deactivate_framework", + "master/messages_decline_offers", + "master/messages_executor_to_framework", + "master/messages_exited_executor", + "master/messages_framework_to_executor", + "master/messages_kill_task", + "master/messages_launch_tasks", + "master/messages_reconcile_tasks", + "master/messages_register_framework", + "master/messages_register_slave", + "master/messages_reregister_framework", + "master/messages_reregister_slave", + "master/messages_resource_request", + "master/messages_revive_offers", + "master/messages_status_update", + "master/messages_status_update_acknowledgement", + "master/messages_unregister_framework", + "master/messages_unregister_slave", + "master/messages_update_slave", + "master/recovery_slave_removals", + "master/slave_removals/reason_registered", + "master/slave_removals/reason_unhealthy", + "master/slave_removals/reason_unregistered", + "master/valid_framework_to_executor_messages", + "master/valid_status_update_acknowledgements", + "master/valid_status_updates", + "master/task_lost/source_master/reason_invalid_offers", + "master/task_lost/source_master/reason_slave_removed", + "master/task_lost/source_slave/reason_executor_terminated", + "master/valid_executor_to_framework_messages", + } + + m["evqueue"] = []string{ + "master/event_queue_dispatches", + "master/event_queue_http_requests", + "master/event_queue_messages", + } + + m["registrar"] = []string{ + "registrar/state_fetch_ms", + "registrar/state_store_ms", + "registrar/state_store_ms/max", + "registrar/state_store_ms/min", + "registrar/state_store_ms/p50", + "registrar/state_store_ms/p90", + "registrar/state_store_ms/p95", + "registrar/state_store_ms/p99", + "registrar/state_store_ms/p999", + "registrar/state_store_ms/p9999", + } + } else if role == SLAVE { + m["resources"] = []string{ + "slave/cpus_percent", + "slave/cpus_used", + "slave/cpus_total", + "slave/cpus_revocable_percent", + "slave/cpus_revocable_total", + "slave/cpus_revocable_used", + "slave/disk_percent", + "slave/disk_used", + "slave/disk_total", + "slave/disk_revocable_percent", + "slave/disk_revocable_total", + "slave/disk_revocable_used", + "slave/gpus_percent", + "slave/gpus_used", + "slave/gpus_total", + "slave/gpus_revocable_percent", + "slave/gpus_revocable_total", + "slave/gpus_revocable_used", + "slave/mem_percent", + "slave/mem_used", + "slave/mem_total", + "slave/mem_revocable_percent", + "slave/mem_revocable_total", + "slave/mem_revocable_used", + } + + m["agent"] = []string{ + "slave/registered", + "slave/uptime_secs", + } + + m["system"] = []string{ + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + 
"system/mem_total_bytes", + } + + m["executors"] = []string{ + "containerizer/mesos/container_destroy_errors", + "slave/container_launch_errors", + "slave/executors_preempted", + "slave/frameworks_active", + "slave/executor_directory_max_allowed_age_secs", + "slave/executors_registering", + "slave/executors_running", + "slave/executors_terminated", + "slave/executors_terminating", + "slave/recovery_errors", + } + + m["tasks"] = []string{ + "slave/tasks_failed", + "slave/tasks_finished", + "slave/tasks_killed", + "slave/tasks_lost", + "slave/tasks_running", + "slave/tasks_staging", + "slave/tasks_starting", + } + + m["messages"] = []string{ + "slave/invalid_framework_messages", + "slave/invalid_status_updates", + "slave/valid_framework_messages", + "slave/valid_status_updates", + } } - m["master"] = []string{ - "master/elected", - "master/uptime_secs", - } - - m["system"] = []string{ - "system/cpus_total", - "system/load_15min", - "system/load_5min", - "system/load_1min", - "system/mem_free_bytes", - "system/mem_total_bytes", - } - - m["slaves"] = []string{ - "master/slave_registrations", - "master/slave_removals", - "master/slave_reregistrations", - "master/slave_shutdowns_scheduled", - "master/slave_shutdowns_canceled", - "master/slave_shutdowns_completed", - "master/slaves_active", - "master/slaves_connected", - "master/slaves_disconnected", - "master/slaves_inactive", - } - - m["frameworks"] = []string{ - "master/frameworks_active", - "master/frameworks_connected", - "master/frameworks_disconnected", - "master/frameworks_inactive", - "master/outstanding_offers", - } - - m["tasks"] = []string{ - "master/tasks_error", - "master/tasks_failed", - "master/tasks_finished", - "master/tasks_killed", - "master/tasks_lost", - "master/tasks_running", - "master/tasks_staging", - "master/tasks_starting", - } - - m["messages"] = []string{ - "master/invalid_executor_to_framework_messages", - "master/invalid_framework_to_executor_messages", - "master/invalid_status_update_acknowledgements", - "master/invalid_status_updates", - "master/dropped_messages", - "master/messages_authenticate", - "master/messages_deactivate_framework", - "master/messages_decline_offers", - "master/messages_executor_to_framework", - "master/messages_exited_executor", - "master/messages_framework_to_executor", - "master/messages_kill_task", - "master/messages_launch_tasks", - "master/messages_reconcile_tasks", - "master/messages_register_framework", - "master/messages_register_slave", - "master/messages_reregister_framework", - "master/messages_reregister_slave", - "master/messages_resource_request", - "master/messages_revive_offers", - "master/messages_status_update", - "master/messages_status_update_acknowledgement", - "master/messages_unregister_framework", - "master/messages_unregister_slave", - "master/messages_update_slave", - "master/recovery_slave_removals", - "master/slave_removals/reason_registered", - "master/slave_removals/reason_unhealthy", - "master/slave_removals/reason_unregistered", - "master/valid_framework_to_executor_messages", - "master/valid_status_update_acknowledgements", - "master/valid_status_updates", - "master/task_lost/source_master/reason_invalid_offers", - "master/task_lost/source_master/reason_slave_removed", - "master/task_lost/source_slave/reason_executor_terminated", - "master/valid_executor_to_framework_messages", - } - - m["evqueue"] = []string{ - "master/event_queue_dispatches", - "master/event_queue_http_requests", - "master/event_queue_messages", - } - - m["registrar"] = []string{ - 
"registrar/state_fetch_ms", - "registrar/state_store_ms", - "registrar/state_store_ms/max", - "registrar/state_store_ms/min", - "registrar/state_store_ms/p50", - "registrar/state_store_ms/p90", - "registrar/state_store_ms/p95", - "registrar/state_store_ms/p99", - "registrar/state_store_ms/p999", - "registrar/state_store_ms/p9999", - } - - ret, ok := m[g] + ret, ok := m[group] if !ok { - log.Println("[mesos] Unkown metrics group: ", g) + log.Printf("I! [mesos] Unkown %s metrics group: %s\n", role, group) return []string{} } return ret } -// removeGroup(), remove unwanted sets -func (m *Mesos) removeGroup(j *map[string]interface{}) { +func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { var ok bool + var selectedMetrics []string - b := metricsDiff(m.MasterCols) + if role == MASTER { + selectedMetrics = m.MasterCols + } else if role == SLAVE { + selectedMetrics = m.SlaveCols + } - for _, k := range b { - for _, v := range masterBlocks(k) { - if _, ok = (*j)[v]; ok { - delete((*j), v) + for _, k := range metricsDiff(role, selectedMetrics) { + for _, v := range getMetrics(role, k) { + if _, ok = (*metrics)[v]; ok { + delete((*metrics), v) } } } @@ -280,23 +418,76 @@ var client = &http.Client{ Timeout: time.Duration(4 * time.Second), } -// This should not belong to the object -func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { - var jsonOut map[string]interface{} +// TaskStats struct for JSON API output /monitor/statistics +type TaskStats struct { + ExecutorID string `json:"executor_id"` + FrameworkID string `json:"framework_id"` + Statistics map[string]interface{} `json:"statistics"` +} - host, _, err := net.SplitHostPort(a) +func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc telegraf.Accumulator) error { + var metrics []TaskStats + + host, _, err := net.SplitHostPort(address) if err != nil { - host = a - a = a + ":5050" + host = address + address = address + defaultPort } tags := map[string]string{ "server": host, } - if m.Timeout == 0 { - log.Println("[mesos] Missing timeout value, setting default value (100ms)") - m.Timeout = 100 + ts := strconv.Itoa(m.Timeout) + "ms" + + resp, err := client.Get("http://" + address + "/monitor/statistics?timeout=" + ts) + + if err != nil { + return err + } + + data, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return err + } + + if err = json.Unmarshal([]byte(data), &metrics); err != nil { + return errors.New("Error decoding JSON response") + } + + for _, task := range metrics { + tags["framework_id"] = task.FrameworkID + + jf := jsonparser.JSONFlattener{} + err = jf.FlattenJSON("", task.Statistics) + + if err != nil { + return err + } + + timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0) + jf.Fields["executor_id"] = task.ExecutorID + + acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp) + } + + return nil +} + +// This should not belong to the object +func (m *Mesos) gatherMainMetrics(a string, defaultPort string, role Role, acc telegraf.Accumulator) error { + var jsonOut map[string]interface{} + + host, _, err := net.SplitHostPort(a) + if err != nil { + host = a + a = a + defaultPort + } + + tags := map[string]string{ + "server": host, + "role": string(role), } ts := strconv.Itoa(m.Timeout) + "ms" @@ -317,7 +508,7 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { return errors.New("Error decoding JSON response") } - m.removeGroup(&jsonOut) + m.filterMetrics(role, &jsonOut) jf := jsonparser.JSONFlattener{} @@ 
-327,6 +518,14 @@ func (m *Mesos) gatherMetrics(a string, acc telegraf.Accumulator) error { return err } + if role == MASTER { + if jf.Fields["master/elected"] != 0.0 { + tags["state"] = "leader" + } else { + tags["state"] = "standby" + } + } + acc.AddFields("mesos", jf.Fields, tags) return nil diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index c56580649..5c83e294c 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -2,6 +2,7 @@ package mesos import ( "encoding/json" + "fmt" "math/rand" "net/http" "net/http/httptest" @@ -11,61 +12,265 @@ import ( "github.com/influxdata/telegraf/testutil" ) -var mesosMetrics map[string]interface{} -var ts *httptest.Server +var masterMetrics map[string]interface{} +var masterTestServer *httptest.Server +var slaveMetrics map[string]interface{} + +// var slaveTaskMetrics map[string]interface{} +var slaveTestServer *httptest.Server + +func randUUID() string { + b := make([]byte, 16) + rand.Read(b) + return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) +} func generateMetrics() { - mesosMetrics = make(map[string]interface{}) + masterMetrics = make(map[string]interface{}) - metricNames := []string{"master/cpus_percent", "master/cpus_used", "master/cpus_total", - "master/cpus_revocable_percent", "master/cpus_revocable_total", "master/cpus_revocable_used", - "master/disk_percent", "master/disk_used", "master/disk_total", "master/disk_revocable_percent", - "master/disk_revocable_total", "master/disk_revocable_used", "master/mem_percent", - "master/mem_used", "master/mem_total", "master/mem_revocable_percent", "master/mem_revocable_total", - "master/mem_revocable_used", "master/elected", "master/uptime_secs", "system/cpus_total", - "system/load_15min", "system/load_5min", "system/load_1min", "system/mem_free_bytes", - "system/mem_total_bytes", "master/slave_registrations", "master/slave_removals", - "master/slave_reregistrations", "master/slave_shutdowns_scheduled", "master/slave_shutdowns_canceled", - "master/slave_shutdowns_completed", "master/slaves_active", "master/slaves_connected", - "master/slaves_disconnected", "master/slaves_inactive", "master/frameworks_active", - "master/frameworks_connected", "master/frameworks_disconnected", "master/frameworks_inactive", - "master/outstanding_offers", "master/tasks_error", "master/tasks_failed", "master/tasks_finished", - "master/tasks_killed", "master/tasks_lost", "master/tasks_running", "master/tasks_staging", - "master/tasks_starting", "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", - "master/invalid_status_update_acknowledgements", "master/invalid_status_updates", - "master/dropped_messages", "master/messages_authenticate", "master/messages_deactivate_framework", - "master/messages_decline_offers", "master/messages_executor_to_framework", "master/messages_exited_executor", - "master/messages_framework_to_executor", "master/messages_kill_task", "master/messages_launch_tasks", - "master/messages_reconcile_tasks", "master/messages_register_framework", "master/messages_register_slave", - "master/messages_reregister_framework", "master/messages_reregister_slave", "master/messages_resource_request", - "master/messages_revive_offers", "master/messages_status_update", "master/messages_status_update_acknowledgement", - "master/messages_unregister_framework", "master/messages_unregister_slave", "master/messages_update_slave", - "master/recovery_slave_removals", 
"master/slave_removals/reason_registered", "master/slave_removals/reason_unhealthy", - "master/slave_removals/reason_unregistered", "master/valid_framework_to_executor_messages", "master/valid_status_update_acknowledgements", - "master/valid_status_updates", "master/task_lost/source_master/reason_invalid_offers", - "master/task_lost/source_master/reason_slave_removed", "master/task_lost/source_slave/reason_executor_terminated", - "master/valid_executor_to_framework_messages", "master/event_queue_dispatches", - "master/event_queue_http_requests", "master/event_queue_messages", "registrar/state_fetch_ms", - "registrar/state_store_ms", "registrar/state_store_ms/max", "registrar/state_store_ms/min", - "registrar/state_store_ms/p50", "registrar/state_store_ms/p90", "registrar/state_store_ms/p95", - "registrar/state_store_ms/p99", "registrar/state_store_ms/p999", "registrar/state_store_ms/p9999"} + metricNames := []string{ + // resources + "master/cpus_percent", + "master/cpus_used", + "master/cpus_total", + "master/cpus_revocable_percent", + "master/cpus_revocable_total", + "master/cpus_revocable_used", + "master/disk_percent", + "master/disk_used", + "master/disk_total", + "master/disk_revocable_percent", + "master/disk_revocable_total", + "master/disk_revocable_used", + "master/gpus_percent", + "master/gpus_used", + "master/gpus_total", + "master/gpus_revocable_percent", + "master/gpus_revocable_total", + "master/gpus_revocable_used", + "master/mem_percent", + "master/mem_used", + "master/mem_total", + "master/mem_revocable_percent", + "master/mem_revocable_total", + "master/mem_revocable_used", + // master + "master/elected", + "master/uptime_secs", + // system + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + // agents + "master/slave_registrations", + "master/slave_removals", + "master/slave_reregistrations", + "master/slave_shutdowns_scheduled", + "master/slave_shutdowns_canceled", + "master/slave_shutdowns_completed", + "master/slaves_active", + "master/slaves_connected", + "master/slaves_disconnected", + "master/slaves_inactive", + // frameworks + "master/frameworks_active", + "master/frameworks_connected", + "master/frameworks_disconnected", + "master/frameworks_inactive", + "master/outstanding_offers", + // tasks + "master/tasks_error", + "master/tasks_failed", + "master/tasks_finished", + "master/tasks_killed", + "master/tasks_lost", + "master/tasks_running", + "master/tasks_staging", + "master/tasks_starting", + // messages + "master/invalid_executor_to_framework_messages", + "master/invalid_framework_to_executor_messages", + "master/invalid_status_update_acknowledgements", + "master/invalid_status_updates", + "master/dropped_messages", + "master/messages_authenticate", + "master/messages_deactivate_framework", + "master/messages_decline_offers", + "master/messages_executor_to_framework", + "master/messages_exited_executor", + "master/messages_framework_to_executor", + "master/messages_kill_task", + "master/messages_launch_tasks", + "master/messages_reconcile_tasks", + "master/messages_register_framework", + "master/messages_register_slave", + "master/messages_reregister_framework", + "master/messages_reregister_slave", + "master/messages_resource_request", + "master/messages_revive_offers", + "master/messages_status_update", + "master/messages_status_update_acknowledgement", + "master/messages_unregister_framework", + "master/messages_unregister_slave", + "master/messages_update_slave", 
+ "master/recovery_slave_removals", + "master/slave_removals/reason_registered", + "master/slave_removals/reason_unhealthy", + "master/slave_removals/reason_unregistered", + "master/valid_framework_to_executor_messages", + "master/valid_status_update_acknowledgements", + "master/valid_status_updates", + "master/task_lost/source_master/reason_invalid_offers", + "master/task_lost/source_master/reason_slave_removed", + "master/task_lost/source_slave/reason_executor_terminated", + "master/valid_executor_to_framework_messages", + // evgqueue + "master/event_queue_dispatches", + "master/event_queue_http_requests", + "master/event_queue_messages", + // registrar + "registrar/state_fetch_ms", + "registrar/state_store_ms", + "registrar/state_store_ms/max", + "registrar/state_store_ms/min", + "registrar/state_store_ms/p50", + "registrar/state_store_ms/p90", + "registrar/state_store_ms/p95", + "registrar/state_store_ms/p99", + "registrar/state_store_ms/p999", + "registrar/state_store_ms/p9999", + } for _, k := range metricNames { - mesosMetrics[k] = rand.Float64() + masterMetrics[k] = rand.Float64() } + + slaveMetrics = make(map[string]interface{}) + + metricNames = []string{ + // resources + "slave/cpus_percent", + "slave/cpus_used", + "slave/cpus_total", + "slave/cpus_revocable_percent", + "slave/cpus_revocable_total", + "slave/cpus_revocable_used", + "slave/disk_percent", + "slave/disk_used", + "slave/disk_total", + "slave/disk_revocable_percent", + "slave/disk_revocable_total", + "slave/disk_revocable_used", + "slave/gpus_percent", + "slave/gpus_used", + "slave/gpus_total", + "slave/gpus_revocable_percent", + "slave/gpus_revocable_total", + "slave/gpus_revocable_used", + "slave/mem_percent", + "slave/mem_used", + "slave/mem_total", + "slave/mem_revocable_percent", + "slave/mem_revocable_total", + "slave/mem_revocable_used", + // agent + "slave/registered", + "slave/uptime_secs", + // system + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + // executors + "containerizer/mesos/container_destroy_errors", + "slave/container_launch_errors", + "slave/executors_preempted", + "slave/frameworks_active", + "slave/executor_directory_max_allowed_age_secs", + "slave/executors_registering", + "slave/executors_running", + "slave/executors_terminated", + "slave/executors_terminating", + "slave/recovery_errors", + // tasks + "slave/tasks_failed", + "slave/tasks_finished", + "slave/tasks_killed", + "slave/tasks_lost", + "slave/tasks_running", + "slave/tasks_staging", + "slave/tasks_starting", + // messages + "slave/invalid_framework_messages", + "slave/invalid_status_updates", + "slave/valid_framework_messages", + "slave/valid_status_updates", + } + + for _, k := range metricNames { + slaveMetrics[k] = rand.Float64() + } + + // slaveTaskMetrics = map[string]interface{}{ + // "executor_id": fmt.Sprintf("task_name.%s", randUUID()), + // "executor_name": "Some task description", + // "framework_id": randUUID(), + // "source": fmt.Sprintf("task_source.%s", randUUID()), + // "statistics": map[string]interface{}{ + // "cpus_limit": rand.Float64(), + // "cpus_system_time_secs": rand.Float64(), + // "cpus_user_time_secs": rand.Float64(), + // "mem_anon_bytes": float64(rand.Int63()), + // "mem_cache_bytes": float64(rand.Int63()), + // "mem_critical_pressure_counter": float64(rand.Int63()), + // "mem_file_bytes": float64(rand.Int63()), + // "mem_limit_bytes": float64(rand.Int63()), + // "mem_low_pressure_counter": 
float64(rand.Int63()), + // "mem_mapped_file_bytes": float64(rand.Int63()), + // "mem_medium_pressure_counter": float64(rand.Int63()), + // "mem_rss_bytes": float64(rand.Int63()), + // "mem_swap_bytes": float64(rand.Int63()), + // "mem_total_bytes": float64(rand.Int63()), + // "mem_total_memsw_bytes": float64(rand.Int63()), + // "mem_unevictable_bytes": float64(rand.Int63()), + // "timestamp": rand.Float64(), + // }, + // } } func TestMain(m *testing.M) { generateMetrics() - r := http.NewServeMux() - r.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { + + masterRouter := http.NewServeMux() + masterRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(mesosMetrics) + json.NewEncoder(w).Encode(masterMetrics) }) - ts = httptest.NewServer(r) + masterTestServer = httptest.NewServer(masterRouter) + + slaveRouter := http.NewServeMux() + slaveRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(slaveMetrics) + }) + // slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) { + // w.WriteHeader(http.StatusOK) + // w.Header().Set("Content-Type", "application/json") + // json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics}) + // }) + slaveTestServer = httptest.NewServer(slaveRouter) + rc := m.Run() - ts.Close() + + masterTestServer.Close() + slaveTestServer.Close() os.Exit(rc) } @@ -73,7 +278,7 @@ func TestMesosMaster(t *testing.T) { var acc testutil.Accumulator m := Mesos{ - Masters: []string{ts.Listener.Addr().String()}, + Masters: []string{masterTestServer.Listener.Addr().String()}, Timeout: 10, } @@ -83,34 +288,91 @@ func TestMesosMaster(t *testing.T) { t.Errorf(err.Error()) } - acc.AssertContainsFields(t, "mesos", mesosMetrics) + acc.AssertContainsFields(t, "mesos", masterMetrics) } -func TestRemoveGroup(t *testing.T) { - generateMetrics() - +func TestMasterFilter(t *testing.T) { m := Mesos{ MasterCols: []string{ "resources", "master", "registrar", }, } b := []string{ - "system", "slaves", "frameworks", - "messages", "evqueue", + "system", "agents", "frameworks", + "messages", "evqueue", "tasks", } - m.removeGroup(&mesosMetrics) + m.filterMetrics(MASTER, &masterMetrics) for _, v := range b { - for _, x := range masterBlocks(v) { - if _, ok := mesosMetrics[x]; ok { + for _, x := range getMetrics(MASTER, v) { + if _, ok := masterMetrics[x]; ok { t.Errorf("Found key %s, it should be gone.", x) } } } for _, v := range m.MasterCols { - for _, x := range masterBlocks(v) { - if _, ok := mesosMetrics[x]; !ok { + for _, x := range getMetrics(MASTER, v) { + if _, ok := masterMetrics[x]; !ok { + t.Errorf("Didn't find key %s, it should present.", x) + } + } + } +} + +func TestMesosSlave(t *testing.T) { + var acc testutil.Accumulator + + m := Mesos{ + Masters: []string{}, + Slaves: []string{slaveTestServer.Listener.Addr().String()}, + // SlaveTasks: true, + Timeout: 10, + } + + err := m.Gather(&acc) + + if err != nil { + t.Errorf(err.Error()) + } + + acc.AssertContainsFields(t, "mesos", slaveMetrics) + + // expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1) + // for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) { + // expectedFields[k] = v + // } + // 
+	// expectedFields["executor_id"] = slaveTaskMetrics["executor_id"]
+
+	// acc.AssertContainsTaggedFields(
+	// 	t,
+	// 	"mesos_tasks",
+	// 	expectedFields,
+	// 	map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)})
+}
+
+func TestSlaveFilter(t *testing.T) {
+	m := Mesos{
+		SlaveCols: []string{
+			"resources", "agent", "tasks",
+		},
+	}
+	b := []string{
+		"system", "executors", "messages",
+	}
+
+	m.filterMetrics(SLAVE, &slaveMetrics)
+
+	for _, v := range b {
+		for _, x := range getMetrics(SLAVE, v) {
+			if _, ok := slaveMetrics[x]; ok {
+				t.Errorf("Found key %s, it should be gone.", x)
+			}
+		}
+	}
+	for _, v := range m.SlaveCols {
+		for _, x := range getMetrics(SLAVE, v) {
+			if _, ok := slaveMetrics[x]; !ok {
 				t.Errorf("Didn't find key %s, it should be present.", x)
 			}
 		}
 	}
diff --git a/plugins/inputs/mock_Plugin.go b/plugins/inputs/mock_Plugin.go
index caf30f72f..4dec121bc 100644
--- a/plugins/inputs/mock_Plugin.go
+++ b/plugins/inputs/mock_Plugin.go
@@ -6,10 +6,22 @@ import (
 	"github.com/stretchr/testify/mock"
 )
 
+// MockPlugin struct should be named the same as the Plugin
 type MockPlugin struct {
 	mock.Mock
 }
 
+// Description will appear directly above the plugin definition in the config file
+func (m *MockPlugin) Description() string {
+	return `This is an example plugin`
+}
+
+// SampleConfig will populate the sample configuration portion of the plugin's configuration
+func (m *MockPlugin) SampleConfig() string {
+	return `  sampleVar = 'foo'`
+}
+
+// Gather defines what data the plugin will gather.
 func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error {
 	ret := m.Called(_a0)
 
diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go
index e797fd6ab..e843c70f0 100644
--- a/plugins/inputs/mongodb/mongodb_server.go
+++ b/plugins/inputs/mongodb/mongodb_server.go
@@ -26,14 +26,28 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
 	s.Session.SetMode(mgo.Eventual, true)
 	s.Session.SetSocketTimeout(0)
 	result_server := &ServerStatus{}
-	err := s.Session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 0}}, result_server)
+	err := s.Session.DB("admin").Run(bson.D{
+		{
+			Name:  "serverStatus",
+			Value: 1,
+		},
+		{
+			Name:  "recordStats",
+			Value: 0,
+		},
+	}, result_server)
 	if err != nil {
 		return err
 	}
 	result_repl := &ReplSetStatus{}
-	err = s.Session.DB("admin").Run(bson.D{{"replSetGetStatus", 1}}, result_repl)
+	err = s.Session.DB("admin").Run(bson.D{
+		{
+			Name:  "replSetGetStatus",
+			Value: 1,
+		},
+	}, result_repl)
 	if err != nil {
-		log.Println("Not gathering replica set status, member not in replica set (" + err.Error() + ")")
+		log.Println("E! Not gathering replica set status, member not in replica set (" + err.Error() + ")")
 	}
 
 	jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
@@ -48,13 +62,18 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
 	names := []string{}
 	names, err = s.Session.DatabaseNames()
 	if err != nil {
-		log.Println("Error getting database names (" + err.Error() + ")")
+		log.Println("E! Error getting database names (" + err.Error() + ")")
 	}
 	for _, db_name := range names {
 		db_stat_line := &DbStatsData{}
-		err = s.Session.DB(db_name).Run(bson.D{{"dbStats", 1}}, db_stat_line)
+		err = s.Session.DB(db_name).Run(bson.D{
+			{
+				Name:  "dbStats",
+				Value: 1,
+			},
+		}, db_stat_line)
 		if err != nil {
-			log.Println("Error getting db stats from " + db_name + "(" + err.Error() + ")")
+			log.Println("E! 
Error getting db stats from " + db_name + "(" + err.Error() + ")") } db := &Db{ Name: db_name, diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 50f65333e..da539f8aa 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -514,7 +514,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs) } - if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil { + if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil { returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs) } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index beebe00ce..cfade2944 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -133,7 +133,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { return nil } func (m *MQTTConsumer) onConnect(c mqtt.Client) { - log.Printf("MQTT Client Connected") + log.Printf("I! MQTT Client Connected") if !m.PersistentSession || !m.started { topics := make(map[string]byte) for _, topic := range m.Topics { @@ -142,7 +142,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) { subscribeToken := c.SubscribeMultiple(topics, m.recvMessage) subscribeToken.Wait() if subscribeToken.Error() != nil { - log.Printf("MQTT SUBSCRIBE ERROR\ntopics: %s\nerror: %s", + log.Printf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s", strings.Join(m.Topics[:], ","), subscribeToken.Error()) } m.started = true @@ -151,7 +151,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) { } func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { - log.Printf("MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error()) + log.Printf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error()) return } @@ -166,7 +166,7 @@ func (m *MQTTConsumer) receiver() { topic := msg.Topic() metrics, err := m.parser.Parse(msg.Payload()) if err != nil { - log.Printf("MQTT PARSE ERROR\nmessage: %s\nerror: %s", + log.Printf("E! 
MQTT Parse Error\nmessage: %s\nerror: %s", string(msg.Payload()), err.Error()) } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 10b8c2f75..4902e9b3f 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -313,6 +313,10 @@ var mappings = []*mapping{ onServer: "wsrep_", inExport: "wsrep_", }, + { + onServer: "Uptime_", + inExport: "uptime_", + }, } var ( @@ -1376,6 +1380,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf &rowsAffected, &rowsSent, &rowsExamined, &tmpTables, &tmpDiskTables, &sortMergePasses, &sortRows, + &noIndexUsed, ) if err != nil { @@ -1477,19 +1482,23 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula tags["schema"] = tableSchema tags["table"] = tableName - acc.Add(newNamespace("info_schema", "table_rows"), tableRows, tags) + acc.AddFields(newNamespace("info_schema", "table_rows"), + map[string]interface{}{"value": tableRows}, tags) dlTags := copyTags(tags) dlTags["component"] = "data_length" - acc.Add(newNamespace("info_schema", "table_size", "data_length"), dataLength, dlTags) + acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), + map[string]interface{}{"value": dataLength}, dlTags) ilTags := copyTags(tags) ilTags["component"] = "index_length" - acc.Add(newNamespace("info_schema", "table_size", "index_length"), indexLength, ilTags) + acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), + map[string]interface{}{"value": indexLength}, ilTags) dfTags := copyTags(tags) dfTags["component"] = "data_free" - acc.Add(newNamespace("info_schema", "table_size", "data_free"), dataFree, dfTags) + acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), + map[string]interface{}{"value": dataFree}, dfTags) versionTags := copyTags(tags) versionTags["type"] = tableType @@ -1497,7 +1506,8 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula versionTags["row_format"] = rowFormat versionTags["create_options"] = createOptions - acc.Add(newNamespace("info_schema", "table_version"), version, versionTags) + acc.AddFields(newNamespace("info_schema", "table_version"), + map[string]interface{}{"value": version}, versionTags) } } return nil @@ -1510,7 +1520,7 @@ func parseValue(value sql.RawBytes) (float64, bool) { } if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 { - return 0, false + return 0, true } n, err := strconv.ParseFloat(string(value), 64) return n, err == nil diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 232d5740f..a760d0362 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -119,7 +119,7 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { // Start the message reader go n.receiver() - log.Printf("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", + log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup) return nil @@ -134,11 +134,11 @@ func (n *natsConsumer) receiver() { case <-n.done: return case err := <-n.errs: - log.Printf("error reading from %s\n", err.Error()) + log.Printf("E! error reading from %s\n", err.Error()) case msg := <-n.in: metrics, err := n.parser.Parse(msg.Data) if err != nil { - log.Printf("subject: %s, error: %s", msg.Subject, err.Error()) + log.Printf("E! 
subject: %s, error: %s", msg.Subject, err.Error()) } for _, metric := range metrics { @@ -157,7 +157,7 @@ func (n *natsConsumer) clean() { for _, sub := range n.Subs { if err := sub.Unsubscribe(); err != nil { - log.Printf("Error unsubscribing from subject %s in queue %s: %s\n", + log.Printf("E! Error unsubscribing from subject %s in queue %s: %s\n", sub.Subject, sub.Queue, err.Error()) } } diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index b227b7e50..d4f4e9679 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -62,7 +62,7 @@ func (n *NSQConsumer) Start(acc telegraf.Accumulator) error { n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) if err != nil { - log.Printf("NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error()) + log.Printf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error()) return nil } for _, metric := range metrics { diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index e9dc1cc14..674cd7216 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -119,7 +119,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // Get integer metrics from output for key, index := range intI { - if index == -1 { + if index == -1 || index >= len(fields) { continue } if fields[index] == "-" { @@ -132,7 +132,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "h"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! Error ntpq: parsing int: %s", fields[index]) continue } // seconds in an hour @@ -141,7 +141,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "d"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! Error ntpq: parsing int: %s", fields[index]) continue } // seconds in a day @@ -150,7 +150,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "m"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! Error ntpq: parsing int: %s", fields[index]) continue } // seconds in a day @@ -161,7 +161,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.Atoi(fields[index]) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! Error ntpq: parsing int: %s", fields[index]) continue } mFields[key] = int64(m) @@ -169,7 +169,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // get float metrics from output for key, index := range floatI { - if index == -1 { + if index == -1 || index >= len(fields) { continue } if fields[index] == "-" { @@ -178,7 +178,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.ParseFloat(fields[index], 64) if err != nil { - log.Printf("ERROR ntpq: parsing float: %s", fields[index]) + log.Printf("E! 
Error ntpq: parsing float: %s", fields[index])
 			continue
 		}
 		mFields[key] = m
diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go
index 7e83243c0..4b6489949 100644
--- a/plugins/inputs/ntpq/ntpq_test.go
+++ b/plugins/inputs/ntpq/ntpq_test.go
@@ -41,6 +41,35 @@ func TestSingleNTPQ(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
 }
 
+func TestMissingJitterField(t *testing.T) {
+	tt := tester{
+		ret: []byte(missingJitterField),
+		err: nil,
+	}
+	n := &NTPQ{
+		runQ: tt.runqTest,
+	}
+
+	acc := testutil.Accumulator{}
+	assert.NoError(t, n.Gather(&acc))
+
+	fields := map[string]interface{}{
+		"when":   int64(101),
+		"poll":   int64(256),
+		"reach":  int64(37),
+		"delay":  float64(51.016),
+		"offset": float64(233.010),
+	}
+	tags := map[string]string{
+		"remote":       "uschi5-ntp-002.",
+		"state_prefix": "*",
+		"refid":        "10.177.80.46",
+		"stratum":      "2",
+		"type":         "u",
+	}
+	acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
+}
+
 func TestBadIntNTPQ(t *testing.T) {
 	tt := tester{
 		ret: []byte(badIntParseNTPQ),
@@ -381,6 +410,11 @@ var singleNTPQ = ` remote refid st t when poll reach delay
 *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
 `
 
+var missingJitterField = ` remote refid st t when poll reach delay offset jitter
+==============================================================================
+*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010
+`
+
 var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter
 ==============================================================================
 *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462
diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md
new file mode 100644
index 000000000..1f087c774
--- /dev/null
+++ b/plugins/inputs/ping/README.md
@@ -0,0 +1,36 @@
+# Ping input plugin
+
+This input plugin measures the round-trip time for ping messages.
+
+## Windows:
+### Configuration:
+```
+  ## urls to ping
+  urls = ["www.google.com"] # required
+
+  ## number of pings to send per collection (ping -n <COUNT>)
+  count = 4 # required
+
+  ## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
+  Timeout = 0
+```
+### Measurements & Fields:
+- packets_transmitted ( from ping output )
+- reply_received ( increases only on a valid metric from an echo reply; e.g. a 'Destination net unreachable' reply will increment packets_received but not reply_received )
+- packets_received ( from ping output )
+- percent_reply_loss ( computed from packets_transmitted and reply_received )
+- percent_packet_loss ( computed from packets_transmitted and packets_received )
+- errors ( when the host cannot be found or wrong parameters are passed to the application )
+- response time
+  - average_response_ms ( computed from minimum_response_ms and maximum_response_ms )
+  - minimum_response_ms ( from ping output )
+  - maximum_response_ms ( from ping output )
+
+### Tags:
+- server
+
+### Example Output:
+```
+* Plugin: ping, Collection 1
+ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000
+```
\ No newline at end of file
diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go
index b1d3ef06f..7fb112810 100644
--- a/plugins/inputs/ping/ping_windows.go
+++ b/plugins/inputs/ping/ping_windows.go
@@ -1,3 +1,224 @@
 // +build windows
 
 package ping
+
+import (
+	"errors"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// HostPinger is a function that runs the "ping" function using a list of
+// passed arguments. This can be easily switched with a mocked ping function
+// for unit test purposes (see ping_test.go)
+type HostPinger func(timeout float64, args ...string) (string, error)
+
+type Ping struct {
+	// Number of pings to send (ping -n <COUNT>)
+	Count int
+
+	// Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
+	Timeout float64
+
+	// URLs to ping
+	Urls []string
+
+	// host ping function
+	pingHost HostPinger
+}
+
+func (s *Ping) Description() string {
+	return "Ping given url(s) and return statistics"
+}
+
+const sampleConfig = `
+  ## urls to ping
+  urls = ["www.google.com"] # required
+
+  ## number of pings to send per collection (ping -n <COUNT>)
+  count = 4 # required
+
+  ## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
+  Timeout = 0
+`
+
+func (s *Ping) SampleConfig() string {
+	return sampleConfig
+}
+
+func hostPinger(timeout float64, args ...string) (string, error) {
+	bin, err := exec.LookPath("ping")
+	if err != nil {
+		return "", err
+	}
+	c := exec.Command(bin, args...)
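+	// CombinedOutputTimeout runs the command with stdout and stderr combined,
+	// killing it if it runs longer than the given duration; one extra second
+	// is allowed beyond the configured ping timeout.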
+	out, err := internal.CombinedOutputTimeout(c,
+		time.Second*time.Duration(timeout+1))
+	return string(out), err
+}
+
+// processPingOutput takes in a string output from the ping command.
+// It is based on the Linux implementation, but uses regular expressions for
+// multi-language support (this shouldn't noticeably affect performance).
+// It returns (<transmitted>, <replyReceived>, <packetsReceived>, <average>, <min>, <max>).
+func processPingOutput(out string) (int, int, int, int, int, int, error) {
+	// Find a line that contains three numbers, excluding the reply lines
+	var stats, aproxs []string = nil, nil
+	err := errors.New("Fatal error processing ping output")
+	stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`)
+	aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
+	ttlLine := regexp.MustCompile(`TTL=\d+`)
+	lines := strings.Split(out, "\n")
+	var receivedReply int = 0
+	for _, line := range lines {
+		if ttlLine.MatchString(line) {
+			receivedReply++
+		} else {
+			if stats == nil {
+				stats = stat.FindStringSubmatch(line)
+			}
+			if stats != nil && aproxs == nil {
+				aproxs = aprox.FindStringSubmatch(line)
+			}
+		}
+	}
+
+	// stats data should contain 4 members: entireExpression + ( Send, Receive, Lost )
+	if len(stats) != 4 {
+		return 0, 0, 0, 0, 0, 0, err
+	}
+	trans, err := strconv.Atoi(stats[1])
+	if err != nil {
+		return 0, 0, 0, 0, 0, 0, err
+	}
+	receivedPacket, err := strconv.Atoi(stats[2])
+	if err != nil {
+		return 0, 0, 0, 0, 0, 0, err
+	}
+
+	// aproxs data should contain 4 members: entireExpression + ( min, max, avg )
+	if len(aproxs) != 4 {
+		return trans, receivedReply, receivedPacket, 0, 0, 0, err
+	}
+	min, err := strconv.Atoi(aproxs[1])
+	if err != nil {
+		return trans, receivedReply, receivedPacket, 0, 0, 0, err
+	}
+	max, err := strconv.Atoi(aproxs[2])
+	if err != nil {
+		return trans, receivedReply, receivedPacket, 0, 0, 0, err
+	}
+	avg, err := strconv.Atoi(aproxs[3])
+	if err != nil {
+		return trans, receivedReply, receivedPacket, 0, 0, 0, err
+	}
+
+	return trans, receivedReply, receivedPacket, avg, min, max, err
+}
+
+func (p *Ping) timeout() float64 {
+	// According to MSDN, the default ping timeout on Windows is 4 seconds;
+	// add one extra second as a buffer.
+	if p.Timeout > 0 {
+		return p.Timeout + 1
+	}
+	return 4 + 1
+}
+
+// args returns the arguments for the 'ping' executable
+func (p *Ping) args(url string) []string {
+	args := []string{"-n", strconv.Itoa(p.Count)}
+
+	if p.Timeout > 0 {
+		args = append(args, "-w", strconv.FormatFloat(p.Timeout*1000, 'f', 0, 64))
+	}
+
+	args = append(args, url)
+
+	return args
+}
+
+func (p *Ping) Gather(acc telegraf.Accumulator) error {
+	var wg sync.WaitGroup
+	errorChannel := make(chan error, len(p.Urls)*2)
+	var pendingError error = nil
+	// Spin off a go routine for each url to ping
+	for _, url := range p.Urls {
+		wg.Add(1)
+		go func(u string) {
+			defer wg.Done()
+			args := p.args(u)
+			totalTimeout := p.timeout() * float64(p.Count)
+			out, err := p.pingHost(totalTimeout, args...)
+			// ping returns a non-zero exit code even when there was simply
+			// no response from the host, so a non-nil err here does not mean
+			// the command itself failed to execute successfully.
+			if err != nil {
+				// Combine go err + stderr output
+				pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error())
+			}
+			tags := map[string]string{"url": u}
+			trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out)
+			if err != nil {
+				// fatal error
+				if pendingError != nil {
+					errorChannel <- pendingError
+				}
+				errorChannel <- err
+				fields := map[string]interface{}{
+					"errors": 100.0,
+				}
+
+				acc.AddFields("ping", fields, tags)
+
+				return
+			}
+			// Calculate packet loss percentage
+			lossReply := float64(trans-recReply) / float64(trans) * 100.0
+			lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0
+			fields := map[string]interface{}{
+				"packets_transmitted": trans,
+				"reply_received":      recReply,
+				"packets_received":    receivePacket,
+				"percent_packet_loss": lossPackets,
+				"percent_reply_loss":  lossReply,
+			}
+			if avg > 0 {
+				fields["average_response_ms"] = avg
+			}
+			if min > 0 {
+				fields["minimum_response_ms"] = min
+			}
+			if max > 0 {
+				fields["maximum_response_ms"] = max
+			}
+			acc.AddFields("ping", fields, tags)
+		}(url)
+	}
+
+	wg.Wait()
+	close(errorChannel)
+
+	// Get all errors and return them as one giant error
+	errorStrings := []string{}
+	for err := range errorChannel {
+		errorStrings = append(errorStrings, err.Error())
+	}
+
+	if len(errorStrings) == 0 {
+		return nil
+	}
+	return errors.New(strings.Join(errorStrings, "\n"))
+}
+
+func init() {
+	inputs.Add("ping", func() telegraf.Input {
+		return &Ping{pingHost: hostPinger}
+	})
+}
diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go
new file mode 100644
index 000000000..34428b814
--- /dev/null
+++ b/plugins/inputs/ping/ping_windows_test.go
@@ -0,0 +1,329 @@
+// +build windows
+
+package ping
+
+import (
+	"errors"
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+// Windows ping output is locale-dependent; the parser relies only on the
+// numeric patterns, so it should work in any language. Polish sample:
+var winPLPingOutput = `
+Badanie 8.8.8.8 z 32 bajtami danych:
+Odpowiedz z 8.8.8.8: bajtow=32 czas=49ms TTL=43
+Odpowiedz z 8.8.8.8: bajtow=32 czas=46ms TTL=43
+Odpowiedz z 8.8.8.8: bajtow=32 czas=48ms TTL=43
+Odpowiedz z 8.8.8.8: bajtow=32 czas=57ms TTL=43
+
+Statystyka badania ping dla 8.8.8.8:
+    Pakiety: Wyslane = 4, Odebrane = 4, Utracone = 0
+             (0% straty),
+Szacunkowy czas bladzenia pakietww w millisekundach:
+    Minimum = 46 ms, Maksimum = 57 ms, Czas sredni = 50 ms
+`
+
+// English sample of the same output:
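+// (used by mockHostPinger in the happy-path Gather tests below)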
+var winENPingOutput = ` +Pinging 8.8.8.8 with 32 bytes of data: +Reply from 8.8.8.8: bytes=32 time=52ms TTL=43 +Reply from 8.8.8.8: bytes=32 time=50ms TTL=43 +Reply from 8.8.8.8: bytes=32 time=50ms TTL=43 +Reply from 8.8.8.8: bytes=32 time=51ms TTL=43 + +Ping statistics for 8.8.8.8: + Packets: Sent = 4, Received = 4, Lost = 0 (0% loss), +Approximate round trip times in milli-seconds: + Minimum = 50ms, Maximum = 52ms, Average = 50ms +` + +func TestHost(t *testing.T) { + trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput) + assert.NoError(t, err) + assert.Equal(t, 4, trans, "4 packets were transmitted") + assert.Equal(t, 4, recReply, "4 packets were reply") + assert.Equal(t, 4, recPacket, "4 packets were received") + assert.Equal(t, 50, avg, "Average 50") + assert.Equal(t, 46, min, "Min 46") + assert.Equal(t, 57, max, "max 57") + + trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput) + assert.NoError(t, err) + assert.Equal(t, 4, trans, "4 packets were transmitted") + assert.Equal(t, 4, recReply, "4 packets were reply") + assert.Equal(t, 4, recPacket, "4 packets were received") + assert.Equal(t, 50, avg, "Average 50") + assert.Equal(t, 50, min, "Min 50") + assert.Equal(t, 52, max, "Max 52") +} + +func mockHostPinger(timeout float64, args ...string) (string, error) { + return winENPingOutput, nil +} + +// Test that Gather function works on a normal ping +func TestPingGather(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com", "www.reddit.com"}, + pingHost: mockHostPinger, + } + + p.Gather(&acc) + tags := map[string]string{"url": "www.google.com"} + fields := map[string]interface{}{ + "packets_transmitted": 4, + "packets_received": 4, + "reply_received": 4, + "percent_packet_loss": 0.0, + "percent_reply_loss": 0.0, + "average_response_ms": 50, + "minimum_response_ms": 50, + "maximum_response_ms": 52, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) + + tags = map[string]string{"url": "www.reddit.com"} + acc.AssertContainsTaggedFields(t, "ping", fields, tags) +} + +var errorPingOutput = ` +Badanie nask.pl [195.187.242.157] z 32 bajtami danych: +Upłynął limit czasu żądania. +Upłynął limit czasu żądania. +Upłynął limit czasu żądania. +Upłynął limit czasu żądania. + +Statystyka badania ping dla 195.187.242.157: + Pakiety: Wysłane = 4, Odebrane = 0, Utracone = 4 + (100% straty), +` + +func mockErrorHostPinger(timeout float64, args ...string) (string, error) { + return errorPingOutput, errors.New("No packets received") +} + +// Test that Gather works on a ping with no transmitted packets, even though the +// command returns an error +func TestBadPingGather(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.amazon.com"}, + pingHost: mockErrorHostPinger, + } + + p.Gather(&acc) + tags := map[string]string{"url": "www.amazon.com"} + fields := map[string]interface{}{ + "packets_transmitted": 4, + "packets_received": 0, + "reply_received": 0, + "percent_packet_loss": 100.0, + "percent_reply_loss": 100.0, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) +} + +var lossyPingOutput = ` +Badanie thecodinglove.com [66.6.44.4] z 9800 bajtami danych: +Upłynął limit czasu żądania. 
+Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48 +Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48 +Odpowiedź z 66.6.44.4: bajtów=9800 czas=118ms TTL=48 +Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48 +Odpowiedź z 66.6.44.4: bajtów=9800 czas=114ms TTL=48 +Upłynął limit czasu żądania. +Odpowiedź z 66.6.44.4: bajtów=9800 czas=119ms TTL=48 +Odpowiedź z 66.6.44.4: bajtów=9800 czas=116ms TTL=48 + +Statystyka badania ping dla 66.6.44.4: + Pakiety: Wysłane = 9, Odebrane = 7, Utracone = 2 + (22% straty), +Szacunkowy czas błądzenia pakietów w millisekundach: + Minimum = 114 ms, Maksimum = 119 ms, Czas średni = 115 ms +` + +func mockLossyHostPinger(timeout float64, args ...string) (string, error) { + return lossyPingOutput, nil +} + +// Test that Gather works on a ping with lossy packets +func TestLossyPingGather(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com"}, + pingHost: mockLossyHostPinger, + } + + p.Gather(&acc) + tags := map[string]string{"url": "www.google.com"} + fields := map[string]interface{}{ + "packets_transmitted": 9, + "packets_received": 7, + "reply_received": 7, + "percent_packet_loss": 22.22222222222222, + "percent_reply_loss": 22.22222222222222, + "average_response_ms": 115, + "minimum_response_ms": 114, + "maximum_response_ms": 119, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) +} + +// Fatal ping output (invalid argument) +var fatalPingOutput = ` +Bad option -d. + + +Usage: ping [-t] [-a] [-n count] [-l size] [-f] [-i TTL] [-v TOS] + [-r count] [-s count] [[-j host-list] | [-k host-list]] + [-w timeout] [-R] [-S srcaddr] [-4] [-6] target_name + +Options: + -t Ping the specified host until stopped. + To see statistics and continue - type Control-Break; + To stop - type Control-C. + -a Resolve addresses to hostnames. + -n count Number of echo requests to send. + -l size Send buffer size. + -f Set Don't Fragment flag in packet (IPv4-only). + -i TTL Time To Live. + -v TOS Type Of Service (IPv4-only. This setting has been deprecated + and has no effect on the type of service field in the IP Header). + -r count Record route for count hops (IPv4-only). + -s count Timestamp for count hops (IPv4-only). + -j host-list Loose source route along host-list (IPv4-only). + -k host-list Strict source route along host-list (IPv4-only). + -w timeout Timeout in milliseconds to wait for each reply. + -R Use routing header to test reverse route also (IPv6-only). + -S srcaddr Source address to use. + -4 Force using IPv4. + -6 Force using IPv6. + +` + +func mockFatalHostPinger(timeout float64, args ...string) (string, error) { + return fatalPingOutput, errors.New("So very bad") +} + +// Test that a fatal ping command does not gather any statistics. 
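+// Only the "errors" field should be reported in that case.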
+func TestFatalPingGather(t *testing.T) {
+	var acc testutil.Accumulator
+	p := Ping{
+		Urls:     []string{"www.amazon.com"},
+		pingHost: mockFatalHostPinger,
+	}
+
+	p.Gather(&acc)
+	assert.True(t, acc.HasFloatField("ping", "errors"),
+		"Fatal ping should have an errors field")
+	assert.False(t, acc.HasIntField("ping", "packets_transmitted"),
+		"Fatal ping should not have packet measurements")
+	assert.False(t, acc.HasIntField("ping", "packets_received"),
+		"Fatal ping should not have packet measurements")
+	assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"),
+		"Fatal ping should not have packet measurements")
+	assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"),
+		"Fatal ping should not have packet measurements")
+	assert.False(t, acc.HasIntField("ping", "average_response_ms"),
+		"Fatal ping should not have response time measurements")
+	assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
+		"Fatal ping should not have response time measurements")
+	assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
+		"Fatal ping should not have response time measurements")
+}
+
+var UnreachablePingOutput = `
+Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
+Request timed out.
+Request timed out.
+Reply from 194.204.175.50: Destination net unreachable.
+Request timed out.
+
+Ping statistics for 8.8.8.8:
+    Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
+`
+
+func mockUnreachableHostPinger(timeout float64, args ...string) (string, error) {
+	return UnreachablePingOutput, errors.New("So very bad")
+}
+
+// A "TTL expired in transit" reply (e.g. "Reply from 185.28.251.217: TTL
+// expired in transit.") is handled the same way; see TestTTLExpiredPingGather.
+
+// In the case of a 'Destination net unreachable' reply, the ping application
+// reports a received packet, but it does not contain a valid metric, so it is
+// treated as lost.
+func TestUnreachablePingGather(t *testing.T) {
+	var acc testutil.Accumulator
+	p := Ping{
+		Urls:     []string{"www.google.com"},
+		pingHost: mockUnreachableHostPinger,
+	}
+
+	p.Gather(&acc)
+
+	tags := map[string]string{"url": "www.google.com"}
+	fields := map[string]interface{}{
+		"packets_transmitted": 4,
+		"packets_received":    1,
+		"reply_received":      0,
+		"percent_packet_loss": 75.0,
+		"percent_reply_loss":  100.0,
+	}
+	acc.AssertContainsTaggedFields(t, "ping", fields, tags)
+
+	assert.False(t, acc.HasFloatField("ping", "errors"),
+		"Unreachable ping should not have an errors field")
+	assert.False(t, acc.HasIntField("ping", "average_response_ms"),
+		"Unreachable ping should not have response time measurements")
+	assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
+		"Unreachable ping should not have response time measurements")
+	assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
+		"Unreachable ping should not have response time measurements")
+}
+
+var TTLExpiredPingOutput = `
+Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
+Request timed out.
+Request timed out.
+Reply from 185.28.251.217: TTL expired in transit.
+Request timed out.
+
+Ping statistics for 8.8.8.8:
+    Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
+`
+
+func mockTTLExpiredPinger(timeout float64, args ...string) (string, error) {
+	return TTLExpiredPingOutput, errors.New("So very bad")
+}
+
+// In the case of a "TTL expired in transit" reply, the ping application
+// reports a received packet, but it does not contain a valid metric, so it is
+// treated as lost.
+func TestTTLExpiredPingGather(t *testing.T) {
+	var acc testutil.Accumulator
+	p := Ping{
+		Urls:     []string{"www.google.com"},
+		pingHost: mockTTLExpiredPinger,
+	}
+
+	p.Gather(&acc)
+
+	tags := map[string]string{"url": "www.google.com"}
+	fields := map[string]interface{}{
+		"packets_transmitted": 4,
+		"packets_received":    1,
+		"reply_received":      0,
+		"percent_packet_loss": 75.0,
+		"percent_reply_loss":  100.0,
+	}
+	acc.AssertContainsTaggedFields(t, "ping", fields, tags)
+
+	assert.False(t, acc.HasFloatField("ping", "errors"),
+		"TTL expired ping should not have an errors field")
+	assert.False(t, acc.HasIntField("ping", "average_response_ms"),
+		"TTL expired ping should not have response time measurements")
+	assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
+		"TTL expired ping should not have response time measurements")
+	assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
+		"TTL expired ping should not have response time measurements")
+}
diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go
index da8ee8001..0e7cdb509 100644
--- a/plugins/inputs/postgresql/postgresql.go
+++ b/plugins/inputs/postgresql/postgresql.go
@@ -17,6 +17,7 @@ import (
 type Postgresql struct {
 	Address          string
 	Databases        []string
+	IgnoredDatabases []string
 	OrderedColumns   []string
 	AllColumns       []string
 	sanitizedAddress string
@@ -40,8 +41,12 @@ var sampleConfig = `
 	##
 	address = "host=localhost user=postgres sslmode=disable"
 
+	## A list of databases to explicitly ignore. If not specified, metrics for all
+	## databases are gathered. Do NOT use with the 'databases' option.
+	# ignored_databases = ["postgres", "template0", "template1"]
+
 	## A list of databases to pull metrics about. If not specified, metrics for all
-	## databases are gathered.
+	## databases are gathered. Do NOT use with the 'ignored_databases' option.
# databases = ["app_production", "testing"] ` @@ -73,8 +78,11 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { defer db.Close() - if len(p.Databases) == 0 { + if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 { query = `SELECT * FROM pg_stat_database` + } else if len(p.IgnoredDatabases) != 0 { + query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname NOT IN ('%s')`, + strings.Join(p.IgnoredDatabases, "','")) } else { query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, strings.Join(p.Databases, "','")) diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 552b18cdb..64926f61e 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -150,3 +150,75 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { assert.False(t, acc.HasMeasurement(col)) } } + +func TestPostgresqlDatabaseWhitelistTest(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + p := &Postgresql{ + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + Databases: []string{"template0"}, + } + + var acc testutil.Accumulator + + err := p.Gather(&acc) + require.NoError(t, err) + + var foundTemplate0 = false + var foundTemplate1 = false + + for _, pnt := range acc.Metrics { + if pnt.Measurement == "postgresql" { + if pnt.Tags["db"] == "template0" { + foundTemplate0 = true + } + } + if pnt.Measurement == "postgresql" { + if pnt.Tags["db"] == "template1" { + foundTemplate1 = true + } + } + } + + assert.True(t, foundTemplate0) + assert.False(t, foundTemplate1) +} + +func TestPostgresqlDatabaseBlacklistTest(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + p := &Postgresql{ + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + IgnoredDatabases: []string{"template0"}, + } + + var acc testutil.Accumulator + + err := p.Gather(&acc) + require.NoError(t, err) + + var foundTemplate0 = false + var foundTemplate1 = false + + for _, pnt := range acc.Metrics { + if pnt.Measurement == "postgresql" { + if pnt.Tags["db"] == "template0" { + foundTemplate0 = true + } + } + if pnt.Measurement == "postgresql" { + if pnt.Tags["db"] == "template1" { + foundTemplate1 = true + } + } + } + + assert.False(t, foundTemplate0) + assert.True(t, foundTemplate1) +} diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 75bc6b936..199262c0b 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -266,29 +266,31 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula tags := map[string]string{} tags["server"] = tagAddress tags["db"] = dbname.String() - var isATag int fields := make(map[string]interface{}) +COLUMN: for col, val := range columnMap { - if acc.Debug() { - log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val) - } + log.Printf("D! 
postgresql_extensible: column: %s = %T: %s\n", col, *val, *val) _, ignore := ignoredColumns[col] - if !ignore && *val != nil { - isATag = 0 - for tag := range p.AdditionalTags { - if col == p.AdditionalTags[tag] { - isATag = 1 - value_type_p := fmt.Sprintf(`%T`, *val) - if value_type_p == "[]uint8" { - tags[col] = fmt.Sprintf(`%s`, *val) - } else if value_type_p == "int64" { - tags[col] = fmt.Sprintf(`%v`, *val) - } - } + if ignore || *val == nil { + continue + } + for _, tag := range p.AdditionalTags { + if col != tag { + continue } - if isATag == 0 { - fields[col] = *val + switch v := (*val).(type) { + case []byte: + tags[col] = string(v) + case int64: + tags[col] = fmt.Sprintf("%d", v) } + continue COLUMN + } + + if v, ok := (*val).([]byte); ok { + fields[col] = string(v) + } else { + fields[col] = *val } } acc.AddFields(meas_name, fields, tags) diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 0824ff672..68b1696e0 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "io" + "log" "net" "strconv" "strings" @@ -86,10 +87,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error metrics := string(buf) // Process data - fields, err := parseResponse(metrics) - if err != nil { - return err - } + fields := parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": address} @@ -99,22 +97,27 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error return nil } -func parseResponse(metrics string) (map[string]interface{}, error) { +func parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, ",") for _, metric := range s[:len(s)-1] { m := strings.Split(metric, "=") + if len(m) < 2 { + continue + } i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - return values, err + log.Printf("E! 
powerdns: Error parsing integer for metric [%s]: %s", + metric, err) + continue } values[m[0]] = i } - return values, nil + return values } func init() { diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index b0d883d0b..78845c23d 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -25,6 +25,30 @@ var metrics = "corrupt-packets=0,deferred-cache-inserts=0,deferred-cache-lookup= "key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," + "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," +// first metric has no "=" +var corruptMetrics = "corrupt-packets--0,deferred-cache-inserts=0,deferred-cache-lookup=0," + + "dnsupdate-answers=0,dnsupdate-changes=0,dnsupdate-queries=0," + + "dnsupdate-refused=0,packetcache-hit=0,packetcache-miss=1,packetcache-size=0," + + "query-cache-hit=0,query-cache-miss=6,rd-queries=1,recursing-answers=0," + + "recursing-questions=0,recursion-unanswered=0,security-status=3," + + "servfail-packets=0,signatures=0,tcp-answers=0,tcp-queries=0," + + "timedout-packets=0,udp-answers=1,udp-answers-bytes=50,udp-do-queries=0," + + "udp-queries=0,udp4-answers=1,udp4-queries=1,udp6-answers=0,udp6-queries=0," + + "key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," + + "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," + +// integer overflow +var intOverflowMetrics = "corrupt-packets=18446744073709550195,deferred-cache-inserts=0,deferred-cache-lookup=0," + + "dnsupdate-answers=0,dnsupdate-changes=0,dnsupdate-queries=0," + + "dnsupdate-refused=0,packetcache-hit=0,packetcache-miss=1,packetcache-size=0," + + "query-cache-hit=0,query-cache-miss=6,rd-queries=1,recursing-answers=0," + + "recursing-questions=0,recursion-unanswered=0,security-status=3," + + "servfail-packets=0,signatures=0,tcp-answers=0,tcp-queries=0," + + "timedout-packets=0,udp-answers=1,udp-answers-bytes=50,udp-do-queries=0," + + "udp-queries=0,udp4-answers=1,udp4-queries=1,udp6-answers=0,udp6-queries=0," + + "key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," + + "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," + func (s statServer) serverSocket(l net.Listener) { for { @@ -86,8 +110,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) { } func TestPowerdnsParseMetrics(t *testing.T) { - values, err := parseResponse(metrics) - require.NoError(t, err, "Error parsing memcached response") + values := parseResponse(metrics) tests := []struct { key string @@ -145,3 +168,121 @@ func TestPowerdnsParseMetrics(t *testing.T) { } } } + +func TestPowerdnsParseCorruptMetrics(t *testing.T) { + values := parseResponse(corruptMetrics) + + tests := []struct { + key string + value int64 + }{ + {"deferred-cache-inserts", 0}, + {"deferred-cache-lookup", 0}, + {"dnsupdate-answers", 0}, + {"dnsupdate-changes", 0}, + {"dnsupdate-queries", 0}, + {"dnsupdate-refused", 0}, + {"packetcache-hit", 0}, + {"packetcache-miss", 1}, + {"packetcache-size", 0}, + {"query-cache-hit", 0}, + {"query-cache-miss", 6}, + {"rd-queries", 1}, + {"recursing-answers", 0}, + {"recursing-questions", 0}, + {"recursion-unanswered", 0}, + {"security-status", 3}, + {"servfail-packets", 0}, + {"signatures", 0}, + {"tcp-answers", 0}, + {"tcp-queries", 0}, + {"timedout-packets", 0}, + {"udp-answers", 1}, + {"udp-answers-bytes", 50}, + {"udp-do-queries", 0}, + {"udp-queries", 0}, + {"udp4-answers", 1}, + {"udp4-queries", 1}, + {"udp6-answers", 0}, + {"udp6-queries", 0}, + {"key-cache-size", 0}, + {"latency", 26}, 
+ {"meta-cache-size", 0}, + {"qsize-q", 0}, + {"signature-cache-size", 0}, + {"sys-msec", 2889}, + {"uptime", 86317}, + {"user-msec", 2167}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} + +func TestPowerdnsParseIntOverflowMetrics(t *testing.T) { + values := parseResponse(intOverflowMetrics) + + tests := []struct { + key string + value int64 + }{ + {"deferred-cache-inserts", 0}, + {"deferred-cache-lookup", 0}, + {"dnsupdate-answers", 0}, + {"dnsupdate-changes", 0}, + {"dnsupdate-queries", 0}, + {"dnsupdate-refused", 0}, + {"packetcache-hit", 0}, + {"packetcache-miss", 1}, + {"packetcache-size", 0}, + {"query-cache-hit", 0}, + {"query-cache-miss", 6}, + {"rd-queries", 1}, + {"recursing-answers", 0}, + {"recursing-questions", 0}, + {"recursion-unanswered", 0}, + {"security-status", 3}, + {"servfail-packets", 0}, + {"signatures", 0}, + {"tcp-answers", 0}, + {"tcp-queries", 0}, + {"timedout-packets", 0}, + {"udp-answers", 1}, + {"udp-answers-bytes", 50}, + {"udp-do-queries", 0}, + {"udp-queries", 0}, + {"udp4-answers", 1}, + {"udp4-queries", 1}, + {"udp6-answers", 0}, + {"udp6-queries", 0}, + {"key-cache-size", 0}, + {"latency", 26}, + {"meta-cache-size", 0}, + {"qsize-q", 0}, + {"signature-cache-size", 0}, + {"sys-msec", 2889}, + {"uptime", 86317}, + {"user-msec", 2167}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 358dc4c0f..e29b5031c 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -66,7 +66,7 @@ func (_ *Procstat) Description() string { func (p *Procstat) Gather(acc telegraf.Accumulator) error { err := p.createProcesses() if err != nil { - log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error()) } else { for pid, proc := range p.pidmap { diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go index 3789e99d0..5143d8bcc 100644 --- a/plugins/inputs/procstat/spec_processor.go +++ b/plugins/inputs/procstat/spec_processor.go @@ -71,7 +71,7 @@ func (p *SpecProcessor) pushMetrics() { fields[prefix+"read_count"] = io.ReadCount fields[prefix+"write_count"] = io.WriteCount fields[prefix+"read_bytes"] = io.ReadBytes - fields[prefix+"write_bytes"] = io.WriteCount + fields[prefix+"write_bytes"] = io.WriteBytes } cpu_time, err := p.proc.Times() diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 8a879d179..237f71c66 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -107,7 +107,8 @@ type Queue struct { Node string Vhost string Durable bool - AutoDelete bool `json:"auto_delete"` + AutoDelete bool `json:"auto_delete"` + IdleSince string `json:"idle_since"` } // Node ... 
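A note on the powerdns change shown a few hunks above: the corrupt-metrics and integer-overflow fixtures exercise the new tolerant path in `parseResponse`, which now logs and skips entries that cannot be split on `=` or parsed into an int64, instead of failing the whole gather. Below is a minimal standalone sketch of that skip-on-error pattern; the function name and the simplified `map[string]int64` result type are illustrative, not the plugin's actual code (the plugin also emits an `E!` log line before skipping).

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTolerant mirrors the skip-on-error behavior of parseResponse:
// malformed or unparseable entries are dropped rather than aborting.
func parseTolerant(metrics string) map[string]int64 {
	values := make(map[string]int64)
	for _, metric := range strings.Split(metrics, ",") {
		if metric == "" {
			continue // trailing comma produces an empty element
		}
		m := strings.Split(metric, "=")
		if len(m) < 2 {
			continue // e.g. "corrupt-packets--0" has no "="
		}
		i, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			continue // e.g. 18446744073709550195 overflows int64
		}
		values[m[0]] = i
	}
	return values
}

func main() {
	got := parseTolerant("corrupt-packets--0,uptime=86317,big=18446744073709550195,latency=26,")
	fmt.Println(got) // map[latency:26 uptime:86317]
}
```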
@@ -328,6 +329,7 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { // common information "consumers": queue.Consumers, "consumer_utilisation": queue.ConsumerUtilisation, + "idle_since": queue.IdleSince, "memory": queue.Memory, // messages information "message_bytes": queue.MessageBytes, diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index b08eedee3..98a6bc659 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -44,40 +44,9 @@ func (r *Redis) Description() string { } var Tracking = map[string]string{ - "uptime_in_seconds": "uptime", - "connected_clients": "clients", - "used_memory": "used_memory", - "used_memory_rss": "used_memory_rss", - "used_memory_peak": "used_memory_peak", - "used_memory_lua": "used_memory_lua", - "rdb_changes_since_last_save": "rdb_changes_since_last_save", - "total_connections_received": "total_connections_received", - "total_commands_processed": "total_commands_processed", - "instantaneous_ops_per_sec": "instantaneous_ops_per_sec", - "instantaneous_input_kbps": "instantaneous_input_kbps", - "instantaneous_output_kbps": "instantaneous_output_kbps", - "sync_full": "sync_full", - "sync_partial_ok": "sync_partial_ok", - "sync_partial_err": "sync_partial_err", - "expired_keys": "expired_keys", - "evicted_keys": "evicted_keys", - "keyspace_hits": "keyspace_hits", - "keyspace_misses": "keyspace_misses", - "pubsub_channels": "pubsub_channels", - "pubsub_patterns": "pubsub_patterns", - "latest_fork_usec": "latest_fork_usec", - "connected_slaves": "connected_slaves", - "master_repl_offset": "master_repl_offset", - "master_last_io_seconds_ago": "master_last_io_seconds_ago", - "repl_backlog_active": "repl_backlog_active", - "repl_backlog_size": "repl_backlog_size", - "repl_backlog_histlen": "repl_backlog_histlen", - "mem_fragmentation_ratio": "mem_fragmentation_ratio", - "used_cpu_sys": "used_cpu_sys", - "used_cpu_user": "used_cpu_user", - "used_cpu_sys_children": "used_cpu_sys_children", - "used_cpu_user_children": "used_cpu_user_children", - "role": "replication_role", + "uptime_in_seconds": "uptime", + "connected_clients": "clients", + "role": "replication_role", } var ErrProtocolError = errors.New("redis protocol error") @@ -188,6 +157,7 @@ func gatherInfoOutput( acc telegraf.Accumulator, tags map[string]string, ) error { + var section string var keyspace_hits, keyspace_misses uint64 = 0, 0 scanner := bufio.NewScanner(rdr) @@ -198,7 +168,13 @@ func gatherInfoOutput( break } - if len(line) == 0 || line[0] == '#' { + if len(line) == 0 { + continue + } + if line[0] == '#' { + if len(line) > 2 { + section = line[2:] + } continue } @@ -206,42 +182,69 @@ func gatherInfoOutput( if len(parts) < 2 { continue } - name := string(parts[0]) - metric, ok := Tracking[name] - if !ok { - kline := strings.TrimSpace(string(parts[1])) - gatherKeyspaceLine(name, kline, acc, tags) + + if section == "Server" { + if name != "lru_clock" && name != "uptime_in_seconds" { + continue + } + } + + if name == "mem_allocator" { continue } + if strings.HasSuffix(name, "_human") { + continue + } + + metric, ok := Tracking[name] + if !ok { + if section == "Keyspace" { + kline := strings.TrimSpace(string(parts[1])) + gatherKeyspaceLine(name, kline, acc, tags) + continue + } + metric = name + } + val := strings.TrimSpace(parts[1]) - ival, err := strconv.ParseUint(val, 10, 64) - if name == "keyspace_hits" { - keyspace_hits = ival + // Try parsing as a uint + if ival, err := strconv.ParseUint(val, 10, 64); err == nil { + 
switch name { + case "keyspace_hits": + keyspace_hits = ival + case "keyspace_misses": + keyspace_misses = ival + case "rdb_last_save_time": + // influxdb can't calculate this, so we have to do it + fields["rdb_last_save_time_elapsed"] = uint64(time.Now().Unix()) - ival + } + fields[metric] = ival + continue } - if name == "keyspace_misses" { - keyspace_misses = ival + // Try parsing as an int + if ival, err := strconv.ParseInt(val, 10, 64); err == nil { + fields[metric] = ival + continue } + // Try parsing as a float + if fval, err := strconv.ParseFloat(val, 64); err == nil { + fields[metric] = fval + continue + } + + // Treat it as a string + if name == "role" { tags["replication_role"] = val continue } - if err == nil { - fields[metric] = ival - continue - } - - fval, err := strconv.ParseFloat(val, 64) - if err != nil { - return err - } - - fields[metric] = fval + fields[metric] = val } var keyspace_hitrate float64 = 0.0 if keyspace_hits != 0 || keyspace_misses != 0 { diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 2e2fc1e37..cf62da0bd 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -5,8 +5,10 @@ import ( "fmt" "strings" "testing" + "time" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -37,40 +39,73 @@ func TestRedis_ParseMetrics(t *testing.T) { tags = map[string]string{"host": "redis.net", "replication_role": "master"} fields := map[string]interface{}{ - "uptime": uint64(238), - "clients": uint64(1), - "used_memory": uint64(1003936), - "used_memory_rss": uint64(811008), - "used_memory_peak": uint64(1003936), - "used_memory_lua": uint64(33792), - "rdb_changes_since_last_save": uint64(0), - "total_connections_received": uint64(2), - "total_commands_processed": uint64(1), - "instantaneous_ops_per_sec": uint64(0), - "sync_full": uint64(0), - "sync_partial_ok": uint64(0), - "sync_partial_err": uint64(0), - "expired_keys": uint64(0), - "evicted_keys": uint64(0), - "keyspace_hits": uint64(1), - "keyspace_misses": uint64(1), - "pubsub_channels": uint64(0), - "pubsub_patterns": uint64(0), - "latest_fork_usec": uint64(0), - "connected_slaves": uint64(0), - "master_repl_offset": uint64(0), - "repl_backlog_active": uint64(0), - "repl_backlog_size": uint64(1048576), - "repl_backlog_histlen": uint64(0), - "mem_fragmentation_ratio": float64(0.81), - "instantaneous_input_kbps": float64(876.16), - "instantaneous_output_kbps": float64(3010.23), - "used_cpu_sys": float64(0.14), - "used_cpu_user": float64(0.05), - "used_cpu_sys_children": float64(0.00), - "used_cpu_user_children": float64(0.00), - "keyspace_hitrate": float64(0.50), + "uptime": uint64(238), + "lru_clock": uint64(2364819), + "clients": uint64(1), + "client_longest_output_list": uint64(0), + "client_biggest_input_buf": uint64(0), + "blocked_clients": uint64(0), + "used_memory": uint64(1003936), + "used_memory_rss": uint64(811008), + "used_memory_peak": uint64(1003936), + "used_memory_lua": uint64(33792), + "mem_fragmentation_ratio": float64(0.81), + "loading": uint64(0), + "rdb_changes_since_last_save": uint64(0), + "rdb_bgsave_in_progress": uint64(0), + "rdb_last_save_time": uint64(1428427941), + "rdb_last_bgsave_status": "ok", + "rdb_last_bgsave_time_sec": int64(-1), + "rdb_current_bgsave_time_sec": int64(-1), + "aof_enabled": uint64(0), + "aof_rewrite_in_progress": uint64(0), + "aof_rewrite_scheduled": uint64(0), + "aof_last_rewrite_time_sec": int64(-1), + 
"aof_current_rewrite_time_sec": int64(-1), + "aof_last_bgrewrite_status": "ok", + "aof_last_write_status": "ok", + "total_connections_received": uint64(2), + "total_commands_processed": uint64(1), + "instantaneous_ops_per_sec": uint64(0), + "instantaneous_input_kbps": float64(876.16), + "instantaneous_output_kbps": float64(3010.23), + "rejected_connections": uint64(0), + "sync_full": uint64(0), + "sync_partial_ok": uint64(0), + "sync_partial_err": uint64(0), + "expired_keys": uint64(0), + "evicted_keys": uint64(0), + "keyspace_hits": uint64(1), + "keyspace_misses": uint64(1), + "pubsub_channels": uint64(0), + "pubsub_patterns": uint64(0), + "latest_fork_usec": uint64(0), + "connected_slaves": uint64(0), + "master_repl_offset": uint64(0), + "repl_backlog_active": uint64(0), + "repl_backlog_size": uint64(1048576), + "repl_backlog_first_byte_offset": uint64(0), + "repl_backlog_histlen": uint64(0), + "used_cpu_sys": float64(0.14), + "used_cpu_user": float64(0.05), + "used_cpu_sys_children": float64(0.00), + "used_cpu_user_children": float64(0.00), + "keyspace_hitrate": float64(0.50), } + + // We have to test rdb_last_save_time_offset manually because the value is based on the time when gathered + for _, m := range acc.Metrics { + for k, v := range m.Fields { + if k == "rdb_last_save_time_elapsed" { + fields[k] = v + } + } + } + assert.InDelta(t, + uint64(time.Now().Unix())-fields["rdb_last_save_time"].(uint64), + fields["rdb_last_save_time_elapsed"].(uint64), + 2) // allow for 2 seconds worth of offset + keyspaceTags := map[string]string{"host": "redis.net", "replication_role": "master", "database": "db0"} keyspaceFields := map[string]interface{}{ "avg_ttl": uint64(0), diff --git a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md new file mode 100644 index 000000000..237a9b789 --- /dev/null +++ b/plugins/inputs/sensors/README.md @@ -0,0 +1,47 @@ +# sensors Input Plugin + +Collect [lm-sensors](https://en.wikipedia.org/wiki/Lm_sensors) metrics - requires the lm-sensors +package installed. + +This plugin collects sensor metrics with the `sensors` executable from the lm-sensor package. + +### Configuration: +``` +# Monitor sensors, requires lm-sensors package +[[inputs.sensors]] + ## Remove numbers from field names. + ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. + # remove_numbers = true +``` + +### Measurements & Fields: +Fields are created dynamicaly depending on the sensors. All fields are float. 
+ +### Tags: + +- All measurements have the following tags: + - chip + - feature + +### Example Output: + +#### Default +``` +$ telegraf -config telegraf.conf -input-filter sensors -test +* Plugin: sensors, Collection 1 +> sensors,chip=power_meter-acpi-0,feature=power1 power_average=0,power_average_interval=300 1466751326000000000 +> sensors,chip=k10temp-pci-00c3,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29,temp_max=70 1466751326000000000 +> sensors,chip=k10temp-pci-00cb,feature=temp1 temp_input=29,temp_max=70 1466751326000000000 +> sensors,chip=k10temp-pci-00d3,feature=temp1 temp_input=27.5,temp_max=70 1466751326000000000 +> sensors,chip=k10temp-pci-00db,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29.5,temp_max=70 1466751326000000000 +``` + +#### With remove_numbers=false +``` +* Plugin: sensors, Collection 1 +> sensors,chip=power_meter-acpi-0,feature=power1 power1_average=0,power1_average_interval=300 1466753424000000000 +> sensors,chip=k10temp-pci-00c3,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=29.125,temp1_max=70 1466753424000000000 +> sensors,chip=k10temp-pci-00cb,feature=temp1 temp1_input=29,temp1_max=70 1466753424000000000 +> sensors,chip=k10temp-pci-00d3,feature=temp1 temp1_input=29.5,temp1_max=70 1466753424000000000 +> sensors,chip=k10temp-pci-00db,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=30,temp1_max=70 1466753424000000000 +``` diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index dbb304b71..1caf6ba59 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,91 +1,118 @@ -// +build linux,sensors +// +build linux package sensors import ( + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" "strings" - - "github.com/md14454/gosensors" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +var ( + execCommand = exec.Command // execCommand is used to mock commands in tests. + numberRegp = regexp.MustCompile("[0-9]+") +) + type Sensors struct { - Sensors []string + RemoveNumbers bool `toml:"remove_numbers"` + path string } -func (_ *Sensors) Description() string { - return "Monitor sensors using lm-sensors package" +func (*Sensors) Description() string { + return "Monitor sensors, requires lm-sensors package" } -var sensorsSampleConfig = ` - ## By default, telegraf gathers stats from all sensors detected by the - ## lm-sensors module. - ## - ## Only collect stats from the selected sensors. Sensors are listed as - ## :. This information can be found by running the - ## sensors command, e.g. sensors -u - ## - ## A * as the feature name will return all features of the chip - ## - # sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"] +func (*Sensors) SampleConfig() string { + return ` + ## Remove numbers from field names. + ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. 
+ # remove_numbers = true ` -func (_ *Sensors) SampleConfig() string { - return sensorsSampleConfig } func (s *Sensors) Gather(acc telegraf.Accumulator) error { - gosensors.Init() - defer gosensors.Cleanup() - - for _, chip := range gosensors.GetDetectedChips() { - for _, feature := range chip.GetFeatures() { - chipName := chip.String() - featureLabel := feature.GetLabel() - - if len(s.Sensors) != 0 { - var found bool - - for _, sensor := range s.Sensors { - parts := strings.SplitN(sensor, ":", 2) - - if parts[0] == chipName { - if parts[1] == "*" || parts[1] == featureLabel { - found = true - break - } - } - } - - if !found { - continue - } - } - - tags := map[string]string{ - "chip": chipName, - "adapter": chip.AdapterName(), - "feature-name": feature.Name, - "feature-label": featureLabel, - } - - fieldName := chipName + ":" + featureLabel - - fields := map[string]interface{}{ - fieldName: feature.GetValue(), - } - - acc.AddFields("sensors", fields, tags) - } + if len(s.path) == 0 { + return errors.New("sensors not found: verify that lm-sensors package is installed and that sensors is in your PATH") } + return s.parse(acc) +} + +// parse forks the command: +// sensors -u -A +// and parses the output to add it to the telegraf.Accumulator. +func (s *Sensors) parse(acc telegraf.Accumulator) error { + tags := map[string]string{} + fields := map[string]interface{}{} + chip := "" + cmd := execCommand(s.path, "-A", "-u") + out, err := internal.CombinedOutputTimeout(cmd, time.Second*5) + if err != nil { + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + } + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + for _, line := range lines { + if len(line) == 0 { + acc.AddFields("sensors", fields, tags) + chip = "" + tags = map[string]string{} + fields = map[string]interface{}{} + continue + } + if len(chip) == 0 { + chip = line + tags["chip"] = chip + continue + } + if !strings.HasPrefix(line, " ") { + if len(tags) > 1 { + acc.AddFields("sensors", fields, tags) + } + fields = map[string]interface{}{} + tags = map[string]string{ + "chip": chip, + "feature": strings.TrimRight(snake(line), ":"), + } + } else { + splitted := strings.Split(line, ":") + fieldName := strings.TrimSpace(splitted[0]) + if s.RemoveNumbers { + fieldName = numberRegp.ReplaceAllString(fieldName, "") + } + fieldValue, err := strconv.ParseFloat(strings.TrimSpace(splitted[1]), 64) + if err != nil { + return err + } + fields[fieldName] = fieldValue + } + } + acc.AddFields("sensors", fields, tags) return nil } func init() { + s := Sensors{ + RemoveNumbers: true, + } + path, _ := exec.LookPath("sensors") + if len(path) > 0 { + s.path = path + } inputs.Add("sensors", func() telegraf.Input { - return &Sensors{} + return &s }) } + +// snake converts string to snake case +func snake(input string) string { + return strings.ToLower(strings.Replace(strings.TrimSpace(input), " ", "_", -1)) +} diff --git a/plugins/inputs/sensors/sensors_nocompile.go b/plugins/inputs/sensors/sensors_nocompile.go deleted file mode 100644 index 5c38a437b..000000000 --- a/plugins/inputs/sensors/sensors_nocompile.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux !sensors - -package sensors diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go new file mode 100644 index 000000000..62a621159 --- /dev/null +++ b/plugins/inputs/sensors/sensors_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package sensors diff --git a/plugins/inputs/sensors/sensors_test.go 
b/plugins/inputs/sensors/sensors_test.go new file mode 100644 index 000000000..7e6cac95a --- /dev/null +++ b/plugins/inputs/sensors/sensors_test.go @@ -0,0 +1,381 @@ +// +build linux + +package sensors + +import ( + "fmt" + "os" + "os/exec" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +func TestGatherDefault(t *testing.T) { + s := Sensors{ + RemoveNumbers: true, + path: "sensors", + } + // overwriting exec commands with mock commands + execCommand = fakeExecCommand + defer func() { execCommand = exec.Command }() + var acc testutil.Accumulator + + err := s.Gather(&acc) + if err != nil { + t.Fatal(err) + } + + var tests = []struct { + tags map[string]string + fields map[string]interface{} + }{ + { + map[string]string{ + "chip": "acpitz-virtual-0", + "feature": "temp1", + }, + map[string]interface{}{ + "temp_input": 8.3, + "temp_crit": 31.3, + }, + }, + { + map[string]string{ + "chip": "power_meter-acpi-0", + "feature": "power1", + }, + map[string]interface{}{ + "power_average": 0.0, + "power_average_interval": 300.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0000", + "feature": "physical_id_0", + }, + map[string]interface{}{ + "temp_input": 77.0, + "temp_max": 82.0, + "temp_crit": 92.0, + "temp_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0000", + "feature": "core_0", + }, + map[string]interface{}{ + "temp_input": 75.0, + "temp_max": 82.0, + "temp_crit": 92.0, + "temp_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0000", + "feature": "core_1", + }, + map[string]interface{}{ + "temp_input": 77.0, + "temp_max": 82.0, + "temp_crit": 92.0, + "temp_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0001", + "feature": "physical_id_1", + }, + map[string]interface{}{ + "temp_input": 70.0, + "temp_max": 82.0, + "temp_crit": 92.0, + "temp_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0001", + "feature": "core_0", + }, + map[string]interface{}{ + "temp_input": 66.0, + "temp_max": 82.0, + "temp_crit": 92.0, + "temp_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0001", + "feature": "core_1", + }, + map[string]interface{}{ + "temp_input": 70.0, + "temp_max": 82.0, + "temp_crit": 92.0, + "temp_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "atk0110-acpi-0", + "feature": "vcore_voltage", + }, + map[string]interface{}{ + "in_input": 1.136, + "in_min": 0.800, + "in_max": 1.600, + }, + }, + { + map[string]string{ + "chip": "atk0110-acpi-0", + "feature": "+3.3_voltage", + }, + map[string]interface{}{ + "in_input": 3.360, + "in_min": 2.970, + "in_max": 3.630, + }, + }, + } + + for _, test := range tests { + acc.AssertContainsTaggedFields(t, "sensors", test.fields, test.tags) + } +} + +func TestGatherNotRemoveNumbers(t *testing.T) { + s := Sensors{ + RemoveNumbers: false, + path: "sensors", + } + // overwriting exec commands with mock commands + execCommand = fakeExecCommand + defer func() { execCommand = exec.Command }() + var acc testutil.Accumulator + + err := s.Gather(&acc) + if err != nil { + t.Fatal(err) + } + + var tests = []struct { + tags map[string]string + fields map[string]interface{} + }{ + { + map[string]string{ + "chip": "acpitz-virtual-0", + "feature": "temp1", + }, + map[string]interface{}{ + "temp1_input": 8.3, + "temp1_crit": 31.3, + }, + }, + { + map[string]string{ + "chip": "power_meter-acpi-0", + "feature": "power1", + }, + map[string]interface{}{ + "power1_average": 0.0, + 
"power1_average_interval": 300.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0000", + "feature": "physical_id_0", + }, + map[string]interface{}{ + "temp1_input": 77.0, + "temp1_max": 82.0, + "temp1_crit": 92.0, + "temp1_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0000", + "feature": "core_0", + }, + map[string]interface{}{ + "temp2_input": 75.0, + "temp2_max": 82.0, + "temp2_crit": 92.0, + "temp2_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0000", + "feature": "core_1", + }, + map[string]interface{}{ + "temp3_input": 77.0, + "temp3_max": 82.0, + "temp3_crit": 92.0, + "temp3_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0001", + "feature": "physical_id_1", + }, + map[string]interface{}{ + "temp1_input": 70.0, + "temp1_max": 82.0, + "temp1_crit": 92.0, + "temp1_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0001", + "feature": "core_0", + }, + map[string]interface{}{ + "temp2_input": 66.0, + "temp2_max": 82.0, + "temp2_crit": 92.0, + "temp2_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "coretemp-isa-0001", + "feature": "core_1", + }, + map[string]interface{}{ + "temp3_input": 70.0, + "temp3_max": 82.0, + "temp3_crit": 92.0, + "temp3_crit_alarm": 0.0, + }, + }, + { + map[string]string{ + "chip": "atk0110-acpi-0", + "feature": "vcore_voltage", + }, + map[string]interface{}{ + "in0_input": 1.136, + "in0_min": 0.800, + "in0_max": 1.600, + }, + }, + { + map[string]string{ + "chip": "atk0110-acpi-0", + "feature": "+3.3_voltage", + }, + map[string]interface{}{ + "in1_input": 3.360, + "in1_min": 2.970, + "in1_max": 3.630, + }, + }, + } + for _, test := range tests { + acc.AssertContainsTaggedFields(t, "sensors", test.fields, test.tags) + } +} + +// fackeExecCommand is a helper function that mock +// the exec.Command call (and call the test binary) +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// TestHelperProcess isn't a real test. It's used to mock exec.Command +// For example, if you run: +// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking +// it returns below mockData. 
+func TestHelperProcess(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+
+ mockData := `acpitz-virtual-0
+temp1:
+ temp1_input: 8.300
+ temp1_crit: 31.300
+
+power_meter-acpi-0
+power1:
+ power1_average: 0.000
+ power1_average_interval: 300.000
+
+coretemp-isa-0000
+Physical id 0:
+ temp1_input: 77.000
+ temp1_max: 82.000
+ temp1_crit: 92.000
+ temp1_crit_alarm: 0.000
+Core 0:
+ temp2_input: 75.000
+ temp2_max: 82.000
+ temp2_crit: 92.000
+ temp2_crit_alarm: 0.000
+Core 1:
+ temp3_input: 77.000
+ temp3_max: 82.000
+ temp3_crit: 92.000
+ temp3_crit_alarm: 0.000
+
+coretemp-isa-0001
+Physical id 1:
+ temp1_input: 70.000
+ temp1_max: 82.000
+ temp1_crit: 92.000
+ temp1_crit_alarm: 0.000
+Core 0:
+ temp2_input: 66.000
+ temp2_max: 82.000
+ temp2_crit: 92.000
+ temp2_crit_alarm: 0.000
+Core 1:
+ temp3_input: 70.000
+ temp3_max: 82.000
+ temp3_crit: 92.000
+ temp3_crit_alarm: 0.000
+
+atk0110-acpi-0
+Vcore Voltage:
+ in0_input: 1.136
+ in0_min: 0.800
+ in0_max: 1.600
+
+3.3 Voltage:
+ in1_input: 3.360
+ in1_min: 2.970
+ in1_max: 3.630
+`
+
+ args := os.Args
+
+ // The preceding arguments belong to the test runner and look like:
+ // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
+ cmd, args := args[3], args[4:]
+
+ if cmd == "sensors" {
+ fmt.Fprint(os.Stdout, mockData)
+ } else {
+ fmt.Fprint(os.Stdout, "command not found")
+ os.Exit(1)
+ }
+ os.Exit(0)
+}
diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md
index bee783228..f8dbc7a31 100644
--- a/plugins/inputs/snmp/README.md
+++ b/plugins/inputs/snmp/README.md
@@ -1,549 +1,172 @@
-# SNMP Input Plugin
+# SNMP Plugin
 
-The SNMP input plugin gathers metrics from SNMP agents
+The SNMP input plugin gathers metrics from SNMP agents.
 
-### Configuration:
+## Configuration:
+### Example:
 
-#### Very simple example
-
-In this example, the plugin will gather value of OIDS:
-
- - `.1.3.6.1.2.1.2.2.1.4.1`
-
-```toml
-# Very Simple Example
-[[inputs.snmp]]
-
- [[inputs.snmp.host]]
- address = "127.0.0.1:161"
- # SNMP community
- community = "public" # default public
- # SNMP version (1, 2 or 3)
- # Version 3 not supported yet
- version = 2 # default 2
- # Simple list of OIDs to get, in addition to "collect"
- get_oids = [".1.3.6.1.2.1.2.2.1.4.1"]
+SNMP data:
+```
+.1.0.0.0.1.1.0 octet_str "foo"
+.1.0.0.0.1.1.1 octet_str "bar"
+.1.0.0.0.1.102 octet_str "bad"
+.1.0.0.0.1.2.0 integer 1
+.1.0.0.0.1.2.1 integer 2
+.1.0.0.0.1.3.0 octet_str "0.123"
+.1.0.0.0.1.3.1 octet_str "0.456"
+.1.0.0.0.1.3.2 octet_str "9.999"
+.1.0.0.1.1 octet_str "baz"
+.1.0.0.1.2 uinteger 54321
+.1.0.0.1.3 uinteger 234
```
-
-#### Simple example
-
-In this example, Telegraf gathers value of OIDS:
-
- - named **ifnumber**
- - named **interface_speed**
-
-With **inputs.snmp.get** section the plugin gets the oid number:
-
- - **ifnumber** => `.1.3.6.1.2.1.2.1.0`
- - **interface_speed** => *ifSpeed*
-
-As you can see *ifSpeed* is not a valid OID.
In order to get -the valid OID, the plugin uses `snmptranslate_file` to match the OID: - - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` - -Also as the plugin will append `instance` to the corresponding OID: - - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` - -In this example, the plugin will gather value of OIDS: - -- `.1.3.6.1.2.1.2.1.0` -- `.1.3.6.1.2.1.2.2.1.5.1` - - +Telegraf config: ```toml -# Simple example [[inputs.snmp]] - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "127.0.0.1:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # Which get/bulk do you want to collect for this host - collect = ["ifnumber", "interface_speed"] + agents = [ "127.0.0.1:161" ] + version = 2 + community = "public" - [[inputs.snmp.get]] - name = "ifnumber" - oid = ".1.3.6.1.2.1.2.1.0" + name = "system" + [[inputs.snmp.field]] + name = "hostname" + oid = ".1.0.0.1.1" + is_tag = true + [[inputs.snmp.field]] + name = "uptime" + oid = ".1.0.0.1.2" + [[inputs.snmp.field]] + name = "loadavg" + oid = ".1.0.0.1.3" + conversion = "float(2)" - [[inputs.snmp.get]] - name = "interface_speed" - oid = "ifSpeed" - instance = "1" - -``` - - -#### Simple bulk example - -In this example, Telegraf gathers value of OIDS: - - - named **ifnumber** - - named **interface_speed** - - named **if_out_octets** - -With **inputs.snmp.get** section the plugin gets oid number: - - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* - -With **inputs.snmp.bulk** section the plugin gets the oid number: - - - **if_out_octets** => *ifOutOctets* - -As you can see *ifSpeed* and *ifOutOctets* are not a valid OID. -In order to get the valid OID, the plugin uses `snmptranslate_file` -to match the OID: - - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` - - **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` - -Also, the plugin will append `instance` to the corresponding OID: - - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` - -And **if_out_octets** is a bulk request, the plugin will gathers all -OIDS in the table. 
- -- `.1.3.6.1.2.1.2.2.1.16.1` -- `.1.3.6.1.2.1.2.2.1.16.2` -- `.1.3.6.1.2.1.2.2.1.16.3` -- `.1.3.6.1.2.1.2.2.1.16.4` -- `.1.3.6.1.2.1.2.2.1.16.5` -- `...` - -In this example, the plugin will gather value of OIDS: - -- `.1.3.6.1.2.1.2.1.0` -- `.1.3.6.1.2.1.2.2.1.5.1` -- `.1.3.6.1.2.1.2.2.1.16.1` -- `.1.3.6.1.2.1.2.2.1.16.2` -- `.1.3.6.1.2.1.2.2.1.16.3` -- `.1.3.6.1.2.1.2.2.1.16.4` -- `.1.3.6.1.2.1.2.2.1.16.5` -- `...` - - -```toml -# Simple bulk example -[[inputs.snmp]] - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "127.0.0.1:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # Which get/bulk do you want to collect for this host - collect = ["interface_speed", "if_number", "if_out_octets"] - - [[inputs.snmp.get]] - name = "interface_speed" - oid = "ifSpeed" - instance = "1" - - [[inputs.snmp.get]] - name = "if_number" - oid = "ifNumber" - - [[inputs.snmp.bulk]] - name = "if_out_octets" - oid = "ifOutOctets" -``` - - -#### Table example - -In this example, we remove collect attribute to the host section, -but you can still use it in combination of the following part. - -Note: This example is like a bulk request a but using an -other configuration - -Telegraf gathers value of OIDS of the table: - - - named **iftable1** - -With **inputs.snmp.table** section the plugin gets oid number: - - - **iftable1** => `.1.3.6.1.2.1.31.1.1.1` - -Also **iftable1** is a table, the plugin will gathers all -OIDS in the table and in the subtables - -- `.1.3.6.1.2.1.31.1.1.1.1` -- `.1.3.6.1.2.1.31.1.1.1.1.1` -- `.1.3.6.1.2.1.31.1.1.1.1.2` -- `.1.3.6.1.2.1.31.1.1.1.1.3` -- `.1.3.6.1.2.1.31.1.1.1.1.4` -- `.1.3.6.1.2.1.31.1.1.1.1....` -- `.1.3.6.1.2.1.31.1.1.1.2` -- `.1.3.6.1.2.1.31.1.1.1.2....` -- `.1.3.6.1.2.1.31.1.1.1.3` -- `.1.3.6.1.2.1.31.1.1.1.3....` -- `.1.3.6.1.2.1.31.1.1.1.4` -- `.1.3.6.1.2.1.31.1.1.1.4....` -- `.1.3.6.1.2.1.31.1.1.1.5` -- `.1.3.6.1.2.1.31.1.1.1.5....` -- `.1.3.6.1.2.1.31.1.1.1.6....` -- `...` - -```toml -# Table example -[[inputs.snmp]] - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "127.0.0.1:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # Which get/bulk do you want to collect for this host - # Which table do you want to collect - [[inputs.snmp.host.table]] - name = "iftable1" - - # table without mapping neither subtables - # This is like bulk request [[inputs.snmp.table]] - name = "iftable1" - oid = ".1.3.6.1.2.1.31.1.1.1" + name = "remote_servers" + inherit_tags = [ "hostname" ] + [[inputs.snmp.table.field]] + name = "server" + oid = ".1.0.0.0.1.1" + is_tag = true + [[inputs.snmp.table.field]] + name = "connections" + oid = ".1.0.0.0.1.2" + [[inputs.snmp.table.field]] + name = "latency" + oid = ".1.0.0.0.1.3" + conversion = 
"float" ``` +Resulting output: +``` +* Plugin: snmp, Collection 1 +> system,agent_host=127.0.0.1,host=mylocalhost,hostname=baz loadavg=2.34,uptime=54321i 1468953135000000000 +> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency=0.123 1468953135000000000 +> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency=0.456 1468953135000000000 +``` -#### Table with subtable example - -In this example, we remove collect attribute to the host section, -but you can still use it in combination of the following part. - -Note: This example is like a bulk request a but using an -other configuration - -Telegraf gathers value of OIDS of the table: - - - named **iftable2** - -With **inputs.snmp.table** section *AND* **sub_tables** attribute, -the plugin will get OIDS from subtables: - - - **iftable2** => `.1.3.6.1.2.1.2.2.1.13` - -Also **iftable2** is a table, the plugin will gathers all -OIDS in subtables: - -- `.1.3.6.1.2.1.2.2.1.13.1` -- `.1.3.6.1.2.1.2.2.1.13.2` -- `.1.3.6.1.2.1.2.2.1.13.3` -- `.1.3.6.1.2.1.2.2.1.13.4` -- `.1.3.6.1.2.1.2.2.1.13....` +#### Configuration via MIB: +This example uses the SNMP data above, but is configured via the MIB. +The example MIB file can be found in the `testdata` directory. See the [MIB lookups](#mib-lookups) section for more information. +Telegraf config: ```toml -# Table with subtable example [[inputs.snmp]] - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "127.0.0.1:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # Which table do you want to collect - [[inputs.snmp.host.table]] - name = "iftable2" + agents = [ "127.0.0.1:161" ] + version = 2 + community = "public" + + [[inputs.snmp.field]] + oid = "TEST::hostname" + is_tag = true - # table without mapping but with subtables [[inputs.snmp.table]] - name = "iftable2" - sub_tables = [".1.3.6.1.2.1.2.2.1.13"] - # note - # oid attribute is useless + oid = "TEST::testTable" + inherit_tags = "hostname" ``` - -#### Table with mapping example - -In this example, we remove collect attribute to the host section, -but you can still use it in combination of the following part. - -Telegraf gathers value of OIDS of the table: - - - named **iftable3** - -With **inputs.snmp.table** section the plugin gets oid number: - - - **iftable3** => `.1.3.6.1.2.1.31.1.1.1` - -Also **iftable2** is a table, the plugin will gathers all -OIDS in the table and in the subtables - -- `.1.3.6.1.2.1.31.1.1.1.1` -- `.1.3.6.1.2.1.31.1.1.1.1.1` -- `.1.3.6.1.2.1.31.1.1.1.1.2` -- `.1.3.6.1.2.1.31.1.1.1.1.3` -- `.1.3.6.1.2.1.31.1.1.1.1.4` -- `.1.3.6.1.2.1.31.1.1.1.1....` -- `.1.3.6.1.2.1.31.1.1.1.2` -- `.1.3.6.1.2.1.31.1.1.1.2....` -- `.1.3.6.1.2.1.31.1.1.1.3` -- `.1.3.6.1.2.1.31.1.1.1.3....` -- `.1.3.6.1.2.1.31.1.1.1.4` -- `.1.3.6.1.2.1.31.1.1.1.4....` -- `.1.3.6.1.2.1.31.1.1.1.5` -- `.1.3.6.1.2.1.31.1.1.1.5....` -- `.1.3.6.1.2.1.31.1.1.1.6....` -- `...` - -But the **include_instances** attribute will filter which OIDS -will be gathered; As you see, there is an other attribute, `mapping_table`. 
-`include_instances` and `mapping_table` permit to build a hash table -to filter only OIDS you want. -Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` - -The plugin will build the following hash table: - -| instance name | instance id | -|---------------|-------------| -| `enp5s0` | `1` | -| `enp5s1` | `2` | -| `enp5s2` | `3` | -| `eth0` | `4` | -| `eth1` | `5` | - -With the **include_instances** attribute, the plugin will gather -the following OIDS: - -- `.1.3.6.1.2.1.31.1.1.1.1.1` -- `.1.3.6.1.2.1.31.1.1.1.1.5` -- `.1.3.6.1.2.1.31.1.1.1.2.1` -- `.1.3.6.1.2.1.31.1.1.1.2.5` -- `.1.3.6.1.2.1.31.1.1.1.3.1` -- `.1.3.6.1.2.1.31.1.1.1.3.5` -- `.1.3.6.1.2.1.31.1.1.1.4.1` -- `.1.3.6.1.2.1.31.1.1.1.4.5` -- `.1.3.6.1.2.1.31.1.1.1.5.1` -- `.1.3.6.1.2.1.31.1.1.1.5.5` -- `.1.3.6.1.2.1.31.1.1.1.6.1` -- `.1.3.6.1.2.1.31.1.1.1.6.5` -- `...` - -Note: the plugin will add instance name as tag *instance* - -```toml -# Simple table with mapping example -[[inputs.snmp]] - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "127.0.0.1:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # Which table do you want to collect - [[inputs.snmp.host.table]] - name = "iftable3" - include_instances = ["enp5s0", "eth1"] - - # table with mapping but without subtables - [[inputs.snmp.table]] - name = "iftable3" - oid = ".1.3.6.1.2.1.31.1.1.1" - # if empty. get all instances - mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" - # if empty, get all subtables +Resulting output: +``` +* Plugin: snmp, Collection 1 +> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency="0.123" 1468953135000000000 +> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency="0.456" 1468953135000000000 ``` +### Config parameters -#### Table with both mapping and subtable example +* `agents`: Default: `[]` +List of SNMP agents to connect to in the form of `IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`. -In this example, we remove collect attribute to the host section, -but you can still use it in combination of the following part. +* `version`: Default: `2` +SNMP protocol version to use. -Telegraf gathers value of OIDS of the table: +* `community`: Default: `"public"` +SNMP community to use. - - named **iftable4** +* `max_repetitions`: Default: `50` +Maximum number of iterations for repeating variables. -With **inputs.snmp.table** section *AND* **sub_tables** attribute, -the plugin will get OIDS from subtables: +* `sec_name`: +Security name for authenticated SNMPv3 requests. - - **iftable4** => `.1.3.6.1.2.1.31.1.1.1` +* `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""` +Authentication protocol for authenticated SNMPv3 requests. 
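+
+For reference, a v3 configuration combining the authentication parameters
+above and below might look like the following sketch (all values are
+placeholders, not defaults):
+
+```toml
+[[inputs.snmp]]
+  agents = [ "127.0.0.1:161" ]
+  version = 3
+  sec_name = "myuser"
+  auth_protocol = "SHA"
+  auth_password = "pass"
+  sec_level = "authNoPriv"
+```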
-Also **iftable2** is a table, the plugin will gathers all -OIDS in the table and in the subtables +* `auth_password`: +Authentication password for authenticated SNMPv3 requests. -- `.1.3.6.1.2.1.31.1.1.1.6.1 -- `.1.3.6.1.2.1.31.1.1.1.6.2` -- `.1.3.6.1.2.1.31.1.1.1.6.3` -- `.1.3.6.1.2.1.31.1.1.1.6.4` -- `.1.3.6.1.2.1.31.1.1.1.6....` -- `.1.3.6.1.2.1.31.1.1.1.10.1` -- `.1.3.6.1.2.1.31.1.1.1.10.2` -- `.1.3.6.1.2.1.31.1.1.1.10.3` -- `.1.3.6.1.2.1.31.1.1.1.10.4` -- `.1.3.6.1.2.1.31.1.1.1.10....` +* `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"` +Security level used for SNMPv3 messages. -But the **include_instances** attribute will filter which OIDS -will be gathered; As you see, there is an other attribute, `mapping_table`. -`include_instances` and `mapping_table` permit to build a hash table -to filter only OIDS you want. -Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` +* `context_name`: +Context name used for SNMPv3 requests. -The plugin will build the following hash table: +* `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""` +Privacy protocol used for encrypted SNMPv3 messages. -| instance name | instance id | -|---------------|-------------| -| `enp5s0` | `1` | -| `enp5s1` | `2` | -| `enp5s2` | `3` | -| `eth0` | `4` | -| `eth1` | `5` | - -With the **include_instances** attribute, the plugin will gather -the following OIDS: - -- `.1.3.6.1.2.1.31.1.1.1.6.1` -- `.1.3.6.1.2.1.31.1.1.1.6.5` -- `.1.3.6.1.2.1.31.1.1.1.10.1` -- `.1.3.6.1.2.1.31.1.1.1.10.5` - -Note: the plugin will add instance name as tag *instance* +* `priv_password`: +Privacy password used for encrypted SNMPv3 messages. +* `name`: +Output measurement name. -```toml -# Table with both mapping and subtable example -[[inputs.snmp]] - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "127.0.0.1:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # Which table do you want to collect - [[inputs.snmp.host.table]] - name = "iftable4" - include_instances = ["enp5s0", "eth1"] +#### Field parameters: +* `oid`: +OID to get. May be a numeric or textual OID. - # table with both mapping and subtables - [[inputs.snmp.table]] - name = "iftable4" - # if empty get all instances - mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" - # if empty get all subtables - # sub_tables could be not "real subtables" - sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] - # note - # oid attribute is useless +* `oid_index_suffix`: +The OID sub-identifier to strip off so that the index can be matched against other fields in the table. - # SNMP SUBTABLES - [[inputs.snmp.subtable]] - name = "bytes_recv" - oid = ".1.3.6.1.2.1.31.1.1.1.6" - unit = "octets" +* `name`: +Output field/tag name. +If not specified, it defaults to the value of `oid`. 
If `oid` is numeric, an attempt to translate the numeric OID into a textual OID will be made.
- [[inputs.snmp.subtable]]
- name = "bytes_recv"
- oid = ".1.3.6.1.2.1.31.1.1.1.6"
- unit = "octets"
+* `is_tag`:
+Output this field as a tag.
- [[inputs.snmp.subtable]]
- name = "bytes_send"
- oid = ".1.3.6.1.2.1.31.1.1.1.10"
- unit = "octets"
-```
+* `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`"hwaddr"`,`"ipaddr"`,`""`. Default: `""`
+Converts the value according to the given specification.
-#### Configuration notes
+ - `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal point left X places. For example a value of `123` with `float(2)` will result in `1.23`.
-
-In **inputs.snmp.table** section, the `oid` attribute is useless if
+ - `float`: Converts the value into a float with no adjustment. Same as `float(0)`.
- the `sub_tables` attributes is defined
+ - `int`: Converts the value into an integer.
+ - `hwaddr`: Converts the value to a MAC address.
-
-In **inputs.snmp.subtable** section, you can put a name from `snmptranslate_file`
+ - `ipaddr`: Converts the value to an IP address.
- as `oid` attribute instead of a valid OID
+#### Table parameters:
+* `oid`:
+Automatically populates the table's fields using data from the MIB.
-### Measurements & Fields:
+* `name`:
+Output measurement name.
+If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a textual OID will be made.
-With the last example (Table with both mapping and subtable example):
+* `inherit_tags`:
+Which tags to inherit from the top-level config and to use in the output of this table's measurement.
-- ifHCOutOctets
- - ifHCOutOctets
-- ifInDiscards
- - ifInDiscards
-- ifHCInOctets
- - ifHCInOctets
+### MIB lookups
+If the plugin is configured such that it needs to perform lookups from the MIB, it will use the net-snmp utilities `snmptranslate` and `snmptable`.
-### Tags:
-
-With the last example (Table with both mapping and subtable example):
-
-- ifHCOutOctets
- - host
- - instance
- - unit
-- ifInDiscards
- - host
- - instance
-- ifHCInOctets
- - host
- - instance
- - unit
-
-### Example Output:
-
-With the last example (Table with both mapping and subtable example):
-
-```
-ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901
-ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264
-ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312
-```
+When performing the lookups, the plugin will load all available MIBs. If your MIB files are in a custom path, you may add the path using the `MIBDIRS` environment variable. See [`man 1 snmpcmd`](http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK) for more information on the variable.
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
index 3cbfa0db1..6ee257c81 100644
--- a/plugins/inputs/snmp/snmp.go
+++ b/plugins/inputs/snmp/snmp.go
@@ -1,818 +1,903 @@
 package snmp
 
 import (
- "io/ioutil"
- "log"
+ "bytes"
+ "fmt"
+ "math"
 "net"
+ "os/exec"
 "strconv"
 "strings"
 "time"
 
 "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal"
 "github.com/influxdata/telegraf/plugins/inputs"
 
 "github.com/soniah/gosnmp"
)
 
-// Snmp is a snmp plugin
-type Snmp struct {
- Host []Host
- Get []Data
- Bulk []Data
- Table []Table
- Subtable []Subtable
- SnmptranslateFile string
+const description = `Retrieves SNMP values from remote agents`
+const sampleConfig = `
+ agents = [ "127.0.0.1:161" ]
+ ## Timeout for each SNMP query.
+ timeout = "5s" + ## Number of retries to attempt within timeout. + retries = 3 + ## SNMP version, values can be 1, 2, or 3 + version = 2 - nameToOid map[string]string - initNode Node - subTableMap map[string]Subtable -} + ## SNMP community string. + community = "public" -type Host struct { - Address string - Community string - // SNMP version. Default 2 - Version int - // SNMP timeout, in seconds. 0 means no timeout - Timeout float64 - // SNMP retries - Retries int - // Data to collect (list of Data names) - Collect []string - // easy get oids - GetOids []string - // Table - Table []HostTable - // Oids - getOids []Data - bulkOids []Data - tables []HostTable - // array of processed oids - // to skip oid duplication - processedOids []string + ## The GETBULK max-repetitions parameter + max_repetitions = 10 - OidInstanceMapping map[string]map[string]string -} + ## SNMPv3 auth parameters + #sec_name = "myuser" + #auth_protocol = "md5" # Values: "MD5", "SHA", "" + #auth_password = "pass" + #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" + #context_name = "" + #priv_protocol = "" # Values: "DES", "AES", "" + #priv_password = "" -type Table struct { - // name = "iftable" - Name string - // oid = ".1.3.6.1.2.1.31.1.1.1" - Oid string - //if empty get all instances - //mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" - MappingTable string - // if empty get all subtables - // sub_tables could be not "real subtables" - //sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] - SubTables []string -} + ## measurement name + name = "system" + [[inputs.snmp.field]] + name = "hostname" + oid = ".1.0.0.1.1" + [[inputs.snmp.field]] + name = "uptime" + oid = ".1.0.0.1.2" + [[inputs.snmp.field]] + name = "load" + oid = ".1.0.0.1.3" + [[inputs.snmp.field]] + oid = "HOST-RESOURCES-MIB::hrMemorySize" -type HostTable struct { - // name = "iftable" - Name string - // Includes only these instances - // include_instances = ["eth0", "eth1"] - IncludeInstances []string - // Excludes only these instances - // exclude_instances = ["eth20", "eth21"] - ExcludeInstances []string - // From Table struct - oid string - mappingTable string - subTables []string -} - -// TODO find better names -type Subtable struct { - //name = "bytes_send" - Name string - //oid = ".1.3.6.1.2.1.31.1.1.1.10" - Oid string - //unit = "octets" - Unit string -} - -type Data struct { - Name string - // OID (could be numbers or name) - Oid string - // Unit - Unit string - // SNMP getbulk max repetition - MaxRepetition uint8 `toml:"max_repetition"` - // SNMP Instance (default 0) - // (only used with GET request and if - // OID is a name from snmptranslate file) - Instance string - // OID (only number) (used for computation) - rawOid string -} - -type Node struct { - id string - name string - subnodes map[string]Node -} - -var sampleConfig = ` - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "192.168.2.2:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # SNMP response timeout - timeout = 2.0 # default 2.0 - # SNMP request retries - retries = 2 # default 2 - # Which get/bulk do you want to 
collect for this host - collect = ["mybulk", "sysservices", "sysdescr"] - # Simple list of OIDs to get, in addition to "collect" - get_oids = [] - - [[inputs.snmp.host]] - address = "192.168.2.3:161" - community = "public" - version = 2 - timeout = 2.0 - retries = 2 - collect = ["mybulk"] - get_oids = [ - "ifNumber", - ".1.3.6.1.2.1.1.3.0", - ] - - [[inputs.snmp.get]] - name = "ifnumber" - oid = "ifNumber" - - [[inputs.snmp.get]] - name = "interface_speed" - oid = "ifSpeed" - instance = "0" - - [[inputs.snmp.get]] - name = "sysuptime" - oid = ".1.3.6.1.2.1.1.3.0" - unit = "second" - - [[inputs.snmp.bulk]] - name = "mybulk" - max_repetition = 127 - oid = ".1.3.6.1.2.1.1" - - [[inputs.snmp.bulk]] - name = "ifoutoctets" - max_repetition = 127 - oid = "ifOutOctets" - - [[inputs.snmp.host]] - address = "192.168.2.13:161" - #address = "127.0.0.1:161" - community = "public" - version = 2 - timeout = 2.0 - retries = 2 - #collect = ["mybulk", "sysservices", "sysdescr", "systype"] - collect = ["sysuptime" ] - [[inputs.snmp.host.table]] - name = "iftable3" - include_instances = ["enp5s0", "eth1"] - - # SNMP TABLEs - # table without mapping neither subtables [[inputs.snmp.table]] - name = "iftable1" - oid = ".1.3.6.1.2.1.31.1.1.1" + ## measurement name + name = "remote_servers" + inherit_tags = [ "hostname" ] + [[inputs.snmp.table.field]] + name = "server" + oid = ".1.0.0.0.1.0" + is_tag = true + [[inputs.snmp.table.field]] + name = "connections" + oid = ".1.0.0.0.1.1" + [[inputs.snmp.table.field]] + name = "latency" + oid = ".1.0.0.0.1.2" - # table without mapping but with subtables [[inputs.snmp.table]] - name = "iftable2" - oid = ".1.3.6.1.2.1.31.1.1.1" - sub_tables = [".1.3.6.1.2.1.2.2.1.13"] - - # table with mapping but without subtables - [[inputs.snmp.table]] - name = "iftable3" - oid = ".1.3.6.1.2.1.31.1.1.1" - # if empty. get all instances - mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" - # if empty, get all subtables - - # table with both mapping and subtables - [[inputs.snmp.table]] - name = "iftable4" - oid = ".1.3.6.1.2.1.31.1.1.1" - # if empty get all instances - mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" - # if empty get all subtables - # sub_tables could be not "real subtables" - sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + ## auto populate table's fields using the MIB + oid = "HOST-RESOURCES-MIB::hrNetworkTable" ` -// SampleConfig returns sample configuration message -func (s *Snmp) SampleConfig() string { - return sampleConfig +// execCommand is so tests can mock out exec.Command usage. +var execCommand = exec.Command + +// execCmd executes the specified command, returning the STDOUT content. +// If command exits with error status, the output is captured into the returned error. +func execCmd(arg0 string, args ...string) ([]byte, error) { + out, err := execCommand(arg0, args...).Output() + if err != nil { + if err, ok := err.(*exec.ExitError); ok { + return nil, NestedError{ + Err: err, + NestedErr: fmt.Errorf("%s", bytes.TrimRight(err.Stderr, "\n")), + } + } + return nil, err + } + return out, nil } -// Description returns description of Zookeeper plugin -func (s *Snmp) Description() string { - return `Reads oids value from one or many snmp agents` +// Snmp holds the configuration for the plugin. +type Snmp struct { + // The SNMP agent to query. Format is ADDR[:PORT] (e.g. 1.2.3.4:161). + Agents []string + // Timeout to wait for a response. 
+ Timeout internal.Duration + Retries int + // Values: 1, 2, 3 + Version uint8 + + // Parameters for Version 1 & 2 + Community string + + // Parameters for Version 2 & 3 + MaxRepetitions uint8 + + // Parameters for Version 3 + ContextName string + // Values: "noAuthNoPriv", "authNoPriv", "authPriv" + SecLevel string + SecName string + // Values: "MD5", "SHA", "". Default: "" + AuthProtocol string + AuthPassword string + // Values: "DES", "AES", "". Default: "" + PrivProtocol string + PrivPassword string + EngineID string + EngineBoots uint32 + EngineTime uint32 + + Tables []Table `toml:"table"` + + // Name & Fields are the elements of a Table. + // Telegraf chokes if we try to embed a Table. So instead we have to embed the + // fields of a Table, and construct a Table during runtime. + Name string + Fields []Field `toml:"field"` + + connectionCache map[string]snmpConnection + initialized bool } -func fillnode(parentNode Node, oid_name string, ids []string) { - // ids = ["1", "3", "6", ...] - id, ids := ids[0], ids[1:] - node, ok := parentNode.subnodes[id] - if ok == false { - node = Node{ - id: id, - name: "", - subnodes: make(map[string]Node), - } - if len(ids) == 0 { - node.name = oid_name - } - parentNode.subnodes[id] = node +func (s *Snmp) init() error { + if s.initialized { + return nil } - if len(ids) > 0 { - fillnode(node, oid_name, ids) - } -} -func findnodename(node Node, ids []string) (string, string) { - // ids = ["1", "3", "6", ...] - if len(ids) == 1 { - return node.name, ids[0] - } - id, ids := ids[0], ids[1:] - // Get node - subnode, ok := node.subnodes[id] - if ok { - return findnodename(subnode, ids) - } - // We got a node - // Get node name - if node.name != "" && len(ids) == 0 && id == "0" { - // node with instance 0 - return node.name, "0" - } else if node.name != "" && len(ids) == 0 && id != "0" { - // node with an instance - return node.name, string(id) - } else if node.name != "" && len(ids) > 0 { - // node with subinstances - return node.name, strings.Join(ids, ".") - } - // return an empty node name - return node.name, "" -} - -func (s *Snmp) Gather(acc telegraf.Accumulator) error { - // TODO put this in cache on first run - // Create subtables mapping - if len(s.subTableMap) == 0 { - s.subTableMap = make(map[string]Subtable) - for _, sb := range s.Subtable { - s.subTableMap[sb.Name] = sb - } - } - // TODO put this in cache on first run - // Create oid tree - if s.SnmptranslateFile != "" && len(s.initNode.subnodes) == 0 { - s.nameToOid = make(map[string]string) - s.initNode = Node{ - id: "1", - name: "", - subnodes: make(map[string]Node), - } - - data, err := ioutil.ReadFile(s.SnmptranslateFile) - if err != nil { - log.Printf("Reading SNMPtranslate file error: %s", err) + for i := range s.Tables { + if err := s.Tables[i].init(); err != nil { return err - } else { - for _, line := range strings.Split(string(data), "\n") { - oids := strings.Fields(string(line)) - if len(oids) == 2 && oids[1] != "" { - oid_name := oids[0] - oid := oids[1] - fillnode(s.initNode, oid_name, strings.Split(string(oid), ".")) - s.nameToOid[oid_name] = oid - } - } } } - // Fetching data - for _, host := range s.Host { - // Set default args - if len(host.Address) == 0 { - host.Address = "127.0.0.1:161" - } - if host.Community == "" { - host.Community = "public" - } - if host.Timeout <= 0 { - host.Timeout = 2.0 - } - if host.Retries <= 0 { - host.Retries = 2 - } - // Prepare host - // Get Easy GET oids - for _, oidstring := range host.GetOids { - oid := Data{} - if val, ok := 
s.nameToOid[oidstring]; ok { - // TODO should we add the 0 instance ? - oid.Name = oidstring - oid.Oid = val - oid.rawOid = "." + val + ".0" - } else { - oid.Name = oidstring - oid.Oid = oidstring - if string(oidstring[:1]) != "." { - oid.rawOid = "." + oidstring - } else { - oid.rawOid = oidstring - } - } - host.getOids = append(host.getOids, oid) - } - for _, oid_name := range host.Collect { - // Get GET oids - for _, oid := range s.Get { - if oid.Name == oid_name { - if val, ok := s.nameToOid[oid.Oid]; ok { - // TODO should we add the 0 instance ? - if oid.Instance != "" { - oid.rawOid = "." + val + "." + oid.Instance - } else { - oid.rawOid = "." + val + ".0" - } - } else { - oid.rawOid = oid.Oid - } - host.getOids = append(host.getOids, oid) - } - } - // Get GETBULK oids - for _, oid := range s.Bulk { - if oid.Name == oid_name { - if val, ok := s.nameToOid[oid.Oid]; ok { - oid.rawOid = "." + val - } else { - oid.rawOid = oid.Oid - } - host.bulkOids = append(host.bulkOids, oid) - } - } + for i := range s.Fields { + if err := s.Fields[i].init(); err != nil { + return err } - // Table - for _, hostTable := range host.Table { - for _, snmpTable := range s.Table { - if hostTable.Name == snmpTable.Name { - table := hostTable - table.oid = snmpTable.Oid - table.mappingTable = snmpTable.MappingTable - table.subTables = snmpTable.SubTables - host.tables = append(host.tables, table) - } - } - } - // Launch Mapping - // TODO put this in cache on first run - // TODO save mapping and computed oids - // to do it only the first time - // only if len(s.OidInstanceMapping) == 0 - if len(host.OidInstanceMapping) >= 0 { - if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { - log.Printf("SNMP Mapping error for host '%s': %s", host.Address, err) + } + + s.initialized = true + return nil +} + +// Table holds the configuration for a SNMP table. +type Table struct { + // Name will be the name of the measurement. + Name string + + // Which tags to inherit from the top-level config. + InheritTags []string + + // Fields is the tags and values to look up. + Fields []Field `toml:"field"` + + // OID for automatic field population. + // If provided, init() will populate Fields with all the table columns of the + // given OID. + Oid string + + initialized bool +} + +// init() populates Fields if a table OID is provided. +func (t *Table) init() error { + if t.initialized { + return nil + } + if t.Oid == "" { + t.initialized = true + return nil + } + + mibName, _, oidText, _, err := snmpTranslate(t.Oid) + if err != nil { + return Errorf(err, "translating %s", t.Oid) + } + if t.Name == "" { + t.Name = oidText + } + mibPrefix := mibName + "::" + oidFullName := mibPrefix + oidText + + // first attempt to get the table's tags + tagOids := map[string]struct{}{} + // We have to guess that the "entry" oid is `t.Oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info. 
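+ // For illustration: for a table such as IF-MIB::ifTable, the
+ // `snmptranslate -Td` dump of the guessed entry node contains a line like
+ //   INDEX       { ifIndex }
+ // The parsing below scans for those "INDEX" lines and records each listed
+ // column as a tag OID.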
+ if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { + lines := bytes.Split(out, []byte{'\n'}) + for _, line := range lines { + if !bytes.HasPrefix(line, []byte(" INDEX")) { continue } - } - // Launch Get requests - if err := host.SNMPGet(acc, s.initNode); err != nil { - log.Printf("SNMP Error for host '%s': %s", host.Address, err) - } - if err := host.SNMPBulk(acc, s.initNode); err != nil { - log.Printf("SNMP Error for host '%s': %s", host.Address, err) - } - } - return nil -} -func (h *Host) SNMPMap( - acc telegraf.Accumulator, - nameToOid map[string]string, - subTableMap map[string]Subtable, -) error { - if h.OidInstanceMapping == nil { - h.OidInstanceMapping = make(map[string]map[string]string) - } - // Get snmp client - snmpClient, err := h.GetSNMPClient() - if err != nil { - return err - } - // Deconnection - defer snmpClient.Conn.Close() - // Prepare OIDs - for _, table := range h.tables { - // We don't have mapping - if table.mappingTable == "" { - if len(table.subTables) == 0 { - // If We don't have mapping table - // neither subtables list - // This is just a bulk request - oid := Data{} - oid.Oid = table.oid - if val, ok := nameToOid[oid.Oid]; ok { - oid.rawOid = "." + val - } else { - oid.rawOid = oid.Oid - } - h.bulkOids = append(h.bulkOids, oid) - } else { - // If We don't have mapping table - // but we have subtables - // This is a bunch of bulk requests - // For each subtable ... - for _, sb := range table.subTables { - // ... we create a new Data (oid) object - oid := Data{} - // Looking for more information about this subtable - ssb, exists := subTableMap[sb] - if exists { - // We found a subtable section in config files - oid.Oid = ssb.Oid - oid.rawOid = ssb.Oid - oid.Unit = ssb.Unit - } else { - // We did NOT find a subtable section in config files - oid.Oid = sb - oid.rawOid = sb - } - // TODO check oid validity - - // Add the new oid to getOids list - h.bulkOids = append(h.bulkOids, oid) - } + i := bytes.Index(line, []byte("{ ")) + if i == -1 { // parse error + continue } - } else { - // We have a mapping table - // We need to query this table - // To get mapping between instance id - // and instance name - oid_asked := table.mappingTable - oid_next := oid_asked - need_more_requests := true - // Set max repetition - maxRepetition := uint8(32) - // Launch requests - for need_more_requests { - // Launch request - result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition) - if err3 != nil { - return err3 - } - - lastOid := "" - for _, variable := range result.Variables { - lastOid = variable.Name - if strings.HasPrefix(variable.Name, oid_asked) { - switch variable.Type { - // handle instance names - case gosnmp.OctetString: - // Check if instance is in includes instances - getInstances := true - if len(table.IncludeInstances) > 0 { - getInstances = false - for _, instance := range table.IncludeInstances { - if instance == string(variable.Value.([]byte)) { - getInstances = true - } - } - } - // Check if instance is in excludes instances - if len(table.ExcludeInstances) > 0 { - getInstances = true - for _, instance := range table.ExcludeInstances { - if instance == string(variable.Value.([]byte)) { - getInstances = false - } - } - } - // We don't want this instance - if !getInstances { - continue - } - - // remove oid table from the complete oid - // in order to get the current instance id - key := strings.Replace(variable.Name, oid_asked, "", 1) - - if len(table.subTables) == 0 { - // We have a mapping table - // but no subtables 
- // This is just a bulk request - - // Building mapping table - mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))} - _, exists := h.OidInstanceMapping[table.oid] - if exists { - h.OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte)) - } else { - h.OidInstanceMapping[table.oid] = mapping - } - - // Add table oid in bulk oid list - oid := Data{} - oid.Oid = table.oid - if val, ok := nameToOid[oid.Oid]; ok { - oid.rawOid = "." + val - } else { - oid.rawOid = oid.Oid - } - h.bulkOids = append(h.bulkOids, oid) - } else { - // We have a mapping table - // and some subtables - // This is a bunch of get requests - // This is the best case :) - - // For each subtable ... - for _, sb := range table.subTables { - // ... we create a new Data (oid) object - oid := Data{} - // Looking for more information about this subtable - ssb, exists := subTableMap[sb] - if exists { - // We found a subtable section in config files - oid.Oid = ssb.Oid + key - oid.rawOid = ssb.Oid + key - oid.Unit = ssb.Unit - oid.Instance = string(variable.Value.([]byte)) - } else { - // We did NOT find a subtable section in config files - oid.Oid = sb + key - oid.rawOid = sb + key - oid.Instance = string(variable.Value.([]byte)) - } - // TODO check oid validity - - // Add the new oid to getOids list - h.getOids = append(h.getOids, oid) - } - } - default: - } - } else { - break - } - } - // Determine if we need more requests - if strings.HasPrefix(lastOid, oid_asked) { - need_more_requests = true - oid_next = lastOid - } else { - need_more_requests = false - } + line = line[i+2:] + i = bytes.Index(line, []byte(" }")) + if i == -1 { // parse error + continue + } + line = line[:i] + for _, col := range bytes.Split(line, []byte(", ")) { + tagOids[mibPrefix+string(col)] = struct{}{} } } } - // Mapping finished - // Create newoids based on mapping - - return nil -} - -func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { - // Get snmp client - snmpClient, err := h.GetSNMPClient() + // this won't actually try to run a query. The `-Ch` will just cause it to dump headers. 
+ out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) if err != nil { - return err + return Errorf(err, "getting table columns for %s", t.Oid) } - // Deconnection - defer snmpClient.Conn.Close() - // Prepare OIDs - oidsList := make(map[string]Data) - for _, oid := range h.getOids { - oidsList[oid.rawOid] = oid + cols := bytes.SplitN(out, []byte{'\n'}, 2)[0] + if len(cols) == 0 { + return fmt.Errorf("unable to get columns for table %s", t.Oid) } - oidsNameList := make([]string, 0, len(oidsList)) - for _, oid := range oidsList { - oidsNameList = append(oidsNameList, oid.rawOid) + for _, col := range bytes.Split(cols, []byte{' '}) { + if len(col) == 0 { + continue + } + col := string(col) + _, isTag := tagOids[mibPrefix+col] + t.Fields = append(t.Fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag}) } - // gosnmp.MAX_OIDS == 60 - // TODO use gosnmp.MAX_OIDS instead of hard coded value - max_oids := 60 - // limit 60 (MAX_OIDS) oids by requests - for i := 0; i < len(oidsList); i = i + max_oids { - // Launch request - max_index := i + max_oids - if i+max_oids > len(oidsList) { - max_index = len(oidsList) - } - result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to g.MAX_OIDS - if err3 != nil { - return err3 - } - // Handle response - _, err = h.HandleResponse(oidsList, result, acc, initNode) - if err != nil { + // initialize all the nested fields + for i := range t.Fields { + if err := t.Fields[i].init(); err != nil { return err } } + + t.initialized = true return nil } -func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error { - // Get snmp client - snmpClient, err := h.GetSNMPClient() +// Field holds the configuration for a Field to look up. +type Field struct { + // Name will be the name of the field. + Name string + // OID is prefix for this field. The plugin will perform a walk through all + // OIDs with this as their parent. For each value found, the plugin will strip + // off the OID prefix, and use the remainder as the index. For multiple fields + // to show up in the same row, they must share the same index. + Oid string + // OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index. + OidIndexSuffix string + // IsTag controls whether this OID is output as a tag or a value. + IsTag bool + // Conversion controls any type conversion that is done on the value. + // "float"/"float(0)" will convert the value into a float. + // "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit. + // "int" will conver the value into an integer. + // "hwaddr" will convert a 6-byte string to a MAC address. + // "ipaddr" will convert the value to an IPv4 or IPv6 address. + Conversion string + + initialized bool +} + +// init() converts OID names to numbers, and sets the .Name attribute if unset. 
+func (f *Field) init() error { + if f.initialized { + return nil + } + + _, oidNum, oidText, conversion, err := snmpTranslate(f.Oid) if err != nil { - return err + return Errorf(err, "translating %s", f.Oid) } - // Deconnection - defer snmpClient.Conn.Close() - // Prepare OIDs - oidsList := make(map[string]Data) - for _, oid := range h.bulkOids { - oidsList[oid.rawOid] = oid + f.Oid = oidNum + if f.Name == "" { + f.Name = oidText } - oidsNameList := make([]string, 0, len(oidsList)) - for _, oid := range oidsList { - oidsNameList = append(oidsNameList, oid.rawOid) - } - // TODO Trying to make requests with more than one OID - // to reduce the number of requests - for _, oid := range oidsNameList { - oid_asked := oid - need_more_requests := true - // Set max repetition - maxRepetition := oidsList[oid].MaxRepetition - if maxRepetition <= 0 { - maxRepetition = 32 - } - // Launch requests - for need_more_requests { - // Launch request - result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition) - if err3 != nil { - return err3 - } - // Handle response - last_oid, err := h.HandleResponse(oidsList, result, acc, initNode) - if err != nil { - return err - } - // Determine if we need more requests - if strings.HasPrefix(last_oid, oid_asked) { - need_more_requests = true - oid = last_oid - } else { - need_more_requests = false - } - } + if f.Conversion == "" { + f.Conversion = conversion } + + //TODO use textual convention conversion from the MIB + + f.initialized = true return nil } -func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { - // Prepare Version - var version gosnmp.SnmpVersion - if h.Version == 1 { - version = gosnmp.Version1 - } else if h.Version == 3 { - version = gosnmp.Version3 - } else { - version = gosnmp.Version2c - } - // Prepare host and port - host, port_str, err := net.SplitHostPort(h.Address) - if err != nil { - port_str = string("161") - } - // convert port_str to port in uint16 - port_64, err := strconv.ParseUint(port_str, 10, 16) - port := uint16(port_64) - // Get SNMP client - snmpClient := &gosnmp.GoSNMP{ - Target: host, - Port: port, - Community: h.Community, - Version: version, - Timeout: time.Duration(h.Timeout) * time.Second, - Retries: h.Retries, - } - // Connection - err2 := snmpClient.Connect() - if err2 != nil { - return nil, err2 - } - // Return snmpClient - return snmpClient, nil +// RTable is the resulting table built from a Table. +type RTable struct { + // Name is the name of the field, copied from Table.Name. + Name string + // Time is the time the table was built. + Time time.Time + // Rows are the rows that were found, one row for each table OID index found. + Rows []RTableRow } -func (h *Host) HandleResponse( - oids map[string]Data, - result *gosnmp.SnmpPacket, - acc telegraf.Accumulator, - initNode Node, -) (string, error) { - var lastOid string - for _, variable := range result.Variables { - lastOid = variable.Name - nextresult: - // Get only oid wanted - for oid_key, oid := range oids { - // Skip oids already processed - for _, processedOid := range h.processedOids { - if variable.Name == processedOid { - break nextresult - } - } - // If variable.Name is the same as oid_key - // OR - // the result is SNMP table which "." comes right after oid_key. 
- // ex: oid_key: .1.3.6.1.2.1.2.2.1.16, variable.Name: .1.3.6.1.2.1.2.2.1.16.1 - if variable.Name == oid_key || strings.HasPrefix(variable.Name, oid_key+".") { - switch variable.Type { - // handle Metrics - case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32, - gosnmp.TimeTicks, gosnmp.Counter64, gosnmp.Uinteger32, gosnmp.OctetString: - // Prepare tags - tags := make(map[string]string) - if oid.Unit != "" { - tags["unit"] = oid.Unit - } - // Get name and instance - var oid_name string - var instance string - // Get oidname and instance from translate file - oid_name, instance = findnodename(initNode, - strings.Split(string(variable.Name[1:]), ".")) - // Set instance tag - // From mapping table - mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key] - if inMappingNoSubTable { - // filter if the instance in not in - // OidInstanceMapping mapping map - if instance_name, exists := mapping[instance]; exists { - tags["instance"] = instance_name - } else { - continue - } - } else if oid.Instance != "" { - // From config files - tags["instance"] = oid.Instance - } else if instance != "" { - // Using last id of the current oid, ie: - // with .1.3.6.1.2.1.31.1.1.1.10.3 - // instance is 3 - tags["instance"] = instance - } +// RTableRow is the resulting row containing all the OID values which shared +// the same index. +type RTableRow struct { + // Tags are all the Field values which had IsTag=true. + Tags map[string]string + // Fields are all the Field values which had IsTag=false. + Fields map[string]interface{} +} - // Set name - var field_name string - if oid_name != "" { - // Set fieldname as oid name from translate file - field_name = oid_name - } else { - // Set fieldname as oid name from inputs.snmp.get section - // Because the result oid is equal to inputs.snmp.get section - field_name = oid.Name - } - tags["snmp_host"], _, _ = net.SplitHostPort(h.Address) - fields := make(map[string]interface{}) - fields[string(field_name)] = variable.Value +// NestedError wraps an error returned from deeper in the code. +type NestedError struct { + // Err is the error from where the NestedError was constructed. + Err error + // NestedError is the error that was passed back from the called function. + NestedErr error +} - h.processedOids = append(h.processedOids, variable.Name) - acc.AddFields(field_name, fields, tags) - case gosnmp.NoSuchObject, gosnmp.NoSuchInstance: - // Oid not found - log.Printf("[snmp input] Oid not found: %s", oid_key) - default: - // delete other data - } - break - } - } +// Error returns a concatenated string of all the nested errors. +func (ne NestedError) Error() string { + return ne.Err.Error() + ": " + ne.NestedErr.Error() +} + +// Errorf is a convenience function for constructing a NestedError. +func Errorf(err error, msg string, format ...interface{}) error { + return NestedError{ + NestedErr: err, + Err: fmt.Errorf(msg, format...), } - return lastOid, nil } func init() { inputs.Add("snmp", func() telegraf.Input { - return &Snmp{} + return &Snmp{ + Retries: 3, + MaxRepetitions: 10, + Timeout: internal.Duration{Duration: 5 * time.Second}, + Version: 2, + Community: "public", + } }) } + +// SampleConfig returns the default configuration of the input. +func (s *Snmp) SampleConfig() string { + return sampleConfig +} + +// Description returns a one-sentence description on the input. +func (s *Snmp) Description() string { + return description +} + +// Gather retrieves all the configured fields and tables. 
+// Any error encountered does not halt the process. The errors are accumulated
+// and returned at the end.
+func (s *Snmp) Gather(acc telegraf.Accumulator) error {
+ if err := s.init(); err != nil {
+ return err
+ }
+
+ for _, agent := range s.Agents {
+ gs, err := s.getConnection(agent)
+ if err != nil {
+ acc.AddError(Errorf(err, "agent %s", agent))
+ continue
+ }
+
+ // First come the top-level fields. We treat them as table prefixes with an empty index.
+ t := Table{
+ Name: s.Name,
+ Fields: s.Fields,
+ }
+ topTags := map[string]string{}
+ if err := s.gatherTable(acc, gs, t, topTags, false); err != nil {
+ acc.AddError(Errorf(err, "agent %s", agent))
+ }
+
+ // Now the real tables.
+ for _, t := range s.Tables {
+ if err := s.gatherTable(acc, gs, t, topTags, true); err != nil {
+ acc.AddError(Errorf(err, "agent %s", agent))
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, topTags map[string]string, walk bool) error {
+ rt, err := t.Build(gs, walk)
+ if err != nil {
+ return err
+ }
+
+ for _, tr := range rt.Rows {
+ if !walk {
+ // top-level table. Add tags to topTags.
+ for k, v := range tr.Tags {
+ topTags[k] = v
+ }
+ } else {
+ // real table. Inherit any specified tags.
+ for _, k := range t.InheritTags {
+ if v, ok := topTags[k]; ok {
+ tr.Tags[k] = v
+ }
+ }
+ }
+ if _, ok := tr.Tags["agent_host"]; !ok {
+ tr.Tags["agent_host"] = gs.Host()
+ }
+ acc.AddFields(rt.Name, tr.Fields, tr.Tags, rt.Time)
+ }
+
+ return nil
+}
+
+// Build retrieves all the fields specified in the table and constructs the RTable.
+func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
+ rows := map[string]RTableRow{}
+
+ tagCount := 0
+ for _, f := range t.Fields {
+ if f.IsTag {
+ tagCount++
+ }
+
+ if len(f.Oid) == 0 {
+ return nil, fmt.Errorf("cannot have empty OID")
+ }
+ var oid string
+ if f.Oid[0] == '.' {
+ oid = f.Oid
+ } else {
+ // make sure OID has "." because the BulkWalkAll results do, and the prefix needs to match
+ oid = "." + f.Oid
+ }
+
+ // ifv contains a mapping of table OID index to field value
+ ifv := map[string]interface{}{}
+
+ if !walk {
+ // This is used when fetching non-table fields. Fields configured at the top
+ // scope of the plugin.
+ // We fetch the fields directly, and add them to ifv as if the index were an
+ // empty string. This results in all the non-table fields sharing the same
+ // index, and being added on the same row.
+ if pkt, err := gs.Get([]string{oid}); err != nil {
+ return nil, Errorf(err, "performing get")
+ } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
+ ent := pkt.Variables[0]
+ fv, err := fieldConvert(f.Conversion, ent.Value)
+ if err != nil {
+ return nil, Errorf(err, "converting %q", ent.Value)
+ }
+ if fvs, ok := fv.(string); !ok || fvs != "" {
+ ifv[""] = fv
+ }
+ }
+ } else {
+ err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error {
+ if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." {
+ return NestedError{} // break the walk
+ }
+
+ idx := ent.Name[len(oid):]
+ if f.OidIndexSuffix != "" {
+ if !strings.HasSuffix(idx, f.OidIndexSuffix) {
+ // this entry doesn't match our OidIndexSuffix; skip it
+ return nil
+ }
+ idx = idx[:len(idx)-len(f.OidIndexSuffix)]
+ }
+
+ fv, err := fieldConvert(f.Conversion, ent.Value)
+ if err != nil {
+ return Errorf(err, "converting %q", ent.Value)
+ }
+ if fvs, ok := fv.(string); !ok || fvs != "" {
+ ifv[idx] = fv
+ }
+ return nil
+ })
+ if err != nil {
+ if _, ok := err.(NestedError); !ok {
+ return nil, Errorf(err, "performing bulk walk")
+ }
+ }
+ }
+
+ for i, v := range ifv {
+ rtr, ok := rows[i]
+ if !ok {
+ rtr = RTableRow{}
+ rtr.Tags = map[string]string{}
+ rtr.Fields = map[string]interface{}{}
+ rows[i] = rtr
+ }
+ if f.IsTag {
+ if vs, ok := v.(string); ok {
+ rtr.Tags[f.Name] = vs
+ } else {
+ rtr.Tags[f.Name] = fmt.Sprintf("%v", v)
+ }
+ } else {
+ rtr.Fields[f.Name] = v
+ }
+ }
+ }
+
+ rt := RTable{
+ Name: t.Name,
+ Time: time.Now(), //TODO record time at start
+ Rows: make([]RTableRow, 0, len(rows)),
+ }
+ for _, r := range rows {
+ if len(r.Tags) < tagCount {
+ // don't add rows which are missing tags, as without tags you can't filter
+ continue
+ }
+ rt.Rows = append(rt.Rows, r)
+ }
+ return &rt, nil
+}
+
+// snmpConnection is an interface which wraps a *gosnmp.GoSNMP object.
+// We interact through an interface so we can mock it out in tests.
+type snmpConnection interface {
+ Host() string
+ //BulkWalkAll(string) ([]gosnmp.SnmpPDU, error)
+ Walk(string, gosnmp.WalkFunc) error
+ Get(oids []string) (*gosnmp.SnmpPacket, error)
+}
+
+// gosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection.
+type gosnmpWrapper struct {
+ *gosnmp.GoSNMP
+}
+
+// Host returns the value of GoSNMP.Target.
+func (gsw gosnmpWrapper) Host() string {
+ return gsw.Target
+}
+
+// Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the
+// connection is using SNMPv1 or newer.
+// Also, if any error is encountered, it will reconnect once and try again.
+func (gsw gosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error {
+ var err error
+ // On error, retry once.
+ // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function.
+ for i := 0; i < 2; i++ {
+ if gsw.Version == gosnmp.Version1 {
+ err = gsw.GoSNMP.Walk(oid, fn)
+ } else {
+ err = gsw.GoSNMP.BulkWalk(oid, fn)
+ }
+ if err == nil {
+ return nil
+ }
+ if err := gsw.GoSNMP.Connect(); err != nil {
+ return Errorf(err, "reconnecting")
+ }
+ }
+ return err
+}
+
+// Get wraps GoSNMP.Get().
+// If any error is encountered, it will reconnect once and try again.
+func (gsw gosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) {
+ var err error
+ var pkt *gosnmp.SnmpPacket
+ for i := 0; i < 2; i++ {
+ pkt, err = gsw.GoSNMP.Get(oids)
+ if err == nil {
+ return pkt, nil
+ }
+ if err := gsw.GoSNMP.Connect(); err != nil {
+ return nil, Errorf(err, "reconnecting")
+ }
+ }
+ return nil, err
+}
+
+// getConnection creates a snmpConnection (*gosnmp.GoSNMP) object and caches the
+// result using `agent` as the cache key.
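Before the implementation, a quick standalone illustration (not part of this diff) of how agent addresses are resolved: a bare host falls back to the default SNMP port 161, as exercised by `TestGetSNMPConnection_v2` below.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	for _, agent := range []string{"1.2.3.4:567", "1.2.3.4"} {
		host, port, err := net.SplitHostPort(agent)
		if err != nil {
			// no port given; fall back to the SNMP default, as getConnection does
			host, port = agent, "161"
		}
		fmt.Printf("%s -> host=%s port=%s\n", agent, host, port)
	}
	// Output:
	// 1.2.3.4:567 -> host=1.2.3.4 port=567
	// 1.2.3.4 -> host=1.2.3.4 port=161
}
```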
+func (s *Snmp) getConnection(agent string) (snmpConnection, error) {
+ if s.connectionCache == nil {
+ s.connectionCache = map[string]snmpConnection{}
+ }
+ if gs, ok := s.connectionCache[agent]; ok {
+ return gs, nil
+ }
+
+ gs := gosnmpWrapper{&gosnmp.GoSNMP{}}
+
+ host, portStr, err := net.SplitHostPort(agent)
+ if err != nil {
+ if err, ok := err.(*net.AddrError); !ok || err.Err != "missing port in address" {
+ return nil, Errorf(err, "parsing host")
+ }
+ host = agent
+ portStr = "161"
+ }
+ gs.Target = host
+
+ port, err := strconv.ParseUint(portStr, 10, 16)
+ if err != nil {
+ return nil, Errorf(err, "parsing port")
+ }
+ gs.Port = uint16(port)
+
+ gs.Timeout = s.Timeout.Duration
+
+ gs.Retries = s.Retries
+
+ switch s.Version {
+ case 3:
+ gs.Version = gosnmp.Version3
+ case 2, 0:
+ gs.Version = gosnmp.Version2c
+ case 1:
+ gs.Version = gosnmp.Version1
+ default:
+ return nil, fmt.Errorf("invalid version")
+ }
+
+ if s.Version < 3 {
+ if s.Community == "" {
+ gs.Community = "public"
+ } else {
+ gs.Community = s.Community
+ }
+ }
+
+ gs.MaxRepetitions = s.MaxRepetitions
+
+ if s.Version == 3 {
+ gs.ContextName = s.ContextName
+
+ sp := &gosnmp.UsmSecurityParameters{}
+ gs.SecurityParameters = sp
+ gs.SecurityModel = gosnmp.UserSecurityModel
+
+ switch strings.ToLower(s.SecLevel) {
+ case "noauthnopriv", "":
+ gs.MsgFlags = gosnmp.NoAuthNoPriv
+ case "authnopriv":
+ gs.MsgFlags = gosnmp.AuthNoPriv
+ case "authpriv":
+ gs.MsgFlags = gosnmp.AuthPriv
+ default:
+ return nil, fmt.Errorf("invalid secLevel")
+ }
+
+ sp.UserName = s.SecName
+
+ switch strings.ToLower(s.AuthProtocol) {
+ case "md5":
+ sp.AuthenticationProtocol = gosnmp.MD5
+ case "sha":
+ sp.AuthenticationProtocol = gosnmp.SHA
+ case "":
+ sp.AuthenticationProtocol = gosnmp.NoAuth
+ default:
+ return nil, fmt.Errorf("invalid authProtocol")
+ }
+
+ sp.AuthenticationPassphrase = s.AuthPassword
+
+ switch strings.ToLower(s.PrivProtocol) {
+ case "des":
+ sp.PrivacyProtocol = gosnmp.DES
+ case "aes":
+ sp.PrivacyProtocol = gosnmp.AES
+ case "":
+ sp.PrivacyProtocol = gosnmp.NoPriv
+ default:
+ return nil, fmt.Errorf("invalid privProtocol")
+ }
+
+ sp.PrivacyPassphrase = s.PrivPassword
+
+ sp.AuthoritativeEngineID = s.EngineID
+
+ sp.AuthoritativeEngineBoots = s.EngineBoots
+
+ sp.AuthoritativeEngineTime = s.EngineTime
+ }
+
+ if err := gs.Connect(); err != nil {
+ return nil, Errorf(err, "setting up connection")
+ }
+
+ s.connectionCache[agent] = gs
+ return gs, nil
+}
+
+// fieldConvert converts from any type according to the conv specification:
+// "float"/"float(0)" will convert the value into a float.
+// "float(X)" will convert the value into a float, and then move the decimal point before the Xth right-most digit.
+// "int" will convert the value into an integer.
+// "hwaddr" will convert the value into a MAC address.
+// "ipaddr" will convert the value into an IP address.
+// "" will convert a byte slice into a string.
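A concrete illustration of the "hwaddr" and "ipaddr" conversions described above. The input/output pairs come straight from `TestFieldConvert` below; this standalone sketch uses the same stdlib calls rather than the plugin function itself.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// "hwaddr": each byte rendered as a colon-separated hex pair.
	fmt.Println(net.HardwareAddr([]byte("abcdef")).String()) // 61:62:63:64:65:66

	// "ipaddr": a 4-byte value becomes an IPv4 address, a 16-byte value an IPv6 one.
	fmt.Println(net.IP([]byte("abcd")).String())             // 97.98.99.100
	fmt.Println(net.IP([]byte("abcdefghijklmnop")).String()) // 6162:6364:6566:6768:696a:6b6c:6d6e:6f70
}
```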
+func fieldConvert(conv string, v interface{}) (interface{}, error) { + if conv == "" { + if bs, ok := v.([]byte); ok { + return string(bs), nil + } + return v, nil + } + + var d int + if _, err := fmt.Sscanf(conv, "float(%d)", &d); err == nil || conv == "float" { + switch vt := v.(type) { + case float32: + v = float64(vt) / math.Pow10(d) + case float64: + v = float64(vt) / math.Pow10(d) + case int: + v = float64(vt) / math.Pow10(d) + case int8: + v = float64(vt) / math.Pow10(d) + case int16: + v = float64(vt) / math.Pow10(d) + case int32: + v = float64(vt) / math.Pow10(d) + case int64: + v = float64(vt) / math.Pow10(d) + case uint: + v = float64(vt) / math.Pow10(d) + case uint8: + v = float64(vt) / math.Pow10(d) + case uint16: + v = float64(vt) / math.Pow10(d) + case uint32: + v = float64(vt) / math.Pow10(d) + case uint64: + v = float64(vt) / math.Pow10(d) + case []byte: + vf, _ := strconv.ParseFloat(string(vt), 64) + v = vf / math.Pow10(d) + case string: + vf, _ := strconv.ParseFloat(vt, 64) + v = vf / math.Pow10(d) + } + return v, nil + } + + if conv == "int" { + switch vt := v.(type) { + case float32: + v = int64(vt) + case float64: + v = int64(vt) + case int: + v = int64(vt) + case int8: + v = int64(vt) + case int16: + v = int64(vt) + case int32: + v = int64(vt) + case int64: + v = int64(vt) + case uint: + v = int64(vt) + case uint8: + v = int64(vt) + case uint16: + v = int64(vt) + case uint32: + v = int64(vt) + case uint64: + v = int64(vt) + case []byte: + v, _ = strconv.Atoi(string(vt)) + case string: + v, _ = strconv.Atoi(vt) + } + return v, nil + } + + if conv == "hwaddr" { + switch vt := v.(type) { + case string: + v = net.HardwareAddr(vt).String() + case []byte: + v = net.HardwareAddr(vt).String() + default: + return nil, fmt.Errorf("invalid type (%T) for hwaddr conversion", v) + } + return v, nil + } + + if conv == "ipaddr" { + var ipbs []byte + + switch vt := v.(type) { + case string: + ipbs = []byte(vt) + case []byte: + ipbs = vt + default: + return nil, fmt.Errorf("invalid type (%T) for ipaddr conversion", v) + } + + switch len(ipbs) { + case 4, 16: + v = net.IP(ipbs).String() + default: + return nil, fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs)) + } + + return v, nil + } + + return nil, fmt.Errorf("invalid conversion type '%s'", conv) +} + +// snmpTranslate resolves the given OID. +func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { + var out []byte + if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { + out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) + } else { + out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) + } + if err != nil { + return "", "", "", "", err + } + + bb := bytes.NewBuffer(out) + + oidText, err = bb.ReadString('\n') + if err != nil { + return "", "", "", "", Errorf(err, "getting OID text") + } + oidText = oidText[:len(oidText)-1] + + i := strings.Index(oidText, "::") + if i == -1 { + // was not found in MIB. + if bytes.Index(bb.Bytes(), []byte(" [TRUNCATED]")) >= 0 { + return "", oid, oid, "", nil + } + // not truncated, but not fully found. 
We still need to parse out numeric OID, so keep going + oidText = oid + } else { + mibName = oidText[:i] + oidText = oidText[i+2:] + } + + if i := bytes.Index(bb.Bytes(), []byte(" -- TEXTUAL CONVENTION ")); i != -1 { + bb.Next(i + len(" -- TEXTUAL CONVENTION ")) + tc, err := bb.ReadString('\n') + if err != nil { + return "", "", "", "", Errorf(err, "getting textual convention") + } + tc = tc[:len(tc)-1] + switch tc { + case "MacAddress", "PhysAddress": + conversion = "hwaddr" + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress": + conversion = "ipaddr" + } + } + + i = bytes.Index(bb.Bytes(), []byte("::= { ")) + bb.Next(i + len("::= { ")) + objs, err := bb.ReadString('}') + if err != nil { + return "", "", "", "", Errorf(err, "getting numeric oid") + } + objs = objs[:len(objs)-1] + for _, obj := range strings.Split(objs, " ") { + if len(obj) == 0 { + continue + } + if i := strings.Index(obj, "("); i != -1 { + obj = obj[i+1:] + oidNum += "." + obj[:strings.Index(obj, ")")] + } else { + oidNum += "." + obj + } + } + + return mibName, oidNum, oidText, conversion, nil +} diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go new file mode 100644 index 000000000..590257983 --- /dev/null +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -0,0 +1,99 @@ +// +build generate + +package main + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" +) + +// This file is a generator used to generate the mocks for the commands used by the tests. + +// These are the commands to be mocked. +var mockedCommands = [][]string{ + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.1"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.2"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"}, + {"snmptranslate", "-Td", "-Ob", ".iso.2.3"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"}, + {"snmptranslate", "-Td", "-Ob", "TEST::server"}, + {"snmptranslate", "-Td", "-Ob", "TEST::server.0"}, + {"snmptranslate", "-Td", "-Ob", "TEST::testTable"}, + {"snmptranslate", "-Td", "-Ob", "TEST::connections"}, + {"snmptranslate", "-Td", "-Ob", "TEST::latency"}, + {"snmptranslate", "-Td", "-Ob", "TEST::hostname"}, + {"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"}, + {"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"}, + {"snmptranslate", "-Td", "-Ob", "TCP-MIB::tcpConnectionLocalAddress.1"}, + {"snmptranslate", "-Td", "TEST::testTable.1"}, + {"snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", "TEST::testTable"}, +} + +type mockedCommandResult struct { + stdout string + stderr string + exitError bool +} + +func main() { + if err := generate(); err != nil { + fmt.Fprintf(os.Stderr, "error: %s\n", err) + os.Exit(1) + } +} + +func generate() error { + f, err := os.OpenFile("snmp_mocks_test.go", os.O_RDWR, 0644) + if err != nil { + return err + } + br := bufio.NewReader(f) + var i int64 + for l, err := br.ReadString('\n'); err == nil; l, err = br.ReadString('\n') { + i += int64(len(l)) + if l == "// BEGIN GO GENERATE CONTENT\n" { + break + } + } + f.Truncate(i) + f.Seek(i, 0) + + fmt.Fprintf(f, "var mockedCommandResults = map[string]mockedCommandResult{\n") + + for _, cmd := range mockedCommands { + ec := exec.Command(cmd[0], cmd[1:]...) 
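+ // Each mocked command is run once for real, with the local testdata
+ // directory added to MIBDIRS; its stdout, stderr, and exit status are then
+ // written into the generated mockedCommandResults map in snmp_mocks_test.go.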
+ out := bytes.NewBuffer(nil)
+ errBuf := bytes.NewBuffer(nil)
+ ec.Stdout = out
+ ec.Stderr = errBuf
+ ec.Env = []string{
+ "MIBDIRS=+./testdata",
+ }
+
+ var mcr mockedCommandResult
+ if err := ec.Run(); err != nil {
+ if _, ok := err.(*exec.ExitError); ok {
+ // the command ran but exited non-zero; record that for the mock
+ mcr.exitError = true
+ } else {
+ return fmt.Errorf("executing %v: %s", cmd, err)
+ }
+ }
+ mcr.stdout = string(out.Bytes())
+ mcr.stderr = string(errBuf.Bytes())
+ cmd0 := strings.Join(cmd, "\000")
+ mcrv := fmt.Sprintf("%#v", mcr)[5:] // trim `main.` prefix
+ fmt.Fprintf(f, "%#v: %s,\n", cmd0, mcrv)
+ }
+ f.Write([]byte("}\n"))
+ f.Close()
+
+ return exec.Command("gofmt", "-w", "snmp_mocks_test.go").Run()
+}
diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go
new file mode 100644
index 000000000..1c41f29e2
--- /dev/null
+++ b/plugins/inputs/snmp/snmp_mocks_test.go
@@ -0,0 +1,84 @@
+package snmp
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+type mockedCommandResult struct {
+ stdout string
+ stderr string
+ exitError bool
+}
+
+func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
+ args = append([]string{"-test.run=TestMockExecCommand", "--", arg0}, args...)
+ cmd := exec.Command(os.Args[0], args...)
+ cmd.Stderr = os.Stderr // so the test output shows errors
+ return cmd
+}
+
+// This is not a real test. This is just a way of mocking out commands.
+//
+// Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568
+func TestMockExecCommand(t *testing.T) {
+ var cmd []string
+ for _, arg := range os.Args {
+ if string(arg) == "--" {
+ cmd = []string{}
+ continue
+ }
+ if cmd == nil {
+ continue
+ }
+ cmd = append(cmd, string(arg))
+ }
+ if cmd == nil {
+ return
+ }
+
+ cmd0 := strings.Join(cmd, "\000")
+ mcr, ok := mockedCommandResults[cmd0]
+ if !ok {
+ cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix
+ fmt.Fprintf(os.Stderr, "Unmocked command. 
Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) + os.Exit(1) + } + fmt.Printf("%s", mcr.stdout) + fmt.Fprintf(os.Stderr, "%s", mcr.stderr) + if mcr.exitError { + os.Exit(1) + } + os.Exit(0) +} + +func init() { + execCommand = mockExecCommand +} + +// BEGIN GO GENERATE CONTENT +var mockedCommandResults = map[string]mockedCommandResult{ + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": mockedCommandResult{stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": mockedCommandResult{stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": mockedCommandResult{stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n 
STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": mockedCommandResult{stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": mockedCommandResult{stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": mockedCommandResult{stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": mockedCommandResult{stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. 
The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00TEST::testTable.1": mockedCommandResult{stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, + "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": mockedCommandResult{stdout: "server connections latency \nTEST::testTable: No entries\n", stderr: "", exitError: false}, +} diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 2faaa1408..6839fdd8f 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -1,482 +1,611 @@ +//go:generate go run -tags generate snmp_mocks_generate.go package snmp import ( + "fmt" + "net" + "sync" "testing" + "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - + "github.com/influxdata/toml" + "github.com/soniah/gosnmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestSNMPErrorGet1(t *testing.T) { - get1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: ".1.3.6.1.2.1.2.2.1.16.1", +type testSNMPConnection struct { + host string + values map[string]interface{} +} + +func (tsc *testSNMPConnection) Host() string { + return tsc.host +} + +func (tsc *testSNMPConnection) Get(oids []string) (*gosnmp.SnmpPacket, error) { + sp := &gosnmp.SnmpPacket{} + for _, oid := range oids { + v, ok := tsc.values[oid] + if !ok { + sp.Variables = append(sp.Variables, gosnmp.SnmpPDU{ + Name: oid, + Type: gosnmp.NoSuchObject, + }) + continue + } + sp.Variables = append(sp.Variables, gosnmp.SnmpPDU{ + Name: oid, + Value: v, + }) } - h := Host{ - Collect: []string{"oid1"}, + return sp, nil +} +func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { + for void, v := range tsc.values { + if void == oid || (len(void) > len(oid) && void[:len(oid)+1] == oid+".") { + if err := wf(gosnmp.SnmpPDU{ + Name: void, + Value: v, + }); err != nil { + return err + } + } } + return nil +} + +var tsc = &testSNMPConnection{ + host: "tsc", + values: map[string]interface{}{ + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + ".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.4.0": 123456, + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, + }, +} + +func TestSampleConfig(t *testing.T) { + conf := struct { + Inputs struct { + Snmp []*Snmp + } + }{} + err := toml.Unmarshal([]byte("[[inputs.snmp]]\n"+(*Snmp)(nil).SampleConfig()), &conf) + assert.NoError(t, err) + s := Snmp{ - SnmptranslateFile: "bad_oid.txt", - Host: []Host{h}, - Get: []Data{get1}, + Agents: []string{"127.0.0.1:161"}, + Timeout: 
internal.Duration{Duration: 5 * time.Second}, + Version: 2, + Community: "public", + MaxRepetitions: 10, + Retries: 3, + + Name: "system", + Fields: []Field{ + {Name: "hostname", Oid: ".1.0.0.1.1"}, + {Name: "uptime", Oid: ".1.0.0.1.2"}, + {Name: "load", Oid: ".1.0.0.1.3"}, + {Oid: "HOST-RESOURCES-MIB::hrMemorySize"}, + }, + Tables: []Table{ + { + Name: "remote_servers", + InheritTags: []string{"hostname"}, + Fields: []Field{ + {Name: "server", Oid: ".1.0.0.0.1.0", IsTag: true}, + {Name: "connections", Oid: ".1.0.0.0.1.1"}, + {Name: "latency", Oid: ".1.0.0.0.1.2"}, + }, + }, + { + Oid: "HOST-RESOURCES-MIB::hrNetworkTable", + }, + }, + } + assert.Equal(t, s, *conf.Inputs.Snmp[0]) +} + +func TestFieldInit(t *testing.T) { + translations := []struct { + inputOid string + inputName string + inputConversion string + expectedOid string + expectedName string + expectedConversion string + }{ + {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, + {".1.0.0.0.1.1.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, + {".999", "", "", ".999", ".999", ""}, + {"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""}, + {"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, + {"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""}, + {"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "hwaddr"}, + {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, + {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, + {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, } - var acc testutil.Accumulator - err := s.Gather(&acc) + for _, txl := range translations { + f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} + err := f.init() + if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) { + continue + } + assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + } +} + +func TestTableInit(t *testing.T) { + tbl := Table{ + Oid: ".1.0.0.0", + Fields: []Field{{Oid: ".999", Name: "foo"}}, + } + err := tbl.init() + require.NoError(t, err) + + assert.Equal(t, "testTable", tbl.Name) + + assert.Len(t, tbl.Fields, 4) + assert.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) + assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) +} + +func TestSnmpInit(t *testing.T) { + s := &Snmp{ + Tables: []Table{ + {Oid: "TEST::testTable"}, + }, + Fields: []Field{ + {Oid: "TEST::hostname"}, + }, + } + + err := s.init() + require.NoError(t, err) + + assert.Len(t, s.Tables[0].Fields, 3) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", 
initialized: true}) + + assert.Equal(t, Field{ + Oid: ".1.0.0.1.1", + Name: "hostname", + initialized: true, + }, s.Fields[0]) +} + +func TestGetSNMPConnection_v2(t *testing.T) { + s := &Snmp{ + Timeout: internal.Duration{Duration: 3 * time.Second}, + Retries: 4, + Version: 2, + Community: "foo", + } + + gsc, err := s.getConnection("1.2.3.4:567") + require.NoError(t, err) + gs := gsc.(gosnmpWrapper) + assert.Equal(t, "1.2.3.4", gs.Target) + assert.EqualValues(t, 567, gs.Port) + assert.Equal(t, gosnmp.Version2c, gs.Version) + assert.Equal(t, "foo", gs.Community) + + gsc, err = s.getConnection("1.2.3.4") + require.NoError(t, err) + gs = gsc.(gosnmpWrapper) + assert.Equal(t, "1.2.3.4", gs.Target) + assert.EqualValues(t, 161, gs.Port) +} + +func TestGetSNMPConnection_v3(t *testing.T) { + s := &Snmp{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "des", + PrivPassword: "321drowssap", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + } + + gsc, err := s.getConnection("1.2.3.4") + require.NoError(t, err) + gs := gsc.(gosnmpWrapper) + assert.Equal(t, gs.Version, gosnmp.Version3) + sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) + assert.Equal(t, "1.2.3.4", gsc.Host()) + assert.EqualValues(t, 20, gs.MaxRepetitions) + assert.Equal(t, "mycontext", gs.ContextName) + assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + assert.Equal(t, "myuser", sp.UserName) + assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + assert.Equal(t, "password123", sp.AuthenticationPassphrase) + assert.Equal(t, gosnmp.DES, sp.PrivacyProtocol) + assert.Equal(t, "321drowssap", sp.PrivacyPassphrase) + assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) + assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) +} + +func TestGetSNMPConnection_caching(t *testing.T) { + s := &Snmp{} + gs1, err := s.getConnection("1.2.3.4") + require.NoError(t, err) + gs2, err := s.getConnection("1.2.3.4") + require.NoError(t, err) + gs3, err := s.getConnection("1.2.3.5") + require.NoError(t, err) + assert.True(t, gs1 == gs2) + assert.False(t, gs2 == gs3) +} + +func TestGosnmpWrapper_walk_retry(t *testing.T) { + srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) + defer srvr.Close() + require.NoError(t, err) + reqCount := 0 + // Set up a WaitGroup to wait for the server goroutine to exit and protect + // reqCount. + // Even though simultaneous access is impossible because the server will be + // blocked on ReadFrom, without this the race detector gets unhappy. 
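+ // The server below answers every request with a single 'X' byte, which is
+ // not a valid SNMP packet, so every Walk attempt fails to decode. With
+ // gosnmp Retries=1 each attempt sends 2 requests, and gosnmpWrapper retries
+ // the whole Walk once, hence the (Retries+1)*2 request count asserted below.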
+ wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + buf := make([]byte, 256) + for { + _, addr, err := srvr.ReadFrom(buf) + if err != nil { + return + } + reqCount++ + + srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + } + }() + + gs := &gosnmp.GoSNMP{ + Target: srvr.LocalAddr().(*net.UDPAddr).IP.String(), + Port: uint16(srvr.LocalAddr().(*net.UDPAddr).Port), + Version: gosnmp.Version2c, + Community: "public", + Timeout: time.Millisecond * 10, + Retries: 1, + } + err = gs.Connect() + require.NoError(t, err) + conn := gs.Conn + + gsw := gosnmpWrapper{gs} + err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil }) + srvr.Close() + wg.Wait() + assert.Error(t, err) + assert.False(t, gs.Conn == conn) + assert.Equal(t, (gs.Retries+1)*2, reqCount) +} + +func TestGosnmpWrapper_get_retry(t *testing.T) { + srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) + defer srvr.Close() + require.NoError(t, err) + reqCount := 0 + // Set up a WaitGroup to wait for the server goroutine to exit and protect + // reqCount. + // Even though simultaneous access is impossible because the server will be + // blocked on ReadFrom, without this the race detector gets unhappy. + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + buf := make([]byte, 256) + for { + _, addr, err := srvr.ReadFrom(buf) + if err != nil { + return + } + reqCount++ + + srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + } + }() + + gs := &gosnmp.GoSNMP{ + Target: srvr.LocalAddr().(*net.UDPAddr).IP.String(), + Port: uint16(srvr.LocalAddr().(*net.UDPAddr).Port), + Version: gosnmp.Version2c, + Community: "public", + Timeout: time.Millisecond * 10, + Retries: 1, + } + err = gs.Connect() + require.NoError(t, err) + conn := gs.Conn + + gsw := gosnmpWrapper{gs} + _, err = gsw.Get([]string{".1.0.0"}) + srvr.Close() + wg.Wait() + assert.Error(t, err) + assert.False(t, gs.Conn == conn) + assert.Equal(t, (gs.Retries+1)*2, reqCount) +} + +func TestTableBuild_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.0.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.0.1.3", + Conversion: "float", + }, + { + Name: "myfield4", + Oid: ".1.0.0.2.1.5", + OidIndexSuffix: ".9.9", + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{"myfield1": "foo"}, + Fields: map[string]interface{}{ + "myfield2": 1, + "myfield3": float64(0.123), + "myfield4": 11, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{"myfield1": "bar"}, + Fields: map[string]interface{}{ + "myfield2": 2, + "myfield3": float64(0.456), + "myfield4": 22, + }, + } + assert.Len(t, tb.Rows, 2) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) +} + +func TestTableBuild_noWalk(t *testing.T) { + tbl := Table{ + Name: "mytable", + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.1.2", + IsTag: true, + }, + { + Name: "empty", + Oid: ".1.0.0.0.1.1.2", + }, + { + Name: "noexist", + Oid: ".1.2.3.4.5", + }, + }, + } + + tb, err := tbl.Build(tsc, false) + require.NoError(t, err) + + rtr := RTableRow{ + Tags: map[string]string{"myfield1": "baz", "myfield3": "234"}, + Fields: map[string]interface{}{"myfield2": 234}, + } + assert.Len(t, tb.Rows, 1) + assert.Contains(t, 
tb.Rows, rtr) +} + +func TestGather(t *testing.T) { + s := &Snmp{ + Agents: []string{"TestGather"}, + Name: "mytable", + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.1.2", + }, + { + Name: "myfield3", + Oid: "1.0.0.1.1", + }, + }, + Tables: []Table{ + { + Name: "myOtherTable", + InheritTags: []string{"myfield1"}, + Fields: []Field{ + { + Name: "myOtherField", + Oid: ".1.0.0.0.1.4", + }, + }, + }, + }, + + connectionCache: map[string]snmpConnection{ + "TestGather": tsc, + }, + } + + acc := &testutil.Accumulator{} + + tstart := time.Now() + s.Gather(acc) + tstop := time.Now() + + require.Len(t, acc.Metrics, 2) + + m := acc.Metrics[0] + assert.Equal(t, "mytable", m.Measurement) + assert.Equal(t, "tsc", m.Tags["agent_host"]) + assert.Equal(t, "baz", m.Tags["myfield1"]) + assert.Len(t, m.Fields, 2) + assert.Equal(t, 234, m.Fields["myfield2"]) + assert.Equal(t, "baz", m.Fields["myfield3"]) + assert.True(t, tstart.Before(m.Time)) + assert.True(t, tstop.After(m.Time)) + + m2 := acc.Metrics[1] + assert.Equal(t, "myOtherTable", m2.Measurement) + assert.Equal(t, "tsc", m2.Tags["agent_host"]) + assert.Equal(t, "baz", m2.Tags["myfield1"]) + assert.Len(t, m2.Fields, 1) + assert.Equal(t, 123456, m2.Fields["myOtherField"]) +} + +func TestGather_host(t *testing.T) { + s := &Snmp{ + Agents: []string{"TestGather"}, + Name: "mytable", + Fields: []Field{ + { + Name: "host", + Oid: ".1.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.1.2", + }, + }, + + connectionCache: map[string]snmpConnection{ + "TestGather": tsc, + }, + } + + acc := &testutil.Accumulator{} + + s.Gather(acc) + + require.Len(t, acc.Metrics, 1) + m := acc.Metrics[0] + assert.Equal(t, "baz", m.Tags["host"]) +} + +func TestFieldConvert(t *testing.T) { + testTable := []struct { + input interface{} + conv string + expected interface{} + }{ + {[]byte("foo"), "", string("foo")}, + {"0.123", "float", float64(0.123)}, + {[]byte("0.123"), "float", float64(0.123)}, + {float32(0.123), "float", float64(float32(0.123))}, + {float64(0.123), "float", float64(0.123)}, + {123, "float", float64(123)}, + {123, "float(0)", float64(123)}, + {123, "float(4)", float64(0.0123)}, + {int8(123), "float(3)", float64(0.123)}, + {int16(123), "float(3)", float64(0.123)}, + {int32(123), "float(3)", float64(0.123)}, + {int64(123), "float(3)", float64(0.123)}, + {uint(123), "float(3)", float64(0.123)}, + {uint8(123), "float(3)", float64(0.123)}, + {uint16(123), "float(3)", float64(0.123)}, + {uint32(123), "float(3)", float64(0.123)}, + {uint64(123), "float(3)", float64(0.123)}, + {"123", "int", int64(123)}, + {[]byte("123"), "int", int64(123)}, + {float32(12.3), "int", int64(12)}, + {float64(12.3), "int", int64(12)}, + {int(123), "int", int64(123)}, + {int8(123), "int", int64(123)}, + {int16(123), "int", int64(123)}, + {int32(123), "int", int64(123)}, + {int64(123), "int", int64(123)}, + {uint(123), "int", int64(123)}, + {uint8(123), "int", int64(123)}, + {uint16(123), "int", int64(123)}, + {uint32(123), "int", int64(123)}, + {uint64(123), "int", int64(123)}, + {[]byte("abcdef"), "hwaddr", "61:62:63:64:65:66"}, + {"abcdef", "hwaddr", "61:62:63:64:65:66"}, + {[]byte("abcd"), "ipaddr", "97.98.99.100"}, + {"abcd", "ipaddr", "97.98.99.100"}, + {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, + } + + for _, tc := range testTable { + act, err := fieldConvert(tc.conv, tc.input) + if !assert.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, 
tc.input, tc.conv, tc.expected, tc.expected) { + continue + } + assert.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + } +} + +func TestError(t *testing.T) { + e := fmt.Errorf("nested error") + err := Errorf(e, "top error %d", 123) require.Error(t, err) -} - -func TestSNMPErrorGet2(t *testing.T) { - get1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: ".1.3.6.1.2.1.2.2.1.16.1", - } - h := Host{ - Collect: []string{"oid1"}, - } - s := Snmp{ - Host: []Host{h}, - Get: []Data{get1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - assert.Equal(t, 0, len(acc.Metrics)) -} - -func TestSNMPErrorBulk(t *testing.T) { - bulk1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: ".1.3.6.1.2.1.2.2.1.16", - } - h := Host{ - Address: testutil.GetLocalHost(), - Collect: []string{"oid1"}, - } - s := Snmp{ - Host: []Host{h}, - Bulk: []Data{bulk1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - assert.Equal(t, 0, len(acc.Metrics)) -} - -func TestSNMPGet1(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - get1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: ".1.3.6.1.2.1.2.2.1.16.1", - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - Collect: []string{"oid1"}, - } - s := Snmp{ - Host: []Host{h}, - Get: []Data{get1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "oid1", - map[string]interface{}{ - "oid1": uint(543846), - }, - map[string]string{ - "unit": "octets", - "snmp_host": testutil.GetLocalHost(), - }, - ) -} - -func TestSNMPGet2(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - get1 := Data{ - Name: "oid1", - Oid: "ifNumber", - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - Collect: []string{"oid1"}, - } - s := Snmp{ - SnmptranslateFile: "./testdata/oids.txt", - Host: []Host{h}, - Get: []Data{get1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "ifNumber", - map[string]interface{}{ - "ifNumber": int(4), - }, - map[string]string{ - "instance": "0", - "snmp_host": testutil.GetLocalHost(), - }, - ) -} - -func TestSNMPGet3(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - get1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: "ifSpeed", - Instance: "1", - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - Collect: []string{"oid1"}, - } - s := Snmp{ - SnmptranslateFile: "./testdata/oids.txt", - Host: []Host{h}, - Get: []Data{get1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "ifSpeed", - map[string]interface{}{ - "ifSpeed": uint(10000000), - }, - map[string]string{ - "unit": "octets", - "instance": "1", - "snmp_host": testutil.GetLocalHost(), - }, - ) -} - -func TestSNMPEasyGet4(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - get1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: "ifSpeed", - Instance: "1", - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - 
Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - Collect: []string{"oid1"}, - GetOids: []string{"ifNumber"}, - } - s := Snmp{ - SnmptranslateFile: "./testdata/oids.txt", - Host: []Host{h}, - Get: []Data{get1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "ifSpeed", - map[string]interface{}{ - "ifSpeed": uint(10000000), - }, - map[string]string{ - "unit": "octets", - "instance": "1", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifNumber", - map[string]interface{}{ - "ifNumber": int(4), - }, - map[string]string{ - "instance": "0", - "snmp_host": testutil.GetLocalHost(), - }, - ) -} - -func TestSNMPEasyGet5(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - get1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: "ifSpeed", - Instance: "1", - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - Collect: []string{"oid1"}, - GetOids: []string{".1.3.6.1.2.1.2.1.0"}, - } - s := Snmp{ - SnmptranslateFile: "./testdata/oids.txt", - Host: []Host{h}, - Get: []Data{get1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "ifSpeed", - map[string]interface{}{ - "ifSpeed": uint(10000000), - }, - map[string]string{ - "unit": "octets", - "instance": "1", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifNumber", - map[string]interface{}{ - "ifNumber": int(4), - }, - map[string]string{ - "instance": "0", - "snmp_host": testutil.GetLocalHost(), - }, - ) -} - -func TestSNMPEasyGet6(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - GetOids: []string{"1.3.6.1.2.1.2.1.0"}, - } - s := Snmp{ - SnmptranslateFile: "./testdata/oids.txt", - Host: []Host{h}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "ifNumber", - map[string]interface{}{ - "ifNumber": int(4), - }, - map[string]string{ - "instance": "0", - "snmp_host": testutil.GetLocalHost(), - }, - ) -} - -func TestSNMPBulk1(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - bulk1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: ".1.3.6.1.2.1.2.2.1.16", - MaxRepetition: 2, - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - Collect: []string{"oid1"}, - } - s := Snmp{ - SnmptranslateFile: "./testdata/oids.txt", - Host: []Host{h}, - Bulk: []Data{bulk1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(543846), - }, - map[string]string{ - "unit": "octets", - "instance": "1", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(26475179), - }, - map[string]string{ - "unit": "octets", - "instance": "2", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(108963968), - }, - 
map[string]string{ - "unit": "octets", - "instance": "3", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(12991453), - }, - map[string]string{ - "unit": "octets", - "instance": "36", - "snmp_host": testutil.GetLocalHost(), - }, - ) -} - -// TODO find why, if this test is active -// Circle CI stops with the following error... -// bash scripts/circle-test.sh died unexpectedly -// Maybe the test is too long ?? -func dTestSNMPBulk2(t *testing.T) { - bulk1 := Data{ - Name: "oid1", - Unit: "octets", - Oid: "ifOutOctets", - MaxRepetition: 2, - } - h := Host{ - Address: testutil.GetLocalHost() + ":31161", - Community: "telegraf", - Version: 2, - Timeout: 2.0, - Retries: 2, - Collect: []string{"oid1"}, - } - s := Snmp{ - SnmptranslateFile: "./testdata/oids.txt", - Host: []Host{h}, - Bulk: []Data{bulk1}, - } - - var acc testutil.Accumulator - err := s.Gather(&acc) - require.NoError(t, err) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(543846), - }, - map[string]string{ - "unit": "octets", - "instance": "1", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(26475179), - }, - map[string]string{ - "unit": "octets", - "instance": "2", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(108963968), - }, - map[string]string{ - "unit": "octets", - "instance": "3", - "snmp_host": testutil.GetLocalHost(), - }, - ) - - acc.AssertContainsTaggedFields(t, - "ifOutOctets", - map[string]interface{}{ - "ifOutOctets": uint(12991453), - }, - map[string]string{ - "unit": "octets", - "instance": "36", - "snmp_host": testutil.GetLocalHost(), - }, - ) + + ne, ok := err.(NestedError) + require.True(t, ok) + assert.Equal(t, e, ne.NestedErr) + + assert.Contains(t, err.Error(), "top error 123") + assert.Contains(t, err.Error(), "nested error") } diff --git a/plugins/inputs/snmp/testdata/oids.txt b/plugins/inputs/snmp/testdata/oids.txt deleted file mode 100644 index 1a351be90..000000000 --- a/plugins/inputs/snmp/testdata/oids.txt +++ /dev/null @@ -1,32 +0,0 @@ -org 1.3 -dod 1.3.6 -internet 1.3.6.1 -directory 1.3.6.1.1 -mgmt 1.3.6.1.2 -mib-2 1.3.6.1.2.1 -interfaces 1.3.6.1.2.1.2 -ifNumber 1.3.6.1.2.1.2.1 -ifTable 1.3.6.1.2.1.2.2 -ifEntry 1.3.6.1.2.1.2.2.1 -ifIndex 1.3.6.1.2.1.2.2.1.1 -ifDescr 1.3.6.1.2.1.2.2.1.2 -ifType 1.3.6.1.2.1.2.2.1.3 -ifMtu 1.3.6.1.2.1.2.2.1.4 -ifSpeed 1.3.6.1.2.1.2.2.1.5 -ifPhysAddress 1.3.6.1.2.1.2.2.1.6 -ifAdminStatus 1.3.6.1.2.1.2.2.1.7 -ifOperStatus 1.3.6.1.2.1.2.2.1.8 -ifLastChange 1.3.6.1.2.1.2.2.1.9 -ifInOctets 1.3.6.1.2.1.2.2.1.10 -ifInUcastPkts 1.3.6.1.2.1.2.2.1.11 -ifInNUcastPkts 1.3.6.1.2.1.2.2.1.12 -ifInDiscards 1.3.6.1.2.1.2.2.1.13 -ifInErrors 1.3.6.1.2.1.2.2.1.14 -ifInUnknownProtos 1.3.6.1.2.1.2.2.1.15 -ifOutOctets 1.3.6.1.2.1.2.2.1.16 -ifOutUcastPkts 1.3.6.1.2.1.2.2.1.17 -ifOutNUcastPkts 1.3.6.1.2.1.2.2.1.18 -ifOutDiscards 1.3.6.1.2.1.2.2.1.19 -ifOutErrors 1.3.6.1.2.1.2.2.1.20 -ifOutQLen 1.3.6.1.2.1.2.2.1.21 -ifSpecific 1.3.6.1.2.1.2.2.1.22 diff --git a/plugins/inputs/snmp/testdata/snmpd.conf b/plugins/inputs/snmp/testdata/snmpd.conf new file mode 100644 index 000000000..3f3151a65 --- /dev/null +++ b/plugins/inputs/snmp/testdata/snmpd.conf @@ -0,0 +1,17 @@ +# This config provides the data represented in the plugin documentation +# 
Requires net-snmp >= 5.7
+
+#agentaddress UDP:127.0.0.1:1161
+rocommunity public
+
+override .1.0.0.0.1.1.0 octet_str "foo"
+override .1.0.0.0.1.1.1 octet_str "bar"
+override .1.0.0.0.1.102 octet_str "bad"
+override .1.0.0.0.1.2.0 integer 1
+override .1.0.0.0.1.2.1 integer 2
+override .1.0.0.0.1.3.0 octet_str "0.123"
+override .1.0.0.0.1.3.1 octet_str "0.456"
+override .1.0.0.0.1.3.2 octet_str "9.999"
+override .1.0.0.1.1 octet_str "baz"
+override .1.0.0.1.2 uinteger 54321
+override .1.0.0.1.3 uinteger 234
diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib
new file mode 100644
index 000000000..d3246673b
--- /dev/null
+++ b/plugins/inputs/snmp/testdata/test.mib
@@ -0,0 +1,51 @@
+TEST DEFINITIONS ::= BEGIN
+
+testOID ::= { 1 0 0 }
+
+testTable OBJECT-TYPE
+ SYNTAX SEQUENCE OF testTableEntry
+ MAX-ACCESS not-accessible
+ STATUS current
+ ::= { testOID 0 }
+
+testTableEntry OBJECT-TYPE
+ SYNTAX TestTableEntry
+ MAX-ACCESS not-accessible
+ STATUS current
+ INDEX {
+ server
+ }
+ ::= { testTable 1 }
+
+TestTableEntry ::=
+ SEQUENCE {
+ server OCTET STRING,
+ connections INTEGER,
+ latency OCTET STRING,
+ }
+
+server OBJECT-TYPE
+ SYNTAX OCTET STRING
+ MAX-ACCESS read-only
+ STATUS current
+ ::= { testTableEntry 1 }
+
+connections OBJECT-TYPE
+ SYNTAX INTEGER
+ MAX-ACCESS read-only
+ STATUS current
+ ::= { testTableEntry 2 }
+
+latency OBJECT-TYPE
+ SYNTAX OCTET STRING
+ MAX-ACCESS read-only
+ STATUS current
+ ::= { testTableEntry 3 }
+
+hostname OBJECT-TYPE
+ SYNTAX OCTET STRING
+ MAX-ACCESS read-only
+ STATUS current
+ ::= { testOID 1 1 }
+
+END
diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md
new file mode 100644
index 000000000..bee783228
--- /dev/null
+++ b/plugins/inputs/snmp_legacy/README.md
@@ -0,0 +1,549 @@
+# SNMP Input Plugin
+
+The SNMP input plugin gathers metrics from SNMP agents.
+
+### Configuration:
+
+
+#### Very simple example
+
+In this example, the plugin will gather the values of these OIDs:
+
+ - `.1.3.6.1.2.1.2.2.1.4.1`
+
+```toml
+# Very Simple Example
+[[inputs.snmp]]
+
+ [[inputs.snmp.host]]
+ address = "127.0.0.1:161"
+ # SNMP community
+ community = "public" # default public
+ # SNMP version (1, 2 or 3)
+ # Version 3 not supported yet
+ version = 2 # default 2
+ # Simple list of OIDs to get, in addition to "collect"
+ get_oids = [".1.3.6.1.2.1.2.2.1.4.1"]
+```
+
+
+#### Simple example
+
+In this example, Telegraf gathers the values of the OIDs:
+
+ - named **ifnumber**
+ - named **interface_speed**
+
+With the **inputs.snmp.get** section the plugin gets the OID number:
+
+ - **ifnumber** => `.1.3.6.1.2.1.2.1.0`
+ - **interface_speed** => *ifSpeed*
+
+As you can see, *ifSpeed* is not a valid OID.
+
+#### Simple bulk example
+
+In this example, Telegraf gathers the values of three OIDs:
+
+ - named **if_number**
+ - named **interface_speed**
+ - named **if_out_octets**
+
+In the **inputs.snmp.get** section, the plugin maps names to OIDs:
+
+ - **if_number** => `.1.3.6.1.2.1.2.1.0`
+ - **interface_speed** => *ifSpeed*
+
+In the **inputs.snmp.bulk** section, the plugin maps one more name to an OID:
+
+ - **if_out_octets** => *ifOutOctets*
+
+As you can see, *ifSpeed* and *ifOutOctets* are not valid OIDs.
+To obtain the valid OIDs, the plugin uses `snmptranslate_file`
+to match the names to OIDs:
+
+ - **if_number** => `.1.3.6.1.2.1.2.1.0`
+ - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5`
+ - **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16`
+
+The plugin then appends the configured `instance` to the corresponding OID:
+
+ - **if_number** => `.1.3.6.1.2.1.2.1.0`
+ - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1`
+
+Because **if_out_octets** is a bulk request, the plugin gathers all
+OIDs in the table:
+
+- `.1.3.6.1.2.1.2.2.1.16.1`
+- `.1.3.6.1.2.1.2.2.1.16.2`
+- `.1.3.6.1.2.1.2.2.1.16.3`
+- `.1.3.6.1.2.1.2.2.1.16.4`
+- `.1.3.6.1.2.1.2.2.1.16.5`
+- `...`
+
+So in this example, the plugin gathers the values of these OIDs:
+
+- `.1.3.6.1.2.1.2.1.0`
+- `.1.3.6.1.2.1.2.2.1.5.1`
+- `.1.3.6.1.2.1.2.2.1.16.1`
+- `.1.3.6.1.2.1.2.2.1.16.2`
+- `.1.3.6.1.2.1.2.2.1.16.3`
+- `.1.3.6.1.2.1.2.2.1.16.4`
+- `.1.3.6.1.2.1.2.2.1.16.5`
+- `...`
+
+
+```toml
+# Simple bulk example
+[[inputs.snmp]]
+  ## Use 'oids.txt' file to translate oids to names
+  ## To generate 'oids.txt' you need to run:
+  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+  ## Or, if you have another MIB folder with custom MIBs:
+  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+  snmptranslate_file = "/tmp/oids.txt"
+  [[inputs.snmp.host]]
+    address = "127.0.0.1:161"
+    # SNMP community
+    community = "public" # default public
+    # SNMP version (1, 2 or 3)
+    # Version 3 not supported yet
+    version = 2 # default 2
+    # Which get/bulk do you want to collect for this host
+    collect = ["interface_speed", "if_number", "if_out_octets"]
+
+  [[inputs.snmp.get]]
+    name = "interface_speed"
+    oid = "ifSpeed"
+    instance = "1"
+
+  [[inputs.snmp.get]]
+    name = "if_number"
+    oid = "ifNumber"
+
+  [[inputs.snmp.bulk]]
+    name = "if_out_octets"
+    oid = "ifOutOctets"
+```
+
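+Under the hood, a bulk request is a repeated GETBULK walk: the plugin keeps
+asking for more variables until the agent returns an OID outside the
+requested subtree (this is what the plugin's `SNMPBulk` loop does). Below is
+a minimal, self-contained sketch of that loop using the same
+`github.com/soniah/gosnmp` library; the target address and the walked OID
+are assumptions chosen for illustration:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/soniah/gosnmp"
+)
+
+func main() {
+	client := &gosnmp.GoSNMP{
+		Target:    "127.0.0.1", // assumed reachable SNMP agent
+		Port:      161,
+		Community: "public",
+		Version:   gosnmp.Version2c,
+		Timeout:   2 * time.Second,
+		Retries:   2,
+	}
+	if err := client.Connect(); err != nil {
+		log.Fatal(err)
+	}
+	defer client.Conn.Close()
+
+	asked := ".1.3.6.1.2.1.2.2.1.16" // the ifOutOctets column
+	oid := asked
+	for {
+		// 0 non-repeaters, up to 32 repetitions per request
+		result, err := client.GetBulk([]string{oid}, 0, 32)
+		if err != nil {
+			log.Fatal(err)
+		}
+		last := ""
+		for _, v := range result.Variables {
+			last = v.Name
+			if strings.HasPrefix(v.Name, asked) {
+				fmt.Println(v.Name, v.Value)
+			}
+		}
+		// Stop once the agent has walked past the requested subtree.
+		if last == "" || !strings.HasPrefix(last, asked) {
+			break
+		}
+		oid = last // continue the walk from the last OID returned
+	}
+}
+```
+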
+
+#### Table example
+
+In this example, we remove the `collect` attribute from the host section,
+but you can still use it in combination with the following part.
+
+Note: this example is like a bulk request, but uses a different
+configuration.
+
+Telegraf gathers the values of the OIDs of one table:
+
+ - named **iftable1**
+
+In the **inputs.snmp.table** section, the plugin maps the name to an OID:
+
+ - **iftable1** => `.1.3.6.1.2.1.31.1.1.1`
+
+Because **iftable1** is a table, the plugin gathers all
+OIDs in the table and in its subtables:
+
+- `.1.3.6.1.2.1.31.1.1.1.1`
+- `.1.3.6.1.2.1.31.1.1.1.1.1`
+- `.1.3.6.1.2.1.31.1.1.1.1.2`
+- `.1.3.6.1.2.1.31.1.1.1.1.3`
+- `.1.3.6.1.2.1.31.1.1.1.1.4`
+- `.1.3.6.1.2.1.31.1.1.1.1....`
+- `.1.3.6.1.2.1.31.1.1.1.2`
+- `.1.3.6.1.2.1.31.1.1.1.2....`
+- `.1.3.6.1.2.1.31.1.1.1.3`
+- `.1.3.6.1.2.1.31.1.1.1.3....`
+- `.1.3.6.1.2.1.31.1.1.1.4`
+- `.1.3.6.1.2.1.31.1.1.1.4....`
+- `.1.3.6.1.2.1.31.1.1.1.5`
+- `.1.3.6.1.2.1.31.1.1.1.5....`
+- `.1.3.6.1.2.1.31.1.1.1.6....`
+- `...`
+
+```toml
+# Table example
+[[inputs.snmp]]
+  ## Use 'oids.txt' file to translate oids to names
+  ## To generate 'oids.txt' you need to run:
+  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+  ## Or, if you have another MIB folder with custom MIBs:
+  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+  snmptranslate_file = "/tmp/oids.txt"
+  [[inputs.snmp.host]]
+    address = "127.0.0.1:161"
+    # SNMP community
+    community = "public" # default public
+    # SNMP version (1, 2 or 3)
+    # Version 3 not supported yet
+    version = 2 # default 2
+    # Which table do you want to collect
+    [[inputs.snmp.host.table]]
+      name = "iftable1"
+
+  # table without mapping nor subtables
+  # This is like a bulk request
+  [[inputs.snmp.table]]
+    name = "iftable1"
+    oid = ".1.3.6.1.2.1.31.1.1.1"
+```
+
+
+#### Table with subtable example
+
+In this example, we remove the `collect` attribute from the host section,
+but you can still use it in combination with the following part.
+
+Note: this example is like a bulk request, but uses a different
+configuration.
+
+Telegraf gathers the values of the OIDs of one table:
+
+ - named **iftable2**
+
+With the **inputs.snmp.table** section *AND* the **sub_tables** attribute,
+the plugin gets OIDs from subtables:
+
+ - **iftable2** => `.1.3.6.1.2.1.2.2.1.13`
+
+Because **iftable2** is a table, the plugin gathers all
+OIDs in the subtables:
+
+- `.1.3.6.1.2.1.2.2.1.13.1`
+- `.1.3.6.1.2.1.2.2.1.13.2`
+- `.1.3.6.1.2.1.2.2.1.13.3`
+- `.1.3.6.1.2.1.2.2.1.13.4`
+- `.1.3.6.1.2.1.2.2.1.13....`
+
+
+```toml
+# Table with subtable example
+[[inputs.snmp]]
+  ## Use 'oids.txt' file to translate oids to names
+  ## To generate 'oids.txt' you need to run:
+  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+  ## Or, if you have another MIB folder with custom MIBs:
+  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+  snmptranslate_file = "/tmp/oids.txt"
+  [[inputs.snmp.host]]
+    address = "127.0.0.1:161"
+    # SNMP community
+    community = "public" # default public
+    # SNMP version (1, 2 or 3)
+    # Version 3 not supported yet
+    version = 2 # default 2
+    # Which table do you want to collect
+    [[inputs.snmp.host.table]]
+      name = "iftable2"
+
+  # table without mapping but with subtables
+  [[inputs.snmp.table]]
+    name = "iftable2"
+    sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
+    # note: the oid attribute is ignored here
+```
+
+
+#### Table with mapping example
+
+In this example, we remove the `collect` attribute from the host section,
+but you can still use it in combination with the following part.
+
+Telegraf gathers the values of the OIDs of one table:
+
+ - named **iftable3**
+
+In the **inputs.snmp.table** section, the plugin maps the name to an OID:
+
+ - **iftable3** => `.1.3.6.1.2.1.31.1.1.1`
+
+Because **iftable3** is a table, the plugin would gather all
+OIDs in the table and in its subtables:
+
+- `.1.3.6.1.2.1.31.1.1.1.1`
+- `.1.3.6.1.2.1.31.1.1.1.1.1`
+- `.1.3.6.1.2.1.31.1.1.1.1.2`
+- `.1.3.6.1.2.1.31.1.1.1.1.3`
+- `.1.3.6.1.2.1.31.1.1.1.1.4`
+- `.1.3.6.1.2.1.31.1.1.1.1....`
+- `.1.3.6.1.2.1.31.1.1.1.2`
+- `.1.3.6.1.2.1.31.1.1.1.2....`
+- `.1.3.6.1.2.1.31.1.1.1.3`
+- `.1.3.6.1.2.1.31.1.1.1.3....`
+- `.1.3.6.1.2.1.31.1.1.1.4`
+- `.1.3.6.1.2.1.31.1.1.1.4....`
+- `.1.3.6.1.2.1.31.1.1.1.5`
+- `.1.3.6.1.2.1.31.1.1.1.5....`
+- `.1.3.6.1.2.1.31.1.1.1.6....`
+- `...`
+
+But the **include_instances** attribute filters which OIDs
+are gathered. As you can see, there is another attribute, `mapping_table`.
+Together, `include_instances` and `mapping_table` build a hash table
+used to keep only the OIDs you want.
+
+Let's say we have the following data on the SNMP server:
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has the value `enp5s0`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has the value `enp5s1`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has the value `enp5s2`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has the value `eth0`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has the value `eth1`
+
+The plugin builds the following hash table:
+
+| instance name | instance id |
+|---------------|-------------|
+| `enp5s0`      | `1`         |
+| `enp5s1`      | `2`         |
+| `enp5s2`      | `3`         |
+| `eth0`        | `4`         |
+| `eth1`        | `5`         |
+
+With the **include_instances** attribute, the plugin gathers
+the following OIDs:
+
+- `.1.3.6.1.2.1.31.1.1.1.1.1`
+- `.1.3.6.1.2.1.31.1.1.1.1.5`
+- `.1.3.6.1.2.1.31.1.1.1.2.1`
+- `.1.3.6.1.2.1.31.1.1.1.2.5`
+- `.1.3.6.1.2.1.31.1.1.1.3.1`
+- `.1.3.6.1.2.1.31.1.1.1.3.5`
+- `.1.3.6.1.2.1.31.1.1.1.4.1`
+- `.1.3.6.1.2.1.31.1.1.1.4.5`
+- `.1.3.6.1.2.1.31.1.1.1.5.1`
+- `.1.3.6.1.2.1.31.1.1.1.5.5`
+- `.1.3.6.1.2.1.31.1.1.1.6.1`
+- `.1.3.6.1.2.1.31.1.1.1.6.5`
+- `...`
+
+Note: the plugin adds the instance name as the *instance* tag.
+
+```toml
+# Simple table with mapping example
+[[inputs.snmp]]
+  ## Use 'oids.txt' file to translate oids to names
+  ## To generate 'oids.txt' you need to run:
+  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+  ## Or, if you have another MIB folder with custom MIBs:
+  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+  snmptranslate_file = "/tmp/oids.txt"
+  [[inputs.snmp.host]]
+    address = "127.0.0.1:161"
+    # SNMP community
+    community = "public" # default public
+    # SNMP version (1, 2 or 3)
+    # Version 3 not supported yet
+    version = 2 # default 2
+    # Which table do you want to collect
+    [[inputs.snmp.host.table]]
+      name = "iftable3"
+      include_instances = ["enp5s0", "eth1"]
+
+  # table with mapping but without subtables
+  [[inputs.snmp.table]]
+    name = "iftable3"
+    oid = ".1.3.6.1.2.1.31.1.1.1"
+    # if empty, get all instances
+    mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+    # if empty, get all subtables
+```
+
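+For readers who want the mechanics, here is a simplified, in-memory sketch
+of how that mapping-and-filter step behaves. The real work happens in the
+plugin's `SNMPMap` function via GETBULK walks over `mapping_table`; the
+function and variable names below are illustrative, not the plugin's API:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// buildMapping takes the instance names walked from mapping_table and
+// returns instance-id -> instance-name for the instances kept by the
+// include/exclude filters (mirroring include_instances/exclude_instances).
+func buildMapping(mappingOID string, walked map[string]string,
+	include, exclude []string) map[string]string {
+	out := make(map[string]string)
+	for oid, name := range walked {
+		keep := true
+		if len(include) > 0 {
+			keep = contains(include, name)
+		}
+		if len(exclude) > 0 && contains(exclude, name) {
+			keep = false
+		}
+		if !keep {
+			continue
+		}
+		// The instance id is the OID with the mapping-table prefix removed.
+		id := strings.Trim(strings.TrimPrefix(oid, mappingOID), ".")
+		out[id] = name
+	}
+	return out
+}
+
+func contains(list []string, s string) bool {
+	for _, v := range list {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
+
+func main() {
+	walked := map[string]string{ // as in the example data above
+		".1.3.6.1.2.1.31.1.1.1.1.1": "enp5s0",
+		".1.3.6.1.2.1.31.1.1.1.1.4": "eth0",
+		".1.3.6.1.2.1.31.1.1.1.1.5": "eth1",
+	}
+	m := buildMapping(".1.3.6.1.2.1.31.1.1.1.1", walked,
+		[]string{"enp5s0", "eth1"}, nil)
+	fmt.Println(m) // map[1:enp5s0 5:eth1]
+}
+```
+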
+
+#### Table with both mapping and subtable example
+
+In this example, we remove the `collect` attribute from the host section,
+but you can still use it in combination with the following part.
+
+Telegraf gathers the values of the OIDs of one table:
+
+ - named **iftable4**
+
+With the **inputs.snmp.table** section *AND* the **sub_tables** attribute,
+the plugin gets OIDs from subtables:
+
+ - **iftable4** => `.1.3.6.1.2.1.31.1.1.1`
+
+Because **iftable4** is a table, the plugin would gather all
+OIDs in the configured subtables:
+
+- `.1.3.6.1.2.1.31.1.1.1.6.1`
+- `.1.3.6.1.2.1.31.1.1.1.6.2`
+- `.1.3.6.1.2.1.31.1.1.1.6.3`
+- `.1.3.6.1.2.1.31.1.1.1.6.4`
+- `.1.3.6.1.2.1.31.1.1.1.6....`
+- `.1.3.6.1.2.1.31.1.1.1.10.1`
+- `.1.3.6.1.2.1.31.1.1.1.10.2`
+- `.1.3.6.1.2.1.31.1.1.1.10.3`
+- `.1.3.6.1.2.1.31.1.1.1.10.4`
+- `.1.3.6.1.2.1.31.1.1.1.10....`
+
+But the **include_instances** attribute filters which OIDs
+are gathered. As you can see, there is another attribute, `mapping_table`.
+Together, `include_instances` and `mapping_table` build a hash table
+used to keep only the OIDs you want.
+
+Let's say we have the following data on the SNMP server:
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has the value `enp5s0`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has the value `enp5s1`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has the value `enp5s2`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has the value `eth0`
+ - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has the value `eth1`
+
+The plugin builds the following hash table:
+
+| instance name | instance id |
+|---------------|-------------|
+| `enp5s0`      | `1`         |
+| `enp5s1`      | `2`         |
+| `enp5s2`      | `3`         |
+| `eth0`        | `4`         |
+| `eth1`        | `5`         |
+
+With the **include_instances** attribute, the plugin gathers
+the following OIDs:
+
+- `.1.3.6.1.2.1.31.1.1.1.6.1`
+- `.1.3.6.1.2.1.31.1.1.1.6.5`
+- `.1.3.6.1.2.1.31.1.1.1.10.1`
+- `.1.3.6.1.2.1.31.1.1.1.10.5`
+
+Note: the plugin adds the instance name as the *instance* tag.
+
+
+```toml
+# Table with both mapping and subtable example
+[[inputs.snmp]]
+  ## Use 'oids.txt' file to translate oids to names
+  ## To generate 'oids.txt' you need to run:
+  ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+  ## Or, if you have another MIB folder with custom MIBs:
+  ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+  snmptranslate_file = "/tmp/oids.txt"
+  [[inputs.snmp.host]]
+    address = "127.0.0.1:161"
+    # SNMP community
+    community = "public" # default public
+    # SNMP version (1, 2 or 3)
+    # Version 3 not supported yet
+    version = 2 # default 2
+    # Which table do you want to collect
+    [[inputs.snmp.host.table]]
+      name = "iftable4"
+      include_instances = ["enp5s0", "eth1"]
+
+  # table with both mapping and subtables
+  [[inputs.snmp.table]]
+    name = "iftable4"
+    # if empty, get all instances
+    mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+    # if empty, get all subtables
+    # sub_tables need not be "real" subtables
+    sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
+    # note: the oid attribute is ignored here
+
+  # SNMP SUBTABLES
+  [[inputs.snmp.subtable]]
+    name = "bytes_recv"
+    oid = ".1.3.6.1.2.1.31.1.1.1.6"
+    unit = "octets"
+
+  [[inputs.snmp.subtable]]
+    name = "bytes_send"
+    oid = ".1.3.6.1.2.1.31.1.1.1.10"
+    unit = "octets"
+```
+
+#### Configuration notes
+
+- In the **inputs.snmp.table** section, the `oid` attribute is ignored if
+  the `sub_tables` attribute is defined.
+
+- In the **inputs.snmp.subtable** section, you can use a name from the
+  `snmptranslate_file` as the `oid` attribute instead of a valid OID.
+
+### Measurements & Fields:
+
+With the last example (Table with both mapping and subtable example):
+
+- ifHCOutOctets
+    - ifHCOutOctets
+- ifInDiscards
+    - ifInDiscards
+- ifHCInOctets
+    - ifHCInOctets
+
+### Tags:
+
+With the last example (Table with both mapping and subtable example):
+
+- ifHCOutOctets
+    - host
+    - instance
+    - unit
+- ifInDiscards
+    - host
+    - instance
+- ifHCInOctets
+    - host
+    - instance
+    - unit
+
+### Example Output:
+
+With the last example (Table with both mapping and subtable example):
+
+```
+ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901
+ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264
+ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312
+```
diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go
new file mode 100644
index 000000000..e5dbbc459
--- /dev/null
+++ b/plugins/inputs/snmp_legacy/snmp_legacy.go
@@ -0,0 +1,818 @@
+package snmp_legacy
+
+import (
+	"io/ioutil"
+	"log"
+	"net"
+	
"strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/soniah/gosnmp" +) + +// Snmp is a snmp plugin +type Snmp struct { + Host []Host + Get []Data + Bulk []Data + Table []Table + Subtable []Subtable + SnmptranslateFile string + + nameToOid map[string]string + initNode Node + subTableMap map[string]Subtable +} + +type Host struct { + Address string + Community string + // SNMP version. Default 2 + Version int + // SNMP timeout, in seconds. 0 means no timeout + Timeout float64 + // SNMP retries + Retries int + // Data to collect (list of Data names) + Collect []string + // easy get oids + GetOids []string + // Table + Table []HostTable + // Oids + getOids []Data + bulkOids []Data + tables []HostTable + // array of processed oids + // to skip oid duplication + processedOids []string + + OidInstanceMapping map[string]map[string]string +} + +type Table struct { + // name = "iftable" + Name string + // oid = ".1.3.6.1.2.1.31.1.1.1" + Oid string + //if empty get all instances + //mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + MappingTable string + // if empty get all subtables + // sub_tables could be not "real subtables" + //sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + SubTables []string +} + +type HostTable struct { + // name = "iftable" + Name string + // Includes only these instances + // include_instances = ["eth0", "eth1"] + IncludeInstances []string + // Excludes only these instances + // exclude_instances = ["eth20", "eth21"] + ExcludeInstances []string + // From Table struct + oid string + mappingTable string + subTables []string +} + +// TODO find better names +type Subtable struct { + //name = "bytes_send" + Name string + //oid = ".1.3.6.1.2.1.31.1.1.1.10" + Oid string + //unit = "octets" + Unit string +} + +type Data struct { + Name string + // OID (could be numbers or name) + Oid string + // Unit + Unit string + // SNMP getbulk max repetition + MaxRepetition uint8 `toml:"max_repetition"` + // SNMP Instance (default 0) + // (only used with GET request and if + // OID is a name from snmptranslate file) + Instance string + // OID (only number) (used for computation) + rawOid string +} + +type Node struct { + id string + name string + subnodes map[string]Node +} + +var sampleConfig = ` + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "192.168.2.2:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # SNMP response timeout + timeout = 2.0 # default 2.0 + # SNMP request retries + retries = 2 # default 2 + # Which get/bulk do you want to collect for this host + collect = ["mybulk", "sysservices", "sysdescr"] + # Simple list of OIDs to get, in addition to "collect" + get_oids = [] + + [[inputs.snmp.host]] + address = "192.168.2.3:161" + community = "public" + version = 2 + timeout = 2.0 + retries = 2 + collect = ["mybulk"] + get_oids = [ + "ifNumber", + ".1.3.6.1.2.1.1.3.0", + ] + + [[inputs.snmp.get]] + name = "ifnumber" + oid = "ifNumber" + + [[inputs.snmp.get]] + name = "interface_speed" + oid = "ifSpeed" + instance = "0" + + [[inputs.snmp.get]] + name = "sysuptime" + 
oid = ".1.3.6.1.2.1.1.3.0" + unit = "second" + + [[inputs.snmp.bulk]] + name = "mybulk" + max_repetition = 127 + oid = ".1.3.6.1.2.1.1" + + [[inputs.snmp.bulk]] + name = "ifoutoctets" + max_repetition = 127 + oid = "ifOutOctets" + + [[inputs.snmp.host]] + address = "192.168.2.13:161" + #address = "127.0.0.1:161" + community = "public" + version = 2 + timeout = 2.0 + retries = 2 + #collect = ["mybulk", "sysservices", "sysdescr", "systype"] + collect = ["sysuptime" ] + [[inputs.snmp.host.table]] + name = "iftable3" + include_instances = ["enp5s0", "eth1"] + + # SNMP TABLEs + # table without mapping neither subtables + [[inputs.snmp.table]] + name = "iftable1" + oid = ".1.3.6.1.2.1.31.1.1.1" + + # table without mapping but with subtables + [[inputs.snmp.table]] + name = "iftable2" + oid = ".1.3.6.1.2.1.31.1.1.1" + sub_tables = [".1.3.6.1.2.1.2.2.1.13"] + + # table with mapping but without subtables + [[inputs.snmp.table]] + name = "iftable3" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty. get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty, get all subtables + + # table with both mapping and subtables + [[inputs.snmp.table]] + name = "iftable4" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty get all subtables + # sub_tables could be not "real subtables" + sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] +` + +// SampleConfig returns sample configuration message +func (s *Snmp) SampleConfig() string { + return sampleConfig +} + +// Description returns description of Zookeeper plugin +func (s *Snmp) Description() string { + return `DEPRECATED! PLEASE USE inputs.snmp INSTEAD.` +} + +func fillnode(parentNode Node, oid_name string, ids []string) { + // ids = ["1", "3", "6", ...] + id, ids := ids[0], ids[1:] + node, ok := parentNode.subnodes[id] + if ok == false { + node = Node{ + id: id, + name: "", + subnodes: make(map[string]Node), + } + if len(ids) == 0 { + node.name = oid_name + } + parentNode.subnodes[id] = node + } + if len(ids) > 0 { + fillnode(node, oid_name, ids) + } +} + +func findnodename(node Node, ids []string) (string, string) { + // ids = ["1", "3", "6", ...] + if len(ids) == 1 { + return node.name, ids[0] + } + id, ids := ids[0], ids[1:] + // Get node + subnode, ok := node.subnodes[id] + if ok { + return findnodename(subnode, ids) + } + // We got a node + // Get node name + if node.name != "" && len(ids) == 0 && id == "0" { + // node with instance 0 + return node.name, "0" + } else if node.name != "" && len(ids) == 0 && id != "0" { + // node with an instance + return node.name, string(id) + } else if node.name != "" && len(ids) > 0 { + // node with subinstances + return node.name, strings.Join(ids, ".") + } + // return an empty node name + return node.name, "" +} + +func (s *Snmp) Gather(acc telegraf.Accumulator) error { + // TODO put this in cache on first run + // Create subtables mapping + if len(s.subTableMap) == 0 { + s.subTableMap = make(map[string]Subtable) + for _, sb := range s.Subtable { + s.subTableMap[sb.Name] = sb + } + } + // TODO put this in cache on first run + // Create oid tree + if s.SnmptranslateFile != "" && len(s.initNode.subnodes) == 0 { + s.nameToOid = make(map[string]string) + s.initNode = Node{ + id: "1", + name: "", + subnodes: make(map[string]Node), + } + + data, err := ioutil.ReadFile(s.SnmptranslateFile) + if err != nil { + log.Printf("E! 
Reading SNMPtranslate file error: %s", err) + return err + } else { + for _, line := range strings.Split(string(data), "\n") { + oids := strings.Fields(string(line)) + if len(oids) == 2 && oids[1] != "" { + oid_name := oids[0] + oid := oids[1] + fillnode(s.initNode, oid_name, strings.Split(string(oid), ".")) + s.nameToOid[oid_name] = oid + } + } + } + } + // Fetching data + for _, host := range s.Host { + // Set default args + if len(host.Address) == 0 { + host.Address = "127.0.0.1:161" + } + if host.Community == "" { + host.Community = "public" + } + if host.Timeout <= 0 { + host.Timeout = 2.0 + } + if host.Retries <= 0 { + host.Retries = 2 + } + // Prepare host + // Get Easy GET oids + for _, oidstring := range host.GetOids { + oid := Data{} + if val, ok := s.nameToOid[oidstring]; ok { + // TODO should we add the 0 instance ? + oid.Name = oidstring + oid.Oid = val + oid.rawOid = "." + val + ".0" + } else { + oid.Name = oidstring + oid.Oid = oidstring + if string(oidstring[:1]) != "." { + oid.rawOid = "." + oidstring + } else { + oid.rawOid = oidstring + } + } + host.getOids = append(host.getOids, oid) + } + + for _, oid_name := range host.Collect { + // Get GET oids + for _, oid := range s.Get { + if oid.Name == oid_name { + if val, ok := s.nameToOid[oid.Oid]; ok { + // TODO should we add the 0 instance ? + if oid.Instance != "" { + oid.rawOid = "." + val + "." + oid.Instance + } else { + oid.rawOid = "." + val + ".0" + } + } else { + oid.rawOid = oid.Oid + } + host.getOids = append(host.getOids, oid) + } + } + // Get GETBULK oids + for _, oid := range s.Bulk { + if oid.Name == oid_name { + if val, ok := s.nameToOid[oid.Oid]; ok { + oid.rawOid = "." + val + } else { + oid.rawOid = oid.Oid + } + host.bulkOids = append(host.bulkOids, oid) + } + } + } + // Table + for _, hostTable := range host.Table { + for _, snmpTable := range s.Table { + if hostTable.Name == snmpTable.Name { + table := hostTable + table.oid = snmpTable.Oid + table.mappingTable = snmpTable.MappingTable + table.subTables = snmpTable.SubTables + host.tables = append(host.tables, table) + } + } + } + // Launch Mapping + // TODO put this in cache on first run + // TODO save mapping and computed oids + // to do it only the first time + // only if len(s.OidInstanceMapping) == 0 + if len(host.OidInstanceMapping) >= 0 { + if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { + log.Printf("E! SNMP Mapping error for host '%s': %s", host.Address, err) + continue + } + } + // Launch Get requests + if err := host.SNMPGet(acc, s.initNode); err != nil { + log.Printf("E! SNMP Error for host '%s': %s", host.Address, err) + } + if err := host.SNMPBulk(acc, s.initNode); err != nil { + log.Printf("E! SNMP Error for host '%s': %s", host.Address, err) + } + } + return nil +} + +func (h *Host) SNMPMap( + acc telegraf.Accumulator, + nameToOid map[string]string, + subTableMap map[string]Subtable, +) error { + if h.OidInstanceMapping == nil { + h.OidInstanceMapping = make(map[string]map[string]string) + } + // Get snmp client + snmpClient, err := h.GetSNMPClient() + if err != nil { + return err + } + // Deconnection + defer snmpClient.Conn.Close() + // Prepare OIDs + for _, table := range h.tables { + // We don't have mapping + if table.mappingTable == "" { + if len(table.subTables) == 0 { + // If We don't have mapping table + // neither subtables list + // This is just a bulk request + oid := Data{} + oid.Oid = table.oid + if val, ok := nameToOid[oid.Oid]; ok { + oid.rawOid = "." 
+ val + } else { + oid.rawOid = oid.Oid + } + h.bulkOids = append(h.bulkOids, oid) + } else { + // If We don't have mapping table + // but we have subtables + // This is a bunch of bulk requests + // For each subtable ... + for _, sb := range table.subTables { + // ... we create a new Data (oid) object + oid := Data{} + // Looking for more information about this subtable + ssb, exists := subTableMap[sb] + if exists { + // We found a subtable section in config files + oid.Oid = ssb.Oid + oid.rawOid = ssb.Oid + oid.Unit = ssb.Unit + } else { + // We did NOT find a subtable section in config files + oid.Oid = sb + oid.rawOid = sb + } + // TODO check oid validity + + // Add the new oid to getOids list + h.bulkOids = append(h.bulkOids, oid) + } + } + } else { + // We have a mapping table + // We need to query this table + // To get mapping between instance id + // and instance name + oid_asked := table.mappingTable + oid_next := oid_asked + need_more_requests := true + // Set max repetition + maxRepetition := uint8(32) + // Launch requests + for need_more_requests { + // Launch request + result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition) + if err3 != nil { + return err3 + } + + lastOid := "" + for _, variable := range result.Variables { + lastOid = variable.Name + if strings.HasPrefix(variable.Name, oid_asked) { + switch variable.Type { + // handle instance names + case gosnmp.OctetString: + // Check if instance is in includes instances + getInstances := true + if len(table.IncludeInstances) > 0 { + getInstances = false + for _, instance := range table.IncludeInstances { + if instance == string(variable.Value.([]byte)) { + getInstances = true + } + } + } + // Check if instance is in excludes instances + if len(table.ExcludeInstances) > 0 { + getInstances = true + for _, instance := range table.ExcludeInstances { + if instance == string(variable.Value.([]byte)) { + getInstances = false + } + } + } + // We don't want this instance + if !getInstances { + continue + } + + // remove oid table from the complete oid + // in order to get the current instance id + key := strings.Replace(variable.Name, oid_asked, "", 1) + + if len(table.subTables) == 0 { + // We have a mapping table + // but no subtables + // This is just a bulk request + + // Building mapping table + mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))} + _, exists := h.OidInstanceMapping[table.oid] + if exists { + h.OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte)) + } else { + h.OidInstanceMapping[table.oid] = mapping + } + + // Add table oid in bulk oid list + oid := Data{} + oid.Oid = table.oid + if val, ok := nameToOid[oid.Oid]; ok { + oid.rawOid = "." + val + } else { + oid.rawOid = oid.Oid + } + h.bulkOids = append(h.bulkOids, oid) + } else { + // We have a mapping table + // and some subtables + // This is a bunch of get requests + // This is the best case :) + + // For each subtable ... + for _, sb := range table.subTables { + // ... 
we create a new Data (oid) object + oid := Data{} + // Looking for more information about this subtable + ssb, exists := subTableMap[sb] + if exists { + // We found a subtable section in config files + oid.Oid = ssb.Oid + key + oid.rawOid = ssb.Oid + key + oid.Unit = ssb.Unit + oid.Instance = string(variable.Value.([]byte)) + } else { + // We did NOT find a subtable section in config files + oid.Oid = sb + key + oid.rawOid = sb + key + oid.Instance = string(variable.Value.([]byte)) + } + // TODO check oid validity + + // Add the new oid to getOids list + h.getOids = append(h.getOids, oid) + } + } + default: + } + } else { + break + } + } + // Determine if we need more requests + if strings.HasPrefix(lastOid, oid_asked) { + need_more_requests = true + oid_next = lastOid + } else { + need_more_requests = false + } + } + } + } + // Mapping finished + + // Create newoids based on mapping + + return nil +} + +func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { + // Get snmp client + snmpClient, err := h.GetSNMPClient() + if err != nil { + return err + } + // Deconnection + defer snmpClient.Conn.Close() + // Prepare OIDs + oidsList := make(map[string]Data) + for _, oid := range h.getOids { + oidsList[oid.rawOid] = oid + } + oidsNameList := make([]string, 0, len(oidsList)) + for _, oid := range oidsList { + oidsNameList = append(oidsNameList, oid.rawOid) + } + + // gosnmp.MAX_OIDS == 60 + // TODO use gosnmp.MAX_OIDS instead of hard coded value + max_oids := 60 + // limit 60 (MAX_OIDS) oids by requests + for i := 0; i < len(oidsList); i = i + max_oids { + // Launch request + max_index := i + max_oids + if i+max_oids > len(oidsList) { + max_index = len(oidsList) + } + result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to g.MAX_OIDS + if err3 != nil { + return err3 + } + // Handle response + _, err = h.HandleResponse(oidsList, result, acc, initNode) + if err != nil { + return err + } + } + return nil +} + +func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error { + // Get snmp client + snmpClient, err := h.GetSNMPClient() + if err != nil { + return err + } + // Deconnection + defer snmpClient.Conn.Close() + // Prepare OIDs + oidsList := make(map[string]Data) + for _, oid := range h.bulkOids { + oidsList[oid.rawOid] = oid + } + oidsNameList := make([]string, 0, len(oidsList)) + for _, oid := range oidsList { + oidsNameList = append(oidsNameList, oid.rawOid) + } + // TODO Trying to make requests with more than one OID + // to reduce the number of requests + for _, oid := range oidsNameList { + oid_asked := oid + need_more_requests := true + // Set max repetition + maxRepetition := oidsList[oid].MaxRepetition + if maxRepetition <= 0 { + maxRepetition = 32 + } + // Launch requests + for need_more_requests { + // Launch request + result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition) + if err3 != nil { + return err3 + } + // Handle response + last_oid, err := h.HandleResponse(oidsList, result, acc, initNode) + if err != nil { + return err + } + // Determine if we need more requests + if strings.HasPrefix(last_oid, oid_asked) { + need_more_requests = true + oid = last_oid + } else { + need_more_requests = false + } + } + } + return nil +} + +func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { + // Prepare Version + var version gosnmp.SnmpVersion + if h.Version == 1 { + version = gosnmp.Version1 + } else if h.Version == 3 { + version = gosnmp.Version3 + } else { + version = gosnmp.Version2c + } + // Prepare host and port + 
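// If the address cannot be split into host and port
+	// (e.g. no port was given), fall back to the default SNMP port 161.
+	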
host, port_str, err := net.SplitHostPort(h.Address)
+	if err != nil {
+		port_str = "161"
+	}
+	// convert port_str to a port number in uint16
+	port_64, err := strconv.ParseUint(port_str, 10, 16)
+	if err != nil {
+		return nil, err
+	}
+	port := uint16(port_64)
+	// Get SNMP client
+	snmpClient := &gosnmp.GoSNMP{
+		Target:    host,
+		Port:      port,
+		Community: h.Community,
+		Version:   version,
+		Timeout:   time.Duration(h.Timeout) * time.Second,
+		Retries:   h.Retries,
+	}
+	// Connection
+	err2 := snmpClient.Connect()
+	if err2 != nil {
+		return nil, err2
+	}
+	// Return snmpClient
+	return snmpClient, nil
+}
+
+func (h *Host) HandleResponse(
+	oids map[string]Data,
+	result *gosnmp.SnmpPacket,
+	acc telegraf.Accumulator,
+	initNode Node,
+) (string, error) {
+	var lastOid string
+	for _, variable := range result.Variables {
+		lastOid = variable.Name
+	nextresult:
+		// Get only oid wanted
+		for oid_key, oid := range oids {
+			// Skip oids already processed
+			for _, processedOid := range h.processedOids {
+				if variable.Name == processedOid {
+					break nextresult
+				}
+			}
+			// If variable.Name is the same as oid_key
+			// OR
+			// the result is part of an SNMP table whose "." comes right after oid_key.
+			// ex: oid_key: .1.3.6.1.2.1.2.2.1.16, variable.Name: .1.3.6.1.2.1.2.2.1.16.1
+			if variable.Name == oid_key || strings.HasPrefix(variable.Name, oid_key+".") {
+				switch variable.Type {
+				// handle Metrics
+				case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32,
+					gosnmp.TimeTicks, gosnmp.Counter64, gosnmp.Uinteger32, gosnmp.OctetString:
+					// Prepare tags
+					tags := make(map[string]string)
+					if oid.Unit != "" {
+						tags["unit"] = oid.Unit
+					}
+					// Get name and instance
+					var oid_name string
+					var instance string
+					// Get oidname and instance from translate file
+					oid_name, instance = findnodename(initNode,
+						strings.Split(string(variable.Name[1:]), "."))
+					// Set instance tag
+					// From mapping table
+					mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key]
+					if inMappingNoSubTable {
+						// skip if the instance is not in the
+						// OidInstanceMapping map
+						if instance_name, exists := mapping[instance]; exists {
+							tags["instance"] = instance_name
+						} else {
+							continue
+						}
+					} else if oid.Instance != "" {
+						// From config files
+						tags["instance"] = oid.Instance
+					} else if instance != "" {
+						// Using the last id of the current oid, ie:
+						// with .1.3.6.1.2.1.31.1.1.1.10.3
+						// the instance is 3
+						tags["instance"] = instance
+					}
+
+					// Set name
+					var field_name string
+					if oid_name != "" {
+						// Set fieldname as oid name from translate file
+						field_name = oid_name
+					} else {
+						// Set fieldname as oid name from the inputs.snmp.get section,
+						// because the result oid is equal to the inputs.snmp.get section
+						field_name = oid.Name
+					}
+					tags["snmp_host"], _, _ = net.SplitHostPort(h.Address)
+					fields := make(map[string]interface{})
+					fields[string(field_name)] = variable.Value
+
+					h.processedOids = append(h.processedOids, variable.Name)
+					acc.AddFields(field_name, fields, tags)
+				case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
+					// Oid not found
+					log.Printf("E! 
[snmp input] Oid not found: %s", oid_key) + default: + // delete other data + } + break + } + } + } + return lastOid, nil +} + +func init() { + inputs.Add("snmp_legacy", func() telegraf.Input { + return &Snmp{} + }) +} diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 5b754d772..e428d3098 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -166,7 +166,9 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e if query.ResultByRow { // add measurement to Accumulator - acc.Add(measurement, *columnMap["value"], tags, time.Now()) + acc.AddFields(measurement, + map[string]interface{}{"value": *columnMap["value"]}, + tags, time.Now()) } else { // values for header, val := range columnMap { @@ -290,8 +292,8 @@ IF OBJECT_ID('tempdb..#clerk') IS NOT NULL DROP TABLE #clerk; CREATE TABLE #clerk ( - ClerkCategory nvarchar(64) NOT NULL, - UsedPercent decimal(9,2), + ClerkCategory nvarchar(64) NOT NULL, + UsedPercent decimal(9,2), UsedBytes bigint ); @@ -624,26 +626,22 @@ const sqlDatabaseIO string = `SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; DECLARE @secondsBetween tinyint = 5; DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108); - IF OBJECT_ID('tempdb..#baseline') IS NOT NULL DROP TABLE #baseline; IF OBJECT_ID('tempdb..#baselinewritten') IS NOT NULL DROP TABLE #baselinewritten; - SELECT DB_NAME(mf.database_id) AS databaseName , mf.physical_name, divfs.num_of_bytes_read, divfs.num_of_bytes_written, divfs.num_of_reads, divfs.num_of_writes, - GETDATE() AS baselineDate + GETDATE() AS baselinedate INTO #baseline FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id AND mf.file_id = divfs.file_id - WAITFOR DELAY @delayInterval; - ;WITH currentLine AS ( SELECT DB_NAME(mf.database_id) AS databaseName , @@ -653,12 +651,11 @@ WAITFOR DELAY @delayInterval; divfs.num_of_bytes_written, divfs.num_of_reads, divfs.num_of_writes, - GETDATE() AS currentlineDate + GETDATE() AS currentlinedate FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id AND mf.file_id = divfs.file_id ) - SELECT database_name , datafile_type , num_of_bytes_read_persec = SUM(num_of_bytes_read_persec) @@ -671,23 +668,21 @@ FROM SELECT database_name = currentLine.databaseName , datafile_type = type_desc -, num_of_bytes_read_persec = (currentLine.num_of_bytes_read - T1.num_of_bytes_read) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) -, num_of_bytes_written_persec = (currentLine.num_of_bytes_written - T1.num_of_bytes_written) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) -, num_of_reads_persec = (currentLine.num_of_reads - T1.num_of_reads) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) -, num_of_writes_persec = (currentLine.num_of_writes - T1.num_of_writes) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) +, num_of_bytes_read_persec = (currentLine.num_of_bytes_read - T1.num_of_bytes_read) / (DATEDIFF(SECOND,baselinedate,currentlinedate)) +, num_of_bytes_written_persec = (currentLine.num_of_bytes_written - T1.num_of_bytes_written) / (DATEDIFF(SECOND,baselinedate,currentlinedate)) +, num_of_reads_persec = (currentLine.num_of_reads - T1.num_of_reads) / (DATEDIFF(SECOND,baselinedate,currentlinedate)) +, num_of_writes_persec = (currentLine.num_of_writes - T1.num_of_writes) / 
(DATEDIFF(SECOND,baselinedate,currentlinedate)) FROM currentLine INNER JOIN #baseline T1 ON T1.databaseName = currentLine.databaseName AND T1.physical_name = currentLine.physical_name ) as T GROUP BY database_name, datafile_type - DECLARE @DynamicPivotQuery AS NVARCHAR(MAX) DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX) SELECT @ColumnName = ISNULL(@ColumnName + ',','') + QUOTENAME(database_name) FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl SELECT @ColumnName2 = ISNULL(@ColumnName2 + '+','') + QUOTENAME(database_name) FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl - SET @DynamicPivotQuery = N' SELECT measurement = ''Log writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM @@ -697,9 +692,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -708,9 +701,7 @@ FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Log reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -719,9 +710,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -730,9 +719,7 @@ FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Log (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -741,9 +728,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -752,9 +737,7 @@ FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTabl - UNION ALL - SELECT measurement = ''Log (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -763,9 +746,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -775,7 +756,6 @@ WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable ' - EXEC sp_executesql 
@DynamicPivotQuery; ` @@ -1159,7 +1139,7 @@ DECLARE @w4 TABLE ) DECLARE @w5 TABLE ( - WaitCategory nvarchar(16) NOT NULL, + WaitCategory nvarchar(64) NOT NULL, WaitTimeInMs bigint NOT NULL, WaitTaskCount bigint NOT NULL ) diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 1cbe4bd95..063af7595 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -53,7 +53,9 @@ func TestSqlServer_ParseMetrics(t *testing.T) { require.NoError(t, err) // add value to Accumulator - acc.Add(measurement, value, tags, time.Now()) + acc.AddFields(measurement, + map[string]interface{}{"value": value}, + tags, time.Now()) // assert acc.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"value": value}, tags) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index fb191974f..a46af0a87 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -24,10 +24,11 @@ const ( defaultFieldName = "value" - defaultSeparator = "_" + defaultSeparator = "_" + defaultAllowPendingMessage = 10000 ) -var dropwarn = "ERROR: statsd message queue full. " + +var dropwarn = "E! Error: statsd message queue full. " + "We have dropped %d messages so far. " + "You may want to increase allowed_pending_messages in the config\n" @@ -85,6 +86,8 @@ type Statsd struct { Templates []string listener *net.UDPConn + + graphiteParser *graphite.GraphiteParser } // One statsd metric, form is :||@ @@ -248,7 +251,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { } if s.ConvertNames { - log.Printf("WARNING statsd: convert_names config option is deprecated," + + log.Printf("I! WARNING statsd: convert_names config option is deprecated," + " please use metric_separator instead") } @@ -261,7 +264,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { go s.udpListen() // Start the line parser go s.parser() - log.Printf("Started the statsd service on %s\n", s.ServiceAddress) + log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress) prevInstance = s return nil } @@ -275,7 +278,7 @@ func (s *Statsd) udpListen() error { if err != nil { log.Fatalf("ERROR: ListenUDP - %s", err) } - log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String()) + log.Println("I! Statsd listener listening on: ", s.listener.LocalAddr().String()) buf := make([]byte, UDP_MAX_PACKET_SIZE) for { @@ -285,7 +288,7 @@ func (s *Statsd) udpListen() error { default: n, _, err := s.listener.ReadFromUDP(buf) if err != nil && !strings.Contains(err.Error(), "closed network") { - log.Printf("ERROR READ: %s\n", err.Error()) + log.Printf("E! 
Error READ: %s\n", err.Error()) continue } bufCopy := make([]byte, n) @@ -295,7 +298,7 @@ func (s *Statsd) udpListen() error { case s.in <- bufCopy: default: s.drops++ - if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 { + if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 { log.Printf(dropwarn, s.drops) } } @@ -346,7 +349,7 @@ func (s *Statsd) parseStatsdLine(line string) error { tagstr := segment[1:] tags := strings.Split(tagstr, ",") for _, tag := range tags { - ts := strings.Split(tag, ":") + ts := strings.SplitN(tag, ":", 2) var k, v string switch len(ts) { case 1: @@ -371,7 +374,7 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the line on ":" bits := strings.Split(line, ":") if len(bits) < 2 { - log.Printf("Error: splitting ':', Unable to parse metric: %s\n", line) + log.Printf("E! Error: splitting ':', Unable to parse metric: %s\n", line) return errors.New("Error Parsing statsd line") } @@ -387,11 +390,11 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the bit on "|" pipesplit := strings.Split(bit, "|") if len(pipesplit) < 2 { - log.Printf("Error: splitting '|', Unable to parse metric: %s\n", line) + log.Printf("E! Error: splitting '|', Unable to parse metric: %s\n", line) return errors.New("Error Parsing statsd line") } else if len(pipesplit) > 2 { sr := pipesplit[2] - errmsg := "Error: parsing sample rate, %s, it must be in format like: " + + errmsg := "E! Error: parsing sample rate, %s, it must be in format like: " + "@0.1, @0.5, etc. Ignoring sample rate for line: %s\n" if strings.Contains(sr, "@") && len(sr) > 1 { samplerate, err := strconv.ParseFloat(sr[1:], 64) @@ -411,14 +414,14 @@ func (s *Statsd) parseStatsdLine(line string) error { case "g", "c", "s", "ms", "h": m.mtype = pipesplit[1] default: - log.Printf("Error: Statsd Metric type %s unsupported", pipesplit[1]) + log.Printf("E! Error: Statsd Metric type %s unsupported", pipesplit[1]) return errors.New("Error Parsing statsd line") } // Parse the value - if strings.ContainsAny(pipesplit[0], "-+") { + if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" { - log.Printf("Error: +- values are only supported for gauges: %s\n", line) + log.Printf("E! Error: +- values are only supported for gauges: %s\n", line) return errors.New("Error Parsing statsd line") } m.additive = true @@ -428,7 +431,7 @@ func (s *Statsd) parseStatsdLine(line string) error { case "g", "ms", "h": v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { - log.Printf("Error: parsing value to float64: %s\n", line) + log.Printf("E! Error: parsing value to float64: %s\n", line) return errors.New("Error Parsing statsd line") } m.floatvalue = v @@ -438,7 +441,7 @@ func (s *Statsd) parseStatsdLine(line string) error { if err != nil { v2, err2 := strconv.ParseFloat(pipesplit[0], 64) if err2 != nil { - log.Printf("Error: parsing value to int64: %s\n", line) + log.Printf("E! 
Error: parsing value to int64: %s\n", line) return errors.New("Error Parsing statsd line") } v = int64(v2) @@ -505,7 +508,15 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { var field string name := bucketparts[0] - p, err := graphite.NewGraphiteParser(s.MetricSeparator, s.Templates, nil) + + p := s.graphiteParser + var err error + + if p == nil || s.graphiteParser.Separator != s.MetricSeparator { + p, err = graphite.NewGraphiteParser(s.MetricSeparator, s.Templates, nil) + s.graphiteParser = p + } + if err == nil { p.DefaultTags = tags name, tags, field, _ = p.ApplyTemplate(name) @@ -630,7 +641,7 @@ func (s *Statsd) aggregate(m metric) { func (s *Statsd) Stop() { s.Lock() defer s.Unlock() - log.Println("Stopping the statsd service") + log.Println("I! Stopping the statsd service") close(s.done) s.listener.Close() s.wg.Wait() @@ -640,7 +651,8 @@ func (s *Statsd) Stop() { func init() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ - MetricSeparator: "_", + MetricSeparator: "_", + AllowedPendingMessages: defaultAllowPendingMessage, } }) } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 9332f9d4f..bb0d68c16 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -24,6 +24,267 @@ func NewTestStatsd() *Statsd { return &s } +// Valid lines should be parsed and their values should be cached +func TestParse_ValidLines(t *testing.T) { + s := NewTestStatsd() + valid_lines := []string{ + "valid:45|c", + "valid:45|s", + "valid:45|g", + "valid.timer:45|ms", + "valid.timer:45|h", + } + + for _, line := range valid_lines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } +} + +// Tests low-level functionality of gauges +func TestParse_Gauges(t *testing.T) { + s := NewTestStatsd() + + // Test that gauge +- values work + valid_lines := []string{ + "plus.minus:100|g", + "plus.minus:-10|g", + "plus.minus:+30|g", + "plus.plus:100|g", + "plus.plus:+100|g", + "plus.plus:+100|g", + "minus.minus:100|g", + "minus.minus:-100|g", + "minus.minus:-100|g", + "lone.plus:+100|g", + "lone.minus:-100|g", + "overwrite:100|g", + "overwrite:300|g", + "scientific.notation:4.696E+5|g", + "scientific.notation.minus:4.7E-5|g", + } + + for _, line := range valid_lines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + + validations := []struct { + name string + value float64 + }{ + { + "scientific_notation", + 469600, + }, + { + "scientific_notation_minus", + 0.000047, + }, + { + "plus_minus", + 120, + }, + { + "plus_plus", + 300, + }, + { + "minus_minus", + -100, + }, + { + "lone_plus", + 100, + }, + { + "lone_minus", + -100, + }, + { + "overwrite", + 300, + }, + } + + for _, test := range validations { + err := test_validate_gauge(test.name, test.value, s.gauges) + if err != nil { + t.Error(err.Error()) + } + } +} + +// Tests low-level functionality of sets +func TestParse_Sets(t *testing.T) { + s := NewTestStatsd() + + // Test that sets work + valid_lines := []string{ + "unique.user.ids:100|s", + "unique.user.ids:100|s", + "unique.user.ids:100|s", + "unique.user.ids:100|s", + "unique.user.ids:100|s", + "unique.user.ids:101|s", + "unique.user.ids:102|s", + "unique.user.ids:102|s", + "unique.user.ids:123456789|s", + "oneuser.id:100|s", + "oneuser.id:100|s", + "scientific.notation.sets:4.696E+5|s", + 
"scientific.notation.sets:4.696E+5|s", + "scientific.notation.sets:4.697E+5|s", + } + + for _, line := range valid_lines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + + validations := []struct { + name string + value int64 + }{ + { + "scientific_notation_sets", + 2, + }, + { + "unique_user_ids", + 4, + }, + { + "oneuser_id", + 1, + }, + } + + for _, test := range validations { + err := test_validate_set(test.name, test.value, s.sets) + if err != nil { + t.Error(err.Error()) + } + } +} + +// Tests low-level functionality of counters +func TestParse_Counters(t *testing.T) { + s := NewTestStatsd() + + // Test that counters work + valid_lines := []string{ + "small.inc:1|c", + "big.inc:100|c", + "big.inc:1|c", + "big.inc:100000|c", + "big.inc:1000000|c", + "small.inc:1|c", + "zero.init:0|c", + "sample.rate:1|c|@0.1", + "sample.rate:1|c", + "scientific.notation:4.696E+5|c", + } + + for _, line := range valid_lines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + + validations := []struct { + name string + value int64 + }{ + { + "scientific_notation", + 469600, + }, + { + "small_inc", + 2, + }, + { + "big_inc", + 1100101, + }, + { + "zero_init", + 0, + }, + { + "sample_rate", + 11, + }, + } + + for _, test := range validations { + err := test_validate_counter(test.name, test.value, s.counters) + if err != nil { + t.Error(err.Error()) + } + } +} + +// Tests low-level functionality of timings +func TestParse_Timings(t *testing.T) { + s := NewTestStatsd() + s.Percentiles = []int{90} + acc := &testutil.Accumulator{} + + // Test that counters work + valid_lines := []string{ + "test.timing:1|ms", + "test.timing:11|ms", + "test.timing:1|ms", + "test.timing:1|ms", + "test.timing:1|ms", + } + + for _, line := range valid_lines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + + s.Gather(acc) + + valid := map[string]interface{}{ + "90_percentile": float64(11), + "count": int64(5), + "lower": float64(1), + "mean": float64(3), + "stddev": float64(4), + "upper": float64(11), + } + + acc.AssertContainsFields(t, "test_timing", valid) +} + +func TestParseScientificNotation(t *testing.T) { + s := NewTestStatsd() + sciNotationLines := []string{ + "scientific.notation:4.6968460083008E-5|ms", + "scientific.notation:4.6968460083008E-5|g", + "scientific.notation:4.6968460083008E-5|c", + "scientific.notation:4.6968460083008E-5|h", + } + for _, line := range sciNotationLines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line [%s] should not have resulted in error: %s\n", line, err) + } + } +} + // Invalid lines should return an error func TestParse_InvalidLines(t *testing.T) { s := NewTestStatsd() @@ -432,7 +693,7 @@ func TestParse_DataDogTags(t *testing.T) { s.ParseDataDogTags = true lines := []string{ - "my_counter:1|c|#host:localhost,environment:prod", + "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro", "my_gauge:10.1|g|#live", "my_set:1|s|#host:localhost", "my_timer:3|ms|@0.1|#live,host:localhost", @@ -442,6 +703,7 @@ func TestParse_DataDogTags(t *testing.T) { "my_counter": map[string]string{ "host": "localhost", "environment": "prod", + "endpoint": "/:tenant?/oauth/ro", }, "my_gauge": map[string]string{ @@ -715,229 +977,6 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { } } -// 
Valid lines should be parsed and their values should be cached -func TestParse_ValidLines(t *testing.T) { - s := NewTestStatsd() - valid_lines := []string{ - "valid:45|c", - "valid:45|s", - "valid:45|g", - "valid.timer:45|ms", - "valid.timer:45|h", - } - - for _, line := range valid_lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } - } -} - -// Tests low-level functionality of gauges -func TestParse_Gauges(t *testing.T) { - s := NewTestStatsd() - - // Test that gauge +- values work - valid_lines := []string{ - "plus.minus:100|g", - "plus.minus:-10|g", - "plus.minus:+30|g", - "plus.plus:100|g", - "plus.plus:+100|g", - "plus.plus:+100|g", - "minus.minus:100|g", - "minus.minus:-100|g", - "minus.minus:-100|g", - "lone.plus:+100|g", - "lone.minus:-100|g", - "overwrite:100|g", - "overwrite:300|g", - } - - for _, line := range valid_lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } - } - - validations := []struct { - name string - value float64 - }{ - { - "plus_minus", - 120, - }, - { - "plus_plus", - 300, - }, - { - "minus_minus", - -100, - }, - { - "lone_plus", - 100, - }, - { - "lone_minus", - -100, - }, - { - "overwrite", - 300, - }, - } - - for _, test := range validations { - err := test_validate_gauge(test.name, test.value, s.gauges) - if err != nil { - t.Error(err.Error()) - } - } -} - -// Tests low-level functionality of sets -func TestParse_Sets(t *testing.T) { - s := NewTestStatsd() - - // Test that sets work - valid_lines := []string{ - "unique.user.ids:100|s", - "unique.user.ids:100|s", - "unique.user.ids:100|s", - "unique.user.ids:100|s", - "unique.user.ids:100|s", - "unique.user.ids:101|s", - "unique.user.ids:102|s", - "unique.user.ids:102|s", - "unique.user.ids:123456789|s", - "oneuser.id:100|s", - "oneuser.id:100|s", - } - - for _, line := range valid_lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } - } - - validations := []struct { - name string - value int64 - }{ - { - "unique_user_ids", - 4, - }, - { - "oneuser_id", - 1, - }, - } - - for _, test := range validations { - err := test_validate_set(test.name, test.value, s.sets) - if err != nil { - t.Error(err.Error()) - } - } -} - -// Tests low-level functionality of counters -func TestParse_Counters(t *testing.T) { - s := NewTestStatsd() - - // Test that counters work - valid_lines := []string{ - "small.inc:1|c", - "big.inc:100|c", - "big.inc:1|c", - "big.inc:100000|c", - "big.inc:1000000|c", - "small.inc:1|c", - "zero.init:0|c", - "sample.rate:1|c|@0.1", - "sample.rate:1|c", - } - - for _, line := range valid_lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } - } - - validations := []struct { - name string - value int64 - }{ - { - "small_inc", - 2, - }, - { - "big_inc", - 1100101, - }, - { - "zero_init", - 0, - }, - { - "sample_rate", - 11, - }, - } - - for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } - } -} - -// Tests low-level functionality of timings -func TestParse_Timings(t *testing.T) { - s := NewTestStatsd() - s.Percentiles = []int{90} - acc := &testutil.Accumulator{} - - // Test that counters work - valid_lines := []string{ - "test.timing:1|ms", - "test.timing:11|ms", - 
"test.timing:1|ms", - "test.timing:1|ms", - "test.timing:1|ms", - } - - for _, line := range valid_lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } - } - - s.Gather(acc) - - valid := map[string]interface{}{ - "90_percentile": float64(11), - "count": int64(5), - "lower": float64(1), - "mean": float64(3), - "stddev": float64(4), - "upper": float64(11), - } - - acc.AssertContainsFields(t, "test_timing", valid) -} - // Tests low-level functionality of timings when multiple fields is enabled // and a measurement template has been defined which can parse field names func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { @@ -1037,6 +1076,136 @@ func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) { acc.AssertContainsFields(t, "test_timing_error", expectedError) } +func BenchmarkParse(b *testing.B) { + s := NewTestStatsd() + validLines := []string{ + "test.timing.success:1|ms", + "test.timing.success:11|ms", + "test.timing.success:1|ms", + "test.timing.success:1|ms", + "test.timing.success:1|ms", + "test.timing.error:2|ms", + "test.timing.error:22|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + } + for n := 0; n < b.N; n++ { + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + b.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + } +} + +func BenchmarkParseWithTemplate(b *testing.B) { + s := NewTestStatsd() + s.Templates = []string{"measurement.measurement.field"} + validLines := []string{ + "test.timing.success:1|ms", + "test.timing.success:11|ms", + "test.timing.success:1|ms", + "test.timing.success:1|ms", + "test.timing.success:1|ms", + "test.timing.error:2|ms", + "test.timing.error:22|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + } + for n := 0; n < b.N; n++ { + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + b.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + } +} + +func BenchmarkParseWithTemplateAndFilter(b *testing.B) { + s := NewTestStatsd() + s.Templates = []string{"cpu* measurement.measurement.field"} + validLines := []string{ + "test.timing.success:1|ms", + "test.timing.success:11|ms", + "test.timing.success:1|ms", + "cpu.timing.success:1|ms", + "cpu.timing.success:1|ms", + "cpu.timing.error:2|ms", + "cpu.timing.error:22|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + } + for n := 0; n < b.N; n++ { + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + b.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + } +} + +func BenchmarkParseWith2TemplatesAndFilter(b *testing.B) { + s := NewTestStatsd() + s.Templates = []string{ + "cpu1* measurement.measurement.field", + "cpu2* measurement.measurement.field", + } + validLines := []string{ + "test.timing.success:1|ms", + "test.timing.success:11|ms", + "test.timing.success:1|ms", + "cpu1.timing.success:1|ms", + "cpu1.timing.success:1|ms", + "cpu2.timing.error:2|ms", + "cpu2.timing.error:22|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + "test.timing.error:2|ms", + } + for n := 0; n < b.N; n++ { + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + b.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + } +} + +func 
BenchmarkParseWith2Templates3TagsAndFilter(b *testing.B) { + s := NewTestStatsd() + s.Templates = []string{ + "cpu1* measurement.measurement.region.city.rack.field", + "cpu2* measurement.measurement.region.city.rack.field", + } + validLines := []string{ + "test.timing.us-east.nyc.rack01.success:1|ms", + "test.timing.us-east.nyc.rack01.success:11|ms", + "test.timing.us-west.sf.rack01.success:1|ms", + "cpu1.timing.us-west.sf.rack01.success:1|ms", + "cpu1.timing.us-east.nyc.rack01.success:1|ms", + "cpu2.timing.us-east.nyc.rack01.error:2|ms", + "cpu2.timing.us-west.sf.rack01.error:22|ms", + "test.timing.us-west.sf.rack01.error:2|ms", + "test.timing.us-west.sf.rack01.error:2|ms", + "test.timing.us-east.nyc.rack01.error:2|ms", + } + for n := 0; n < b.N; n++ { + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + b.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + } +} + func TestParse_Timings_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteTimings = true diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 795ad2f60..9c9ef6b05 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "log" "os" "os/exec" "path" @@ -201,6 +202,9 @@ func (s *Sysstat) collect() error { cmd := execCommand(s.Sadc, options...) out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval)) if err != nil { + if err := os.Remove(s.tmpFile); err != nil { + log.Printf("E! failed to remove tmp file after %s command: %s", strings.Join(cmd.Args, " "), err) + } return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } return nil diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index 8fc9739ba..295c0d347 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -13,13 +13,15 @@ type CPUStats struct { ps PS lastStats []cpu.TimesStat - PerCPU bool `toml:"percpu"` - TotalCPU bool `toml:"totalcpu"` + PerCPU bool `toml:"percpu"` + TotalCPU bool `toml:"totalcpu"` + CollectCPUTime bool `toml:"collect_cpu_time"` } func NewCPUStats(ps PS) *CPUStats { return &CPUStats{ - ps: ps, + ps: ps, + CollectCPUTime: true, } } @@ -32,8 +34,8 @@ var sampleConfig = ` percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## Comment this line if you want the raw CPU time metrics - fielddrop = ["time_*"] + ## If true, collect raw CPU time metrics. 
+ collect_cpu_time = false ` func (_ *CPUStats) SampleConfig() string { @@ -54,23 +56,25 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { total := totalCpuTime(cts) - // Add cpu time metrics - fields := map[string]interface{}{ - "time_user": cts.User, - "time_system": cts.System, - "time_idle": cts.Idle, - "time_nice": cts.Nice, - "time_iowait": cts.Iowait, - "time_irq": cts.Irq, - "time_softirq": cts.Softirq, - "time_steal": cts.Steal, - "time_guest": cts.Guest, - "time_guest_nice": cts.GuestNice, + if s.CollectCPUTime { + // Add cpu time metrics + fieldsC := map[string]interface{}{ + "time_user": cts.User, + "time_system": cts.System, + "time_idle": cts.Idle, + "time_nice": cts.Nice, + "time_iowait": cts.Iowait, + "time_irq": cts.Irq, + "time_softirq": cts.Softirq, + "time_steal": cts.Steal, + "time_guest": cts.Guest, + "time_guest_nice": cts.GuestNice, + } + acc.AddCounter("cpu", fieldsC, tags, now) } // Add in percentage if len(s.lastStats) == 0 { - acc.AddFields("cpu", fields, tags, now) // If it's the 1st gather, can't get CPU Usage stats yet continue } @@ -86,18 +90,19 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { if totalDelta == 0 { continue } - - fields["usage_user"] = 100 * (cts.User - lastCts.User) / totalDelta - fields["usage_system"] = 100 * (cts.System - lastCts.System) / totalDelta - fields["usage_idle"] = 100 * (cts.Idle - lastCts.Idle) / totalDelta - fields["usage_nice"] = 100 * (cts.Nice - lastCts.Nice) / totalDelta - fields["usage_iowait"] = 100 * (cts.Iowait - lastCts.Iowait) / totalDelta - fields["usage_irq"] = 100 * (cts.Irq - lastCts.Irq) / totalDelta - fields["usage_softirq"] = 100 * (cts.Softirq - lastCts.Softirq) / totalDelta - fields["usage_steal"] = 100 * (cts.Steal - lastCts.Steal) / totalDelta - fields["usage_guest"] = 100 * (cts.Guest - lastCts.Guest) / totalDelta - fields["usage_guest_nice"] = 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta - acc.AddFields("cpu", fields, tags, now) + fieldsG := map[string]interface{}{ + "usage_user": 100 * (cts.User - lastCts.User) / totalDelta, + "usage_system": 100 * (cts.System - lastCts.System) / totalDelta, + "usage_idle": 100 * (cts.Idle - lastCts.Idle) / totalDelta, + "usage_nice": 100 * (cts.Nice - lastCts.Nice) / totalDelta, + "usage_iowait": 100 * (cts.Iowait - lastCts.Iowait) / totalDelta, + "usage_irq": 100 * (cts.Irq - lastCts.Irq) / totalDelta, + "usage_softirq": 100 * (cts.Softirq - lastCts.Softirq) / totalDelta, + "usage_steal": 100 * (cts.Steal - lastCts.Steal) / totalDelta, + "usage_guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta, + "usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta, + } + acc.AddGauge("cpu", fieldsG, tags, now) } s.lastStats = times diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index f79295294..e686a442d 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -70,7 +70,7 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error { "inodes_free": du.InodesFree, "inodes_used": du.InodesUsed, } - acc.AddFields("disk", fields, tags) + acc.AddGauge("disk", fields, tags) } return nil @@ -139,7 +139,7 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error { "write_time": io.WriteTime, "io_time": io.IoTime, } - acc.AddFields("diskio", fields, tags) + acc.AddCounter("diskio", fields, tags) } return nil diff --git a/plugins/inputs/system/kernel.go b/plugins/inputs/system/kernel.go index abad47731..66cb0f763 100644 --- a/plugins/inputs/system/kernel.go +++ 
b/plugins/inputs/system/kernel.go @@ -81,7 +81,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { } } - acc.AddFields("kernel", fields, map[string]string{}) + acc.AddCounter("kernel", fields, map[string]string{}) return nil } diff --git a/plugins/inputs/system/memory.go b/plugins/inputs/system/memory.go index c6dbff45e..26dc550f8 100644 --- a/plugins/inputs/system/memory.go +++ b/plugins/inputs/system/memory.go @@ -35,7 +35,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "used_percent": 100 * float64(vm.Used) / float64(vm.Total), "available_percent": 100 * float64(vm.Available) / float64(vm.Total), } - acc.AddFields("mem", fields, nil) + acc.AddCounter("mem", fields, nil) return nil } @@ -56,15 +56,18 @@ func (s *SwapStats) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("error getting swap memory info: %s", err) } - fields := map[string]interface{}{ + fieldsG := map[string]interface{}{ "total": swap.Total, "used": swap.Used, "free": swap.Free, "used_percent": swap.UsedPercent, - "in": swap.Sin, - "out": swap.Sout, } - acc.AddFields("swap", fields, nil) + fieldsC := map[string]interface{}{ + "in": swap.Sin, + "out": swap.Sout, + } + acc.AddGauge("swap", fieldsG, nil) + acc.AddCounter("swap", fieldsC, nil) return nil } diff --git a/plugins/inputs/system/memory_test.go b/plugins/inputs/system/memory_test.go index 1fced6918..4467c69aa 100644 --- a/plugins/inputs/system/memory_test.go +++ b/plugins/inputs/system/memory_test.go @@ -67,8 +67,6 @@ func TestMemStats(t *testing.T) { "used": uint64(1232), "used_percent": float64(12.2), "free": uint64(6412), - "in": uint64(7), - "out": uint64(830), } acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string)) } diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go index f6bc05818..3f89176fb 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/system/net.go @@ -81,7 +81,7 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { "drop_in": io.Dropin, "drop_out": io.Dropout, } - acc.AddFields("net", fields, tags) + acc.AddCounter("net", fields, tags) } // Get system wide stats for different network protocols diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/system/processes.go index c16f7a480..0950323fd 100644 --- a/plugins/inputs/system/processes.go +++ b/plugins/inputs/system/processes.go @@ -57,7 +57,7 @@ func (p *Processes) Gather(acc telegraf.Accumulator) error { } } - acc.AddFields("processes", fields, nil) + acc.AddGauge("processes", fields, nil) return nil } @@ -118,7 +118,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { case '?': fields["unknown"] = fields["unknown"].(int64) + int64(1) default: - log.Printf("processes: Unknown state [ %s ] from ps", + log.Printf("I! processes: Unknown state [ %s ] from ps", string(status[0])) } fields["total"] = fields["total"].(int64) + int64(1) @@ -169,14 +169,14 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { case 'W': fields["paging"] = fields["paging"].(int64) + int64(1) default: - log.Printf("processes: Unknown state [ %s ] in file %s", + log.Printf("I! processes: Unknown state [ %s ] in file %s", string(stats[0][0]), filename) } fields["total"] = fields["total"].(int64) + int64(1) threads, err := strconv.Atoi(string(stats[17])) if err != nil { - log.Printf("processes: Error parsing thread count: %s", err) + log.Printf("I! 
processes: Error parsing thread count: %s", err) continue } fields["total_threads"] = fields["total_threads"].(int64) + int64(threads) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 3ed123d15..d740f6748 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -26,7 +26,7 @@ type PS interface { func add(acc telegraf.Accumulator, name string, val float64, tags map[string]string) { if val >= 0 { - acc.Add(name, val, tags) + acc.AddFields(name, map[string]interface{}{"value": val}, tags) } } diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index ff64740bf..1a61f11bf 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -37,16 +37,17 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { return err } - fields := map[string]interface{}{ - "load1": loadavg.Load1, - "load5": loadavg.Load5, - "load15": loadavg.Load15, + acc.AddGauge("system", map[string]interface{}{ + "load1": loadavg.Load1, + "load5": loadavg.Load5, + "load15": loadavg.Load15, + "n_users": len(users), + "n_cpus": runtime.NumCPU(), + }, nil) + acc.AddCounter("system", map[string]interface{}{ "uptime": hostinfo.Uptime, - "n_users": len(users), "uptime_format": format_uptime(hostinfo.Uptime), - "n_cpus": runtime.NumCPU(), - } - acc.AddFields("system", fields, nil) + }, nil) return nil } diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 942fd6bae..e1bc32e51 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -81,7 +81,7 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { for _, filepath := range t.Files { g, err := globpath.Compile(filepath) if err != nil { - log.Printf("ERROR Glob %s failed to compile, %s", filepath, err) + log.Printf("E! Error Glob %s failed to compile, %s", filepath, err) } for file, _ := range g.Match() { tailer, err := tail.TailFile(file, @@ -118,7 +118,7 @@ func (t *Tail) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { if line.Err != nil { - log.Printf("ERROR tailing file %s, Error: %s\n", + log.Printf("E! Error tailing file %s, Error: %s\n", tailer.Filename, err) continue } @@ -126,7 +126,7 @@ func (t *Tail) receiver(tailer *tail.Tail) { if err == nil { t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } else { - log.Printf("Malformed log line in %s: [%s], Error: %s\n", + log.Printf("E! Malformed log line in %s: [%s], Error: %s\n", tailer.Filename, line.Text, err) } } @@ -139,7 +139,7 @@ func (t *Tail) Stop() { for _, t := range t.tailers { err := t.Stop() if err != nil { - log.Printf("ERROR stopping tail on file %s\n", t.Filename) + log.Printf("E! 
Error stopping tail on file %s\n", t.Filename) } t.Cleanup() } diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index f9f6bff28..31ecfbf30 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -17,6 +17,8 @@ func TestTailFromBeginning(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") + require.NoError(t, err) tt := NewTail() tt.FromBeginning = true @@ -28,12 +30,10 @@ func TestTailFromBeginning(t *testing.T) { acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) - - _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") - require.NoError(t, err) + time.Sleep(time.Millisecond * 100) require.NoError(t, tt.Gather(&acc)) // arbitrary sleep to wait for message to show up - time.Sleep(time.Millisecond * 250) + time.Sleep(time.Millisecond * 150) acc.AssertContainsTaggedFields(t, "cpu", map[string]interface{}{ diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index 4688e008b..a8827c037 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -43,11 +43,11 @@ type TcpListener struct { acc telegraf.Accumulator } -var dropwarn = "ERROR: tcp_listener message queue full. " + +var dropwarn = "E! Error: tcp_listener message queue full. " + "We have dropped %d messages so far. " + "You may want to increase allowed_pending_messages in the config\n" -var malformedwarn = "WARNING: tcp_listener has received %d malformed packets" + +var malformedwarn = "E! tcp_listener has received %d malformed packets" + " thus far." const sampleConfig = ` @@ -108,13 +108,13 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error { log.Fatalf("ERROR: ListenUDP - %s", err) return err } - log.Println("TCP server listening on: ", t.listener.Addr().String()) + log.Println("I! TCP server listening on: ", t.listener.Addr().String()) t.wg.Add(2) go t.tcpListen() go t.tcpParser() - log.Printf("Started TCP listener service on %s\n", t.ServiceAddress) + log.Printf("I! Started TCP listener service on %s\n", t.ServiceAddress) return nil } @@ -141,7 +141,7 @@ func (t *TcpListener) Stop() { t.wg.Wait() close(t.in) - log.Println("Stopped TCP listener service on ", t.ServiceAddress) + log.Println("I! Stopped TCP listener service on ", t.ServiceAddress) } // tcpListen listens for incoming TCP connections. @@ -158,7 +158,6 @@ func (t *TcpListener) tcpListen() error { if err != nil { return err } - // log.Printf("Received TCP Connection from %s", conn.RemoteAddr()) select { case <-t.accept: @@ -183,8 +182,8 @@ func (t *TcpListener) refuser(conn *net.TCPConn) { " reached, closing.\nYou may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", t.MaxTCPConnections) conn.Close() - log.Printf("Refused TCP Connection from %s", conn.RemoteAddr()) - log.Printf("WARNING: Maximum TCP Connections reached, you may want to" + + log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr()) + log.Printf("I! 
WARNING: Maximum TCP Connections reached, you may want to" + " adjust max_tcp_connections") } @@ -194,7 +193,6 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) { defer func() { t.wg.Done() conn.Close() - // log.Printf("Closed TCP Connection from %s", conn.RemoteAddr()) // Add one connection potential back to channel when this one closes t.accept <- true t.forget(id) @@ -239,14 +237,19 @@ func (t *TcpListener) tcpParser() error { for { select { case <-t.done: - return nil + // drain input packets before finishing: + if len(t.in) == 0 { + return nil + } case packet = <-t.in: if len(packet) == 0 { continue } metrics, err = t.parser.Parse(packet) if err == nil { - t.storeMetrics(metrics) + for _, m := range metrics { + t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } } else { t.malformed++ if t.malformed == 1 || t.malformed%1000 == 0 { @@ -257,15 +260,6 @@ func (t *TcpListener) tcpParser() error { } } -func (t *TcpListener) storeMetrics(metrics []telegraf.Metric) error { - t.Lock() - defer t.Unlock() - for _, m := range metrics { - t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) - } - return nil -} - // forget a TCP connection func (t *TcpListener) forget(id string) { t.cleanup.Lock() diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index b4aec9dd2..f7e5784d3 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -37,6 +37,62 @@ func newTestTcpListener() (*TcpListener, chan []byte) { return listener, in } +// benchmark how long it takes to accept & process 100,000 metrics: +func BenchmarkTCP(b *testing.B) { + listener := TcpListener{ + ServiceAddress: ":8198", + AllowedPendingMessages: 100000, + MaxTCPConnections: 250, + } + listener.parser, _ = parsers.NewInfluxParser() + acc := &testutil.Accumulator{Discard: true} + + // send multiple messages to socket + for n := 0; n < b.N; n++ { + err := listener.Start(acc) + if err != nil { + panic(err) + } + + time.Sleep(time.Millisecond * 25) + conn, err := net.Dial("tcp", "127.0.0.1:8198") + if err != nil { + panic(err) + } + for i := 0; i < 100000; i++ { + fmt.Fprintf(conn, testMsg) + } + // wait for 100,000 metrics to get added to accumulator + time.Sleep(time.Millisecond) + listener.Stop() + } +} + +func TestHighTrafficTCP(t *testing.T) { + listener := TcpListener{ + ServiceAddress: ":8199", + AllowedPendingMessages: 100000, + MaxTCPConnections: 250, + } + listener.parser, _ = parsers.NewInfluxParser() + acc := &testutil.Accumulator{} + + // send multiple messages to socket + err := listener.Start(acc) + require.NoError(t, err) + + time.Sleep(time.Millisecond * 25) + conn, err := net.Dial("tcp", "127.0.0.1:8199") + require.NoError(t, err) + for i := 0; i < 100000; i++ { + fmt.Fprintf(conn, testMsg) + } + time.Sleep(time.Millisecond) + listener.Stop() + + assert.Equal(t, 100000, len(acc.Metrics)) +} + func TestConnectTCP(t *testing.T) { listener := TcpListener{ ServiceAddress: ":8194", diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 120ee50e5..d2c4d0bbc 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -3,8 +3,8 @@ package udp_listener import ( "log" "net" - "strings" "sync" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -42,11 +42,11 @@ type UdpListener struct { // 
https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure const UDP_MAX_PACKET_SIZE int = 64 * 1024 -var dropwarn = "ERROR: udp_listener message queue full. " + +var dropwarn = "E! Error: udp_listener message queue full. " + "We have dropped %d messages so far. " + "You may want to increase allowed_pending_messages in the config\n" -var malformedwarn = "WARNING: udp_listener has received %d malformed packets" + +var malformedwarn = "E! udp_listener has received %d malformed packets" + " thus far." const sampleConfig = ` @@ -94,16 +94,18 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { go u.udpListen() go u.udpParser() - log.Printf("Started UDP listener service on %s\n", u.ServiceAddress) + log.Printf("I! Started UDP listener service on %s\n", u.ServiceAddress) return nil } func (u *UdpListener) Stop() { + u.Lock() + defer u.Unlock() close(u.done) - u.listener.Close() u.wg.Wait() + u.listener.Close() close(u.in) - log.Println("Stopped UDP listener service on ", u.ServiceAddress) + log.Println("I! Stopped UDP listener service on ", u.ServiceAddress) } func (u *UdpListener) udpListen() error { @@ -114,7 +116,7 @@ func (u *UdpListener) udpListen() error { if err != nil { log.Fatalf("ERROR: ListenUDP - %s", err) } - log.Println("UDP server listening on: ", u.listener.LocalAddr().String()) + log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String()) buf := make([]byte, UDP_MAX_PACKET_SIZE) for { @@ -122,9 +124,13 @@ func (u *UdpListener) udpListen() error { case <-u.done: return nil default: + u.listener.SetReadDeadline(time.Now().Add(time.Second)) n, _, err := u.listener.ReadFromUDP(buf) - if err != nil && !strings.Contains(err.Error(), "closed network") { - log.Printf("ERROR: %s\n", err.Error()) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + } else { + log.Printf("E! 
Error: %s\n", err.Error()) + } continue } bufCopy := make([]byte, n) @@ -151,11 +157,15 @@ func (u *UdpListener) udpParser() error { for { select { case <-u.done: - return nil + if len(u.in) == 0 { + return nil + } case packet = <-u.in: metrics, err = u.parser.Parse(packet) if err == nil { - u.storeMetrics(metrics) + for _, m := range metrics { + u.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } } else { u.malformed++ if u.malformed == 1 || u.malformed%1000 == 0 { @@ -166,15 +176,6 @@ func (u *UdpListener) udpParser() error { } } -func (u *UdpListener) storeMetrics(metrics []telegraf.Metric) error { - u.Lock() - defer u.Unlock() - for _, m := range metrics { - u.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) - } - return nil -} - func init() { inputs.Add("udp_listener", func() telegraf.Input { return &UdpListener{} diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index bdbab318b..fa9980682 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -1,20 +1,36 @@ package udp_listener import ( + "fmt" "io/ioutil" "log" + "net" "testing" "time" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + + testMsgs = ` +cpu_load_short,host=server02 value=12.0 1422568543702900257 +cpu_load_short,host=server03 value=12.0 1422568543702900257 +cpu_load_short,host=server04 value=12.0 1422568543702900257 +cpu_load_short,host=server05 value=12.0 1422568543702900257 +cpu_load_short,host=server06 value=12.0 1422568543702900257 +` ) func newTestUdpListener() (*UdpListener, chan []byte) { in := make(chan []byte, 1500) listener := &UdpListener{ ServiceAddress: ":8125", - UDPPacketSize: 1500, AllowedPendingMessages: 10000, in: in, done: make(chan struct{}), @@ -22,6 +38,72 @@ func newTestUdpListener() (*UdpListener, chan []byte) { return listener, in } +func TestHighTrafficUDP(t *testing.T) { + listener := UdpListener{ + ServiceAddress: ":8126", + AllowedPendingMessages: 100000, + } + listener.parser, _ = parsers.NewInfluxParser() + acc := &testutil.Accumulator{} + + // send multiple messages to socket + err := listener.Start(acc) + require.NoError(t, err) + + time.Sleep(time.Millisecond * 25) + conn, err := net.Dial("udp", "127.0.0.1:8126") + require.NoError(t, err) + for i := 0; i < 20000; i++ { + // arbitrary, just to give the OS buffer some slack handling the + // packet storm. + time.Sleep(time.Microsecond) + fmt.Fprintf(conn, testMsgs) + } + time.Sleep(time.Millisecond) + listener.Stop() + + // this is not an exact science, since UDP packets can easily get lost or + // dropped, but assume that the OS will be able to + // handle at least 90% of the sent UDP packets. 
+	assert.InDelta(t, 100000, len(acc.Metrics), 10000)
+}
+
+func TestConnectUDP(t *testing.T) {
+	listener := UdpListener{
+		ServiceAddress:         ":8127",
+		AllowedPendingMessages: 10000,
+	}
+	listener.parser, _ = parsers.NewInfluxParser()
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+	conn, err := net.Dial("udp", "127.0.0.1:8127")
+	require.NoError(t, err)
+
+	// send single message to socket
+	fmt.Fprintf(conn, testMsg)
+	time.Sleep(time.Millisecond * 15)
+	acc.AssertContainsTaggedFields(t, "cpu_load_short",
+		map[string]interface{}{"value": float64(12)},
+		map[string]string{"host": "server01"},
+	)
+
+	// send multiple messages to socket
+	fmt.Fprintf(conn, testMsgs)
+	time.Sleep(time.Millisecond * 15)
+	hostTags := []string{"server02", "server03",
+		"server04", "server05", "server06"}
+	for _, hostTag := range hostTags {
+		acc.AssertContainsTaggedFields(t, "cpu_load_short",
+			map[string]interface{}{"value": float64(12)},
+			map[string]string{"host": hostTag},
+		)
+	}
+}
+
 func TestRunParser(t *testing.T) {
 	log.SetOutput(ioutil.Discard)
 	var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257")
diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go
index 2b0e84514..c75041221 100644
--- a/plugins/inputs/varnish/varnish.go
+++ b/plugins/inputs/varnish/varnish.go
@@ -77,13 +77,13 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
 	if s.filter == nil {
 		var err error
 		if len(s.Stats) == 0 {
-			s.filter, err = filter.CompileFilter(defaultStats)
+			s.filter, err = filter.Compile(defaultStats)
 		} else {
 			// legacy support, change "all" -> "*":
 			if s.Stats[0] == "all" {
 				s.Stats[0] = "*"
 			}
-			s.filter, err = filter.CompileFilter(s.Stats)
+			s.filter, err = filter.Compile(s.Stats)
 		}
 		if err != nil {
 			return err
@@ -146,7 +146,9 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
 func init() {
 	inputs.Add("varnish", func() telegraf.Input {
 		return &Varnish{
-			run: varnishRunner,
+			run:    varnishRunner,
+			Stats:  defaultStats,
+			Binary: defaultBinary,
 		}
 	})
 }
diff --git a/plugins/inputs/webhooks/README.md b/plugins/inputs/webhooks/README.md
index 86e6685b8..bc7714e9e 100644
--- a/plugins/inputs/webhooks/README.md
+++ b/plugins/inputs/webhooks/README.md
@@ -15,6 +15,7 @@ $ sudo service telegraf start
 ## Available webhooks
+- [Filestack](filestack/)
 - [Github](github/)
 - [Mandrill](mandrill/)
 - [Rollbar](rollbar/)
diff --git a/plugins/inputs/webhooks/filestack/README.md b/plugins/inputs/webhooks/filestack/README.md
new file mode 100644
index 000000000..585e6f202
--- /dev/null
+++ b/plugins/inputs/webhooks/filestack/README.md
@@ -0,0 +1,17 @@
+# Filestack webhook
+
+Configure your Filestack webhooks to point at Telegraf's `webhooks` service. To do this, go to `filestack.com/`, select your app, and click `Credentials > Webhooks`. On the resulting page, set the `URL` to `http://<your_telegraf_host>:1619/filestack` and click `Add`.
+
+## Events
+
+See the [webhook doc](https://www.filestack.com/docs/webhooks).
+
+*Limitations*: it stores all events except video conversion events.
+
+Every event logs the original timestamp, the action, and the id.
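+
+For example, an `fp.upload` event with id `100946` and timestamp `1443444905`
+would be stored as a point like this (a sketch in InfluxDB line protocol,
+using the measurement name, tag, and field emitted by the plugin code below):
+
+```
+filestack_webhooks,action=fp.upload id="100946" 1443444905000000000
+```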
+ +**Tags:** +* 'action' = `event.action` string + +**Fields:** +* 'id' = `event.id` string diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks.go b/plugins/inputs/webhooks/filestack/filestack_webhooks.go new file mode 100644 index 000000000..19f8c0251 --- /dev/null +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks.go @@ -0,0 +1,44 @@ +package filestack + +import ( + "encoding/json" + "io/ioutil" + "log" + "net/http" + "time" + + "github.com/gorilla/mux" + "github.com/influxdata/telegraf" +) + +type FilestackWebhook struct { + Path string + acc telegraf.Accumulator +} + +func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { + router.HandleFunc(fs.Path, fs.eventHandler).Methods("POST") + + log.Printf("I! Started the webhooks_filestack on %s\n", fs.Path) + fs.acc = acc +} + +func (fs *FilestackWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + event := &FilestackEvent{} + err = json.Unmarshal(body, event) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + fs.acc.AddFields("filestack_webhooks", event.Fields(), event.Tags(), time.Unix(event.TimeStamp, 0)) + + w.WriteHeader(http.StatusOK) +} diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go b/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go new file mode 100644 index 000000000..93f976f60 --- /dev/null +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go @@ -0,0 +1,21 @@ +package filestack + +import "strconv" + +type FilestackEvent struct { + Action string `json:"action"` + TimeStamp int64 `json:"timestamp"` + Id int `json:"id"` +} + +func (fe *FilestackEvent) Tags() map[string]string { + return map[string]string{ + "action": fe.Action, + } +} + +func (fe *FilestackEvent) Fields() map[string]interface{} { + return map[string]interface{}{ + "id": strconv.Itoa(fe.Id), + } +} diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks_events_json_test.go b/plugins/inputs/webhooks/filestack/filestack_webhooks_events_json_test.go new file mode 100644 index 000000000..351de01b4 --- /dev/null +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks_events_json_test.go @@ -0,0 +1,110 @@ +package filestack + +func DialogOpenJSON() string { + return `{ + "action": "fp.dialog", + "timestamp": 1435584646, + "id": 102, + "text": { + "mimetypes": ["*/*"], + "iframe": false, + "language": "en", + "id": "1435584650723", + "mobile": false, + "app":{ + "upsell": "false", + "apikey": "YOUR_API_KEY", + "customization":{ + "saveas_subheader": "Save it down to your local device or onto the Cloud", + "folder_subheader": "Choose a folder to share with this application", + "open_subheader": "Choose from the files on your local device or the ones you have online", + "folder_header": "Select a folder", + "help_text": "", + "saveas_header": "Save your file", + "open_header": "Upload a file" + } + }, + "dialogType": "open", + "auth": false, + "welcome_header": "Upload a file", + "welcome_subheader": "Choose from the files on your local device or the ones you have online", + "help_text": "", + "recent_path": "/", + "extensions": null, + "maxSize": 0, + "signature": null, + "policy": null, + "custom_providers": "imgur,cloudapp", + "intra": false + } + }` +} + +func UploadJSON() string { + return `{ + "action":"fp.upload", + "timestamp":1443444905, + "id":100946, + "text":{ + 
"url":"https://www.filestackapi.com/api/file/WAunDTTqQfCNWwUUyf6n", + "client":"Facebook", + "type":"image/jpeg", + "filename":"1579337399020824.jpg", + "size":139154 + } + }` +} + +func VideoConversionJSON() string { + return `{ + "status":"completed", + "message":"Done", + "data":{ + "thumb":"https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W", + "thumb100x100":"https://process.filestackapi.com/AhTgLagciQByzXpFGRI0Az/resize=w:100,h:100,f:crop/output=f:jpg,q:66/https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W", + "thumb200x200":"https://process.filestackapi.com/AhTgLagciQByzXpFGRI0Az/resize=w:200,h:200,f:crop/output=f:jpg,q:66/https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W", + "thumb300x300":"https://process.filestackapi.com/AhTgLagciQByzXpFGRI0Az/resize=w:300,h:300,f:crop/output=f:jpg,q:66/https://cdn.filestackcontent.com/f1e8V88QDuxzOvtOAq1W", + "url":"https://cdn.filestackcontent.com/VgvFVdvvTkml0WXPIoGn" + }, + "metadata":{ + "result":{ + "audio_channels":2, + "audio_codec":"vorbis", + "audio_sample_rate":44100, + "created_at":"2015/12/21 20:45:19 +0000", + "duration":10587, + "encoding_progress":100, + "encoding_time":8, + "extname":".webm", + "file_size":293459, + "fps":24, + "height":260, + "mime_type":"video/webm", + "started_encoding_at":"2015/12/21 20:45:22 +0000", + "updated_at":"2015/12/21 20:45:32 +0000", + "video_bitrate":221, + "video_codec":"vp8", + "width":300 + }, + "source":{ + "audio_bitrate":125, + "audio_channels":2, + "audio_codec":"aac", + "audio_sample_rate":44100, + "created_at":"2015/12/21 20:45:19 +0000", + "duration":10564, + "extname":".mp4", + "file_size":875797, + "fps":24, + "height":360, + "mime_type":"video/mp4", + "updated_at":"2015/12/21 20:45:32 +0000", + "video_bitrate":196, + "video_codec":"h264", + "width":480 + } + }, + "timestamp":"1453850583", + "uuid":"638311d89d2bc849563a674a45809b7c" + }` +} diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks_test.go b/plugins/inputs/webhooks/filestack/filestack_webhooks_test.go new file mode 100644 index 000000000..13f69e5cb --- /dev/null +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks_test.go @@ -0,0 +1,74 @@ +package filestack + +import ( + "github.com/influxdata/telegraf/testutil" + "net/http" + "net/http/httptest" + + "strings" + "testing" +) + +func postWebhooks(md *FilestackWebhook, eventBody string) *httptest.ResponseRecorder { + req, _ := http.NewRequest("POST", "/filestack", strings.NewReader(eventBody)) + w := httptest.NewRecorder() + + md.eventHandler(w, req) + + return w +} + +func TestDialogEvent(t *testing.T) { + var acc testutil.Accumulator + fs := &FilestackWebhook{Path: "/filestack", acc: &acc} + resp := postWebhooks(fs, DialogOpenJSON()) + if resp.Code != http.StatusOK { + t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK) + } + + fields := map[string]interface{}{ + "id": "102", + } + + tags := map[string]string{ + "action": "fp.dialog", + } + + acc.AssertContainsTaggedFields(t, "filestack_webhooks", fields, tags) +} + +func TestParseError(t *testing.T) { + fs := &FilestackWebhook{Path: "/filestack"} + resp := postWebhooks(fs, "") + if resp.Code != http.StatusBadRequest { + t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusBadRequest) + } +} + +func TestUploadEvent(t *testing.T) { + var acc testutil.Accumulator + fs := &FilestackWebhook{Path: "/filestack", acc: &acc} + resp := postWebhooks(fs, UploadJSON()) + if resp.Code != http.StatusOK { + t.Errorf("POST returned HTTP status code 
%v.\nExpected %v", resp.Code, http.StatusOK) + } + + fields := map[string]interface{}{ + "id": "100946", + } + + tags := map[string]string{ + "action": "fp.upload", + } + + acc.AssertContainsTaggedFields(t, "filestack_webhooks", fields, tags) +} + +func TestVideoConversionEvent(t *testing.T) { + var acc testutil.Accumulator + fs := &FilestackWebhook{Path: "/filestack", acc: &acc} + resp := postWebhooks(fs, VideoConversionJSON()) + if resp.Code != http.StatusBadRequest { + t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusBadRequest) + } +} diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 5327363f4..139c76971 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -17,7 +17,7 @@ type GithubWebhook struct { func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { router.HandleFunc(gh.Path, gh.eventHandler).Methods("POST") - log.Printf("Started the webhooks_github on %s\n", gh.Path) + log.Printf("I! Started the webhooks_github on %s\n", gh.Path) gh.acc = acc } @@ -58,7 +58,7 @@ func (e *newEventError) Error() string { } func NewEvent(data []byte, name string) (Event, error) { - log.Printf("New %v event received", name) + log.Printf("D! New %v event received", name) switch name { case "commit_comment": return generateEvent(data, &CommitCommentEvent{}) diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index e9d4a6de4..4a14c8894 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -21,7 +21,7 @@ func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator router.HandleFunc(md.Path, md.returnOK).Methods("HEAD") router.HandleFunc(md.Path, md.eventHandler).Methods("POST") - log.Printf("Started the webhooks_mandrill on %s\n", md.Path) + log.Printf("I! Started the webhooks_mandrill on %s\n", md.Path) md.acc = acc } diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go index 8b8dada50..6b6f0965c 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go @@ -19,7 +19,7 @@ type RollbarWebhook struct { func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { router.HandleFunc(rb.Path, rb.eventHandler).Methods("POST") - log.Printf("Started the webhooks_rollbar on %s\n", rb.Path) + log.Printf("I! 
Started the webhooks_rollbar on %s\n", rb.Path) rb.acc = acc } diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index 884435c36..fcddbebd7 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -10,6 +10,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/webhooks/filestack" "github.com/influxdata/telegraf/plugins/inputs/webhooks/github" "github.com/influxdata/telegraf/plugins/inputs/webhooks/mandrill" "github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar" @@ -26,9 +27,10 @@ func init() { type Webhooks struct { ServiceAddress string - Github *github.GithubWebhook - Mandrill *mandrill.MandrillWebhook - Rollbar *rollbar.RollbarWebhook + Github *github.GithubWebhook + Filestack *filestack.FilestackWebhook + Mandrill *mandrill.MandrillWebhook + Rollbar *rollbar.RollbarWebhook } func NewWebhooks() *Webhooks { @@ -40,6 +42,9 @@ func (wb *Webhooks) SampleConfig() string { ## Address and port to host Webhook listener on service_address = ":1619" + [inputs.webhooks.filestack] + path = "/filestack" + [inputs.webhooks.github] path = "/github" @@ -68,7 +73,7 @@ func (wb *Webhooks) Listen(acc telegraf.Accumulator) { err := http.ListenAndServe(fmt.Sprintf("%s", wb.ServiceAddress), r) if err != nil { - log.Printf("Error starting server: %v", err) + log.Printf("E! Error starting server: %v", err) } } @@ -95,10 +100,10 @@ func (wb *Webhooks) AvailableWebhooks() []Webhook { func (wb *Webhooks) Start(acc telegraf.Accumulator) error { go wb.Listen(acc) - log.Printf("Started the webhooks service on %s\n", wb.ServiceAddress) + log.Printf("I! Started the webhooks service on %s\n", wb.ServiceAddress) return nil } func (rb *Webhooks) Stop() { - log.Println("Stopping the Webhooks service") + log.Println("I! Stopping the Webhooks service") } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 4684289ee..60b9ff55d 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -107,7 +107,8 @@ type item struct { counterHandle win.PDH_HCOUNTER } -var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec", " ", "_") +var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec", + " ", "_", "%", "Percent", `\`, "") func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName string, counter string, instance string, measurement string, include_total bool) { @@ -271,6 +272,9 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN. 
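+		// This is the usual PDH size-negotiation dance: the call above passes
+		// an empty buffer so PDH reports the required bufSize/bufCount via
+		// PDH_MORE_DATA, and the properly sized buffer is then filled below.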
 		if ret == win.PDH_MORE_DATA {
 			filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
+			if len(filledBuf) == 0 {
+				continue
+			}
 			ret = win.PdhGetFormattedCounterArrayDouble(metric.counterHandle, &bufSize,
 				&bufCount, &filledBuf[0])
 			for i := 0; i < int(bufCount); i++ {
@@ -299,13 +303,12 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 					tags["instance"] = s
 				}
 				tags["objectname"] = metric.objectName
-				fields[sanitizedChars.Replace(string(metric.counter))] = float32(c.FmtValue.DoubleValue)
+				fields[sanitizedChars.Replace(metric.counter)] =
+					float32(c.FmtValue.DoubleValue)
-				var measurement string
-				if metric.measurement == "" {
+				measurement := sanitizedChars.Replace(metric.measurement)
+				if measurement == "" {
 					measurement = "win_perf_counters"
-				} else {
-					measurement = metric.measurement
 				}
 				acc.AddFields(measurement, fields, tags)
 			}
diff --git a/plugins/inputs/zookeeper/README.md b/plugins/inputs/zookeeper/README.md
index bc7c17a4b..80281a87d 100644
--- a/plugins/inputs/zookeeper/README.md
+++ b/plugins/inputs/zookeeper/README.md
@@ -27,40 +27,39 @@ echo mntr | nc localhost 2181
 zk_max_file_descriptor_count 1024 - only available on Unix platforms
 ```
-## Measurements:
-#### Zookeeper measurements:
+## Configuration
-Meta:
-- units: int64
-- tags: `server= port= state=`
+```
+# Reads 'mntr' stats from one or many zookeeper servers
+[[inputs.zookeeper]]
+  ## An array of addresses to gather stats about. Specify an ip or hostname
+  ## with port, e.g. localhost:2181, 10.0.0.1:2181, etc.
-Measurement names:
-- zookeeper_avg_latency
-- zookeeper_max_latency
-- zookeeper_min_latency
-- zookeeper_packets_received
-- zookeeper_packets_sent
-- zookeeper_outstanding_requests
-- zookeeper_znode_count
-- zookeeper_watch_count
-- zookeeper_ephemerals_count
-- zookeeper_approximate_data_size
-- zookeeper_followers #only exposed by the Leader
-- zookeeper_synced_followers #only exposed by the Leader
-- zookeeper_pending_syncs #only exposed by the Leader
-- zookeeper_open_file_descriptor_count
-- zookeeper_max_file_descriptor_count
+  ## If no servers are specified, then localhost is used as the host. 
+ ## If no port is specified, 2181 is used + servers = [":2181"] +``` -#### Zookeeper string measurements: +## InfluxDB Measurement: -Meta: -- units: string -- tags: `server= port= state=` - -Measurement names: -- zookeeper_version - -### Tags: - -- All measurements have the following tags: - - +``` +M zookeeper + T host + T port + T state + + F approximate_data_size integer + F avg_latency integer + F ephemerals_count integer + F max_file_descriptor_count integer + F max_latency integer + F min_latency integer + F num_alive_connections integer + F open_file_descriptor_count integer + F outstanding_requests integer + F packets_received integer + F packets_sent integer + F version string + F watch_count integer + F znode_count integer +``` \ No newline at end of file diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 27f8958fe..28354e7e4 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -14,6 +14,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" _ "github.com/influxdata/telegraf/plugins/outputs/librato" _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" + _ "github.com/influxdata/telegraf/plugins/outputs/nats" _ "github.com/influxdata/telegraf/plugins/outputs/nsq" _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index f88c2ddc5..a113f2616 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -73,7 +73,7 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { metricCounter++ } } else { - log.Printf("unable to build Metric for %s, skipping\n", m.Name()) + log.Printf("I! unable to build Metric for %s, skipping\n", m.Name()) } } diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index bf9353d6e..00cc1a39d 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -153,10 +153,10 @@ func (q *AMQP) Connect() error { } q.channel = channel go func() { - log.Printf("Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error))) - log.Printf("Trying to reconnect") + log.Printf("I! Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error))) + log.Printf("I! Trying to reconnect") for err := q.Connect(); err != nil; err = q.Connect() { - log.Println(err) + log.Println("E! ", err.Error()) time.Sleep(10 * time.Second) } diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index e143c23aa..045dae462 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -80,7 +80,7 @@ func (c *CloudWatch) Connect() error { _, err := svc.ListMetrics(params) // Try a read-only call to test connection. if err != nil { - log.Printf("cloudwatch: Error in ListMetrics API call : %+v \n", err.Error()) + log.Printf("E! cloudwatch: Error in ListMetrics API call : %+v \n", err.Error()) } c.svc = svc @@ -131,7 +131,7 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { _, err := c.svc.PutMetricData(params) if err != nil { - log.Printf("CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error()) + log.Printf("E! 
CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error())
 	}
 	return err
diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go
index 088568718..cf54de725 100644
--- a/plugins/outputs/datadog/datadog.go
+++ b/plugins/outputs/datadog/datadog.go
@@ -92,7 +92,7 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
 				metricCounter++
 			}
 		} else {
-			log.Printf("unable to build Metric for %s, skipping\n", m.Name())
+			log.Printf("I! unable to build Metric for %s, skipping\n", m.Name())
 		}
 	}
diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go
index fb95aff83..c78b74275 100644
--- a/plugins/outputs/graphite/graphite.go
+++ b/plugins/outputs/graphite/graphite.go
@@ -85,7 +85,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 	for _, metric := range metrics {
 		gMetrics, err := s.Serialize(metric)
 		if err != nil {
-			log.Printf("Error serializing some metrics to graphite: %s", err.Error())
+			log.Printf("E! Error serializing some metrics to graphite: %s", err.Error())
 		}
 		bp = append(bp, gMetrics...)
 	}
@@ -102,7 +102,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 		}
 		if _, e := g.conns[n].Write([]byte(graphitePoints)); e != nil {
 			// Error
-			log.Println("ERROR: " + e.Error())
+			log.Println("E! Graphite Error: " + e.Error())
 			// Let's try the next one
 		} else {
 			// Success
diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md
index b55a2c4c9..864177a36 100644
--- a/plugins/outputs/influxdb/README.md
+++ b/plugins/outputs/influxdb/README.md
@@ -2,6 +2,42 @@
 This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
+### Configuration:
+
+```toml
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  ## Multiple urls can be specified as part of the same cluster;
+  ## only ONE of the urls will be written to in each interval.
+  # urls = ["udp://localhost:8089"] # UDP endpoint example
+  urls = ["http://localhost:8086"] # required
+  ## The target database for metrics (telegraf will create it if it does not exist).
+  database = "telegraf" # required
+
+  ## Retention policy to write to. Empty string writes to the default rp.
+  retention_policy = ""
+  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
+  write_consistency = "any"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  ## Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+```
+
 ### Required parameters:
 * `urls`: List of strings, this is for InfluxDB clustering
@@ -12,16 +48,14 @@ to write to. Each URL should start with either `http://` or `udp://`
 ### Optional parameters:
+* `write_consistency`: Write consistency (clusters only), can be: "any", "one", "quorum", "all".
 * `retention_policy`: Retention policy to write to.
-* `precision`: Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
note: using "s" precision greatly improves InfluxDB compression. * `timeout`: Write timeout (for the InfluxDB client), formatted as a string. If not provided, will default to 5s. 0s means no timeout (not recommended). * `username`: Username for influxdb * `password`: Password for influxdb * `user_agent`: Set the user agent for HTTP POSTs (can be useful for log differentiation) * `udp_payload`: Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - ## Optional SSL Config * `ssl_ca`: SSL CA * `ssl_cert`: SSL CERT * `ssl_key`: SSL key * `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false) -* `write_consistency`: Write consistency for clusters only, can be: "any", "one", "quorom", "all" diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 2b9fd101c..8c23b2c5a 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -55,7 +55,7 @@ var sampleConfig = ` ## Retention policy to write to. Empty string writes to the default rp. retention_policy = "" - ## Write consistency (clusters only), can be: "any", "one", "quorom", "all" + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all" write_consistency = "any" ## Write timeout (for the InfluxDB client), formatted as a string. @@ -130,7 +130,7 @@ func (i *InfluxDB) Connect() error { err = createDatabase(c, i.Database) if err != nil { - log.Println("Database creation failed: " + err.Error()) + log.Println("E! Database creation failed: " + err.Error()) continue } @@ -146,7 +146,7 @@ func (i *InfluxDB) Connect() error { func createDatabase(c client.Client, database string) error { // Create Database if it doesn't exist _, err := c.Query(client.Query{ - Command: fmt.Sprintf("CREATE DATABASE IF NOT EXISTS \"%s\"", database), + Command: fmt.Sprintf("CREATE DATABASE \"%s\"", database), }) return err } @@ -201,11 +201,11 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { for _, n := range p { if e := i.conns[n].Write(bp); e != nil { // Log write failure - log.Printf("ERROR: %s", e) + log.Printf("E! InfluxDB Output Error: %s", e) // If the database was not found, try to recreate it if strings.Contains(e.Error(), "database not found") { if errc := createDatabase(i.conns[n], i.Database); errc != nil { - log.Printf("ERROR: Database %s not found and failed to recreate\n", + log.Printf("E! Error: Database %s not found and failed to recreate\n", i.Database) } } diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index 2fcc28cc0..ac8ac57b2 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -35,7 +35,8 @@ const ( ) var ( - StatIncludesBadChar = regexp.MustCompile("[^[:alnum:][:blank:]-_.]") + ValueIncludesBadChar = regexp.MustCompile("[^[:digit:].]") + MetricNameReplacer = regexp.MustCompile("[^-[:alnum:]_.]+") ) var sampleConfig = ` @@ -118,7 +119,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { stats, err := s.Serialize(toSerialize) if err != nil { - log.Printf("Error serializing a metric to Instrumental: %s", err) + log.Printf("E! 
Error serializing a metric to Instrumental: %s", err)
 		}
 		switch metricType {
@@ -131,10 +132,19 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 		}
 		for _, stat := range stats {
-			if !StatIncludesBadChar.MatchString(stat) {
-				points = append(points, fmt.Sprintf("%s %s", metricType, stat))
+			// decompose "metric.name value time"
+			splitStat := strings.SplitN(stat, " ", 3)
+			metric := splitStat[0]
+			value := splitStat[1]
+			time := splitStat[2]
+
+			// replace invalid components of metric name with underscore
+			clean_metric := MetricNameReplacer.ReplaceAllString(metric, "_")
+
+			if !ValueIncludesBadChar.MatchString(value) {
+				points = append(points, fmt.Sprintf("%s %s %s %s", metricType, clean_metric, value, time))
 			} else if i.Debug {
-				log.Printf("Unable to send bad stat: %s", stat)
+				log.Printf("E! Instrumental unable to send bad stat: %s", stat)
 			}
 		}
 	}
@@ -142,9 +152,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error {
 	allPoints := strings.Join(points, "\n") + "\n"
 	_, err = fmt.Fprintf(i.conn, allPoints)
-	if i.Debug {
-		log.Println(allPoints)
-	}
+	log.Println("D! Instrumental: " + allPoints)
 	if err != nil {
 		if err == io.EOF {
diff --git a/plugins/outputs/instrumental/instrumental_test.go b/plugins/outputs/instrumental/instrumental_test.go
index 9708a2590..0d1501ac1 100644
--- a/plugins/outputs/instrumental/instrumental_test.go
+++ b/plugins/outputs/instrumental/instrumental_test.go
@@ -49,21 +49,28 @@ func TestWrite(t *testing.T) {
 		map[string]interface{}{"value": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	// We will drop metrics that simply won't be accepted by Instrumental
+	// We will modify metric names that won't be accepted by Instrumental
 	m4, _ := telegraf.NewMetric(
+		"bad_metric_name",
+		map[string]string{"host": "192.168.0.1:8888::123", "metric_type": "counter"},
+		map[string]interface{}{"value": 1},
+		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+	)
+	// We will drop metric values that won't be accepted by Instrumental
+	m5, _ := telegraf.NewMetric(
 		"bad_values",
 		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
 		map[string]interface{}{"value": "\" 3:30\""},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	m5, _ := telegraf.NewMetric(
+	m6, _ := telegraf.NewMetric(
 		"my_counter",
 		map[string]string{"host": "192.168.0.1", "metric_type": "counter"},
 		map[string]interface{}{"value": float64(3.14)},
 		time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
 	)
-	metrics = []telegraf.Metric{m3, m4, m5}
+	metrics = []telegraf.Metric{m3, m4, m5, m6}
 	i.Write(metrics)
 	wg.Wait()
@@ -101,8 +108,15 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup) {
 	data3, _ := tp.ReadLine()
 	assert.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3)
+
 	data4, _ := tp.ReadLine()
-	assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data4)
+	assert.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", data4)
+
+	data5, _ := tp.ReadLine()
+	assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5)
+
+	data6, _ := tp.ReadLine()
+	assert.Equal(t, "", data6)
 	conn.Close()
 }
diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md
new file mode 100644
index 000000000..390407e14
--- /dev/null
+++ b/plugins/outputs/kafka/README.md
@@ -0,0 +1,67 @@
+# Kafka Producer Output Plugin
+
+This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting as a Kafka 
Producer. + +``` +[[outputs.kafka]] + ## URLs of kafka brokers + brokers = ["localhost:9092"] + ## Kafka topic for producer messages + topic = "telegraf" + ## Telegraf tag to use as a routing key + ## ie, if this tag exists, it's value will be used as the routing key + routing_tag = "host" + + ## CompressionCodec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : No compression + ## 1 : Gzip compression + ## 2 : Snappy compression + compression_codec = 0 + + ## RequiredAcks is used in Produce Requests to tell the broker how many + ## replica acknowledgements it must see before responding + ## 0 : the producer never waits for an acknowledgement from the broker. + ## This option provides the lowest latency but the weakest durability + ## guarantees (some data will be lost when a server fails). + ## 1 : the producer gets an acknowledgement after the leader replica has + ## received the data. This option provides better durability as the + ## client waits until the server acknowledges the request as successful + ## (only messages that were written to the now-dead leader but not yet + ## replicated will be lost). + ## -1: the producer gets an acknowledgement after all in-sync replicas have + ## received the data. This option provides the best durability, we + ## guarantee that no messages will be lost as long as at least one in + ## sync replica remains. + required_acks = -1 + + ## The total number of times to retry sending a message + max_retry = 3 + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + data_format = "influx" +``` + +### Required parameters: + +* `brokers`: List of strings, this is for speaking to a cluster of `kafka` brokers. On each flush interval, Telegraf will randomly choose one of the urls to write to. Each URL should just include host and port e.g. -> `["{host}:{port}","{host2}:{port2}"]` +* `topic`: The `kafka` topic to publish to. + + +### Optional parameters: + +* `routing_tag`: if this tag exists, it's value will be used as the routing key +* `compression_codec`: What level of compression to use: `0` -> no compression, `1` -> gzip compression, `2` -> snappy compression +* `required_acks`: a setting for how may `acks` required from the `kafka` broker cluster. +* `max_retry`: Max number of times to retry failed write +* `ssl_ca`: SSL CA +* `ssl_cert`: SSL CERT +* `ssl_key`: SSL key +* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false) +* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index b0fb56655..a30ab8801 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -83,7 +83,7 @@ func (k *KinesisOutput) Connect() error { // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using // environment variables, and then Shared Credentials. if k.Debug { - log.Printf("kinesis: Establishing a connection to Kinesis in %+v", k.Region) + log.Printf("E! 
kinesis: Establishing a connection to Kinesis in %+v", k.Region) } credentialConfig := &internalaws.CredentialConfig{ @@ -105,17 +105,17 @@ func (k *KinesisOutput) Connect() error { resp, err := svc.ListStreams(KinesisParams) if err != nil { - log.Printf("kinesis: Error in ListSteams API call : %+v \n", err) + log.Printf("E! kinesis: Error in ListSteams API call : %+v \n", err) } if checkstream(resp.StreamNames, k.StreamName) { if k.Debug { - log.Printf("kinesis: Stream Exists") + log.Printf("E! kinesis: Stream Exists") } k.svc = svc return nil } else { - log.Printf("kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName) + log.Printf("E! kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName) os.Exit(1) } return err @@ -147,14 +147,14 @@ func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Du if k.Debug { resp, err := k.svc.PutRecords(payload) if err != nil { - log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error()) + log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error()) } - log.Printf("%+v \n", resp) + log.Printf("E! %+v \n", resp) } else { _, err := k.svc.PutRecords(payload) if err != nil { - log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error()) + log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error()) } } return time.Since(start) @@ -182,7 +182,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { if sz == 500 { // Max Messages Per PutRecordRequest is 500 elapsed := writekinesis(k, r) - log.Printf("Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed) + log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed) atomic.StoreUint32(&sz, 0) r = nil } diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index ccb2acd9a..3c4cb6d2a 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "log" "net/http" + "regexp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -14,19 +15,22 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/graphite" ) +// Librato structure for configuration and client type Librato struct { - ApiUser string - ApiToken string - Debug bool - NameFromTags bool - SourceTag string - Timeout internal.Duration - Template string + APIUser string + APIToken string + Debug bool + SourceTag string // Deprecated, keeping for backward-compatibility + Timeout internal.Duration + Template string - apiUrl string + APIUrl string client *http.Client } +// https://www.librato.com/docs/kb/faq/best_practices/naming_convention_metrics_sources.html#naming-limitations-for-sources-and-metrics +var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]") + var sampleConfig = ` ## Librator API Docs ## http://dev.librato.com/v1/metrics-authentication @@ -36,20 +40,21 @@ var sampleConfig = ` api_token = "my-secret-token" # required. ## Debug # debug = false - ## Tag Field to populate source attribute (optional) - ## This is typically the _hostname_ from which the metric was obtained. - source_tag = "host" ## Connection timeout. 
# timeout = "5s" - ## Output Name Template (same as graphite buckets) + ## Output source Template (same as graphite buckets) ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite - template = "host.tags.measurement.field" + ## This template is used in librato's source (not the metric's name) + template = "host" + ` +// LMetrics is the default struct for Librato's API format type LMetrics struct { Gauges []*Gauge `json:"gauges"` } +// Gauge is the gauge format for Librato's API format type Gauge struct { Name string `json:"name"` Value float64 `json:"value"` @@ -57,17 +62,22 @@ type Gauge struct { MeasureTime int64 `json:"measure_time"` } -const librato_api = "https://metrics-api.librato.com/v1/metrics" +const libratoAPI = "https://metrics-api.librato.com/v1/metrics" -func NewLibrato(apiUrl string) *Librato { +// NewLibrato is the main constructor for librato output plugins +func NewLibrato(apiURL string) *Librato { return &Librato{ - apiUrl: apiUrl, + APIUrl: apiURL, + Template: "host", } } +// Connect is the default output plugin connection function that makes sure it +// can connect to the endpoint func (l *Librato) Connect() error { - if l.ApiUser == "" || l.ApiToken == "" { - return fmt.Errorf("api_user and api_token are required fields for librato output") + if l.APIUser == "" || l.APIToken == "" { + return fmt.Errorf( + "api_user and api_token are required fields for librato output") } l.client = &http.Client{ Timeout: l.Timeout.Duration, @@ -76,114 +86,144 @@ func (l *Librato) Connect() error { } func (l *Librato) Write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { return nil } - lmetrics := LMetrics{} + if l.Template == "" { + l.Template = "host" + } + if l.SourceTag != "" { + l.Template = l.SourceTag + } + tempGauges := []*Gauge{} - metricCounter := 0 for _, m := range metrics { if gauges, err := l.buildGauges(m); err == nil { for _, gauge := range gauges { tempGauges = append(tempGauges, gauge) - metricCounter++ - if l.Debug { - log.Printf("[DEBUG] Got a gauge: %v\n", gauge) - } + log.Printf("D! Got a gauge: %v\n", gauge) + } } else { - log.Printf("unable to build Gauge for %s, skipping\n", m.Name()) - if l.Debug { - log.Printf("[DEBUG] Couldn't build gauge: %v\n", err) - } + log.Printf("I! unable to build Gauge for %s, skipping\n", m.Name()) + log.Printf("D! 
Couldn't build gauge: %v\n", err) + } } - lmetrics.Gauges = make([]*Gauge, metricCounter) - copy(lmetrics.Gauges, tempGauges[0:]) - metricsBytes, err := json.Marshal(lmetrics) - if err != nil { - return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error()) - } else { - if l.Debug { - log.Printf("[DEBUG] Librato request: %v\n", string(metricsBytes)) + metricCounter := len(tempGauges) + // make sure we send batches of at most 300 + sizeBatch := 300 + for start := 0; start < metricCounter; start += sizeBatch { + lmetrics := LMetrics{} + end := start + sizeBatch + if end > metricCounter { + end = metricCounter + sizeBatch = end - start + } + lmetrics.Gauges = make([]*Gauge, sizeBatch) + copy(lmetrics.Gauges, tempGauges[start:end]) + metricsBytes, err := json.Marshal(lmetrics) + if err != nil { + return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error()) } - } - req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes)) - if err != nil { - return fmt.Errorf("unable to create http.Request, %s\n", err.Error()) - } - req.Header.Add("Content-Type", "application/json") - req.SetBasicAuth(l.ApiUser, l.ApiToken) - resp, err := l.client.Do(req) - if err != nil { - if l.Debug { - log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error()) + log.Printf("D! Librato request: %v\n", string(metricsBytes)) + + req, err := http.NewRequest( + "POST", + l.APIUrl, + bytes.NewBuffer(metricsBytes)) + if err != nil { + return fmt.Errorf( + "unable to create http.Request, %s\n", + err.Error()) } - return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) - } else { - if l.Debug { + req.Header.Add("Content-Type", "application/json") + req.SetBasicAuth(l.APIUser, l.APIToken) + + resp, err := l.client.Do(req) + if err != nil { + log.Printf("D! Error POSTing metrics: %v\n", err.Error()) + return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 || l.Debug { htmlData, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("[DEBUG] Couldn't get response! (%v)\n", err) - } else { - log.Printf("[DEBUG] Librato response: %v\n", string(htmlData)) + log.Printf("D! Couldn't get response! (%v)\n", err) } + if resp.StatusCode != 200 { + return fmt.Errorf( + "received bad status code, %d\n %s", + resp.StatusCode, + string(htmlData)) + } + log.Printf("D! Librato response: %v\n", string(htmlData)) } } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("received bad status code, %d\n", resp.StatusCode) - } - return nil } +// SampleConfig returns the default configuration for this +// output func (l *Librato) SampleConfig() string { return sampleConfig } +// Description returns the description of this output func (l *Librato) Description() string { return "Configuration for Librato API to send metrics to." 
} func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { + gauges := []*Gauge{} - bucket := graphite.SerializeBucketName(m.Name(), m.Tags(), l.Template, "") + if m.Time().Unix() == 0 { + return gauges, fmt.Errorf( + "Measure time must not be zero\n <%s> \n", + m.String()) + } + metricSource := graphite.InsertField( + graphite.SerializeBucketName("", m.Tags(), l.Template, ""), + "value") + if metricSource == "" { + return gauges, + fmt.Errorf("undeterminable Source type from Field, %s\n", + l.Template) + } for fieldName, value := range m.Fields() { + + metricName := m.Name() + if fieldName != "value" { + metricName = fmt.Sprintf("%s.%s", m.Name(), fieldName) + } + gauge := &Gauge{ - Name: graphite.InsertField(bucket, fieldName), + Source: reUnacceptedChar.ReplaceAllString(metricSource, "-"), + Name: reUnacceptedChar.ReplaceAllString(metricName, "-"), MeasureTime: m.Time().Unix(), } - if !gauge.verifyValue(value) { + if !verifyValue(value) { continue } if err := gauge.setValue(value); err != nil { - return gauges, fmt.Errorf("unable to extract value from Fields, %s\n", + return gauges, fmt.Errorf( + "unable to extract value from Fields, %s\n", err.Error()) } - if l.SourceTag != "" { - if source, ok := m.Tags()[l.SourceTag]; ok { - gauge.Source = source - } else { - return gauges, - fmt.Errorf("undeterminable Source type from Field, %s\n", - l.SourceTag) - } - } gauges = append(gauges, gauge) } - if l.Debug { - fmt.Printf("[DEBUG] Built gauges: %v\n", gauges) - } + + log.Printf("D! Built gauges: %v\n", gauges) return gauges, nil } -func (g *Gauge) verifyValue(v interface{}) bool { +func verifyValue(v interface{}) bool { switch v.(type) { case string: return false @@ -209,12 +249,13 @@ func (g *Gauge) setValue(v interface{}) error { return nil } +//Close is used to close the connection to librato Output func (l *Librato) Close() error { return nil } func init() { outputs.Add("librato", func() telegraf.Output { - return NewLibrato(librato_api) + return NewLibrato(libratoAPI) }) } diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index e90339928..dd5755a8c 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -1,7 +1,6 @@ package librato import ( - "encoding/json" "fmt" "net/http" "net/http/httptest" @@ -10,141 +9,137 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers/graphite" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) var ( - fakeUrl = "http://test.librato.com" + fakeURL = "http://test.librato.com" fakeUser = "telegraf@influxdb.com" fakeToken = "123456" ) func fakeLibrato() *Librato { - l := NewLibrato(fakeUrl) - l.ApiUser = fakeUser - l.ApiToken = fakeToken + l := NewLibrato(fakeURL) + l.APIUser = fakeUser + l.APIToken = fakeToken return l } -func BuildTags(t *testing.T) { - testMetric := testutil.TestMetric(0.0, "test1") - graphiteSerializer := graphite.GraphiteSerializer{} - tags, err := graphiteSerializer.Serialize(testMetric) - fmt.Printf("Tags: %v", tags) - require.NoError(t, err) -} - func TestUriOverride(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - })) + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) defer ts.Close() l := NewLibrato(ts.URL) - l.ApiUser = "telegraf@influxdb.com" - l.ApiToken = "123456" + 
l.APIUser = "telegraf@influxdb.com" + l.APIToken = "123456" err := l.Connect() require.NoError(t, err) - err = l.Write(testutil.MockMetrics()) + err = l.Write([]telegraf.Metric{newHostMetric(int32(0), "name", "host")}) require.NoError(t, err) } func TestBadStatusCode(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - json.NewEncoder(w).Encode(`{ - "errors": { - "system": [ - "The API is currently down for maintenance. It'll be back shortly." - ] - } - }`) - })) + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + })) defer ts.Close() l := NewLibrato(ts.URL) - l.ApiUser = "telegraf@influxdb.com" - l.ApiToken = "123456" + l.APIUser = "telegraf@influxdb.com" + l.APIToken = "123456" err := l.Connect() require.NoError(t, err) - err = l.Write(testutil.MockMetrics()) + err = l.Write([]telegraf.Metric{newHostMetric(int32(0), "name", "host")}) if err == nil { t.Errorf("error expected but none returned") } else { - require.EqualError(t, fmt.Errorf("received bad status code, 503\n"), err.Error()) + require.EqualError( + t, + fmt.Errorf("received bad status code, 503\n "), err.Error()) } } func TestBuildGauge(t *testing.T) { + + mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix() var gaugeTests = []struct { ptIn telegraf.Metric outGauge *Gauge err error }{ { - testutil.TestMetric(0.0, "test1"), + newHostMetric(0.0, "test1", "host1"), &Gauge{ - Name: "value1.test1", - MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test1", + MeasureTime: mtime, Value: 0.0, + Source: "host1", }, nil, }, { - testutil.TestMetric(1.0, "test2"), + newHostMetric(1.0, "test2", "host2"), &Gauge{ - Name: "value1.test2", - MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test2", + MeasureTime: mtime, Value: 1.0, + Source: "host2", }, nil, }, { - testutil.TestMetric(10, "test3"), + newHostMetric(10, "test3", "host3"), &Gauge{ - Name: "value1.test3", - MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test3", + MeasureTime: mtime, Value: 10.0, + Source: "host3", }, nil, }, { - testutil.TestMetric(int32(112345), "test4"), + newHostMetric(int32(112345), "test4", "host4"), &Gauge{ - Name: "value1.test4", - MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test4", + MeasureTime: mtime, Value: 112345.0, + Source: "host4", }, nil, }, { - testutil.TestMetric(int64(112345), "test5"), + newHostMetric(int64(112345), "test5", "host5"), &Gauge{ - Name: "value1.test5", - MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test5", + MeasureTime: mtime, Value: 112345.0, + Source: "host5", }, nil, }, { - testutil.TestMetric(float32(11234.5), "test6"), + newHostMetric(float32(11234.5), "test6", "host6"), &Gauge{ - Name: "value1.test6", - MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test6", + MeasureTime: mtime, Value: 11234.5, + Source: "host6", }, nil, }, { - testutil.TestMetric("11234.5", "test7"), + newHostMetric("11234.5", "test7", "host7"), nil, nil, }, } - l := NewLibrato(fakeUrl) + l := NewLibrato(fakeURL) for _, gt := range gaugeTests { gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { @@ -167,61 +162,121 @@ func TestBuildGauge(t *testing.T) { } } +func newHostMetric(value 
interface{}, name, host string) (metric telegraf.Metric) { + metric, _ = telegraf.NewMetric( + name, + map[string]string{"host": host}, + map[string]interface{}{"value": value}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + return +} + func TestBuildGaugeWithSource(t *testing.T) { + mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) pt1, _ := telegraf.NewMetric( "test1", map[string]string{"hostname": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 0.0}, - time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + mtime, ) pt2, _ := telegraf.NewMetric( "test2", map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 1.0}, - time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC), + mtime, + ) + pt3, _ := telegraf.NewMetric( + "test3", + map[string]string{ + "hostname": "192.168.0.1", + "tag2": "value2", + "tag1": "value1"}, + map[string]interface{}{"value": 1.0}, + mtime, + ) + pt4, _ := telegraf.NewMetric( + "test4", + map[string]string{ + "hostname": "192.168.0.1", + "tag2": "value2", + "tag1": "value1"}, + map[string]interface{}{"value": 1.0}, + mtime, ) var gaugeTests = []struct { ptIn telegraf.Metric + template string outGauge *Gauge err error }{ { pt1, + "hostname", &Gauge{ - Name: "192_168_0_1.value1.test1", - MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test1", + MeasureTime: mtime.Unix(), Value: 0.0, - Source: "192.168.0.1", + Source: "192_168_0_1", }, nil, }, { pt2, + "hostname", &Gauge{ - Name: "192_168_0_1.value1.test1", - MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(), + Name: "test2", + MeasureTime: mtime.Unix(), Value: 1.0, }, fmt.Errorf("undeterminable Source type from Field, hostname"), }, + { + pt3, + "tags", + &Gauge{ + Name: "test3", + MeasureTime: mtime.Unix(), + Value: 1.0, + Source: "192_168_0_1.value1.value2", + }, + nil, + }, + { + pt4, + "hostname.tag2", + &Gauge{ + Name: "test4", + MeasureTime: mtime.Unix(), + Value: 1.0, + Source: "192_168_0_1.value2", + }, + nil, + }, } - l := NewLibrato(fakeUrl) - l.SourceTag = "hostname" + l := NewLibrato(fakeURL) for _, gt := range gaugeTests { + l.Template = gt.template gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err) } if gt.err != nil && err == nil { - t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error()) + t.Errorf( + "%s: expected an error (%s) but none returned", + gt.ptIn.Name(), + gt.err.Error()) } if len(gauges) == 0 { continue } if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauges[0]) + t.Errorf( + "%s: \nexpected %+v\ngot %+v\n", + gt.ptIn.Name(), + gt.outGauge, gauges[0]) } } } diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md new file mode 100644 index 000000000..501bd377c --- /dev/null +++ b/plugins/outputs/nats/README.md @@ -0,0 +1,37 @@ +# NATS Output Plugin + +This plugin writes to a (list of) specified NATS instance(s). 
+ +``` +[[outputs.nats]] + ## URLs of NATS servers + servers = ["nats://localhost:4222"] + ## Optional credentials + # username = "" + # password = "" + ## NATS subject for producer messages + subject = "telegraf" + ## Optional TLS Config + ## CA certificate used to self-sign NATS server(s) TLS certificate(s) + # tls_ca = "/etc/telegraf/ca.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" +``` + +### Required parameters: + +* `servers`: List of strings, this is for NATS clustering support. Each URL should start with `nats://`. +* `subject`: The NATS subject to publish to. + +### Optional parameters: + +* `username`: Username for NATS +* `password`: Password for NATS +* `tls_ca`: TLS CA +* `insecure_skip_verify`: Use TLS but skip chain & host verification (default: false) diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go new file mode 100644 index 000000000..5008937d9 --- /dev/null +++ b/plugins/outputs/nats/nats.go @@ -0,0 +1,133 @@ +package nats + +import ( + "fmt" + + nats_client "github.com/nats-io/nats" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" +) + +type NATS struct { + // Servers is the NATS server pool to connect to + Servers []string + // Credentials + Username string + Password string + // NATS subject to publish metrics to + Subject string + + // Path to CA file + SSLCA string `toml:"ssl_ca"` + // Path to host cert file + SSLCert string `toml:"ssl_cert"` + // Path to cert key file + SSLKey string `toml:"ssl_key"` + // Use SSL but skip chain & host verification + InsecureSkipVerify bool + + conn *nats_client.Conn + serializer serializers.Serializer +} + +var sampleConfig = ` + ## URLs of NATS servers + servers = ["nats://localhost:4222"] + ## Optional credentials + # username = "" + # password = "" + ## NATS subject for producer messages + subject = "telegraf" + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" +` + +func (n *NATS) SetSerializer(serializer serializers.Serializer) { + n.serializer = serializer +} + +func (n *NATS) Connect() error { + var err error + // set NATS connection options + opts := nats_client.DefaultOptions + opts.Servers = n.Servers + if n.Username != "" { + opts.User = n.Username + opts.Password = n.Password + } + + tlsConfig, err := internal.GetTLSConfig( + n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify) + if err != nil { + return err + } + if tlsConfig != nil { + // set NATS connection TLS options + opts.Secure = true + opts.TLSConfig = tlsConfig + } + + // try and connect + n.conn, err = opts.Connect() + + return err +} + +func (n *NATS) Close() error { + n.conn.Close() + return nil +} + +func (n *NATS) SampleConfig() string { + return sampleConfig +} + +func (n *NATS) Description() string { + return "Send telegraf measurements to NATS" +} + +func (n *NATS) Write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { + return nil + } + + for _, metric := range metrics { + values, err := n.serializer.Serialize(metric) + if err != nil { + return err + } + + var pubErr error + for _, value := range values { + err = n.conn.Publish(n.Subject, []byte(value)) + if err != nil { + pubErr = err + } + } + + if pubErr != nil { + // report the last publish error seen for this metric + return fmt.Errorf("FAILED to send NATS message: %s", pubErr) + } + } + return nil +} + +func init() { + outputs.Add("nats", func() telegraf.Output { + return &NATS{} + }) +} diff --git a/plugins/outputs/nats/nats_test.go b/plugins/outputs/nats/nats_test.go new file mode 100644 index 000000000..773dbaa6e --- /dev/null +++ b/plugins/outputs/nats/nats_test.go @@ -0,0 +1,31 @@ +package nats + +import ( + "testing" + + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConnectAndWrite(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + server := []string{"nats://" + testutil.GetLocalHost() + ":4222"} + s, _ := serializers.NewInfluxSerializer() + n := &NATS{ + Servers: server, + Subject: "telegraf", + serializer: s, + } + + // Verify that we can connect to the NATS daemon + err := n.Connect() + require.NoError(t, err) + + // Verify that we can successfully write data to the NATS daemon + err = n.Write(testutil.MockMetrics()) + require.NoError(t, err) +} diff --git a/plugins/outputs/opentsdb/README.md b/plugins/outputs/opentsdb/README.md index 59a03d3fd..2fd0bd2d8 100644 --- a/plugins/outputs/opentsdb/README.md +++ b/plugins/outputs/opentsdb/README.md @@ -1,6 +1,12 @@ # OpenTSDB Output Plugin -This plugin writes to a OpenTSDB instance using the "telnet" mode +This plugin writes to an OpenTSDB instance using either the "telnet" or Http mode. + +Using the Http API is the recommended way of writing metrics since OpenTSDB 2.0. +To use Http mode, set the host config option to an "http://" URL. You can also control how many +metrics are sent in each http request by setting httpBatchSize in config. + +See http://opentsdb.net/docs/build/html/api_http/put.html for details. 
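+For reference, here is a rough sketch of the request this plugin builds in Http
+mode (inferred from the HttpMetric struct introduced later in this diff): each
+batch is POSTed to /api/put as a gzip-compressed JSON array of data points,
+with values serialized as strings. Reusing the telnet example below, a single
+data point would look roughly like:
+
+```
+[
+  {"metric": "nine.telegraf.system_load1", "timestamp": 1441910356,
+   "value": "0.430000", "tags": {"dc": "homeoffice", "host": "irimame"}}
+]
+```
+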
## Transfer "Protocol" in the telnet mode @@ -10,14 +16,14 @@ The expected input from OpenTSDB is specified in the following way: put ``` -The telegraf output plugin adds an optional prefix to the metric keys so +The telegraf output plugin adds an optional prefix to the metric keys so that a subamount can be selected. ``` put <[prefix.]metric> ``` -### Example +### Example ``` put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame scope=green @@ -38,12 +44,12 @@ put nine.telegraf.ping_average_response_ms 1441910366 24.006000 dc=homeoffice ho ... ``` -## +## -The OpenTSDB interface can be simulated with this reader: +The OpenTSDB telnet interface can be simulated with this reader: ``` -// opentsdb_telnet_mode_mock.go +// opentsdb_telnet_mode_mock.go package main import ( @@ -75,4 +81,4 @@ func main() { ## Allowed values for metrics -OpenTSDB allows `integers` and `floats` as input values \ No newline at end of file +OpenTSDB allows `integers` and `floats` as input values diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 4675dfffe..d7b3eb915 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -3,10 +3,10 @@ package opentsdb import ( "fmt" "net" + "net/url" "sort" "strconv" "strings" - "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" @@ -18,6 +18,8 @@ type OpenTSDB struct { Host string Port int + HttpBatchSize int + Debug bool } @@ -28,27 +30,41 @@ var sampleConfig = ` ## prefix for metrics keys prefix = "my.specific.prefix." - ## Telnet Mode ## - ## DNS name of the OpenTSDB server in telnet mode + ## DNS name of the OpenTSDB server + ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the + ## telnet API. "http://opentsdb.example.com" will use the Http API. host = "opentsdb.example.com" - ## Port of the OpenTSDB server in telnet mode + ## Port of the OpenTSDB server port = 4242 + ## Number of data points to send to OpenTSDB in Http requests. + ## Not used with telnet API. 
+ httpBatchSize = 50 + ## Debug true - Prints OpenTSDB communication debug = false ` -type MetricLine struct { - Metric string - Timestamp int64 - Value string - Tags string +func ToLineFormat(tags map[string]string) string { + tagsArray := make([]string, len(tags)) + index := 0 + for k, v := range tags { + tagsArray[index] = fmt.Sprintf("%s=%s", k, v) + index++ + } + sort.Strings(tagsArray) + return strings.Join(tagsArray, " ") } func (o *OpenTSDB) Connect() error { // Test Connection to OpenTSDB Server - uri := fmt.Sprintf("%s:%d", o.Host, o.Port) + u, err := url.Parse(o.Host) + if err != nil { + return fmt.Errorf("Error in parsing host url: %s", err.Error()) + } + + uri := fmt.Sprintf("%s:%d", u.Host, o.Port) tcpAddr, err := net.ResolveTCPAddr("tcp", uri) if err != nil { return fmt.Errorf("OpenTSDB: TCP address cannot be resolved") @@ -65,10 +81,64 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { if len(metrics) == 0 { return nil } - now := time.Now() + u, err := url.Parse(o.Host) + if err != nil { + return fmt.Errorf("Error in parsing host url: %s", err.Error()) + } + + if u.Scheme == "" || u.Scheme == "tcp" { + return o.WriteTelnet(metrics, u) + } else if u.Scheme == "http" { + return o.WriteHttp(metrics, u) + } else { + return fmt.Errorf("Unknown scheme in host parameter.") + } +} + +func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { + http := openTSDBHttp{ + Host: u.Host, + Port: o.Port, + BatchSize: o.HttpBatchSize, + Debug: o.Debug, + } + + for _, m := range metrics { + now := m.UnixNano() / 1000000000 + tags := cleanTags(m.Tags()) + + for fieldName, value := range m.Fields() { + metricValue, buildError := buildValue(value) + if buildError != nil { + fmt.Printf("OpenTSDB: %s\n", buildError.Error()) + continue + } + + metric := &HttpMetric{ + Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", + o.Prefix, m.Name(), fieldName)), + Tags: tags, + Timestamp: now, + Value: metricValue, + } + + if err := http.sendDataPoint(metric); err != nil { + return err + } + } + } + + if err := http.flush(); err != nil { + return err + } + + return nil +} + +func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { // Send Data with telnet / socket communication - uri := fmt.Sprintf("%s:%d", o.Host, o.Port) + uri := fmt.Sprintf("%s:%d", u.Host, o.Port) tcpAddr, _ := net.ResolveTCPAddr("tcp", uri) connection, err := net.DialTCP("tcp", nil, tcpAddr) if err != nil { @@ -77,9 +147,20 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { defer connection.Close() for _, m := range metrics { - for _, metric := range buildMetrics(m, now, o.Prefix) { + now := m.UnixNano() / 1000000000 + tags := ToLineFormat(cleanTags(m.Tags())) + + for fieldName, value := range m.Fields() { + metricValue, buildError := buildValue(value) + if buildError != nil { + fmt.Printf("OpenTSDB: %s\n", buildError.Error()) + continue + } + messageLine := fmt.Sprintf("put %s %v %s %s\n", - metric.Metric, metric.Timestamp, metric.Value, metric.Tags) + sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)), + now, metricValue, tags) + if o.Debug { fmt.Print(messageLine) } @@ -93,37 +174,12 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { return nil } -func buildTags(mTags map[string]string) []string { - tags := make([]string, len(mTags)) - index := 0 - for k, v := range mTags { - tags[index] = sanitizedChars.Replace(fmt.Sprintf("%s=%s", k, v)) - index++ +func cleanTags(tags map[string]string) map[string]string { + tagSet := 
make(map[string]string, len(tags)) + for k, v := range tags { + tagSet[sanitizedChars.Replace(k)] = sanitizedChars.Replace(v) } - sort.Strings(tags) - return tags -} - -func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine { - ret := []*MetricLine{} - for fieldName, value := range m.Fields() { - metric := &MetricLine{ - Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", - prefix, m.Name(), fieldName)), - Timestamp: now.Unix(), - } - - metricValue, buildError := buildValue(value) - if buildError != nil { - fmt.Printf("OpenTSDB: %s\n", buildError.Error()) - continue - } - metric.Value = metricValue - tagsSlice := buildTags(m.Tags()) - metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) - ret = append(ret, metric) - } - return ret + return tagSet } func buildValue(v interface{}) (string, error) { diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go new file mode 100644 index 000000000..f347d5e60 --- /dev/null +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -0,0 +1,176 @@ +package opentsdb + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/httputil" + "net/url" +) + +type HttpMetric struct { + Metric string `json:"metric"` + Timestamp int64 `json:"timestamp"` + Value string `json:"value"` + Tags map[string]string `json:"tags"` +} + +type openTSDBHttp struct { + Host string + Port int + BatchSize int + Debug bool + + metricCounter int + body requestBody +} + +type requestBody struct { + b bytes.Buffer + g *gzip.Writer + + dbgB bytes.Buffer + + w io.Writer + enc *json.Encoder + + empty bool +} + +func (r *requestBody) reset(debug bool) { + r.b.Reset() + r.dbgB.Reset() + + if r.g == nil { + r.g = gzip.NewWriter(&r.b) + } else { + r.g.Reset(&r.b) + } + + if debug { + r.w = io.MultiWriter(r.g, &r.dbgB) + } else { + r.w = r.g + } + + r.enc = json.NewEncoder(r.w) + + io.WriteString(r.w, "[") + + r.empty = true +} + +func (r *requestBody) addMetric(metric *HttpMetric) error { + if !r.empty { + io.WriteString(r.w, ",") + } + + if err := r.enc.Encode(metric); err != nil { + return fmt.Errorf("Metric serialization error %s", err.Error()) + } + + r.empty = false + + return nil +} + +func (r *requestBody) close() error { + io.WriteString(r.w, "]") + + if err := r.g.Close(); err != nil { + return fmt.Errorf("Error when closing gzip writer: %s", err.Error()) + } + + return nil +} + +func (o *openTSDBHttp) sendDataPoint(metric *HttpMetric) error { + if o.metricCounter == 0 { + o.body.reset(o.Debug) + } + + if err := o.body.addMetric(metric); err != nil { + return err + } + + o.metricCounter++ + if o.metricCounter == o.BatchSize { + if err := o.flush(); err != nil { + return err + } + + o.metricCounter = 0 + } + + return nil +} + +func (o *openTSDBHttp) flush() error { + if o.metricCounter == 0 { + return nil + } + + o.body.close() + + u := url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", o.Host, o.Port), + Path: "/api/put", + } + + if o.Debug { + u.RawQuery = "details" + } + + req, err := http.NewRequest("POST", u.String(), &o.body.b) + if err != nil { + return fmt.Errorf("Error when building request: %s", err.Error()) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "gzip") + + if o.Debug { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + return fmt.Errorf("Error when dumping request: %s", err.Error()) + } + + fmt.Printf("Sending metrics:\n%s", dump) + fmt.Printf("Body:\n%s\n\n", 
o.body.dbgB.String()) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("Error when sending metrics: %s", err.Error()) + } + defer resp.Body.Close() + + if o.Debug { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return fmt.Errorf("Error when dumping response: %s", err.Error()) + } + + fmt.Printf("Received response\n%s\n\n", dump) + } else { + // Important so http client reuse connection for next request if need be. + io.Copy(ioutil.Discard, resp.Body) + } + + if resp.StatusCode/100 != 2 { + if resp.StatusCode/100 == 4 { + log.Printf("E! Received %d status code. Dropping metrics to avoid overflowing buffer.", + resp.StatusCode) + } else { + return fmt.Errorf("Error when sending metrics. Received status %d", + resp.StatusCode) + } + } + + return nil +} diff --git a/plugins/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go index 6c141d463..669ab5303 100644 --- a/plugins/outputs/opentsdb/opentsdb_test.go +++ b/plugins/outputs/opentsdb/opentsdb_test.go @@ -1,46 +1,119 @@ package opentsdb import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" "reflect" + "strconv" "testing" - // "github.com/influxdata/telegraf/testutil" - // "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + //"github.com/stretchr/testify/require" ) +func TestCleanTags(t *testing.T) { + var tagtests = []struct { + ptIn map[string]string + outTags map[string]string + }{ + { + map[string]string{"one": "two", "three": "four"}, + map[string]string{"one": "two", "three": "four"}, + }, + { + map[string]string{"aaa": "bbb"}, + map[string]string{"aaa": "bbb"}, + }, + { + map[string]string{"Sp%ci@l Chars": "g$t repl#ced"}, + map[string]string{"Sp-ci-l_Chars": "g-t_repl-ced"}, + }, + { + map[string]string{}, + map[string]string{}, + }, + } + for _, tt := range tagtests { + tags := cleanTags(tt.ptIn) + if !reflect.DeepEqual(tags, tt.outTags) { + t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags) + } + } +} + func TestBuildTagsTelnet(t *testing.T) { var tagtests = []struct { ptIn map[string]string - outTags []string + outTags string }{ { map[string]string{"one": "two", "three": "four"}, - []string{"one=two", "three=four"}, + "one=two three=four", }, { map[string]string{"aaa": "bbb"}, - []string{"aaa=bbb"}, + "aaa=bbb", }, { map[string]string{"one": "two", "aaa": "bbb"}, - []string{"aaa=bbb", "one=two"}, - }, - { - map[string]string{"Sp%ci@l Chars": "g$t repl#ced"}, - []string{"Sp-ci-l_Chars=g-t_repl-ced"}, + "aaa=bbb one=two", }, { map[string]string{}, - []string{}, + "", }, } for _, tt := range tagtests { - tags := buildTags(tt.ptIn) + tags := ToLineFormat(tt.ptIn) if !reflect.DeepEqual(tags, tt.outTags) { t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags) } } } +func BenchmarkHttpSend(b *testing.B) { + const BatchSize = 50 + const MetricsCount = 4 * BatchSize + metrics := make([]telegraf.Metric, MetricsCount) + for i := 0; i < MetricsCount; i++ { + metrics[i] = testutil.TestMetric(1.0) + } + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, "{}") + })) + defer ts.Close() + + u, err := url.Parse(ts.URL) + if err != nil { + panic(err) + } + + _, p, _ := net.SplitHostPort(u.Host) + + port, err := strconv.Atoi(p) + if err != nil { + panic(err) + } + + o := &OpenTSDB{ + Host: ts.URL, + Port: port, + Prefix: "", + HttpBatchSize: BatchSize, + } + + 
b.ResetTimer() + for i := 0; i < b.N; i++ { + o.Write(metrics) + } +} + // func TestWrite(t *testing.T) { // if testing.Short() { // t.Skip("Skipping integration test in short mode") diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 4f7ce8053..fc8926602 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -12,22 +12,13 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -var ( - invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) - - // Prometheus metric names must match this regex - // see https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels - metricName = regexp.MustCompile("^[a-zA-Z_:][a-zA-Z0-9_:]*$") - - // Prometheus labels must match this regex - // see https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels - labelName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) +var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) type PrometheusClient struct { Listen string - metrics map[string]prometheus.Metric + metrics map[string]prometheus.Metric + lastMetrics map[string]prometheus.Metric sync.Mutex } @@ -38,7 +29,9 @@ var sampleConfig = ` ` func (p *PrometheusClient) Start() error { - prometheus.MustRegister(p) + p.metrics = make(map[string]prometheus.Metric) + p.lastMetrics = make(map[string]prometheus.Metric) + prometheus.Register(p) defer func() { if r := recover(); r != nil { // recovering from panic here because there is no way to stop a @@ -93,8 +86,17 @@ func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) { p.Lock() defer p.Unlock() - for _, m := range p.metrics { - ch <- m + if len(p.metrics) > 0 { + p.lastMetrics = make(map[string]prometheus.Metric) + for k, m := range p.metrics { + ch <- m + p.lastMetrics[k] = m + } + p.metrics = make(map[string]prometheus.Metric) + } else { + for _, m := range p.lastMetrics { + ch <- m + } } } @@ -102,8 +104,6 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { p.Lock() defer p.Unlock() - p.metrics = make(map[string]prometheus.Metric) - if len(metrics) == 0 { return nil } @@ -112,6 +112,7 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { key := point.Name() key = invalidNameCharRE.ReplaceAllString(key, "_") + // convert tags into prometheus labels var labels []string l := prometheus.Labels{} for k, v := range point.Tags() { @@ -119,13 +120,21 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { if len(k) == 0 { continue } - if !labelName.MatchString(k) { - continue - } labels = append(labels, k) l[k] = v } + // Get a type if it's available, defaulting to Untyped + var mType prometheus.ValueType + switch point.Type() { + case telegraf.Counter: + mType = prometheus.CounterValue + case telegraf.Gauge: + mType = prometheus.GaugeValue + default: + mType = prometheus.UntypedValue + } + for n, val := range point.Fields() { // Ignore string and bool fields. 
switch val.(type) { @@ -144,24 +153,21 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { mname = fmt.Sprintf("%s_%s", key, n) } - // verify that it is a valid measurement name - if !metricName.MatchString(mname) { - continue - } - desc := prometheus.NewDesc(mname, "Telegraf collected metric", nil, l) var metric prometheus.Metric var err error + + // switch for field type switch val := val.(type) { case int64: - metric, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, float64(val)) + metric, err = prometheus.NewConstMetric(desc, mType, float64(val)) case float64: - metric, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, val) + metric, err = prometheus.NewConstMetric(desc, mType, val) default: continue } if err != nil { - log.Printf("ERROR creating prometheus metric, "+ + log.Printf("E! Error creating prometheus metric, "+ "key: %s, labels: %v,\nerr: %s\n", mname, l, err.Error()) } diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index e5172ac97..180f2452a 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -35,6 +35,10 @@ func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) { switch v := jsonOut[tag].(type) { case string: tags[tag] = v + case bool: + tags[tag] = strconv.FormatBool(v) + case float64: + tags[tag] = strconv.FormatFloat(v, 'f', -1, 64) } delete(jsonOut, tag) } diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index 2cc4add56..bff64d088 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -12,7 +12,7 @@ const DEFAULT_TEMPLATE = "host.tags.measurement.field" var ( fieldDeleter = strings.NewReplacer(".FIELDNAME", "", "FIELDNAME.", "") - sanitizedChars = strings.NewReplacer("/", "-", "@", "-", "*", "-", " ", "_", "..", ".") + sanitizedChars = strings.NewReplacer("/", "-", "@", "-", "*", "-", " ", "_", "..", ".", `\`, "", ")", "_", "(", "_") ) type GraphiteSerializer struct { @@ -36,8 +36,8 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) valueS := fmt.Sprintf("%#v", value) point := fmt.Sprintf("%s %s %d", // insert "field" section of template - InsertField(bucket, fieldName), - valueS, + sanitizedChars.Replace(InsertField(bucket, fieldName)), + sanitizedChars.Replace(valueS), timestamp) out = append(out, point) } @@ -100,9 +100,9 @@ func SerializeBucketName( } if prefix == "" { - return sanitizedChars.Replace(strings.Join(out, ".")) + return strings.Join(out, ".") } - return sanitizedChars.Replace(prefix + "." + strings.Join(out, ".")) + return prefix + "." + strings.Join(out, ".") } // InsertField takes the bucket string from SerializeBucketName and replaces the diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index 50ba0e2e0..57196b861 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -160,6 +160,58 @@ func TestSerializeValueField2(t *testing.T) { assert.Equal(t, expS, mS) } +// test that fields with spaces get fixed. 
+func TestSerializeFieldWithSpaces(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + `field\ with\ spaces`: float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{ + Template: "host.tags.measurement.field", + } + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("localhost.cpu0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), + } + assert.Equal(t, expS, mS) +} + +// test that tags with spaces get fixed. +func TestSerializeTagWithSpaces(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": `cpu\ 0`, + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + `field_with_spaces`: float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{ + Template: "host.tags.measurement.field", + } + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("localhost.cpu_0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), + } + assert.Equal(t, expS, mS) +} + // test that a field named "value" gets ignored at beginning of template. func TestSerializeValueField3(t *testing.T) { now := time.Now() @@ -186,6 +238,32 @@ func TestSerializeValueField3(t *testing.T) { assert.Equal(t, expS, mS) } +// test that a field named "value" gets ignored at beginning of template. +func TestSerializeValueField5(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "value": float64(91.5), + } + m, err := telegraf.NewMetric("cpu", tags, fields, now) + assert.NoError(t, err) + + s := GraphiteSerializer{ + Template: template5, + } + mS, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("localhost.us-west-2.cpu0.cpu 91.5 %d", now.Unix()), + } + assert.Equal(t, expS, mS) +} + func TestSerializeMetricPrefix(t *testing.T) { now := time.Now() tags := map[string]string{ @@ -315,20 +393,6 @@ func TestTemplate4(t *testing.T) { assert.Equal(t, expS, mS) } -func TestTemplate5(t *testing.T) { - now := time.Now() - fields := map[string]interface{}{ - "usage_idle": float64(91.5), - } - m, err := telegraf.NewMetric("cpu", defaultTags, fields, now) - assert.NoError(t, err) - - mS := SerializeBucketName(m.Name(), m.Tags(), template5, "") - - expS := "localhost.us-west-2.cpu0.cpu.FIELDNAME" - assert.Equal(t, expS, mS) -} - func TestTemplate6(t *testing.T) { now := time.Now() fields := map[string]interface{}{ diff --git a/scripts/build.py b/scripts/build.py index 426aa87bb..77befd599 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -83,29 +83,17 @@ targets = { } supported_builds = { - "darwin": [ "amd64" ], "windows": [ "amd64" ], "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64" ], "freebsd": [ "amd64" ] } supported_packages = { - "darwin": [ "tar" ], "linux": [ "deb", "rpm", "tar" ], "windows": [ "zip" ], "freebsd": [ "tar" ] } -supported_tags = { - # "linux": { - # "amd64": ["sensors"] - # } -} - -prereq_cmds = { - # "linux": "sudo apt-get install lm-sensors libsensors4-dev" -} - ################ #### Telegraf Functions ################ diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index 93bafe320..662426392 100755 --- a/scripts/circle-test.sh +++ 
b/scripts/circle-test.sh @@ -56,7 +56,8 @@ exit_if_fail make # Run the tests exit_if_fail go vet ./... exit_if_fail make docker-run-circle -sleep 10 +# Sleep for OpenTSDB leadership election, aerospike cluster, etc. +exit_if_fail sleep 60 exit_if_fail go test -race ./... # Simple Integration Tests diff --git a/scripts/post-remove.sh b/scripts/post-remove.sh index 96b178f4d..0f262d225 100644 --- a/scripts/post-remove.sh +++ b/scripts/post-remove.sh @@ -15,32 +15,28 @@ function disable_chkconfig { rm -f /etc/init.d/telegraf } -if [[ -f /etc/redhat-release ]]; then - # RHEL-variant logic - if [[ "$1" = "0" ]]; then - # InfluxDB is no longer installed, remove from init system - rm -f /etc/default/telegraf - - which systemctl &>/dev/null - if [[ $? -eq 0 ]]; then - disable_systemd - else - # Assuming sysv - disable_chkconfig - fi +if [[ "$1" == "0" ]]; then + # RHEL and any distribution that follow RHEL, Amazon Linux covered + # telegraf is no longer installed, remove from init system + rm -f /etc/default/telegraf + + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + disable_systemd + else + # Assuming sysv + disable_chkconfig fi -elif [[ -f /etc/debian_version ]]; then +elif [ "$1" == "remove" -o "$1" == "purge" ]; then # Debian/Ubuntu logic - if [[ "$1" != "upgrade" ]]; then - # Remove/purge - rm -f /etc/default/telegraf - - which systemctl &>/dev/null - if [[ $? -eq 0 ]]; then - disable_systemd - else - # Assuming sysv - disable_update_rcd - fi + # Remove/purge + rm -f /etc/default/telegraf + + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + disable_systemd + else + # Assuming sysv + disable_update_rcd fi fi diff --git a/scripts/telegraf.service b/scripts/telegraf.service index 81c9b5408..72a4a338e 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -6,9 +6,7 @@ After=network.target [Service] EnvironmentFile=-/etc/default/telegraf User=telegraf -Environment='STDOUT=/var/log/telegraf/telegraf.log' -Environment='STDERR=/var/log/telegraf/telegraf.log' -ExecStart=/bin/sh -c "exec /usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d ${TELEGRAF_OPTS} >>${STDOUT} 2>>${STDERR}" +ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d ${TELEGRAF_OPTS} ExecReload=/bin/kill -HUP $MAINPID Restart=on-failure KillMode=control-group diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 1058faf83..fe5727917 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "sync" + "sync/atomic" "testing" "time" @@ -27,19 +28,15 @@ func (p *Metric) String() string { type Accumulator struct { sync.Mutex - Metrics []*Metric - debug bool + Metrics []*Metric + nMetrics uint64 + Discard bool + Errors []error + debug bool } -// Add adds a measurement point to the accumulator -func (a *Accumulator) Add( - measurement string, - value interface{}, - tags map[string]string, - t ...time.Time, -) { - fields := map[string]interface{}{"value": value} - a.AddFields(measurement, fields, tags, t...) +func (a *Accumulator) NMetrics() uint64 { + return atomic.LoadUint64(&a.nMetrics) } // AddFields adds a measurement point with a specified timestamp. 
@@ -49,6 +46,10 @@ func (a *Accumulator) AddFields( tags map[string]string, timestamp ...time.Time, ) { + atomic.AddUint64(&a.nMetrics, 1) + if a.Discard { + return + } a.Lock() defer a.Unlock() if tags == nil { @@ -84,6 +85,34 @@ func (a *Accumulator) AddFields( a.Metrics = append(a.Metrics, p) } +func (a *Accumulator) AddCounter( + measurement string, + fields map[string]interface{}, + tags map[string]string, + timestamp ...time.Time, +) { + a.AddFields(measurement, fields, tags, timestamp...) +} + +func (a *Accumulator) AddGauge( + measurement string, + fields map[string]interface{}, + tags map[string]string, + timestamp ...time.Time, +) { + a.AddFields(measurement, fields, tags, timestamp...) +} + +// AddError appends the given error to Accumulator.Errors. +func (a *Accumulator) AddError(err error) { + if err == nil { + return + } + a.Lock() + a.Errors = append(a.Errors, err) + a.Unlock() +} + func (a *Accumulator) SetPrecision(precision, interval time.Duration) { return }
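To round out the testutil changes above, here is a minimal, hypothetical sketch (invented test name and error text) of how a plugin test might exercise the new `Accumulator.AddError` method and `Errors` field:

```go
package example

import (
	"fmt"
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

// Hypothetical sketch: AddError ignores nil and appends real errors,
// so a test can assert on everything a plugin reported via acc.AddError.
func TestGatherReportsErrors(t *testing.T) {
	var acc testutil.Accumulator

	acc.AddError(nil) // ignored by AddError
	acc.AddError(fmt.Errorf("connection refused"))

	if len(acc.Errors) != 1 {
		t.Fatalf("expected 1 accumulated error, got %d", len(acc.Errors))
	}
}
```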