Compare commits
111 Commits
Author | SHA1 | Date |
---|---|---|
Daniel Nelson | ddcb93188f | |
Daniel Nelson | cb193d0e8a | |
Daniel Nelson | 600f9fa067 | |
Daniel Nelson | 4cedae9d2c | |
Daniel Nelson | 4c8e8fc2f1 | |
Daniel Nelson | 7c5bcfe84e | |
Bob Shannon | efa20d05fa | |
Daniel Nelson | 187c7e12a8 | |
Daniel Nelson | f29a994743 | |
Daniel Nelson | f416f429d7 | |
Daniel Nelson | ec6b1aae94 | |
Daniel Nelson | b473b6a659 | |
Daniel Nelson | e5d08a4d86 | |
Daniel Nelson | 3c894bb056 | |
Daniel Nelson | d2d173b792 | |
Daniel Nelson | 145f7da42e | |
Daniel Nelson | f9f8d9ed7e | |
Sergei Smolianinov | 0dd3b0507b | |
Daniel Nelson | c44b4fcc89 | |
Daniel Nelson | cb9c1653d3 | |
Daniel Nelson | cf7590b88e | |
clheikes | 5a7d889908 | |
Daniel Nelson | ef652678dd | |
Dimitris Rozakis | c4cc57956b | |
Daniel Nelson | 7b8a761c63 | |
Ayrdrie | 7d66319f59 | |
Pierre Fersing | 22f64f8417 | |
Daniel Nelson | 6b4deb01bb | |
Daniel Nelson | e4835cdc30 | |
Daniel Nelson | e32ffdde06 | |
Windkit Li | 0f905eaee7 | |
Daniel Nelson | 4d48dcb84f | |
Daniel Nelson | 17377b4942 | |
Daniel Nelson | 0cc5fc0ce4 | |
Daniel Nelson | 8011109466 | |
Daniel Nelson | 588f0c77f8 | |
Daniel Nelson | 4301b8e32a | |
Daniel Nelson | 3c9d7db0a0 | |
Daniel Nelson | f7b3eb1ebd | |
Daniel Nelson | b8ab827629 | |
Daniel Nelson | d03e2fca32 | |
Daniel Nelson | eca00c10e0 | |
Daniel Nelson | 9cf19df04e | |
Daniel Nelson | e77c2b76e7 | |
Daniel Nelson | c749c43dab | |
Daniel Nelson | 1be17ea5af | |
Daniel Nelson | e1155bec20 | |
Daniel Nelson | cfac750469 | |
Daniel Nelson | f10d5b43c4 | |
Daniel Nelson | 47b2d04d5b | |
Daniel Nelson | 0e0da57b9a | |
Daniel Nelson | 8e7cf0109e | |
Daniel Nelson | 5b791fd2e5 | |
Daniel Nelson | 293b1a0093 | |
Daniel Nelson | 761ea06d6a | |
Daniel Nelson | 8fafe9878b | |
Daniel Nelson | 5da3eef38b | |
Daniel Nelson | 2de7aa23d7 | |
Daniel Nelson | 52cd38150c | |
Daniel Nelson | c08f492f78 | |
Daniel Nelson | 66cfe80e37 | |
Trevor Pounds | ba5e5ec283 | |
Daniel Nelson | 259f8e4002 | |
Mark Wilkinson - m82labs | 558ab0c730 | |
Daniel Nelson | 8d4fbe29e7 | |
Daniel Nelson | 72337a1c97 | |
Daniel Nelson | 86537899b2 | |
Trevor Pounds | a727d5d1f0 | |
Daniel Nelson | 7ec194a482 | |
Daniel Nelson | 5a77d28837 | |
Daniel Nelson | 47927c353d | |
Daniel Nelson | b9e7fa27aa | |
Daniel Nelson | 0d437140bd | |
Daniel Nelson | 36969a63c2 | |
DanKans | e9a12bb694 | |
Daniel Nelson | 34b7a4c361 | |
Daniel Nelson | f46370d982 | |
Daniel Nelson | 07b7e09749 | |
Daniel Nelson | e54795795d | |
Daniel Nelson | b2b2bd8a27 | |
Daniel Nelson | f96cbb48c7 | |
Seua Polyakov | 9077cb83bc | |
Daniel Nelson | 0f188f280f | |
Dylan Meissner | b9420e73bd | |
Daniel Nelson | 1e43e5e7ae | |
Jeff Nickoloff | 5e104ad974 | |
Daniel Nelson | cc9d8c700c | |
Ashton Kinslow | b15ec21ba7 | |
Daniel Nelson | a9abfe8f08 | |
Rickard von Essen | 307210242c | |
Daniel Nelson | 0a41db16f1 | |
Jan Willem Janssen | 7480267fd2 | |
Daniel Nelson | 30949c4596 | |
Daniel Nelson | 47264bc860 | |
Daniel Nelson | 67e693e9a8 | |
Daniel Nelson | 851352bc8a | |
Daniel Nelson | c807452c14 | |
Rickard von Essen | 48e00f7ea0 | |
Daniel Nelson | 8ce901aaa4 | |
Daniel Nelson | 78d1715601 | |
Daniel Nelson | 1b0a18897d | |
Daniel Nelson | 257b6a09d9 | |
Rickard von Essen | e6feac735c | |
Rickard von Essen | 6616065acf | |
Daniel Nelson | 98774d60e2 | |
Chris Goller | d4cd1b7eb4 | |
Daniel Nelson | 7254111d37 | |
Daniel Nelson | 4551efb459 | |
Daniel Nelson | 2610eba0e3 | |
Daniel Nelson | c277dc27a6 | |
Daniel Nelson | a4f5c6fbc3 |
64
CHANGELOG.md
64
CHANGELOG.md
|
@ -1,4 +1,55 @@
|
||||||
## v1.4 [unreleased]
|
## v1.4.4 [2017-11-08]
|
||||||
|
|
||||||
|
- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
|
||||||
|
- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
|
||||||
|
- [#3311](https://github.com/influxdata/telegraf/issues/3311): Fix error getting pids in netstat input.
|
||||||
|
- [#3339](https://github.com/influxdata/telegraf/issues/3339): Support HOST_VAR envvar to locate /var in system input.
|
||||||
|
- [#3383](https://github.com/influxdata/telegraf/issues/3383): Use current time if docker container read time is zero value.
|
||||||
|
|
||||||
|
## v1.4.3 [2017-10-25]
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
|
||||||
|
- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
|
||||||
|
- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query.
|
||||||
|
- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux.
|
||||||
|
- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb.
|
||||||
|
- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output.
|
||||||
|
- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit.
|
||||||
|
- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value.
|
||||||
|
- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin.
|
||||||
|
- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6.
|
||||||
|
- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems.
|
||||||
|
|
||||||
|
## v1.4.2 [2017-10-10]
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
|
||||||
|
- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
|
||||||
|
- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
|
||||||
|
- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
|
||||||
|
- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
|
||||||
|
- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
|
||||||
|
- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
|
||||||
|
- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
|
||||||
|
- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
|
||||||
|
|
||||||
|
## v1.4.1 [2017-09-26]
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
|
||||||
|
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
|
||||||
|
- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
|
||||||
|
- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
|
||||||
|
- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
|
||||||
|
- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
|
||||||
|
- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
|
||||||
|
- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
|
||||||
|
|
||||||
|
## v1.4 [2017-09-05]
|
||||||
|
|
||||||
### Release Notes
|
### Release Notes
|
||||||
|
|
||||||
|
@ -62,6 +113,7 @@
|
||||||
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
|
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
|
||||||
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
|
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
|
||||||
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
|
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
|
||||||
|
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
|
||||||
|
|
||||||
### Bugfixes
|
### Bugfixes
|
||||||
|
|
||||||
|
@ -93,6 +145,16 @@
|
||||||
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris.
|
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris.
|
||||||
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
|
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
|
||||||
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
|
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
|
||||||
|
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
|
||||||
|
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
|
||||||
|
- [#3153](https://github.com/influxdata/telegraf/issues/3153): Enable hddtemp input on all platforms.
|
||||||
|
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
|
||||||
|
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input
|
||||||
|
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
|
||||||
|
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
|
||||||
|
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
|
||||||
|
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
|
||||||
|
- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
|
||||||
|
|
||||||
## v1.3.5 [2017-07-26]
|
## v1.3.5 [2017-07-26]
|
||||||
|
|
||||||
|
|
2
Godeps
2
Godeps
|
@ -60,7 +60,7 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
|
||||||
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
||||||
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
|
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
|
||||||
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
||||||
github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
|
github.com/shirou/gopsutil 48fc5612898a1213aa5d6a0fb2d4f7b968e898fb
|
||||||
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
||||||
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
|
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
|
||||||
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
|
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
|
||||||
|
|
2
Makefile
2
Makefile
|
@ -25,7 +25,7 @@ deps:
|
||||||
gdm restore
|
gdm restore
|
||||||
|
|
||||||
telegraf:
|
telegraf:
|
||||||
go build -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
|
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
|
||||||
|
|
||||||
go-install:
|
go-install:
|
||||||
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
|
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
|
||||||
|
|
|
@ -173,6 +173,7 @@ configuration options.
|
||||||
* [zfs](./plugins/inputs/zfs)
|
* [zfs](./plugins/inputs/zfs)
|
||||||
* [zookeeper](./plugins/inputs/zookeeper)
|
* [zookeeper](./plugins/inputs/zookeeper)
|
||||||
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
|
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
|
||||||
|
* [win_services](./plugins/inputs/win_services)
|
||||||
* [sysstat](./plugins/inputs/sysstat)
|
* [sysstat](./plugins/inputs/sysstat)
|
||||||
* [system](./plugins/inputs/system)
|
* [system](./plugins/inputs/system)
|
||||||
* cpu
|
* cpu
|
||||||
|
|
|
@ -1,11 +1,14 @@
|
||||||
machine:
|
machine:
|
||||||
go:
|
|
||||||
version: 1.8.1
|
|
||||||
services:
|
services:
|
||||||
- docker
|
- docker
|
||||||
- memcached
|
- memcached
|
||||||
- redis
|
- redis
|
||||||
- rabbitmq-server
|
- rabbitmq-server
|
||||||
|
post:
|
||||||
|
- sudo rm -rf /usr/local/go
|
||||||
|
- wget https://storage.googleapis.com/golang/go1.8.4.linux-amd64.tar.gz
|
||||||
|
- sudo tar -C /usr/local -xzf go1.8.4.linux-amd64.tar.gz
|
||||||
|
- go version
|
||||||
|
|
||||||
dependencies:
|
dependencies:
|
||||||
override:
|
override:
|
||||||
|
|
|
@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
|
||||||
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
|
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Fields with string values will be skipped. Boolean fields will be converted
|
||||||
|
to 1 (true) or 0 (false).
|
||||||
|
|
||||||
### Graphite Configuration:
|
### Graphite Configuration:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
|
|
|
@ -84,9 +84,7 @@
|
||||||
|
|
||||||
# Configuration for influxdb server to send metrics to
|
# Configuration for influxdb server to send metrics to
|
||||||
[[outputs.influxdb]]
|
[[outputs.influxdb]]
|
||||||
## The HTTP or UDP URL for your InfluxDB instance. Each item should be
|
## The full HTTP or UDP URL for your InfluxDB instance.
|
||||||
## of the form:
|
|
||||||
## scheme "://" host [ ":" port]
|
|
||||||
##
|
##
|
||||||
## Multiple urls can be specified as part of the same cluster,
|
## Multiple urls can be specified as part of the same cluster,
|
||||||
## this means that only ONE of the urls will be written to each interval.
|
## this means that only ONE of the urls will be written to each interval.
|
||||||
|
@ -118,6 +116,15 @@
|
||||||
## Use SSL but skip chain & host verification
|
## Use SSL but skip chain & host verification
|
||||||
# insecure_skip_verify = false
|
# insecure_skip_verify = false
|
||||||
|
|
||||||
|
## HTTP Proxy Config
|
||||||
|
# http_proxy = "http://corporate.proxy:3128"
|
||||||
|
|
||||||
|
## Optional HTTP headers
|
||||||
|
# http_headers = {"X-Special-Header" = "Special-Value"}
|
||||||
|
|
||||||
|
## Compress each HTTP request payload using GZIP.
|
||||||
|
# content_encoding = "gzip"
|
||||||
|
|
||||||
|
|
||||||
# # Configuration for Amon Server to send metrics to.
|
# # Configuration for Amon Server to send metrics to.
|
||||||
# [[outputs.amon]]
|
# [[outputs.amon]]
|
||||||
|
@ -272,11 +279,11 @@
|
||||||
# timeout = 2
|
# timeout = 2
|
||||||
#
|
#
|
||||||
# ## Optional SSL Config
|
# ## Optional SSL Config
|
||||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
# ssl_key = "/etc/telegraf/key.pem"
|
# # ssl_key = "/etc/telegraf/key.pem"
|
||||||
# ## Use SSL but skip chain & host verification
|
# ## Use SSL but skip chain & host verification
|
||||||
# insecure_skip_verify = false
|
# # insecure_skip_verify = false
|
||||||
|
|
||||||
|
|
||||||
# # Send telegraf metrics to graylog(s)
|
# # Send telegraf metrics to graylog(s)
|
||||||
|
@ -596,6 +603,32 @@
|
||||||
# AGGREGATOR PLUGINS #
|
# AGGREGATOR PLUGINS #
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
|
# # Create aggregate histograms.
|
||||||
|
# [[aggregators.histogram]]
|
||||||
|
# ## The period in which to flush the aggregator.
|
||||||
|
# period = "30s"
|
||||||
|
#
|
||||||
|
# ## If true, the original metric will be dropped by the
|
||||||
|
# ## aggregator and will not get sent to the output plugins.
|
||||||
|
# drop_original = false
|
||||||
|
#
|
||||||
|
# ## Example config that aggregates all fields of the metric.
|
||||||
|
# # [[aggregators.histogram.config]]
|
||||||
|
# # ## The set of buckets.
|
||||||
|
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
||||||
|
# # ## The name of metric.
|
||||||
|
# # measurement_name = "cpu"
|
||||||
|
#
|
||||||
|
# ## Example config that aggregates only specific fields of the metric.
|
||||||
|
# # [[aggregators.histogram.config]]
|
||||||
|
# # ## The set of buckets.
|
||||||
|
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
||||||
|
# # ## The name of metric.
|
||||||
|
# # measurement_name = "diskio"
|
||||||
|
# # ## The concrete fields of metric
|
||||||
|
# # fields = ["io_time", "read_time", "write_time"]
|
||||||
|
|
||||||
|
|
||||||
# # Keep the aggregate min/max of each metric passing through.
|
# # Keep the aggregate min/max of each metric passing through.
|
||||||
# [[aggregators.minmax]]
|
# [[aggregators.minmax]]
|
||||||
# ## General Aggregator Arguments:
|
# ## General Aggregator Arguments:
|
||||||
|
@ -606,32 +639,6 @@
|
||||||
# drop_original = false
|
# drop_original = false
|
||||||
|
|
||||||
|
|
||||||
# # Configuration for aggregate histogram metrics
|
|
||||||
# [[aggregators.histogram]]
|
|
||||||
# ## General Aggregator Arguments:
|
|
||||||
# ## The period on which to flush & clear the aggregator.
|
|
||||||
# period = "30s"
|
|
||||||
# ## If true, the original metric will be dropped by the
|
|
||||||
# ## aggregator and will not get sent to the output plugins.
|
|
||||||
# drop_original = false
|
|
||||||
#
|
|
||||||
# ## The example of config to aggregate histogram for all fields of specified metric.
|
|
||||||
# [[aggregators.histogram.config]]
|
|
||||||
# ## The set of buckets.
|
|
||||||
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
|
||||||
# ## The name of metric.
|
|
||||||
# metric_name = "cpu"
|
|
||||||
#
|
|
||||||
# ## The example of config to aggregate for specified fields of metric.
|
|
||||||
# [[aggregators.histogram.config]]
|
|
||||||
# ## The set of buckets.
|
|
||||||
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
|
||||||
# ## The name of metric.
|
|
||||||
# metric_name = "diskio"
|
|
||||||
# ## The concrete fields of metric
|
|
||||||
# metric_fields = ["io_time", "read_time", "write_time"]
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
###############################################################################
|
||||||
# INPUT PLUGINS #
|
# INPUT PLUGINS #
|
||||||
|
@ -645,6 +652,8 @@
|
||||||
totalcpu = true
|
totalcpu = true
|
||||||
## If true, collect raw CPU time metrics.
|
## If true, collect raw CPU time metrics.
|
||||||
collect_cpu_time = false
|
collect_cpu_time = false
|
||||||
|
## If true, compute and report the sum of all non-idle CPU states.
|
||||||
|
report_active = false
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about disk usage by mount point
|
# Read metrics about disk usage by mount point
|
||||||
|
@ -720,15 +729,17 @@
|
||||||
|
|
||||||
# # Read Apache status information (mod_status)
|
# # Read Apache status information (mod_status)
|
||||||
# [[inputs.apache]]
|
# [[inputs.apache]]
|
||||||
# ## An array of Apache status URI to gather stats.
|
# ## An array of URLs to gather from, must be directed at the machine
|
||||||
|
# ## readable version of the mod_status page including the auto query string.
|
||||||
# ## Default is "http://localhost/server-status?auto".
|
# ## Default is "http://localhost/server-status?auto".
|
||||||
# urls = ["http://localhost/server-status?auto"]
|
# urls = ["http://localhost/server-status?auto"]
|
||||||
# ## user credentials for basic HTTP authentication
|
|
||||||
# username = "myuser"
|
|
||||||
# password = "mypassword"
|
|
||||||
#
|
#
|
||||||
# ## Timeout to the complete conection and reponse time in seconds
|
# ## Credentials for basic HTTP authentication.
|
||||||
# response_timeout = "25s" ## default to 5 seconds
|
# # username = "myuser"
|
||||||
|
# # password = "mypassword"
|
||||||
|
#
|
||||||
|
# ## Maximum time to receive response.
|
||||||
|
# # response_timeout = "5s"
|
||||||
#
|
#
|
||||||
# ## Optional SSL Config
|
# ## Optional SSL Config
|
||||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
@ -846,7 +857,7 @@
|
||||||
#
|
#
|
||||||
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
|
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
|
||||||
# # metrics are made available to the 1 minute period. Some are collected at
|
# # metrics are made available to the 1 minute period. Some are collected at
|
||||||
# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
|
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
|
||||||
# # Note that if a period is configured that is smaller than the minimum for a
|
# # Note that if a period is configured that is smaller than the minimum for a
|
||||||
# # particular metric, that metric will not be returned by the Cloudwatch API
|
# # particular metric, that metric will not be returned by the Cloudwatch API
|
||||||
# # and will not be collected by Telegraf.
|
# # and will not be collected by Telegraf.
|
||||||
|
@ -958,20 +969,23 @@
|
||||||
# # Query given DNS server and gives statistics
|
# # Query given DNS server and gives statistics
|
||||||
# [[inputs.dns_query]]
|
# [[inputs.dns_query]]
|
||||||
# ## servers to query
|
# ## servers to query
|
||||||
# servers = ["8.8.8.8"] # required
|
# servers = ["8.8.8.8"]
|
||||||
#
|
#
|
||||||
# ## Domains or subdomains to query. "."(root) is default
|
# ## Network is the network protocol name.
|
||||||
# domains = ["."] # optional
|
# # network = "udp"
|
||||||
#
|
#
|
||||||
# ## Query record type. Default is "A"
|
# ## Domains or subdomains to query.
|
||||||
|
# # domains = ["."]
|
||||||
|
#
|
||||||
|
# ## Query record type.
|
||||||
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
|
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
|
||||||
# record_type = "A" # optional
|
# # record_type = "A"
|
||||||
#
|
#
|
||||||
# ## Dns server port. 53 is default
|
# ## Dns server port.
|
||||||
# port = 53 # optional
|
# # port = 53
|
||||||
#
|
#
|
||||||
# ## Query timeout in seconds. Default is 2 seconds
|
# ## Query timeout in seconds.
|
||||||
# timeout = 2 # optional
|
# # timeout = 2
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about docker containers
|
# # Read metrics about docker containers
|
||||||
|
@ -980,8 +994,15 @@
|
||||||
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
|
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
|
||||||
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
||||||
# endpoint = "unix:///var/run/docker.sock"
|
# endpoint = "unix:///var/run/docker.sock"
|
||||||
|
#
|
||||||
# ## Only collect metrics for these containers, collect all if empty
|
# ## Only collect metrics for these containers, collect all if empty
|
||||||
# container_names = []
|
# container_names = []
|
||||||
|
#
|
||||||
|
# ## Containers to include and exclude. Globs accepted.
|
||||||
|
# ## Note that an empty array for both will include all containers
|
||||||
|
# container_name_include = []
|
||||||
|
# container_name_exclude = []
|
||||||
|
#
|
||||||
# ## Timeout for docker list, info, and stats commands
|
# ## Timeout for docker list, info, and stats commands
|
||||||
# timeout = "5s"
|
# timeout = "5s"
|
||||||
#
|
#
|
||||||
|
@ -990,11 +1011,20 @@
|
||||||
# perdevice = true
|
# perdevice = true
|
||||||
# ## Whether to report for each container total blkio and network stats or not
|
# ## Whether to report for each container total blkio and network stats or not
|
||||||
# total = false
|
# total = false
|
||||||
|
# ## Which environment variables should we use as a tag
|
||||||
|
# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
|
||||||
#
|
#
|
||||||
# ## docker labels to include and exclude as tags. Globs accepted.
|
# ## docker labels to include and exclude as tags. Globs accepted.
|
||||||
# ## Note that an empty array for both will include all labels as tags
|
# ## Note that an empty array for both will include all labels as tags
|
||||||
# docker_label_include = []
|
# docker_label_include = []
|
||||||
# docker_label_exclude = []
|
# docker_label_exclude = []
|
||||||
|
#
|
||||||
|
# ## Optional SSL Config
|
||||||
|
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# # ssl_key = "/etc/telegraf/key.pem"
|
||||||
|
# ## Use SSL but skip chain & host verification
|
||||||
|
# # insecure_skip_verify = false
|
||||||
|
|
||||||
|
|
||||||
# # Read statistics from one or many dovecot servers
|
# # Read statistics from one or many dovecot servers
|
||||||
|
@ -1064,6 +1094,12 @@
|
||||||
# data_format = "influx"
|
# data_format = "influx"
|
||||||
|
|
||||||
|
|
||||||
|
# # Read metrics from fail2ban.
|
||||||
|
# [[inputs.fail2ban]]
|
||||||
|
# ## Use sudo to run fail2ban-client
|
||||||
|
# use_sudo = false
|
||||||
|
|
||||||
|
|
||||||
# # Read stats about given file(s)
|
# # Read stats about given file(s)
|
||||||
# [[inputs.filestat]]
|
# [[inputs.filestat]]
|
||||||
# ## Files to gather stats about.
|
# ## Files to gather stats about.
|
||||||
|
@ -1080,6 +1116,22 @@
|
||||||
# md5 = false
|
# md5 = false
|
||||||
|
|
||||||
|
|
||||||
|
# # Read metrics exposed by fluentd in_monitor plugin
|
||||||
|
# [[inputs.fluentd]]
|
||||||
|
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
|
||||||
|
# ##
|
||||||
|
# ## Endpoint:
|
||||||
|
# ## - only one URI is allowed
|
||||||
|
# ## - https is not supported
|
||||||
|
# endpoint = "http://localhost:24220/api/plugins.json"
|
||||||
|
#
|
||||||
|
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
|
||||||
|
# exclude = [
|
||||||
|
# "monitor_agent",
|
||||||
|
# "dummy",
|
||||||
|
# ]
|
||||||
|
|
||||||
|
|
||||||
# # Read flattened metrics from one or more GrayLog HTTP endpoints
|
# # Read flattened metrics from one or more GrayLog HTTP endpoints
|
||||||
# [[inputs.graylog]]
|
# [[inputs.graylog]]
|
||||||
# ## API endpoint, currently supported API:
|
# ## API endpoint, currently supported API:
|
||||||
|
@ -1161,25 +1213,26 @@
|
||||||
# # HTTP/HTTPS request given an address a method and a timeout
|
# # HTTP/HTTPS request given an address a method and a timeout
|
||||||
# [[inputs.http_response]]
|
# [[inputs.http_response]]
|
||||||
# ## Server address (default http://localhost)
|
# ## Server address (default http://localhost)
|
||||||
# address = "http://github.com"
|
# # address = "http://localhost"
|
||||||
|
#
|
||||||
# ## Set response_timeout (default 5 seconds)
|
# ## Set response_timeout (default 5 seconds)
|
||||||
# response_timeout = "5s"
|
# # response_timeout = "5s"
|
||||||
|
#
|
||||||
# ## HTTP Request Method
|
# ## HTTP Request Method
|
||||||
# method = "GET"
|
# # method = "GET"
|
||||||
|
#
|
||||||
# ## Whether to follow redirects from the server (defaults to false)
|
# ## Whether to follow redirects from the server (defaults to false)
|
||||||
# follow_redirects = true
|
# # follow_redirects = false
|
||||||
# ## HTTP Request Headers (all values must be strings)
|
#
|
||||||
# # [inputs.http_response.headers]
|
|
||||||
# # Host = "github.com"
|
|
||||||
# ## Optional HTTP Request Body
|
# ## Optional HTTP Request Body
|
||||||
# # body = '''
|
# # body = '''
|
||||||
# # {'fake':'data'}
|
# # {'fake':'data'}
|
||||||
# # '''
|
# # '''
|
||||||
#
|
#
|
||||||
# ## Optional substring or regex match in body of the response
|
# ## Optional substring or regex match in body of the response
|
||||||
# ## response_string_match = "\"service_status\": \"up\""
|
# # response_string_match = "\"service_status\": \"up\""
|
||||||
# ## response_string_match = "ok"
|
# # response_string_match = "ok"
|
||||||
# ## response_string_match = "\".*_status\".?:.?\"up\""
|
# # response_string_match = "\".*_status\".?:.?\"up\""
|
||||||
#
|
#
|
||||||
# ## Optional SSL Config
|
# ## Optional SSL Config
|
||||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
@ -1187,6 +1240,10 @@
|
||||||
# # ssl_key = "/etc/telegraf/key.pem"
|
# # ssl_key = "/etc/telegraf/key.pem"
|
||||||
# ## Use SSL but skip chain & host verification
|
# ## Use SSL but skip chain & host verification
|
||||||
# # insecure_skip_verify = false
|
# # insecure_skip_verify = false
|
||||||
|
#
|
||||||
|
# ## HTTP Request Headers (all values must be strings)
|
||||||
|
# # [inputs.http_response.headers]
|
||||||
|
# # Host = "github.com"
|
||||||
|
|
||||||
|
|
||||||
# # Read flattened metrics from one or more JSON HTTP endpoints
|
# # Read flattened metrics from one or more JSON HTTP endpoints
|
||||||
|
@ -1249,6 +1306,13 @@
|
||||||
# "http://localhost:8086/debug/vars"
|
# "http://localhost:8086/debug/vars"
|
||||||
# ]
|
# ]
|
||||||
#
|
#
|
||||||
|
# ## Optional SSL Config
|
||||||
|
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# # ssl_key = "/etc/telegraf/key.pem"
|
||||||
|
# ## Use SSL but skip chain & host verification
|
||||||
|
# # insecure_skip_verify = false
|
||||||
|
#
|
||||||
# ## http request & header timeout
|
# ## http request & header timeout
|
||||||
# timeout = "5s"
|
# timeout = "5s"
|
||||||
|
|
||||||
|
@ -1279,6 +1343,13 @@
|
||||||
# ## if no servers are specified, local machine sensor stats will be queried
|
# ## if no servers are specified, local machine sensor stats will be queried
|
||||||
# ##
|
# ##
|
||||||
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
|
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
|
||||||
|
#
|
||||||
|
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
|
||||||
|
# ## gaps or overlap in pulled data
|
||||||
|
# interval = "30s"
|
||||||
|
#
|
||||||
|
# ## Timeout for the ipmitool command to complete
|
||||||
|
# timeout = "20s"
|
||||||
|
|
||||||
|
|
||||||
# # Gather packets and bytes throughput from iptables
|
# # Gather packets and bytes throughput from iptables
|
||||||
|
@ -1398,9 +1469,9 @@
|
||||||
|
|
||||||
# # Read metrics from a LeoFS Server via SNMP
|
# # Read metrics from a LeoFS Server via SNMP
|
||||||
# [[inputs.leofs]]
|
# [[inputs.leofs]]
|
||||||
# ## An array of URI to gather stats about LeoFS.
|
# ## An array of URLs of the form:
|
||||||
# ## Specify an ip or hostname with port. ie 127.0.0.1:4020
|
# ## host [ ":" port]
|
||||||
# servers = ["127.0.0.1:4021"]
|
# servers = ["127.0.0.1:4020"]
|
||||||
|
|
||||||
|
|
||||||
# # Provides Linux sysctl fs metrics
|
# # Provides Linux sysctl fs metrics
|
||||||
|
@ -1475,14 +1546,24 @@
|
||||||
# # ]
|
# # ]
|
||||||
|
|
||||||
|
|
||||||
|
# # Collects scores from a minecraft server's scoreboard using the RCON protocol
|
||||||
|
# [[inputs.minecraft]]
|
||||||
|
# ## server address for minecraft
|
||||||
|
# # server = "localhost"
|
||||||
|
# ## port for RCON
|
||||||
|
# # port = "25575"
|
||||||
|
# ## password RCON for mincraft server
|
||||||
|
# # password = ""
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics from one or many MongoDB servers
|
# # Read metrics from one or many MongoDB servers
|
||||||
# [[inputs.mongodb]]
|
# [[inputs.mongodb]]
|
||||||
# ## An array of URI to gather stats about. Specify an ip or hostname
|
# ## An array of URLs of the form:
|
||||||
# ## with optional port add password. ie,
|
# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
|
||||||
|
# ## For example:
|
||||||
# ## mongodb://user:auth_key@10.10.3.30:27017,
|
# ## mongodb://user:auth_key@10.10.3.30:27017,
|
||||||
# ## mongodb://10.10.3.33:18832,
|
# ## mongodb://10.10.3.33:18832,
|
||||||
# ## 10.0.0.1:10000, etc.
|
# servers = ["mongodb://127.0.0.1:27017"]
|
||||||
# servers = ["127.0.0.1:27017"]
|
|
||||||
# gather_perdb_stats = false
|
# gather_perdb_stats = false
|
||||||
#
|
#
|
||||||
# ## Optional SSL Config
|
# ## Optional SSL Config
|
||||||
|
@ -1496,7 +1577,7 @@
|
||||||
# # Read metrics from one or many mysql servers
|
# # Read metrics from one or many mysql servers
|
||||||
# [[inputs.mysql]]
|
# [[inputs.mysql]]
|
||||||
# ## specify servers via a url matching:
|
# ## specify servers via a url matching:
|
||||||
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
|
||||||
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
||||||
# ## e.g.
|
# ## e.g.
|
||||||
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
|
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
|
||||||
|
@ -1553,7 +1634,7 @@
|
||||||
# #
|
# #
|
||||||
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
|
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
|
||||||
# interval_slow = "30m"
|
# interval_slow = "30m"
|
||||||
|
#
|
||||||
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
|
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
|
||||||
# ssl_ca = "/etc/telegraf/ca.pem"
|
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
# ssl_cert = "/etc/telegraf/cert.pem"
|
# ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
|
@ -1599,8 +1680,17 @@
|
||||||
|
|
||||||
# # Read Nginx's basic status information (ngx_http_stub_status_module)
|
# # Read Nginx's basic status information (ngx_http_stub_status_module)
|
||||||
# [[inputs.nginx]]
|
# [[inputs.nginx]]
|
||||||
# ## An array of Nginx stub_status URI to gather stats.
|
# # An array of Nginx stub_status URI to gather stats.
|
||||||
# urls = ["http://localhost/status"]
|
# urls = ["http://localhost/server_status"]
|
||||||
|
#
|
||||||
|
# # TLS/SSL configuration
|
||||||
|
# ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# ssl_cert = "/etc/telegraf/cert.cer"
|
||||||
|
# ssl_key = "/etc/telegraf/key.key"
|
||||||
|
# insecure_skip_verify = false
|
||||||
|
#
|
||||||
|
# # HTTP response timeout (default: 5s)
|
||||||
|
# response_timeout = "5s"
|
||||||
|
|
||||||
|
|
||||||
# # Read NSQ topic and channel statistics.
|
# # Read NSQ topic and channel statistics.
|
||||||
|
@ -1627,6 +1717,27 @@
|
||||||
# dns_lookup = true
|
# dns_lookup = true
|
||||||
|
|
||||||
|
|
||||||
|
# # OpenLDAP cn=Monitor plugin
|
||||||
|
# [[inputs.openldap]]
|
||||||
|
# host = "localhost"
|
||||||
|
# port = 389
|
||||||
|
#
|
||||||
|
# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
|
||||||
|
# # note that port will likely need to be changed to 636 for ldaps
|
||||||
|
# # valid options: "" | "starttls" | "ldaps"
|
||||||
|
# ssl = ""
|
||||||
|
#
|
||||||
|
# # skip peer certificate verification. Default is false.
|
||||||
|
# insecure_skip_verify = false
|
||||||
|
#
|
||||||
|
# # Path to PEM-encoded Root certificate to use to verify server certificate
|
||||||
|
# ssl_ca = "/etc/ssl/certs.pem"
|
||||||
|
#
|
||||||
|
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
|
||||||
|
# bind_dn = ""
|
||||||
|
# bind_password = ""
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics of passenger using passenger-status
|
# # Read metrics of passenger using passenger-status
|
||||||
# [[inputs.passenger]]
|
# [[inputs.passenger]]
|
||||||
# ## Path of passenger-status.
|
# ## Path of passenger-status.
|
||||||
|
@ -1820,10 +1931,13 @@
|
||||||
# location = "/var/lib/puppet/state/last_run_summary.yaml"
|
# location = "/var/lib/puppet/state/last_run_summary.yaml"
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics from one or many RabbitMQ servers via the management API
|
# # Reads metrics from RabbitMQ servers via the Management Plugin
|
||||||
# [[inputs.rabbitmq]]
|
# [[inputs.rabbitmq]]
|
||||||
|
# ## Management Plugin url. (default: http://localhost:15672)
|
||||||
# # url = "http://localhost:15672"
|
# # url = "http://localhost:15672"
|
||||||
# # name = "rmq-server-1" # optional tag
|
# ## Tag added to rabbitmq_overview series; deprecated: use tags
|
||||||
|
# # name = "rmq-server-1"
|
||||||
|
# ## Credentials
|
||||||
# # username = "guest"
|
# # username = "guest"
|
||||||
# # password = "guest"
|
# # password = "guest"
|
||||||
#
|
#
|
||||||
|
@ -1880,14 +1994,11 @@
|
||||||
# ##
|
# ##
|
||||||
# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
|
# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
|
||||||
# ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
|
# ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
|
||||||
# servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
|
# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
|
||||||
# ##
|
# ##
|
||||||
# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
|
# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
|
||||||
# ## have to be named "rethinkdb".
|
# ## have to be named "rethinkdb".
|
||||||
# servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
|
# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics one or many Riak servers
|
# # Read metrics one or many Riak servers
|
||||||
|
@ -1896,6 +2007,26 @@
|
||||||
# servers = ["http://localhost:8098"]
|
# servers = ["http://localhost:8098"]
|
||||||
|
|
||||||
|
|
||||||
|
# # Read API usage and limits for a Salesforce organisation
|
||||||
|
# [[inputs.salesforce]]
|
||||||
|
# ## specify your credentials
|
||||||
|
# ##
|
||||||
|
# username = "your_username"
|
||||||
|
# password = "your_password"
|
||||||
|
# ##
|
||||||
|
# ## (optional) security token
|
||||||
|
# # security_token = "your_security_token"
|
||||||
|
# ##
|
||||||
|
# ## (optional) environment type (sandbox or production)
|
||||||
|
# ## default is: production
|
||||||
|
# ##
|
||||||
|
# # environment = "production"
|
||||||
|
# ##
|
||||||
|
# ## (optional) API version (default: "39.0")
|
||||||
|
# ##
|
||||||
|
# # version = "39.0"
|
||||||
|
|
||||||
|
|
||||||
# # Monitor sensors, requires lm-sensors package
|
# # Monitor sensors, requires lm-sensors package
|
||||||
# [[inputs.sensors]]
|
# [[inputs.sensors]]
|
||||||
# ## Remove numbers from field names.
|
# ## Remove numbers from field names.
|
||||||
|
@ -2141,6 +2272,26 @@
|
||||||
# # vg = "rootvg"
|
# # vg = "rootvg"
|
||||||
|
|
||||||
|
|
||||||
|
# # Gather metrics from the Tomcat server status page.
|
||||||
|
# [[inputs.tomcat]]
|
||||||
|
# ## URL of the Tomcat server status
|
||||||
|
# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
|
||||||
|
#
|
||||||
|
# ## HTTP Basic Auth Credentials
|
||||||
|
# # username = "tomcat"
|
||||||
|
# # password = "s3cret"
|
||||||
|
#
|
||||||
|
# ## Request timeout
|
||||||
|
# # timeout = "5s"
|
||||||
|
#
|
||||||
|
# ## Optional SSL Config
|
||||||
|
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# # ssl_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# # ssl_key = "/etc/telegraf/key.pem"
|
||||||
|
# ## Use SSL but skip chain & host verification
|
||||||
|
# # insecure_skip_verify = false
|
||||||
|
|
||||||
|
|
||||||
# # Inserts sine and cosine waves for demonstration purposes
|
# # Inserts sine and cosine waves for demonstration purposes
|
||||||
# [[inputs.trig]]
|
# [[inputs.trig]]
|
||||||
# ## Set the amplitude
|
# ## Set the amplitude
|
||||||
|
@ -2157,6 +2308,9 @@
|
||||||
|
|
||||||
# # A plugin to collect stats from Varnish HTTP Cache
|
# # A plugin to collect stats from Varnish HTTP Cache
|
||||||
# [[inputs.varnish]]
|
# [[inputs.varnish]]
|
||||||
|
# ## If running as a restricted user you can prepend sudo for additional access:
|
||||||
|
# #use_sudo = false
|
||||||
|
#
|
||||||
# ## The default location of the varnishstat binary can be overridden with:
|
# ## The default location of the varnishstat binary can be overridden with:
|
||||||
# binary = "/usr/bin/varnishstat"
|
# binary = "/usr/bin/varnishstat"
|
||||||
#
|
#
|
||||||
|
@ -2222,10 +2376,10 @@
|
||||||
# ## Use SSL but skip chain & host verification
|
# ## Use SSL but skip chain & host verification
|
||||||
# # insecure_skip_verify = false
|
# # insecure_skip_verify = false
|
||||||
#
|
#
|
||||||
# ## Data format to output.
|
# ## Data format to consume.
|
||||||
# ## Each data format has its own unique set of configuration options, read
|
# ## Each data format has its own unique set of configuration options, read
|
||||||
# ## more about them here:
|
# ## more about them here:
|
||||||
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
# data_format = "influx"
|
# data_format = "influx"
|
||||||
|
|
||||||
|
|
||||||
|
@ -2247,16 +2401,13 @@
|
||||||
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
|
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
|
||||||
# max_line_size = 0
|
# max_line_size = 0
|
||||||
|
|
||||||
# # Read metrics from Kafka 0.9+ topic(s)
|
|
||||||
|
# # Read metrics from Kafka topic(s)
|
||||||
# [[inputs.kafka_consumer]]
|
# [[inputs.kafka_consumer]]
|
||||||
# ## topic(s) to consume
|
|
||||||
# topics = ["telegraf"]
|
|
||||||
# ## kafka servers
|
# ## kafka servers
|
||||||
# brokers = ["localhost:9092"]
|
# brokers = ["localhost:9092"]
|
||||||
# ## the name of the consumer group
|
# ## topic(s) to consume
|
||||||
# consumer_group = "telegraf_metrics_consumers"
|
# topics = ["telegraf"]
|
||||||
# ## Offset (must be either "oldest" or "newest")
|
|
||||||
# offset = "oldest"
|
|
||||||
#
|
#
|
||||||
# ## Optional SSL Config
|
# ## Optional SSL Config
|
||||||
# # ssl_ca = "/etc/telegraf/ca.pem"
|
# # ssl_ca = "/etc/telegraf/ca.pem"
|
||||||
|
@ -2269,6 +2420,11 @@
|
||||||
# # sasl_username = "kafka"
|
# # sasl_username = "kafka"
|
||||||
# # sasl_password = "secret"
|
# # sasl_password = "secret"
|
||||||
#
|
#
|
||||||
|
# ## the name of the consumer group
|
||||||
|
# consumer_group = "telegraf_metrics_consumers"
|
||||||
|
# ## Offset (must be either "oldest" or "newest")
|
||||||
|
# offset = "oldest"
|
||||||
|
#
|
||||||
# ## Data format to consume.
|
# ## Data format to consume.
|
||||||
# ## Each data format has its own unique set of configuration options, read
|
# ## Each data format has its own unique set of configuration options, read
|
||||||
# ## more about them here:
|
# ## more about them here:
|
||||||
|
@ -2279,7 +2435,8 @@
|
||||||
# ## larger messages are dropped
|
# ## larger messages are dropped
|
||||||
# max_message_len = 65536
|
# max_message_len = 65536
|
||||||
|
|
||||||
# # Read metrics from Kafka (0.8 or less) topic(s)
|
|
||||||
|
# # Read metrics from Kafka topic(s)
|
||||||
# [[inputs.kafka_consumer_legacy]]
|
# [[inputs.kafka_consumer_legacy]]
|
||||||
# ## topic(s) to consume
|
# ## topic(s) to consume
|
||||||
# topics = ["telegraf"]
|
# topics = ["telegraf"]
|
||||||
|
@ -2312,6 +2469,7 @@
|
||||||
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
||||||
# ## /var/log/apache.log -> only tail the apache log file
|
# ## /var/log/apache.log -> only tail the apache log file
|
||||||
# files = ["/var/log/apache/access.log"]
|
# files = ["/var/log/apache/access.log"]
|
||||||
|
#
|
||||||
# ## Read files that currently exist from the beginning. Files that are created
|
# ## Read files that currently exist from the beginning. Files that are created
|
||||||
# ## while telegraf is running (and that match the "files" globs) will always
|
# ## while telegraf is running (and that match the "files" globs) will always
|
||||||
# ## be read from the beginning.
|
# ## be read from the beginning.
|
||||||
|
@ -2327,12 +2485,26 @@
|
||||||
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
|
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
|
||||||
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
|
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
|
||||||
# patterns = ["%{COMBINED_LOG_FORMAT}"]
|
# patterns = ["%{COMBINED_LOG_FORMAT}"]
|
||||||
|
#
|
||||||
# ## Name of the outputted measurement name.
|
# ## Name of the outputted measurement name.
|
||||||
# measurement = "apache_access_log"
|
# measurement = "apache_access_log"
|
||||||
|
#
|
||||||
# ## Full path(s) to custom pattern files.
|
# ## Full path(s) to custom pattern files.
|
||||||
# custom_pattern_files = []
|
# custom_pattern_files = []
|
||||||
|
#
|
||||||
# ## Custom patterns can also be defined here. Put one pattern per line.
|
# ## Custom patterns can also be defined here. Put one pattern per line.
|
||||||
# custom_patterns = '''
|
# custom_patterns = '''
|
||||||
|
#
|
||||||
|
# ## Timezone allows you to provide an override for timestamps that
|
||||||
|
# ## don't already include an offset
|
||||||
|
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
|
||||||
|
# ##
|
||||||
|
# ## Default: "" which renders UTC
|
||||||
|
# ## Options are as follows:
|
||||||
|
# ## 1. Local -- interpret based on machine localtime
|
||||||
|
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
|
||||||
|
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
|
||||||
|
# timezone = "Canada/Eastern"
|
||||||
# '''
|
# '''
|
||||||
|
|
||||||
|
|
||||||
|
@ -2341,6 +2513,8 @@
|
||||||
# servers = ["localhost:1883"]
|
# servers = ["localhost:1883"]
|
||||||
# ## MQTT QoS, must be 0, 1, or 2
|
# ## MQTT QoS, must be 0, 1, or 2
|
||||||
# qos = 0
|
# qos = 0
|
||||||
|
# ## Connection timeout for initial connection in seconds
|
||||||
|
# connection_timeout = "30s"
|
||||||
#
|
#
|
||||||
# ## Topics to subscribe to
|
# ## Topics to subscribe to
|
||||||
# topics = [
|
# topics = [
|
||||||
|
@ -2431,6 +2605,11 @@
|
||||||
# ## 0 (default) is unlimited.
|
# ## 0 (default) is unlimited.
|
||||||
# # max_connections = 1024
|
# # max_connections = 1024
|
||||||
#
|
#
|
||||||
|
# ## Read timeout.
|
||||||
|
# ## Only applies to stream sockets (e.g. TCP).
|
||||||
|
# ## 0 (default) is unlimited.
|
||||||
|
# # read_timeout = "30s"
|
||||||
|
#
|
||||||
# ## Maximum socket buffer size in bytes.
|
# ## Maximum socket buffer size in bytes.
|
||||||
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
|
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
|
||||||
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
|
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
|
||||||
|
@ -2450,12 +2629,14 @@
|
||||||
# # data_format = "influx"
|
# # data_format = "influx"
|
||||||
|
|
||||||
|
|
||||||
# # Statsd Server
|
# # Statsd UDP/TCP Server
|
||||||
# [[inputs.statsd]]
|
# [[inputs.statsd]]
|
||||||
# ## Protocol, must be "tcp" or "udp"
|
# ## Protocol, must be "tcp" or "udp" (default=udp)
|
||||||
# protocol = "udp"
|
# protocol = "udp"
|
||||||
# ## Maximum number of concurrent TCP connections to allow
|
#
|
||||||
|
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
|
||||||
# max_tcp_connections = 250
|
# max_tcp_connections = 250
|
||||||
|
#
|
||||||
# ## Address and port to host UDP listener on
|
# ## Address and port to host UDP listener on
|
||||||
# service_address = ":8125"
|
# service_address = ":8125"
|
||||||
#
|
#
|
||||||
|
@ -2556,3 +2737,9 @@
|
||||||
# [inputs.webhooks.papertrail]
|
# [inputs.webhooks.papertrail]
|
||||||
# path = "/papertrail"
|
# path = "/papertrail"
|
||||||
|
|
||||||
|
|
||||||
|
# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
|
||||||
|
# [[inputs.zipkin]]
|
||||||
|
# # path = "/api/v1/spans" # URL path for span data
|
||||||
|
# # port = 9411 # Port on which Telegraf listens
|
||||||
|
|
||||||
|
|
|
@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter {
|
||||||
}
|
}
|
||||||
return &out
|
return &out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type IncludeExcludeFilter struct {
|
||||||
|
include Filter
|
||||||
|
exclude Filter
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewIncludeExcludeFilter(
|
||||||
|
include []string,
|
||||||
|
exclude []string,
|
||||||
|
) (Filter, error) {
|
||||||
|
in, err := Compile(include)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ex, err := Compile(exclude)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &IncludeExcludeFilter{in, ex}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *IncludeExcludeFilter) Match(s string) bool {
|
||||||
|
if f.include != nil {
|
||||||
|
if !f.include.Match(s) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.exclude != nil {
|
||||||
|
if f.exclude.Match(s) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
|
@ -150,12 +150,6 @@ func makemetric(
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
case string:
|
case string:
|
||||||
if strings.HasSuffix(val, `\`) {
|
|
||||||
log.Printf("D! Measurement [%s] field [%s] has a value "+
|
|
||||||
"ending with a backslash, skipping", measurement, k)
|
|
||||||
delete(fields, k)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fields[k] = v
|
fields[k] = v
|
||||||
default:
|
default:
|
||||||
fields[k] = v
|
fields[k] = v
|
||||||
|
|
|
@ -370,16 +370,17 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
|
||||||
expectedTags: map[string]string{},
|
expectedTags: map[string]string{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "Field value with trailing slash dropped",
|
name: "Field value with trailing slash okay",
|
||||||
measurement: `cpu`,
|
measurement: `cpu`,
|
||||||
fields: map[string]interface{}{
|
fields: map[string]interface{}{
|
||||||
"value": int64(42),
|
"value": int64(42),
|
||||||
"bad": `xyzzy\`,
|
"ok": `xyzzy\`,
|
||||||
},
|
},
|
||||||
tags: map[string]string{},
|
tags: map[string]string{},
|
||||||
expectedMeasurement: `cpu`,
|
expectedMeasurement: `cpu`,
|
||||||
expectedFields: map[string]interface{}{
|
expectedFields: map[string]interface{}{
|
||||||
"value": int64(42),
|
"value": int64(42),
|
||||||
|
"ok": `xyzzy\`,
|
||||||
},
|
},
|
||||||
expectedTags: map[string]string{},
|
expectedTags: map[string]string{},
|
||||||
},
|
},
|
||||||
|
@ -387,7 +388,7 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
|
||||||
name: "Must have one field after dropped",
|
name: "Must have one field after dropped",
|
||||||
measurement: `cpu`,
|
measurement: `cpu`,
|
||||||
fields: map[string]interface{}{
|
fields: map[string]interface{}{
|
||||||
"bad": `xyzzy\`,
|
"bad": math.NaN(),
|
||||||
},
|
},
|
||||||
tags: map[string]string{},
|
tags: map[string]string{},
|
||||||
expectedNil: true,
|
expectedNil: true,
|
||||||
|
|
|
@ -20,8 +20,14 @@ var (
|
||||||
|
|
||||||
// stringFieldEscaper is for escaping string field values only.
|
// stringFieldEscaper is for escaping string field values only.
|
||||||
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
|
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
|
||||||
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
|
stringFieldEscaper = strings.NewReplacer(
|
||||||
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
|
`"`, `\"`,
|
||||||
|
`\`, `\\`,
|
||||||
|
)
|
||||||
|
stringFieldUnEscaper = strings.NewReplacer(
|
||||||
|
`\"`, `"`,
|
||||||
|
`\\`, `\`,
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
func escape(s string, t string) string {
|
func escape(s string, t string) string {
|
||||||
|
|
|
@ -21,14 +21,14 @@ func New(
|
||||||
t time.Time,
|
t time.Time,
|
||||||
mType ...telegraf.ValueType,
|
mType ...telegraf.ValueType,
|
||||||
) (telegraf.Metric, error) {
|
) (telegraf.Metric, error) {
|
||||||
if len(fields) == 0 {
|
|
||||||
return nil, fmt.Errorf("Metric cannot be made without any fields")
|
|
||||||
}
|
|
||||||
if len(name) == 0 {
|
if len(name) == 0 {
|
||||||
return nil, fmt.Errorf("Metric cannot be made with an empty name")
|
return nil, fmt.Errorf("missing measurement name")
|
||||||
|
}
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return nil, fmt.Errorf("%s: must have one or more fields", name)
|
||||||
}
|
}
|
||||||
if strings.HasSuffix(name, `\`) {
|
if strings.HasSuffix(name, `\`) {
|
||||||
return nil, fmt.Errorf("Metric cannot have measurement name ending with a backslash")
|
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
var thisType telegraf.ValueType
|
var thisType telegraf.ValueType
|
||||||
|
@ -49,10 +49,10 @@ func New(
|
||||||
taglen := 0
|
taglen := 0
|
||||||
for k, v := range tags {
|
for k, v := range tags {
|
||||||
if strings.HasSuffix(k, `\`) {
|
if strings.HasSuffix(k, `\`) {
|
||||||
return nil, fmt.Errorf("Metric cannot have tag key ending with a backslash")
|
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
|
||||||
}
|
}
|
||||||
if strings.HasSuffix(v, `\`) {
|
if strings.HasSuffix(v, `\`) {
|
||||||
return nil, fmt.Errorf("Metric cannot have tag value ending with a backslash")
|
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(k) == 0 || len(v) == 0 {
|
if len(k) == 0 || len(v) == 0 {
|
||||||
|
@ -77,15 +77,9 @@ func New(
|
||||||
|
|
||||||
// pre-allocate capacity of the fields slice
|
// pre-allocate capacity of the fields slice
|
||||||
fieldlen := 0
|
fieldlen := 0
|
||||||
for k, v := range fields {
|
for k, _ := range fields {
|
||||||
if strings.HasSuffix(k, `\`) {
|
if strings.HasSuffix(k, `\`) {
|
||||||
return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
|
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
|
||||||
}
|
|
||||||
switch val := v.(type) {
|
|
||||||
case string:
|
|
||||||
if strings.HasSuffix(val, `\`) {
|
|
||||||
return nil, fmt.Errorf("Metric cannot have field value ending with a backslash")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 10 bytes is completely arbitrary, but will at least prevent some
|
// 10 bytes is completely arbitrary, but will at least prevent some
|
||||||
|
@ -108,7 +102,8 @@ func New(
|
||||||
}
|
}
|
||||||
|
|
||||||
// indexUnescapedByte finds the index of the first byte equal to b in buf that
|
// indexUnescapedByte finds the index of the first byte equal to b in buf that
|
||||||
// is not escaped. Returns -1 if not found.
|
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
|
||||||
|
// not found.
|
||||||
func indexUnescapedByte(buf []byte, b byte) int {
|
func indexUnescapedByte(buf []byte, b byte) int {
|
||||||
var keyi int
|
var keyi int
|
||||||
for {
|
for {
|
||||||
|
@ -128,6 +123,46 @@ func indexUnescapedByte(buf []byte, b byte) int {
|
||||||
return keyi
|
return keyi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
|
||||||
|
// to b in buf that is not escaped. Allows for the escape char `\` to be
|
||||||
|
// escaped. Returns -1 if not found.
|
||||||
|
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
|
||||||
|
var keyi int
|
||||||
|
for {
|
||||||
|
i := bytes.IndexByte(buf[keyi:], b)
|
||||||
|
if i == -1 {
|
||||||
|
return -1
|
||||||
|
} else if i == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
keyi += i
|
||||||
|
if countBackslashes(buf, keyi-1)%2 == 0 {
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
keyi++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return keyi
|
||||||
|
}
|
||||||
|
|
||||||
|
// countBackslashes counts the number of preceding backslashes starting at
|
||||||
|
// the 'start' index.
|
||||||
|
func countBackslashes(buf []byte, index int) int {
|
||||||
|
var count int
|
||||||
|
for {
|
||||||
|
if index < 0 {
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
if buf[index] == '\\' {
|
||||||
|
count++
|
||||||
|
index--
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
type metric struct {
|
type metric struct {
|
||||||
name []byte
|
name []byte
|
||||||
tags []byte
|
tags []byte
|
||||||
|
@ -289,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
|
||||||
// end index of field value
|
// end index of field value
|
||||||
var i3 int
|
var i3 int
|
||||||
if m.fields[i:][i2] == '"' {
|
if m.fields[i:][i2] == '"' {
|
||||||
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
|
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
|
||||||
if i3 == -1 {
|
if i3 == -1 {
|
||||||
i3 = len(m.fields[i:])
|
i3 = len(m.fields[i:])
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,7 +31,7 @@ func TestNewMetric(t *testing.T) {
|
||||||
assert.Equal(t, tags, m.Tags())
|
assert.Equal(t, tags, m.Tags())
|
||||||
assert.Equal(t, fields, m.Fields())
|
assert.Equal(t, fields, m.Fields())
|
||||||
assert.Equal(t, "cpu", m.Name())
|
assert.Equal(t, "cpu", m.Name())
|
||||||
assert.Equal(t, now, m.Time())
|
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -257,6 +257,8 @@ func TestNewMetric_Fields(t *testing.T) {
|
||||||
"string": "test",
|
"string": "test",
|
||||||
"quote_string": `x"y`,
|
"quote_string": `x"y`,
|
||||||
"backslash_quote_string": `x\"y`,
|
"backslash_quote_string": `x\"y`,
|
||||||
|
"backslash": `x\y`,
|
||||||
|
"ends_with_backslash": `x\`,
|
||||||
}
|
}
|
||||||
m, err := New("cpu", tags, fields, now)
|
m, err := New("cpu", tags, fields, now)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
@ -412,7 +414,7 @@ func TestNewGaugeMetric(t *testing.T) {
|
||||||
assert.Equal(t, tags, m.Tags())
|
assert.Equal(t, tags, m.Tags())
|
||||||
assert.Equal(t, fields, m.Fields())
|
assert.Equal(t, fields, m.Fields())
|
||||||
assert.Equal(t, "cpu", m.Name())
|
assert.Equal(t, "cpu", m.Name())
|
||||||
assert.Equal(t, now, m.Time())
|
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -434,7 +436,7 @@ func TestNewCounterMetric(t *testing.T) {
|
||||||
assert.Equal(t, tags, m.Tags())
|
assert.Equal(t, tags, m.Tags())
|
||||||
assert.Equal(t, fields, m.Fields())
|
assert.Equal(t, fields, m.Fields())
|
||||||
assert.Equal(t, "cpu", m.Name())
|
assert.Equal(t, "cpu", m.Name())
|
||||||
assert.Equal(t, now, m.Time())
|
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -708,12 +710,6 @@ func TestNewMetric_TrailingSlash(t *testing.T) {
|
||||||
`value\`: "x",
|
`value\`: "x",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
|
||||||
name: "cpu",
|
|
||||||
fields: map[string]interface{}{
|
|
||||||
"value": `x\`,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
name: "cpu",
|
name: "cpu",
|
||||||
tags: map[string]string{
|
tags: map[string]string{
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -620,6 +621,83 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestReader_Read(t *testing.T) {
|
||||||
|
epoch := time.Unix(0, 0)
|
||||||
|
|
||||||
|
type args struct {
|
||||||
|
name string
|
||||||
|
tags map[string]string
|
||||||
|
fields map[string]interface{}
|
||||||
|
t time.Time
|
||||||
|
mType []telegraf.ValueType
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
expected []byte
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "escape backslashes in string field",
|
||||||
|
args: args{
|
||||||
|
name: "cpu",
|
||||||
|
tags: map[string]string{},
|
||||||
|
fields: map[string]interface{}{"value": `test\`},
|
||||||
|
t: epoch,
|
||||||
|
},
|
||||||
|
expected: []byte(`cpu value="test\\" 0`),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "escape quote in string field",
|
||||||
|
args: args{
|
||||||
|
name: "cpu",
|
||||||
|
tags: map[string]string{},
|
||||||
|
fields: map[string]interface{}{"value": `test"`},
|
||||||
|
t: epoch,
|
||||||
|
},
|
||||||
|
expected: []byte(`cpu value="test\"" 0`),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "escape quote and backslash in string field",
|
||||||
|
args: args{
|
||||||
|
name: "cpu",
|
||||||
|
tags: map[string]string{},
|
||||||
|
fields: map[string]interface{}{"value": `test\"`},
|
||||||
|
t: epoch,
|
||||||
|
},
|
||||||
|
expected: []byte(`cpu value="test\\\"" 0`),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "escape multiple backslash in string field",
|
||||||
|
args: args{
|
||||||
|
name: "cpu",
|
||||||
|
tags: map[string]string{},
|
||||||
|
fields: map[string]interface{}{"value": `test\\`},
|
||||||
|
t: epoch,
|
||||||
|
},
|
||||||
|
expected: []byte(`cpu value="test\\\\" 0`),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
buf := make([]byte, 512)
|
||||||
|
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
r := NewReader([]telegraf.Metric{m})
|
||||||
|
num, err := r.Read(buf)
|
||||||
|
if err != io.EOF {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
line := string(buf[:num])
|
||||||
|
// This is done so that we can use raw strings in the test spec
|
||||||
|
noeol := strings.TrimRight(line, "\n")
|
||||||
|
require.Equal(t, string(tt.expected), noeol)
|
||||||
|
require.Equal(t, len(tt.expected)+1, num)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestMetricRoundtrip(t *testing.T) {
|
func TestMetricRoundtrip(t *testing.T) {
|
||||||
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCP
FromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
|
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCP
FromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
|
||||||
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
|
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
|
||||||
|
|
|
@ -1,38 +1,25 @@
|
||||||
# Histogram Aggregator Plugin
|
# Histogram Aggregator Plugin
|
||||||
|
|
||||||
#### Goal
|
The histogram aggregator plugin creates histograms containing the counts of
|
||||||
|
field values within a range.
|
||||||
|
|
||||||
This plugin was added for ability to build histograms.
|
Values added to a bucket are also added to the larger buckets in the
|
||||||
|
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
|
||||||
|
|
||||||
#### Description
|
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
|
||||||
|
Bucket counts however are not reset between periods and will be non-strictly
|
||||||
|
increasing while Telegraf is running.
|
||||||
|
|
||||||
The histogram aggregator plugin aggregates values of specified metric's
|
#### Design
|
||||||
fields. The metric is emitted every `period` seconds. All you need to do
|
|
||||||
is to specify borders of histogram buckets and fields, for which you want
|
|
||||||
to aggregate histogram.
|
|
||||||
|
|
||||||
#### How it works
|
Each metric is passed to the aggregator and this aggregator searches
|
||||||
|
|
||||||
The each metric is passed to the aggregator and this aggregator searches
|
|
||||||
histogram buckets for those fields, which have been specified in the
|
histogram buckets for those fields, which have been specified in the
|
||||||
config. If buckets are found, the aggregator will put +1 to appropriate
|
config. If buckets are found, the aggregator will increment +1 to the appropriate
|
||||||
bucket. Otherwise, nothing will happen. Every `period` seconds these data
|
bucket otherwise it will be added to the `+Inf` bucket. Every `period`
|
||||||
will be pushed to output.
|
seconds this data will be forwarded to the outputs.
|
||||||
|
|
||||||
Note, that the all hits of current bucket will be also added to all next
|
The algorithm of hit counting to buckets was implemented on the base
|
||||||
buckets in final result of distribution. Why does it work this way? In
|
of the algorithm which is implemented in the Prometheus
|
||||||
configuration you define right borders for each bucket in a ascending
|
|
||||||
sequence. Internally buckets are presented as ranges with borders
|
|
||||||
(0..bucketBorder]: 0..1, 0..10, 0..50, …, 0..+Inf. So the value "+1" will be
|
|
||||||
put into those buckets, in which the metric value fell with such ranges of
|
|
||||||
buckets.
|
|
||||||
|
|
||||||
This plugin creates cumulative histograms. It means, that the hits in the
|
|
||||||
buckets will always increase from the moment of telegraf start. But if you
|
|
||||||
restart telegraf, all hits in the buckets will be reset to 0.
|
|
||||||
|
|
||||||
Also, the algorithm of hit counting to buckets was implemented on the base
|
|
||||||
of the algorithm, which is implemented in the Prometheus
|
|
||||||
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
|
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
|
||||||
|
|
||||||
### Configuration
|
### Configuration
|
||||||
|
@ -40,61 +27,44 @@ of the algorithm, which is implemented in the Prometheus
|
||||||
```toml
|
```toml
|
||||||
# Configuration for aggregate histogram metrics
|
# Configuration for aggregate histogram metrics
|
||||||
[[aggregators.histogram]]
|
[[aggregators.histogram]]
|
||||||
## General Aggregator Arguments:
|
## The period in which to flush the aggregator.
|
||||||
## The period on which to flush & clear the aggregator.
|
|
||||||
period = "30s"
|
period = "30s"
|
||||||
|
|
||||||
## If true, the original metric will be dropped by the
|
## If true, the original metric will be dropped by the
|
||||||
## aggregator and will not get sent to the output plugins.
|
## aggregator and will not get sent to the output plugins.
|
||||||
drop_original = false
|
drop_original = false
|
||||||
|
|
||||||
## The example of config to aggregate histogram for all fields of specified metric.
|
## Example config that aggregates all fields of the metric.
|
||||||
[[aggregators.histogram.config]]
|
# [[aggregators.histogram.config]]
|
||||||
## The set of buckets.
|
# ## The set of buckets.
|
||||||
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
||||||
## The name of metric.
|
# ## The name of metric.
|
||||||
metric_name = "cpu"
|
# measurement_name = "cpu"
|
||||||
|
|
||||||
## The example of config to aggregate histogram for concrete fields of specified metric.
|
## Example config that aggregates only specific fields of the metric.
|
||||||
[[aggregators.histogram.config]]
|
# [[aggregators.histogram.config]]
|
||||||
## The set of buckets.
|
# ## The set of buckets.
|
||||||
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
||||||
## The name of metric.
|
# ## The name of metric.
|
||||||
metric_name = "diskio"
|
# measurement_name = "diskio"
|
||||||
## The concrete fields of metric.
|
# ## The concrete fields of metric
|
||||||
metric_fields = ["io_time", "read_time", "write_time"]
|
# fields = ["io_time", "read_time", "write_time"]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Explanation
|
The user is responsible for defining the bounds of the histogram bucket as
|
||||||
|
well as the measurement name and fields to aggregate.
|
||||||
|
|
||||||
The field `metric_fields` is the list of metric fields. For example, the
|
Each histogram config section must contain a `buckets` and `measurement_name`
|
||||||
metric `cpu` has the following fields: usage_user, usage_system,
|
option. Optionally, if `fields` is set only the fields listed will be
|
||||||
usage_idle, usage_nice, usage_iowait, usage_irq, usage_softirq, usage_steal,
|
aggregated. If `fields` is not set all fields are aggregated.
|
||||||
usage_guest, usage_guest_nice.
|
|
||||||
|
|
||||||
Note that histogram metrics will be pushed every `period` seconds.
|
The `buckets` option contains a list of floats which specify the bucket
|
||||||
As you know telegraf calls aggregator `Reset()` func each `period` seconds.
|
boundaries. Each float value defines the inclusive upper bound of the bucket.
|
||||||
Histogram aggregator ignores `Reset()` and continues to count hits.
|
The `+Inf` bucket is added automatically and does not need to be defined.
|
||||||
|
|
||||||
#### Use cases
|
|
||||||
|
|
||||||
You can specify fields using two cases:
|
|
||||||
|
|
||||||
1. The specifying only metric name. In this case all fields of metric
|
|
||||||
will be aggregated.
|
|
||||||
2. The specifying metric name and concrete field.
|
|
||||||
|
|
||||||
#### Some rules
|
|
||||||
|
|
||||||
- The setting of each histogram must be in separate section with title
|
|
||||||
`aggregators.histogram.config`.
|
|
||||||
|
|
||||||
- The each value of bucket must be float value.
|
|
||||||
|
|
||||||
- Don\`t include the border bucket `+Inf`. It will be done automatically.
|
|
||||||
|
|
||||||
### Measurements & Fields:
|
### Measurements & Fields:
|
||||||
|
|
||||||
The postfix `bucket` will be added to each field.
|
The postfix `bucket` will be added to each field key.
|
||||||
|
|
||||||
- measurement1
|
- measurement1
|
||||||
- field1_bucket
|
- field1_bucket
|
||||||
|
@ -102,16 +72,15 @@ The postfix `bucket` will be added to each field.
|
||||||
|
|
||||||
### Tags:
|
### Tags:
|
||||||
|
|
||||||
All measurements have tag `le`. This tag has the border value of bucket. It
|
All measurements are given the tag `le`. This tag has the border value of
|
||||||
means that the metric value is less or equal to the value of this tag. For
|
bucket. It means that the metric value is less than or equal to the value of
|
||||||
example, let assume that we have the metric value 10 and the following
|
this tag. For example, let assume that we have the metric value 10 and the
|
||||||
buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value 10,
|
following buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value
|
||||||
because the metrics value is passed into bucket with right border value `10`.
|
10, because the metrics value is passed into bucket with right border value
|
||||||
|
`10`.
|
||||||
|
|
||||||
### Example Output:
|
### Example Output:
|
||||||
|
|
||||||
The following output will return to the Prometheus client.
|
|
||||||
|
|
||||||
```
|
```
|
||||||
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
|
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
|
||||||
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
|
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
|
||||||
|
|
|
@ -24,8 +24,8 @@ type HistogramAggregator struct {
|
||||||
|
|
||||||
// config is the config, which contains name, field of metric and histogram buckets.
|
// config is the config, which contains name, field of metric and histogram buckets.
|
||||||
type config struct {
|
type config struct {
|
||||||
Metric string `toml:"metric_name"`
|
Metric string `toml:"measurement_name"`
|
||||||
Fields []string `toml:"metric_fields"`
|
Fields []string `toml:"fields"`
|
||||||
Buckets buckets `toml:"buckets"`
|
Buckets buckets `toml:"buckets"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,28 +65,28 @@ func NewHistogramAggregator() telegraf.Aggregator {
|
||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## General Aggregator Arguments:
|
## The period in which to flush the aggregator.
|
||||||
## The period on which to flush & clear the aggregator.
|
|
||||||
period = "30s"
|
period = "30s"
|
||||||
|
|
||||||
## If true, the original metric will be dropped by the
|
## If true, the original metric will be dropped by the
|
||||||
## aggregator and will not get sent to the output plugins.
|
## aggregator and will not get sent to the output plugins.
|
||||||
drop_original = false
|
drop_original = false
|
||||||
|
|
||||||
## The example of config to aggregate histogram for all fields of specified metric.
|
## Example config that aggregates all fields of the metric.
|
||||||
[[aggregators.histogram.config]]
|
# [[aggregators.histogram.config]]
|
||||||
## The set of buckets.
|
# ## The set of buckets.
|
||||||
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
|
||||||
## The name of metric.
|
# ## The name of metric.
|
||||||
metric_name = "cpu"
|
# measurement_name = "cpu"
|
||||||
|
|
||||||
## The example of config to aggregate for specified fields of metric.
|
## Example config that aggregates only specific fields of the metric.
|
||||||
[[aggregators.histogram.config]]
|
# [[aggregators.histogram.config]]
|
||||||
## The set of buckets.
|
# ## The set of buckets.
|
||||||
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
|
||||||
## The name of metric.
|
# ## The name of metric.
|
||||||
metric_name = "diskio"
|
# measurement_name = "diskio"
|
||||||
## The concrete fields of metric
|
# ## The concrete fields of metric
|
||||||
metric_fields = ["io_time", "read_time", "write_time"]
|
# fields = ["io_time", "read_time", "write_time"]
|
||||||
`
|
`
|
||||||
|
|
||||||
// SampleConfig returns sample of config
|
// SampleConfig returns sample of config
|
||||||
|
@ -96,7 +96,7 @@ func (h *HistogramAggregator) SampleConfig() string {
|
||||||
|
|
||||||
// Description returns description of aggregator plugin
|
// Description returns description of aggregator plugin
|
||||||
func (h *HistogramAggregator) Description() string {
|
func (h *HistogramAggregator) Description() string {
|
||||||
return "Keep the aggregate histogram of each metric passing through."
|
return "Create aggregate histograms."
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add adds new hit to the buckets
|
// Add adds new hit to the buckets
|
||||||
|
|
|
@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
|
||||||
## Use SSL but skip chain & host verification
|
## Use SSL but skip chain & host verification
|
||||||
# insecure_skip_verify = false
|
# insecure_skip_verify = false
|
||||||
|
|
||||||
## Data format to output.
|
## Data format to consume.
|
||||||
## Each data format has its own unique set of configuration options, read
|
## Each data format has its own unique set of configuration options, read
|
||||||
## more about them here:
|
## more about them here:
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
data_format = "influx"
|
data_format = "influx"
|
||||||
```
|
```
|
||||||
|
|
|
@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
|
||||||
## Use SSL but skip chain & host verification
|
## Use SSL but skip chain & host verification
|
||||||
# insecure_skip_verify = false
|
# insecure_skip_verify = false
|
||||||
|
|
||||||
## Data format to output.
|
## Data format to consume.
|
||||||
## Each data format has its own unique set of configuration options, read
|
## Each data format has its own unique set of configuration options, read
|
||||||
## more about them here:
|
## more about them here:
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
data_format = "influx"
|
data_format = "influx"
|
||||||
`
|
`
|
||||||
}
|
}
|
||||||
|
|
|
@ -26,7 +26,7 @@ func TestParseSockId(t *testing.T) {
|
||||||
func TestParseMonDump(t *testing.T) {
|
func TestParseMonDump(t *testing.T) {
|
||||||
dump, err := parseDump(monPerfDump)
|
dump, err := parseDump(monPerfDump)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
|
assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
|
||||||
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
|
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -225,7 +225,7 @@ var fileFormats = [...]fileFormat{
|
||||||
}
|
}
|
||||||
|
|
||||||
func numberOrString(s string) interface{} {
|
func numberOrString(s string) interface{} {
|
||||||
i, err := strconv.Atoi(s)
|
i, err := strconv.ParseInt(s, 10, 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return i
|
return i
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,17 +31,17 @@ func TestCgroupStatistics_1(t *testing.T) {
|
||||||
"path": "testdata/memory",
|
"path": "testdata/memory",
|
||||||
}
|
}
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"memory.stat.cache": 1739362304123123123,
|
"memory.stat.cache": int64(1739362304123123123),
|
||||||
"memory.stat.rss": 1775325184,
|
"memory.stat.rss": int64(1775325184),
|
||||||
"memory.stat.rss_huge": 778043392,
|
"memory.stat.rss_huge": int64(778043392),
|
||||||
"memory.stat.mapped_file": 421036032,
|
"memory.stat.mapped_file": int64(421036032),
|
||||||
"memory.stat.dirty": -307200,
|
"memory.stat.dirty": int64(-307200),
|
||||||
"memory.max_usage_in_bytes.0": 0,
|
"memory.max_usage_in_bytes.0": int64(0),
|
||||||
"memory.max_usage_in_bytes.1": -1,
|
"memory.max_usage_in_bytes.1": int64(-1),
|
||||||
"memory.max_usage_in_bytes.2": 2,
|
"memory.max_usage_in_bytes.2": int64(2),
|
||||||
"memory.limit_in_bytes": 223372036854771712,
|
"memory.limit_in_bytes": int64(223372036854771712),
|
||||||
"memory.use_hierarchy": "12-781",
|
"memory.use_hierarchy": "12-781",
|
||||||
"notify_on_release": 0,
|
"notify_on_release": int64(0),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
||||||
}
|
}
|
||||||
|
@ -63,10 +63,10 @@ func TestCgroupStatistics_2(t *testing.T) {
|
||||||
"path": "testdata/cpu",
|
"path": "testdata/cpu",
|
||||||
}
|
}
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"cpuacct.usage_percpu.0": -1452543795404,
|
"cpuacct.usage_percpu.0": int64(-1452543795404),
|
||||||
"cpuacct.usage_percpu.1": 1376681271659,
|
"cpuacct.usage_percpu.1": int64(1376681271659),
|
||||||
"cpuacct.usage_percpu.2": 1450950799997,
|
"cpuacct.usage_percpu.2": int64(1450950799997),
|
||||||
"cpuacct.usage_percpu.3": -1473113374257,
|
"cpuacct.usage_percpu.3": int64(-1473113374257),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
||||||
}
|
}
|
||||||
|
@ -88,7 +88,7 @@ func TestCgroupStatistics_3(t *testing.T) {
|
||||||
"path": "testdata/memory/group_1",
|
"path": "testdata/memory/group_1",
|
||||||
}
|
}
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"memory.limit_in_bytes": 223372036854771712,
|
"memory.limit_in_bytes": int64(223372036854771712),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
||||||
|
|
||||||
|
@ -115,7 +115,7 @@ func TestCgroupStatistics_4(t *testing.T) {
|
||||||
"path": "testdata/memory/group_1/group_1_1",
|
"path": "testdata/memory/group_1/group_1_1",
|
||||||
}
|
}
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"memory.limit_in_bytes": 223372036854771712,
|
"memory.limit_in_bytes": int64(223372036854771712),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
||||||
|
|
||||||
|
@ -147,7 +147,7 @@ func TestCgroupStatistics_5(t *testing.T) {
|
||||||
"path": "testdata/memory/group_1/group_1_1",
|
"path": "testdata/memory/group_1/group_1_1",
|
||||||
}
|
}
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"memory.limit_in_bytes": 223372036854771712,
|
"memory.limit_in_bytes": int64(223372036854771712),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
||||||
|
|
||||||
|
@ -174,9 +174,9 @@ func TestCgroupStatistics_6(t *testing.T) {
|
||||||
"path": "testdata/memory",
|
"path": "testdata/memory",
|
||||||
}
|
}
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"memory.usage_in_bytes": 3513667584,
|
"memory.usage_in_bytes": int64(3513667584),
|
||||||
"memory.use_hierarchy": "12-781",
|
"memory.use_hierarchy": "12-781",
|
||||||
"memory.kmem.limit_in_bytes": 9223372036854771712,
|
"memory.kmem.limit_in_bytes": int64(9223372036854771712),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,5 +1,3 @@
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package chrony
|
package chrony
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
@ -1,3 +0,0 @@
|
||||||
// +build !linux
|
|
||||||
|
|
||||||
package chrony
|
|
|
@ -1,5 +1,3 @@
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package chrony
|
package chrony
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
@ -69,6 +69,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
|
||||||
config.Datacenter = c.Datacentre
|
config.Datacenter = c.Datacentre
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.Token != "" {
|
||||||
|
config.Token = c.Token
|
||||||
|
}
|
||||||
|
|
||||||
if c.Username != "" {
|
if c.Username != "" {
|
||||||
config.HttpAuth = &api.HttpBasicAuth{
|
config.HttpAuth = &api.HttpBasicAuth{
|
||||||
Username: c.Username,
|
Username: c.Username,
|
||||||
|
|
|
@ -20,7 +20,7 @@ var sampleChecks = []*api.HealthCheck{
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGatherHealtCheck(t *testing.T) {
|
func TestGatherHealthCheck(t *testing.T) {
|
||||||
expectedFields := map[string]interface{}{
|
expectedFields := map[string]interface{}{
|
||||||
"check_name": "foo.health",
|
"check_name": "foo.health",
|
||||||
"status": "passing",
|
"status": "passing",
|
||||||
|
|
|
@ -16,21 +16,21 @@ const metricName = "dmcache"
|
||||||
|
|
||||||
type cacheStatus struct {
|
type cacheStatus struct {
|
||||||
device string
|
device string
|
||||||
length int
|
length int64
|
||||||
target string
|
target string
|
||||||
metadataBlocksize int
|
metadataBlocksize int64
|
||||||
metadataUsed int
|
metadataUsed int64
|
||||||
metadataTotal int
|
metadataTotal int64
|
||||||
cacheBlocksize int
|
cacheBlocksize int64
|
||||||
cacheUsed int
|
cacheUsed int64
|
||||||
cacheTotal int
|
cacheTotal int64
|
||||||
readHits int
|
readHits int64
|
||||||
readMisses int
|
readMisses int64
|
||||||
writeHits int
|
writeHits int64
|
||||||
writeMisses int
|
writeMisses int64
|
||||||
demotions int
|
demotions int64
|
||||||
promotions int
|
promotions int64
|
||||||
dirty int
|
dirty int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *DMCache) Gather(acc telegraf.Accumulator) error {
|
func (c *DMCache) Gather(acc telegraf.Accumulator) error {
|
||||||
|
@ -69,12 +69,12 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
status.device = strings.TrimRight(values[0], ":")
|
status.device = strings.TrimRight(values[0], ":")
|
||||||
status.length, err = strconv.Atoi(values[2])
|
status.length, err = strconv.ParseInt(values[2], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.target = values[3]
|
status.target = values[3]
|
||||||
status.metadataBlocksize, err = strconv.Atoi(values[4])
|
status.metadataBlocksize, err = strconv.ParseInt(values[4], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
|
@ -82,15 +82,15 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
|
||||||
if len(metadata) != 2 {
|
if len(metadata) != 2 {
|
||||||
return cacheStatus{}, parseError
|
return cacheStatus{}, parseError
|
||||||
}
|
}
|
||||||
status.metadataUsed, err = strconv.Atoi(metadata[0])
|
status.metadataUsed, err = strconv.ParseInt(metadata[0], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.metadataTotal, err = strconv.Atoi(metadata[1])
|
status.metadataTotal, err = strconv.ParseInt(metadata[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.cacheBlocksize, err = strconv.Atoi(values[6])
|
status.cacheBlocksize, err = strconv.ParseInt(values[6], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
|
@ -98,39 +98,39 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
|
||||||
if len(cache) != 2 {
|
if len(cache) != 2 {
|
||||||
return cacheStatus{}, parseError
|
return cacheStatus{}, parseError
|
||||||
}
|
}
|
||||||
status.cacheUsed, err = strconv.Atoi(cache[0])
|
status.cacheUsed, err = strconv.ParseInt(cache[0], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.cacheTotal, err = strconv.Atoi(cache[1])
|
status.cacheTotal, err = strconv.ParseInt(cache[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.readHits, err = strconv.Atoi(values[8])
|
status.readHits, err = strconv.ParseInt(values[8], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.readMisses, err = strconv.Atoi(values[9])
|
status.readMisses, err = strconv.ParseInt(values[9], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.writeHits, err = strconv.Atoi(values[10])
|
status.writeHits, err = strconv.ParseInt(values[10], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.writeMisses, err = strconv.Atoi(values[11])
|
status.writeMisses, err = strconv.ParseInt(values[11], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.demotions, err = strconv.Atoi(values[12])
|
status.demotions, err = strconv.ParseInt(values[12], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.promotions, err = strconv.Atoi(values[13])
|
status.promotions, err = strconv.ParseInt(values[13], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
status.dirty, err = strconv.Atoi(values[14])
|
status.dirty, err = strconv.ParseInt(values[14], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cacheStatus{}, err
|
return cacheStatus{}, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,3 +1,5 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
package dmcache
|
package dmcache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -33,20 +35,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
|
||||||
"device": "cs-1",
|
"device": "cs-1",
|
||||||
}
|
}
|
||||||
fields1 := map[string]interface{}{
|
fields1 := map[string]interface{}{
|
||||||
"length": 4883791872,
|
"length": int64(4883791872),
|
||||||
"metadata_blocksize": 8,
|
"metadata_blocksize": int64(8),
|
||||||
"metadata_used": 1018,
|
"metadata_used": int64(1018),
|
||||||
"metadata_total": 1501122,
|
"metadata_total": int64(1501122),
|
||||||
"cache_blocksize": 512,
|
"cache_blocksize": int64(512),
|
||||||
"cache_used": 7,
|
"cache_used": int64(7),
|
||||||
"cache_total": 464962,
|
"cache_total": int64(464962),
|
||||||
"read_hits": 139,
|
"read_hits": int64(139),
|
||||||
"read_misses": 352643,
|
"read_misses": int64(352643),
|
||||||
"write_hits": 15,
|
"write_hits": int64(15),
|
||||||
"write_misses": 46,
|
"write_misses": int64(46),
|
||||||
"demotions": 0,
|
"demotions": int64(0),
|
||||||
"promotions": 7,
|
"promotions": int64(7),
|
||||||
"dirty": 0,
|
"dirty": int64(0),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
|
||||||
|
|
||||||
|
@ -54,20 +56,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
|
||||||
"device": "cs-2",
|
"device": "cs-2",
|
||||||
}
|
}
|
||||||
fields2 := map[string]interface{}{
|
fields2 := map[string]interface{}{
|
||||||
"length": 4294967296,
|
"length": int64(4294967296),
|
||||||
"metadata_blocksize": 8,
|
"metadata_blocksize": int64(8),
|
||||||
"metadata_used": 72352,
|
"metadata_used": int64(72352),
|
||||||
"metadata_total": 1310720,
|
"metadata_total": int64(1310720),
|
||||||
"cache_blocksize": 128,
|
"cache_blocksize": int64(128),
|
||||||
"cache_used": 26,
|
"cache_used": int64(26),
|
||||||
"cache_total": 24327168,
|
"cache_total": int64(24327168),
|
||||||
"read_hits": 2409,
|
"read_hits": int64(2409),
|
||||||
"read_misses": 286,
|
"read_misses": int64(286),
|
||||||
"write_hits": 265,
|
"write_hits": int64(265),
|
||||||
"write_misses": 524682,
|
"write_misses": int64(524682),
|
||||||
"demotions": 0,
|
"demotions": int64(0),
|
||||||
"promotions": 0,
|
"promotions": int64(0),
|
||||||
"dirty": 0,
|
"dirty": int64(0),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
|
acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
|
||||||
|
|
||||||
|
@ -76,20 +78,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
fields3 := map[string]interface{}{
|
fields3 := map[string]interface{}{
|
||||||
"length": 9178759168,
|
"length": int64(9178759168),
|
||||||
"metadata_blocksize": 16,
|
"metadata_blocksize": int64(16),
|
||||||
"metadata_used": 73370,
|
"metadata_used": int64(73370),
|
||||||
"metadata_total": 2811842,
|
"metadata_total": int64(2811842),
|
||||||
"cache_blocksize": 640,
|
"cache_blocksize": int64(640),
|
||||||
"cache_used": 33,
|
"cache_used": int64(33),
|
||||||
"cache_total": 24792130,
|
"cache_total": int64(24792130),
|
||||||
"read_hits": 2548,
|
"read_hits": int64(2548),
|
||||||
"read_misses": 352929,
|
"read_misses": int64(352929),
|
||||||
"write_hits": 280,
|
"write_hits": int64(280),
|
||||||
"write_misses": 524728,
|
"write_misses": int64(524728),
|
||||||
"demotions": 0,
|
"demotions": int64(0),
|
||||||
"promotions": 7,
|
"promotions": int64(7),
|
||||||
"dirty": 0,
|
"dirty": int64(0),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
|
acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
|
||||||
}
|
}
|
||||||
|
@ -111,20 +113,20 @@ func TestNotPerDeviceGoodOutput(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"length": 9178759168,
|
"length": int64(9178759168),
|
||||||
"metadata_blocksize": 16,
|
"metadata_blocksize": int64(16),
|
||||||
"metadata_used": 73370,
|
"metadata_used": int64(73370),
|
||||||
"metadata_total": 2811842,
|
"metadata_total": int64(2811842),
|
||||||
"cache_blocksize": 640,
|
"cache_blocksize": int64(640),
|
||||||
"cache_used": 33,
|
"cache_used": int64(33),
|
||||||
"cache_total": 24792130,
|
"cache_total": int64(24792130),
|
||||||
"read_hits": 2548,
|
"read_hits": int64(2548),
|
||||||
"read_misses": 352929,
|
"read_misses": int64(352929),
|
||||||
"write_hits": 280,
|
"write_hits": int64(280),
|
||||||
"write_misses": 524728,
|
"write_misses": int64(524728),
|
||||||
"demotions": 0,
|
"demotions": int64(0),
|
||||||
"promotions": 7,
|
"promotions": int64(7),
|
||||||
"dirty": 0,
|
"dirty": int64(0),
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, measurement, fields, tags)
|
acc.AssertContainsTaggedFields(t, measurement, fields, tags)
|
||||||
}
|
}
|
|
@ -20,16 +20,6 @@ import (
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type DockerLabelFilter struct {
|
|
||||||
labelInclude filter.Filter
|
|
||||||
labelExclude filter.Filter
|
|
||||||
}
|
|
||||||
|
|
||||||
type DockerContainerFilter struct {
|
|
||||||
containerInclude filter.Filter
|
|
||||||
containerExclude filter.Filter
|
|
||||||
}
|
|
||||||
|
|
||||||
// Docker object
|
// Docker object
|
||||||
type Docker struct {
|
type Docker struct {
|
||||||
Endpoint string
|
Endpoint string
|
||||||
|
@ -41,11 +31,9 @@ type Docker struct {
|
||||||
TagEnvironment []string `toml:"tag_env"`
|
TagEnvironment []string `toml:"tag_env"`
|
||||||
LabelInclude []string `toml:"docker_label_include"`
|
LabelInclude []string `toml:"docker_label_include"`
|
||||||
LabelExclude []string `toml:"docker_label_exclude"`
|
LabelExclude []string `toml:"docker_label_exclude"`
|
||||||
LabelFilter DockerLabelFilter
|
|
||||||
|
|
||||||
ContainerInclude []string `toml:"container_name_include"`
|
ContainerInclude []string `toml:"container_name_include"`
|
||||||
ContainerExclude []string `toml:"container_name_exclude"`
|
ContainerExclude []string `toml:"container_name_exclude"`
|
||||||
ContainerFilter DockerContainerFilter
|
|
||||||
|
|
||||||
SSLCA string `toml:"ssl_ca"`
|
SSLCA string `toml:"ssl_ca"`
|
||||||
SSLCert string `toml:"ssl_cert"`
|
SSLCert string `toml:"ssl_cert"`
|
||||||
|
@ -59,6 +47,8 @@ type Docker struct {
|
||||||
httpClient *http.Client
|
httpClient *http.Client
|
||||||
engine_host string
|
engine_host string
|
||||||
filtersCreated bool
|
filtersCreated bool
|
||||||
|
labelFilter filter.Filter
|
||||||
|
containerFilter filter.Filter
|
||||||
}
|
}
|
||||||
|
|
||||||
// KB, MB, GB, TB, PB...human friendly
|
// KB, MB, GB, TB, PB...human friendly
|
||||||
|
@ -291,13 +281,9 @@ func (d *Docker) gatherContainer(
|
||||||
"container_version": imageVersion,
|
"container_version": imageVersion,
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(d.ContainerInclude) > 0 || len(d.ContainerExclude) > 0 {
|
if !d.containerFilter.Match(cname) {
|
||||||
if len(d.ContainerInclude) == 0 || !d.ContainerFilter.containerInclude.Match(cname) {
|
|
||||||
if len(d.ContainerExclude) == 0 || d.ContainerFilter.containerExclude.Match(cname) {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
@ -317,12 +303,10 @@ func (d *Docker) gatherContainer(
|
||||||
|
|
||||||
// Add labels to tags
|
// Add labels to tags
|
||||||
for k, label := range container.Labels {
|
for k, label := range container.Labels {
|
||||||
if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) {
|
if d.labelFilter.Match(k) {
|
||||||
if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) {
|
|
||||||
tags[k] = label
|
tags[k] = label
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Add whitelisted environment variables to tags
|
// Add whitelisted environment variables to tags
|
||||||
if len(d.TagEnvironment) > 0 {
|
if len(d.TagEnvironment) > 0 {
|
||||||
|
@ -355,7 +339,11 @@ func gatherContainerStats(
|
||||||
total bool,
|
total bool,
|
||||||
daemonOSType string,
|
daemonOSType string,
|
||||||
) {
|
) {
|
||||||
now := stat.Read
|
tm := stat.Read
|
||||||
|
|
||||||
|
if tm.Before(time.Unix(0, 0)) {
|
||||||
|
tm = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
memfields := map[string]interface{}{
|
memfields := map[string]interface{}{
|
||||||
"container_id": id,
|
"container_id": id,
|
||||||
|
@ -415,7 +403,7 @@ func gatherContainerStats(
|
||||||
memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
|
memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
|
||||||
}
|
}
|
||||||
|
|
||||||
acc.AddFields("docker_container_mem", memfields, tags, now)
|
acc.AddFields("docker_container_mem", memfields, tags, tm)
|
||||||
|
|
||||||
cpufields := map[string]interface{}{
|
cpufields := map[string]interface{}{
|
||||||
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
|
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
|
||||||
|
@ -440,7 +428,7 @@ func gatherContainerStats(
|
||||||
|
|
||||||
cputags := copyTags(tags)
|
cputags := copyTags(tags)
|
||||||
cputags["cpu"] = "cpu-total"
|
cputags["cpu"] = "cpu-total"
|
||||||
acc.AddFields("docker_container_cpu", cpufields, cputags, now)
|
acc.AddFields("docker_container_cpu", cpufields, cputags, tm)
|
||||||
|
|
||||||
// If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
|
// If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
|
||||||
// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
|
// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
|
||||||
|
@ -458,7 +446,7 @@ func gatherContainerStats(
|
||||||
"usage_total": percpu,
|
"usage_total": percpu,
|
||||||
"container_id": id,
|
"container_id": id,
|
||||||
}
|
}
|
||||||
acc.AddFields("docker_container_cpu", fields, percputags, now)
|
acc.AddFields("docker_container_cpu", fields, percputags, tm)
|
||||||
}
|
}
|
||||||
|
|
||||||
totalNetworkStatMap := make(map[string]interface{})
|
totalNetworkStatMap := make(map[string]interface{})
|
||||||
|
@ -478,7 +466,7 @@ func gatherContainerStats(
|
||||||
if perDevice {
|
if perDevice {
|
||||||
nettags := copyTags(tags)
|
nettags := copyTags(tags)
|
||||||
nettags["network"] = network
|
nettags["network"] = network
|
||||||
acc.AddFields("docker_container_net", netfields, nettags, now)
|
acc.AddFields("docker_container_net", netfields, nettags, tm)
|
||||||
}
|
}
|
||||||
if total {
|
if total {
|
||||||
for field, value := range netfields {
|
for field, value := range netfields {
|
||||||
|
@ -511,17 +499,17 @@ func gatherContainerStats(
|
||||||
nettags := copyTags(tags)
|
nettags := copyTags(tags)
|
||||||
nettags["network"] = "total"
|
nettags["network"] = "total"
|
||||||
totalNetworkStatMap["container_id"] = id
|
totalNetworkStatMap["container_id"] = id
|
||||||
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, now)
|
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm)
|
||||||
}
|
}
|
||||||
|
|
||||||
gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
|
gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total)
|
||||||
}
|
}
|
||||||
|
|
||||||
func gatherBlockIOMetrics(
|
func gatherBlockIOMetrics(
|
||||||
stat *types.StatsJSON,
|
stat *types.StatsJSON,
|
||||||
acc telegraf.Accumulator,
|
acc telegraf.Accumulator,
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
now time.Time,
|
tm time.Time,
|
||||||
id string,
|
id string,
|
||||||
perDevice bool,
|
perDevice bool,
|
||||||
total bool,
|
total bool,
|
||||||
|
@ -592,7 +580,7 @@ func gatherBlockIOMetrics(
|
||||||
if perDevice {
|
if perDevice {
|
||||||
iotags := copyTags(tags)
|
iotags := copyTags(tags)
|
||||||
iotags["device"] = device
|
iotags["device"] = device
|
||||||
acc.AddFields("docker_container_blkio", fields, iotags, now)
|
acc.AddFields("docker_container_blkio", fields, iotags, tm)
|
||||||
}
|
}
|
||||||
if total {
|
if total {
|
||||||
for field, value := range fields {
|
for field, value := range fields {
|
||||||
|
@ -623,7 +611,7 @@ func gatherBlockIOMetrics(
|
||||||
totalStatMap["container_id"] = id
|
totalStatMap["container_id"] = id
|
||||||
iotags := copyTags(tags)
|
iotags := copyTags(tags)
|
||||||
iotags["device"] = "total"
|
iotags["device"] = "total"
|
||||||
acc.AddFields("docker_container_blkio", totalStatMap, iotags, now)
|
acc.AddFields("docker_container_blkio", totalStatMap, iotags, tm)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -666,46 +654,25 @@ func parseSize(sizeStr string) (int64, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Docker) createContainerFilters() error {
|
func (d *Docker) createContainerFilters() error {
|
||||||
|
// Backwards compatibility for deprecated `container_names` parameter.
|
||||||
if len(d.ContainerNames) > 0 {
|
if len(d.ContainerNames) > 0 {
|
||||||
d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
|
d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(d.ContainerInclude) != 0 {
|
filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
|
||||||
var err error
|
|
||||||
d.ContainerFilter.containerInclude, err = filter.Compile(d.ContainerInclude)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
d.containerFilter = filter
|
||||||
|
|
||||||
if len(d.ContainerExclude) != 0 {
|
|
||||||
var err error
|
|
||||||
d.ContainerFilter.containerExclude, err = filter.Compile(d.ContainerExclude)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Docker) createLabelFilters() error {
|
func (d *Docker) createLabelFilters() error {
|
||||||
if len(d.LabelInclude) != 0 {
|
filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
|
||||||
var err error
|
|
||||||
d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
d.labelFilter = filter
|
||||||
|
|
||||||
if len(d.LabelExclude) != 0 {
|
|
||||||
var err error
|
|
||||||
d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -44,8 +44,7 @@ func (c *MockClient) ContainerInspect(
|
||||||
return c.ContainerInspectF(ctx, containerID)
|
return c.ContainerInspectF(ctx, containerID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newClient(host string, tlsConfig *tls.Config) (Client, error) {
|
var baseClient = MockClient{
|
||||||
return &MockClient{
|
|
||||||
InfoF: func(context.Context) (types.Info, error) {
|
InfoF: func(context.Context) (types.Info, error) {
|
||||||
return info, nil
|
return info, nil
|
||||||
},
|
},
|
||||||
|
@ -58,7 +57,10 @@ func newClient(host string, tlsConfig *tls.Config) (Client, error) {
|
||||||
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
|
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
|
||||||
return containerInspect, nil
|
return containerInspect, nil
|
||||||
},
|
},
|
||||||
}, nil
|
}
|
||||||
|
|
||||||
|
func newClient(host string, tlsConfig *tls.Config) (Client, error) {
|
||||||
|
return &baseClient, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDockerGatherContainerStats(t *testing.T) {
|
func TestDockerGatherContainerStats(t *testing.T) {
|
||||||
|
@ -234,82 +236,291 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDockerGatherLabels(t *testing.T) {
|
func TestContainerLabels(t *testing.T) {
|
||||||
var gatherLabelsTests = []struct {
|
var tests = []struct {
|
||||||
|
name string
|
||||||
|
container types.Container
|
||||||
include []string
|
include []string
|
||||||
exclude []string
|
exclude []string
|
||||||
expected []string
|
expected map[string]string
|
||||||
notexpected []string
|
|
||||||
}{
|
}{
|
||||||
{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
|
{
|
||||||
{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
|
name: "Nil filters matches all",
|
||||||
{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
|
container: types.Container{
|
||||||
{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
|
Labels: map[string]string{
|
||||||
{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
|
"a": "x",
|
||||||
{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
|
},
|
||||||
{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
|
},
|
||||||
{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
|
include: nil,
|
||||||
{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
|
exclude: nil,
|
||||||
|
expected: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty filters matches all",
|
||||||
|
container: types.Container{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
include: []string{},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Must match include",
|
||||||
|
container: types.Container{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
"b": "y",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
include: []string{"a"},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Must not match exclude",
|
||||||
|
container: types.Container{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
"b": "y",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
include: []string{},
|
||||||
|
exclude: []string{"b"},
|
||||||
|
expected: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Include Glob",
|
||||||
|
container: types.Container{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"aa": "x",
|
||||||
|
"ab": "y",
|
||||||
|
"bb": "z",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
include: []string{"a*"},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: map[string]string{
|
||||||
|
"aa": "x",
|
||||||
|
"ab": "y",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Exclude Glob",
|
||||||
|
container: types.Container{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"aa": "x",
|
||||||
|
"ab": "y",
|
||||||
|
"bb": "z",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
include: []string{},
|
||||||
|
exclude: []string{"a*"},
|
||||||
|
expected: map[string]string{
|
||||||
|
"bb": "z",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Excluded Includes",
|
||||||
|
container: types.Container{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"aa": "x",
|
||||||
|
"ab": "y",
|
||||||
|
"bb": "z",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
include: []string{"a*"},
|
||||||
|
exclude: []string{"*b"},
|
||||||
|
expected: map[string]string{
|
||||||
|
"aa": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
for _, tt := range gatherLabelsTests {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
t.Run("", func(t *testing.T) {
|
|
||||||
var acc testutil.Accumulator
|
var acc testutil.Accumulator
|
||||||
d := Docker{
|
|
||||||
newClient: newClient,
|
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
|
||||||
|
client := baseClient
|
||||||
|
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
|
||||||
|
return []types.Container{tt.container}, nil
|
||||||
|
}
|
||||||
|
return &client, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, label := range tt.include {
|
d := Docker{
|
||||||
d.LabelInclude = append(d.LabelInclude, label)
|
newClient: newClientFunc,
|
||||||
}
|
LabelInclude: tt.include,
|
||||||
for _, label := range tt.exclude {
|
LabelExclude: tt.exclude,
|
||||||
d.LabelExclude = append(d.LabelExclude, label)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err := d.Gather(&acc)
|
err := d.Gather(&acc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
for _, label := range tt.expected {
|
// Grab tags from a container metric
|
||||||
if !acc.HasTag("docker_container_cpu", label) {
|
var actual map[string]string
|
||||||
t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
|
for _, metric := range acc.Metrics {
|
||||||
label, tt.include, tt.exclude)
|
if metric.Measurement == "docker_container_cpu" {
|
||||||
|
actual = metric.Tags
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, label := range tt.notexpected {
|
for k, v := range tt.expected {
|
||||||
if acc.HasTag("docker_container_cpu", label) {
|
require.Equal(t, v, actual[k])
|
||||||
t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
|
|
||||||
label, tt.include, tt.exclude)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestContainerNames(t *testing.T) {
|
func TestContainerNames(t *testing.T) {
|
||||||
var gatherContainerNames = []struct {
|
var tests = []struct {
|
||||||
|
name string
|
||||||
|
containers [][]string
|
||||||
include []string
|
include []string
|
||||||
exclude []string
|
exclude []string
|
||||||
expected []string
|
expected []string
|
||||||
notexpected []string
|
|
||||||
}{
|
}{
|
||||||
{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
|
{
|
||||||
{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
|
name: "Nil filters matches all",
|
||||||
{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
|
containers: [][]string{
|
||||||
{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
|
{"/etcd"},
|
||||||
{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
|
{"/etcd2"},
|
||||||
{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
|
},
|
||||||
{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
|
include: nil,
|
||||||
{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
|
exclude: nil,
|
||||||
{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
|
expected: []string{"etcd", "etcd2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty filters matches all",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: []string{"etcd", "etcd2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Match all containers",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{"*"},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: []string{"etcd", "etcd2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Include prefix match",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{"etc*"},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: []string{"etcd", "etcd2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Exact match",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{"etcd"},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: []string{"etcd"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Star matches zero length",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{"etcd2*"},
|
||||||
|
exclude: []string{},
|
||||||
|
expected: []string{"etcd2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Exclude matches all",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{},
|
||||||
|
exclude: []string{"etc*"},
|
||||||
|
expected: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Exclude single",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{},
|
||||||
|
exclude: []string{"etcd"},
|
||||||
|
expected: []string{"etcd2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Exclude all",
|
||||||
|
containers: [][]string{
|
||||||
|
{"/etcd"},
|
||||||
|
{"/etcd2"},
|
||||||
|
},
|
||||||
|
include: []string{"*"},
|
||||||
|
exclude: []string{"*"},
|
||||||
|
expected: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Exclude item matching include",
|
||||||
|
containers: [][]string{
|
||||||
|
{"acme"},
|
||||||
|
{"foo"},
|
||||||
|
{"acme-test"},
|
||||||
|
},
|
||||||
|
include: []string{"acme*"},
|
||||||
|
exclude: []string{"*test*"},
|
||||||
|
expected: []string{"acme"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Exclude item no wildcards",
|
||||||
|
containers: [][]string{
|
||||||
|
{"acme"},
|
||||||
|
{"acme-test"},
|
||||||
|
},
|
||||||
|
include: []string{"acme*"},
|
||||||
|
exclude: []string{"test"},
|
||||||
|
expected: []string{"acme", "acme-test"},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
for _, tt := range gatherContainerNames {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
t.Run("", func(t *testing.T) {
|
|
||||||
var acc testutil.Accumulator
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
|
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
|
||||||
|
client := baseClient
|
||||||
|
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
|
||||||
|
var containers []types.Container
|
||||||
|
for _, names := range tt.containers {
|
||||||
|
containers = append(containers, types.Container{
|
||||||
|
Names: names,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return containers, nil
|
||||||
|
}
|
||||||
|
return &client, nil
|
||||||
|
}
|
||||||
|
|
||||||
d := Docker{
|
d := Docker{
|
||||||
newClient: newClient,
|
newClient: newClientFunc,
|
||||||
ContainerInclude: tt.include,
|
ContainerInclude: tt.include,
|
||||||
ContainerExclude: tt.exclude,
|
ContainerExclude: tt.exclude,
|
||||||
}
|
}
|
||||||
|
@ -317,39 +528,21 @@ func TestContainerNames(t *testing.T) {
|
||||||
err := d.Gather(&acc)
|
err := d.Gather(&acc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Set of expected names
|
||||||
|
var expected = make(map[string]bool)
|
||||||
|
for _, v := range tt.expected {
|
||||||
|
expected[v] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set of actual names
|
||||||
|
var actual = make(map[string]bool)
|
||||||
for _, metric := range acc.Metrics {
|
for _, metric := range acc.Metrics {
|
||||||
if metric.Measurement == "docker_container_cpu" {
|
if name, ok := metric.Tags["container_name"]; ok {
|
||||||
if val, ok := metric.Tags["container_name"]; ok {
|
actual[name] = true
|
||||||
var found bool = false
|
|
||||||
for _, cname := range tt.expected {
|
|
||||||
if val == cname {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, metric := range acc.Metrics {
|
require.Equal(t, expected, actual)
|
||||||
if metric.Measurement == "docker_container_cpu" {
|
|
||||||
if val, ok := metric.Tags["container_name"]; ok {
|
|
||||||
var found bool = false
|
|
||||||
for _, cname := range tt.notexpected {
|
|
||||||
if val == cname {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if found {
|
|
||||||
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,19 +1,19 @@
|
||||||
# Fail2ban Plugin
|
# Fail2ban Input Plugin
|
||||||
|
|
||||||
The fail2ban plugin gathers counts of failed and banned ip addresses from fail2ban.
|
The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).
|
||||||
|
|
||||||
This plugin run fail2ban-client command, and fail2ban-client require root access.
|
This plugin runs the `fail2ban-client` command which generally requires root access.
|
||||||
You have to grant telegraf to run fail2ban-client:
|
Acquiring the required permissions can be done using several methods:
|
||||||
|
|
||||||
- Run telegraf as root. (deprecate)
|
- Use sudo run fail2ban-client.
|
||||||
- Configure sudo to grant telegraf to fail2ban-client.
|
- Run telegraf as root. (not recommended)
|
||||||
|
|
||||||
### Using sudo
|
### Using sudo
|
||||||
|
|
||||||
You may edit your sudo configuration with the following:
|
You may edit your sudo configuration with the following:
|
||||||
|
|
||||||
``` sudo
|
``` sudo
|
||||||
telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
|
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
|
||||||
```
|
```
|
||||||
|
|
||||||
### Configuration:
|
### Configuration:
|
||||||
|
@ -21,10 +21,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
|
||||||
``` toml
|
``` toml
|
||||||
# Read metrics from fail2ban.
|
# Read metrics from fail2ban.
|
||||||
[[inputs.fail2ban]]
|
[[inputs.fail2ban]]
|
||||||
## fail2ban-client require root access.
|
## Use sudo to run fail2ban-client
|
||||||
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
|
|
||||||
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
|
|
||||||
## This plugin run only "fail2ban-client status".
|
|
||||||
use_sudo = false
|
use_sudo = false
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -55,6 +52,5 @@ Status for the jail: sshd
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
$ ./telegraf --config telegraf.conf --input-filter fail2ban --test
|
|
||||||
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
|
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,5 +1,3 @@
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package fail2ban
|
package fail2ban
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -8,9 +6,10 @@ import (
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"strconv"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
"strconv"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -23,10 +22,7 @@ type Fail2ban struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## fail2ban-client require root access.
|
## Use sudo to run fail2ban-client
|
||||||
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
|
|
||||||
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
|
|
||||||
## This plugin run only "fail2ban-client status".
|
|
||||||
use_sudo = false
|
use_sudo = false
|
||||||
`
|
`
|
||||||
|
|
||||||
|
|
|
@ -1,3 +0,0 @@
|
||||||
// +build !linux
|
|
||||||
|
|
||||||
package fail2ban
|
|
|
@ -148,15 +148,15 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.BufferQueueLength != nil {
|
if p.BufferQueueLength != nil {
|
||||||
tmpFields["buffer_queue_length"] = p.BufferQueueLength
|
tmpFields["buffer_queue_length"] = *p.BufferQueueLength
|
||||||
|
|
||||||
}
|
}
|
||||||
if p.RetryCount != nil {
|
if p.RetryCount != nil {
|
||||||
tmpFields["retry_count"] = p.RetryCount
|
tmpFields["retry_count"] = *p.RetryCount
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.BufferTotalQueuedSize != nil {
|
if p.BufferTotalQueuedSize != nil {
|
||||||
tmpFields["buffer_total_queued_size"] = p.BufferTotalQueuedSize
|
tmpFields["buffer_total_queued_size"] = *p.BufferTotalQueuedSize
|
||||||
}
|
}
|
||||||
|
|
||||||
if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
|
if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
|
||||||
|
|
|
@ -122,12 +122,6 @@ func Test_parse(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_Gather(t *testing.T) {
|
func Test_Gather(t *testing.T) {
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipping Gather function test")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Log("Testing Gather function")
|
|
||||||
|
|
||||||
t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)
|
t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)
|
||||||
|
|
||||||
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
@ -157,13 +151,13 @@ func Test_Gather(t *testing.T) {
|
||||||
assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
|
assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
|
||||||
assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
|
assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
|
||||||
assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
|
assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
|
||||||
assert.Equal(t, expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
|
assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
|
||||||
|
|
||||||
assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
|
assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
|
||||||
assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
|
assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
|
||||||
assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
|
assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
|
||||||
assert.Equal(t, expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
|
assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
|
||||||
assert.Equal(t, expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
|
assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
|
||||||
assert.Equal(t, expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
|
assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,5 +1,3 @@
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package hddtemp
|
package hddtemp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
@ -1,3 +0,0 @@
|
||||||
// +build !linux
|
|
||||||
|
|
||||||
package hddtemp
|
|
|
@ -98,6 +98,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
|
||||||
}
|
}
|
||||||
client := &http.Client{
|
client := &http.Client{
|
||||||
Transport: &http.Transport{
|
Transport: &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
DisableKeepAlives: true,
|
DisableKeepAlives: true,
|
||||||
TLSClientConfig: tlsCfg,
|
TLSClientConfig: tlsCfg,
|
||||||
},
|
},
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package httpjson
|
package httpjson
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -15,6 +16,10 @@ import (
|
||||||
"github.com/influxdata/telegraf/plugins/parsers"
|
"github.com/influxdata/telegraf/plugins/parsers"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
utf8BOM = []byte("\xef\xbb\xbf")
|
||||||
|
)
|
||||||
|
|
||||||
// HttpJson struct
|
// HttpJson struct
|
||||||
type HttpJson struct {
|
type HttpJson struct {
|
||||||
Name string
|
Name string
|
||||||
|
@ -170,7 +175,6 @@ func (h *HttpJson) gatherServer(
|
||||||
serverURL string,
|
serverURL string,
|
||||||
) error {
|
) error {
|
||||||
resp, responseTime, err := h.sendRequest(serverURL)
|
resp, responseTime, err := h.sendRequest(serverURL)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -266,6 +270,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return string(body), responseTime, err
|
return string(body), responseTime, err
|
||||||
}
|
}
|
||||||
|
body = bytes.TrimPrefix(body, utf8BOM)
|
||||||
|
|
||||||
// Process response
|
// Process response
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
|
|
@ -477,15 +477,13 @@ func TestHttpJsonBadJson(t *testing.T) {
|
||||||
assert.Equal(t, 0, acc.NFields())
|
assert.Equal(t, 0, acc.NFields())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test response to empty string as response objectgT
|
// Test response to empty string as response object
|
||||||
func TestHttpJsonEmptyResponse(t *testing.T) {
|
func TestHttpJsonEmptyResponse(t *testing.T) {
|
||||||
httpjson := genMockHttpJson(empty, 200)
|
httpjson := genMockHttpJson(empty, 200)
|
||||||
|
|
||||||
var acc testutil.Accumulator
|
var acc testutil.Accumulator
|
||||||
err := acc.GatherError(httpjson[0].Gather)
|
err := acc.GatherError(httpjson[0].Gather)
|
||||||
|
assert.NoError(t, err)
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Equal(t, 0, acc.NFields())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that the proper values are ignored or collected
|
// Test that the proper values are ignored or collected
|
||||||
|
@ -560,3 +558,18 @@ func TestHttpJsonArray200Tags(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")
|
||||||
|
|
||||||
|
// TestHttpJsonBOM tests that UTF-8 JSON with a BOM can be parsed
|
||||||
|
func TestHttpJsonBOM(t *testing.T) {
|
||||||
|
httpjson := genMockHttpJson(string(jsonBOM), 200)
|
||||||
|
|
||||||
|
for _, service := range httpjson {
|
||||||
|
if service.Name == "other_webapp" {
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := acc.GatherError(service.Gather)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -95,7 +95,7 @@ const measurement = "iptables"
|
||||||
var errParse = errors.New("Cannot parse iptables list information")
|
var errParse = errors.New("Cannot parse iptables list information")
|
||||||
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
|
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
|
||||||
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
|
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
|
||||||
var commentRe = regexp.MustCompile(`\s*/\*\s*(.+?)\s*\*/\s*`)
|
var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)
|
||||||
|
|
||||||
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
|
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
|
||||||
lines := strings.Split(data, "\n")
|
lines := strings.Split(data, "\n")
|
||||||
|
@ -110,21 +110,14 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
|
||||||
return errParse
|
return errParse
|
||||||
}
|
}
|
||||||
for _, line := range lines[2:] {
|
for _, line := range lines[2:] {
|
||||||
tokens := strings.Fields(line)
|
matches := valuesRe.FindStringSubmatch(line)
|
||||||
if len(tokens) < 10 {
|
if len(matches) != 4 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
pkts := tokens[0]
|
pkts := matches[1]
|
||||||
bytes := tokens[1]
|
bytes := matches[2]
|
||||||
end := strings.Join(tokens[9:], " ")
|
comment := matches[3]
|
||||||
|
|
||||||
matches := commentRe.FindStringSubmatch(end)
|
|
||||||
if matches == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
comment := matches[1]
|
|
||||||
|
|
||||||
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
|
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
|
||||||
fields := make(map[string]interface{})
|
fields := make(map[string]interface{})
|
||||||
|
|
|
@ -154,9 +154,25 @@ func TestIptables_Gather(t *testing.T) {
|
||||||
tags: []map[string]string{},
|
tags: []map[string]string{},
|
||||||
fields: [][]map[string]interface{}{},
|
fields: [][]map[string]interface{}{},
|
||||||
},
|
},
|
||||||
|
{ // 11 - all target and ports
|
||||||
|
table: "all_recv",
|
||||||
|
chains: []string{"accountfwd"},
|
||||||
|
values: []string{
|
||||||
|
`Chain accountfwd (1 references)
|
||||||
|
pkts bytes target prot opt in out source destination
|
||||||
|
123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
|
||||||
|
`},
|
||||||
|
tags: []map[string]string{
|
||||||
|
map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
|
||||||
|
},
|
||||||
|
fields: [][]map[string]interface{}{
|
||||||
|
{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
|
t.Run(tt.table, func(t *testing.T) {
|
||||||
i++
|
i++
|
||||||
ipt := &Iptables{
|
ipt := &Iptables{
|
||||||
Table: tt.table,
|
Table: tt.table,
|
||||||
|
@ -180,21 +196,21 @@ func TestIptables_Gather(t *testing.T) {
|
||||||
if n != 0 {
|
if n != 0 {
|
||||||
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
|
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
|
||||||
}
|
}
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
if len(tt.chains) == 0 {
|
if len(tt.chains) == 0 {
|
||||||
n := acc.NFields()
|
n := acc.NFields()
|
||||||
if n != 0 {
|
if n != 0 {
|
||||||
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
|
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
|
||||||
}
|
}
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
if len(tt.tags) == 0 {
|
if len(tt.tags) == 0 {
|
||||||
n := acc.NFields()
|
n := acc.NFields()
|
||||||
if n != 0 {
|
if n != 0 {
|
||||||
t.Errorf("%d: expected 0 values got %d", i, n)
|
t.Errorf("%d: expected 0 values got %d", i, n)
|
||||||
}
|
}
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
n := 0
|
n := 0
|
||||||
for j, tags := range tt.tags {
|
for j, tags := range tt.tags {
|
||||||
|
@ -216,6 +232,7 @@ func TestIptables_Gather(t *testing.T) {
|
||||||
n++
|
n++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,8 +3,6 @@ package leofs
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"net/url"
|
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -19,7 +17,7 @@ import (
|
||||||
const oid = ".1.3.6.1.4.1.35450"
|
const oid = ".1.3.6.1.4.1.35450"
|
||||||
|
|
||||||
// For Manager Master
|
// For Manager Master
|
||||||
const defaultEndpoint = "udp://127.0.0.1:4020"
|
const defaultEndpoint = "127.0.0.1:4020"
|
||||||
|
|
||||||
type ServerType int
|
type ServerType int
|
||||||
|
|
||||||
|
@ -137,8 +135,8 @@ var serverTypeMapping = map[string]ServerType{
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## An array of URLs of the form:
|
## An array of URLs of the form:
|
||||||
## "udp://" host [ ":" port]
|
## host [ ":" port]
|
||||||
servers = ["udp://127.0.0.1:4020"]
|
servers = ["127.0.0.1:4020"]
|
||||||
`
|
`
|
||||||
|
|
||||||
func (l *LeoFS) SampleConfig() string {
|
func (l *LeoFS) SampleConfig() string {
|
||||||
|
@ -155,28 +153,22 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i, endpoint := range l.Servers {
|
for _, endpoint := range l.Servers {
|
||||||
if !strings.HasPrefix(endpoint, "udp://") {
|
results := strings.Split(endpoint, ":")
|
||||||
// Preserve backwards compatibility for hostnames without a
|
|
||||||
// scheme, broken in go 1.8. Remove in Telegraf 2.0
|
port := "4020"
|
||||||
endpoint = "udp://" + endpoint
|
if len(results) > 2 {
|
||||||
log.Printf("W! [inputs.mongodb] Using %q as connection URL; please update your configuration to use an URL", endpoint)
|
|
||||||
l.Servers[i] = endpoint
|
|
||||||
}
|
|
||||||
u, err := url.Parse(endpoint)
|
|
||||||
if err != nil {
|
|
||||||
acc.AddError(fmt.Errorf("Unable to parse address %q: %s", endpoint, err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if u.Host == "" {
|
|
||||||
acc.AddError(fmt.Errorf("Unable to parse address %q", endpoint))
|
acc.AddError(fmt.Errorf("Unable to parse address %q", endpoint))
|
||||||
continue
|
continue
|
||||||
|
} else if len(results) == 2 {
|
||||||
|
if _, err := strconv.Atoi(results[1]); err == nil {
|
||||||
|
port = results[1]
|
||||||
|
} else {
|
||||||
|
acc.AddError(fmt.Errorf("Unable to parse port from %q", endpoint))
|
||||||
|
continue
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
port := u.Port()
|
|
||||||
if port == "" {
|
|
||||||
port = "4020"
|
|
||||||
}
|
|
||||||
st, ok := serverTypeMapping[port]
|
st, ok := serverTypeMapping[port]
|
||||||
if !ok {
|
if !ok {
|
||||||
st = ServerTypeStorage
|
st = ServerTypeStorage
|
||||||
|
@ -196,7 +188,7 @@ func (l *LeoFS) gatherServer(
|
||||||
serverType ServerType,
|
serverType ServerType,
|
||||||
acc telegraf.Accumulator,
|
acc telegraf.Accumulator,
|
||||||
) error {
|
) error {
|
||||||
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
|
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", "-On", endpoint, oid)
|
||||||
stdout, err := cmd.StdoutPipe()
|
stdout, err := cmd.StdoutPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -16,21 +16,21 @@ package main
|
||||||
|
|
||||||
import "fmt"
|
import "fmt"
|
||||||
|
|
||||||
const output = ` + "`" + `iso.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"
|
const output = ` + "`" + `.1.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"
|
||||||
iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186
|
.1.3.6.1.4.1.35450.15.2.0 = Gauge32: 186
|
||||||
iso.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519
|
.1.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519
|
||||||
iso.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525
|
.1.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525
|
||||||
iso.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068
|
.1.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068
|
||||||
iso.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968
|
.1.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968
|
||||||
iso.3.6.1.4.1.35450.15.7.0 = Gauge32: 186
|
.1.3.6.1.4.1.35450.15.7.0 = Gauge32: 186
|
||||||
iso.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006
|
.1.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006
|
||||||
iso.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867
|
.1.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867
|
||||||
iso.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995
|
.1.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995
|
||||||
iso.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634
|
.1.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634
|
||||||
iso.3.6.1.4.1.35450.15.12.0 = Gauge32: 60
|
.1.3.6.1.4.1.35450.15.12.0 = Gauge32: 60
|
||||||
iso.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904
|
.1.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904
|
||||||
iso.3.6.1.4.1.35450.15.14.0 = Gauge32: 60
|
.1.3.6.1.4.1.35450.15.14.0 = Gauge32: 60
|
||||||
iso.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" +
|
.1.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" +
|
||||||
`
|
`
|
||||||
func main() {
|
func main() {
|
||||||
fmt.Println(output)
|
fmt.Println(output)
|
||||||
|
@ -42,34 +42,34 @@ package main
|
||||||
|
|
||||||
import "fmt"
|
import "fmt"
|
||||||
|
|
||||||
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
|
const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
|
||||||
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
|
.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
|
||||||
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
|
.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
|
||||||
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
|
.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
|
||||||
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
|
.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
|
||||||
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
|
.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
|
||||||
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
|
.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
|
||||||
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
|
.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
|
||||||
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
|
.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
|
||||||
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
|
.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
|
||||||
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
|
.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
|
||||||
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
|
.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
|
||||||
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
|
.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
|
||||||
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
|
.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
|
||||||
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
|
.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
|
||||||
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
|
.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
|
||||||
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
|
.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
|
||||||
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
|
.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
|
||||||
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
|
.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
|
||||||
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
|
.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
|
||||||
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
|
.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
|
||||||
iso.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
|
.1.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
|
||||||
iso.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
|
.1.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
|
||||||
iso.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
|
.1.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
|
||||||
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
|
.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
|
||||||
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
|
.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
|
||||||
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
|
.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
|
||||||
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
|
.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
|
||||||
`
|
`
|
||||||
func main() {
|
func main() {
|
||||||
fmt.Println(output)
|
fmt.Println(output)
|
||||||
|
@ -81,31 +81,31 @@ package main
|
||||||
|
|
||||||
import "fmt"
|
import "fmt"
|
||||||
|
|
||||||
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1"
|
const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1"
|
||||||
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 465
|
.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 465
|
||||||
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335
|
.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335
|
||||||
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415
|
.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415
|
||||||
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011
|
.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011
|
||||||
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855
|
.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855
|
||||||
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 465
|
.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 465
|
||||||
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426
|
.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426
|
||||||
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358
|
.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358
|
||||||
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002
|
.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002
|
||||||
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125
|
.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125
|
||||||
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
|
.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
|
||||||
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 827
|
.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 827
|
||||||
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
|
.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
|
||||||
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
|
.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
|
||||||
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650
|
.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650
|
||||||
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
|
.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
|
||||||
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256
|
.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256
|
||||||
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158
|
.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158
|
||||||
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 34
|
.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 34
|
||||||
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 1
|
.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 1
|
||||||
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 53
|
.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 53
|
||||||
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240
|
.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240
|
||||||
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 53
|
.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 53
|
||||||
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" +
|
.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" +
|
||||||
`
|
`
|
||||||
func main() {
|
func main() {
|
||||||
fmt.Println(output)
|
fmt.Println(output)
|
||||||
|
|
|
@ -514,7 +514,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
||||||
returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
|
returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
|
||||||
}
|
}
|
||||||
|
|
||||||
if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
|
if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics != nil && oldStat.Metrics.TTL != nil {
|
||||||
returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
|
returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
|
||||||
returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
|
returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
|
||||||
}
|
}
|
||||||
|
|
|
@ -10,9 +10,13 @@ The plugin expects messages in the
|
||||||
```toml
|
```toml
|
||||||
# Read metrics from MQTT topic(s)
|
# Read metrics from MQTT topic(s)
|
||||||
[[inputs.mqtt_consumer]]
|
[[inputs.mqtt_consumer]]
|
||||||
servers = ["localhost:1883"]
|
## MQTT broker URLs to be used. The format should be scheme://host:port,
|
||||||
|
## schema can be tcp, ssl, or ws.
|
||||||
|
servers = ["tcp://localhost:1883"]
|
||||||
## MQTT QoS, must be 0, 1, or 2
|
## MQTT QoS, must be 0, 1, or 2
|
||||||
qos = 0
|
qos = 0
|
||||||
|
## Connection timeout for initial connection in seconds
|
||||||
|
connection_timeout = "30s"
|
||||||
|
|
||||||
## Topics to subscribe to
|
## Topics to subscribe to
|
||||||
topics = [
|
topics = [
|
||||||
|
|
|
@ -15,12 +15,16 @@ import (
|
||||||
"github.com/eclipse/paho.mqtt.golang"
|
"github.com/eclipse/paho.mqtt.golang"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// 30 Seconds is the default used by paho.mqtt.golang
|
||||||
|
var defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second}
|
||||||
|
|
||||||
type MQTTConsumer struct {
|
type MQTTConsumer struct {
|
||||||
Servers []string
|
Servers []string
|
||||||
Topics []string
|
Topics []string
|
||||||
Username string
|
Username string
|
||||||
Password string
|
Password string
|
||||||
QoS int `toml:"qos"`
|
QoS int `toml:"qos"`
|
||||||
|
ConnectionTimeout internal.Duration `toml:"connection_timeout"`
|
||||||
|
|
||||||
parser parsers.Parser
|
parser parsers.Parser
|
||||||
|
|
||||||
|
@ -48,13 +52,18 @@ type MQTTConsumer struct {
|
||||||
// keep the accumulator internally:
|
// keep the accumulator internally:
|
||||||
acc telegraf.Accumulator
|
acc telegraf.Accumulator
|
||||||
|
|
||||||
started bool
|
connected bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
servers = ["localhost:1883"]
|
## MQTT broker URLs to be used. The format should be scheme://host:port,
|
||||||
|
## schema can be tcp, ssl, or ws.
|
||||||
|
servers = ["tcp://localhost:1883"]
|
||||||
|
|
||||||
## MQTT QoS, must be 0, 1, or 2
|
## MQTT QoS, must be 0, 1, or 2
|
||||||
qos = 0
|
qos = 0
|
||||||
|
## Connection timeout for initial connection in seconds
|
||||||
|
connection_timeout = "30s"
|
||||||
|
|
||||||
## Topics to subscribe to
|
## Topics to subscribe to
|
||||||
topics = [
|
topics = [
|
||||||
|
@ -103,7 +112,7 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) {
|
||||||
func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
|
func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
|
||||||
m.Lock()
|
m.Lock()
|
||||||
defer m.Unlock()
|
defer m.Unlock()
|
||||||
m.started = false
|
m.connected = false
|
||||||
|
|
||||||
if m.PersistentSession && m.ClientID == "" {
|
if m.PersistentSession && m.ClientID == "" {
|
||||||
return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" +
|
return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" +
|
||||||
|
@ -115,26 +124,40 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
|
||||||
return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS)
|
return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if m.ConnectionTimeout.Duration < 1*time.Second {
|
||||||
|
return fmt.Errorf("MQTT Consumer, invalid connection_timeout value: %s", m.ConnectionTimeout.Duration)
|
||||||
|
}
|
||||||
|
|
||||||
opts, err := m.createOpts()
|
opts, err := m.createOpts()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
m.client = mqtt.NewClient(opts)
|
m.client = mqtt.NewClient(opts)
|
||||||
if token := m.client.Connect(); token.Wait() && token.Error() != nil {
|
|
||||||
return token.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
m.in = make(chan mqtt.Message, 1000)
|
m.in = make(chan mqtt.Message, 1000)
|
||||||
m.done = make(chan struct{})
|
m.done = make(chan struct{})
|
||||||
|
|
||||||
|
m.connect()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MQTTConsumer) connect() error {
|
||||||
|
if token := m.client.Connect(); token.Wait() && token.Error() != nil {
|
||||||
|
err := token.Error()
|
||||||
|
log.Printf("D! MQTT Consumer, connection error - %v", err)
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
go m.receiver()
|
go m.receiver()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MQTTConsumer) onConnect(c mqtt.Client) {
|
func (m *MQTTConsumer) onConnect(c mqtt.Client) {
|
||||||
log.Printf("I! MQTT Client Connected")
|
log.Printf("I! MQTT Client Connected")
|
||||||
if !m.PersistentSession || !m.started {
|
if !m.PersistentSession || !m.connected {
|
||||||
topics := make(map[string]byte)
|
topics := make(map[string]byte)
|
||||||
for _, topic := range m.Topics {
|
for _, topic := range m.Topics {
|
||||||
topics[topic] = byte(m.QoS)
|
topics[topic] = byte(m.QoS)
|
||||||
|
@ -145,7 +168,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
|
||||||
m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
|
m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
|
||||||
strings.Join(m.Topics[:], ","), subscribeToken.Error()))
|
strings.Join(m.Topics[:], ","), subscribeToken.Error()))
|
||||||
}
|
}
|
||||||
m.started = true
|
m.connected = true
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -186,18 +209,27 @@ func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) {
|
||||||
func (m *MQTTConsumer) Stop() {
|
func (m *MQTTConsumer) Stop() {
|
||||||
m.Lock()
|
m.Lock()
|
||||||
defer m.Unlock()
|
defer m.Unlock()
|
||||||
|
|
||||||
|
if m.connected {
|
||||||
close(m.done)
|
close(m.done)
|
||||||
m.client.Disconnect(200)
|
m.client.Disconnect(200)
|
||||||
m.started = false
|
m.connected = false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
|
func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if !m.connected {
|
||||||
|
m.connect()
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
|
func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
|
||||||
opts := mqtt.NewClientOptions()
|
opts := mqtt.NewClientOptions()
|
||||||
|
|
||||||
|
opts.ConnectTimeout = m.ConnectionTimeout.Duration
|
||||||
|
|
||||||
if m.ClientID == "" {
|
if m.ClientID == "" {
|
||||||
opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
|
opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
|
||||||
} else {
|
} else {
|
||||||
|
@ -210,9 +242,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
scheme := "tcp"
|
|
||||||
if tlsCfg != nil {
|
if tlsCfg != nil {
|
||||||
scheme = "ssl"
|
|
||||||
opts.SetTLSConfig(tlsCfg)
|
opts.SetTLSConfig(tlsCfg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -228,8 +258,17 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
|
||||||
if len(m.Servers) == 0 {
|
if len(m.Servers) == 0 {
|
||||||
return opts, fmt.Errorf("could not get host infomations")
|
return opts, fmt.Errorf("could not get host infomations")
|
||||||
}
|
}
|
||||||
for _, host := range m.Servers {
|
|
||||||
server := fmt.Sprintf("%s://%s", scheme, host)
|
for _, server := range m.Servers {
|
||||||
|
// Preserve support for host:port style servers; deprecated in Telegraf 1.4.4
|
||||||
|
if !strings.Contains(server, "://") {
|
||||||
|
log.Printf("W! mqtt_consumer server %q should be updated to use `scheme://host:port` format", server)
|
||||||
|
if tlsCfg == nil {
|
||||||
|
server = "tcp://" + server
|
||||||
|
} else {
|
||||||
|
server = "ssl://" + server
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
opts.AddBroker(server)
|
opts.AddBroker(server)
|
||||||
}
|
}
|
||||||
|
@ -238,11 +277,14 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
|
||||||
opts.SetCleanSession(!m.PersistentSession)
|
opts.SetCleanSession(!m.PersistentSession)
|
||||||
opts.SetOnConnectHandler(m.onConnect)
|
opts.SetOnConnectHandler(m.onConnect)
|
||||||
opts.SetConnectionLostHandler(m.onConnectionLost)
|
opts.SetConnectionLostHandler(m.onConnectionLost)
|
||||||
|
|
||||||
return opts, nil
|
return opts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
inputs.Add("mqtt_consumer", func() telegraf.Input {
|
inputs.Add("mqtt_consumer", func() telegraf.Input {
|
||||||
return &MQTTConsumer{}
|
return &MQTTConsumer{
|
||||||
|
ConnectionTimeout: defaultConnectionTimeout,
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -26,7 +26,9 @@ func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) {
|
||||||
Servers: []string{"localhost:1883"},
|
Servers: []string{"localhost:1883"},
|
||||||
in: in,
|
in: in,
|
||||||
done: make(chan struct{}),
|
done: make(chan struct{}),
|
||||||
|
connected: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
return n, in
|
return n, in
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -131,6 +133,7 @@ func TestRunParserAndGather(t *testing.T) {
|
||||||
n, in := newTestMQTTConsumer()
|
n, in := newTestMQTTConsumer()
|
||||||
acc := testutil.Accumulator{}
|
acc := testutil.Accumulator{}
|
||||||
n.acc = &acc
|
n.acc = &acc
|
||||||
|
|
||||||
defer close(n.done)
|
defer close(n.done)
|
||||||
|
|
||||||
n.parser, _ = parsers.NewInfluxParser()
|
n.parser, _ = parsers.NewInfluxParser()
|
||||||
|
|
|
@ -25,6 +25,7 @@ package nsq
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -101,28 +102,42 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error {
|
||||||
return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status)
|
return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
s := &NSQStats{}
|
body, err := ioutil.ReadAll(r.Body)
|
||||||
err = json.NewDecoder(r.Body).Decode(s)
|
if err != nil {
|
||||||
|
return fmt.Errorf(`Error reading body: %s`, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
data := &NSQStatsData{}
|
||||||
|
err = json.Unmarshal(body, data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf(`Error parsing response: %s`, err)
|
return fmt.Errorf(`Error parsing response: %s`, err)
|
||||||
}
|
}
|
||||||
|
// Data was not parsed correctly attempt to use old format.
|
||||||
|
if len(data.Version) < 1 {
|
||||||
|
wrapper := &NSQStats{}
|
||||||
|
err = json.Unmarshal(body, wrapper)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf(`Error parsing response: %s`, err)
|
||||||
|
}
|
||||||
|
data = &wrapper.Data
|
||||||
|
}
|
||||||
|
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
`server_host`: u.Host,
|
`server_host`: u.Host,
|
||||||
`server_version`: s.Data.Version,
|
`server_version`: data.Version,
|
||||||
}
|
}
|
||||||
|
|
||||||
fields := make(map[string]interface{})
|
fields := make(map[string]interface{})
|
||||||
if s.Data.Health == `OK` {
|
if data.Health == `OK` {
|
||||||
fields["server_count"] = int64(1)
|
fields["server_count"] = int64(1)
|
||||||
} else {
|
} else {
|
||||||
fields["server_count"] = int64(0)
|
fields["server_count"] = int64(0)
|
||||||
}
|
}
|
||||||
fields["topic_count"] = int64(len(s.Data.Topics))
|
fields["topic_count"] = int64(len(data.Topics))
|
||||||
|
|
||||||
acc.AddFields("nsq_server", fields, tags)
|
acc.AddFields("nsq_server", fields, tags)
|
||||||
for _, t := range s.Data.Topics {
|
for _, t := range data.Topics {
|
||||||
topicStats(t, acc, u.Host, s.Data.Version)
|
topicStats(t, acc, u.Host, data.Version)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -189,7 +204,6 @@ func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic,
|
||||||
"server_version": version,
|
"server_version": version,
|
||||||
"topic": topic,
|
"topic": topic,
|
||||||
"channel": channel,
|
"channel": channel,
|
||||||
"client_name": c.Name,
|
|
||||||
"client_id": c.ID,
|
"client_id": c.ID,
|
||||||
"client_hostname": c.Hostname,
|
"client_hostname": c.Hostname,
|
||||||
"client_version": c.Version,
|
"client_version": c.Version,
|
||||||
|
@ -199,6 +213,9 @@ func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic,
|
||||||
"client_snappy": strconv.FormatBool(c.Snappy),
|
"client_snappy": strconv.FormatBool(c.Snappy),
|
||||||
"client_deflate": strconv.FormatBool(c.Deflate),
|
"client_deflate": strconv.FormatBool(c.Deflate),
|
||||||
}
|
}
|
||||||
|
if len(c.Name) > 0 {
|
||||||
|
tags["client_name"] = c.Name
|
||||||
|
}
|
||||||
|
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"ready_count": c.ReadyCount,
|
"ready_count": c.ReadyCount,
|
||||||
|
@ -248,7 +265,7 @@ type ChannelStats struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type ClientStats struct {
|
type ClientStats struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"` // DEPRECATED 1.x+, still here as the structs are currently being shared for parsing v3.x and 1.x
|
||||||
ID string `json:"client_id"`
|
ID string `json:"client_id"`
|
||||||
Hostname string `json:"hostname"`
|
Hostname string `json:"hostname"`
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
|
|
|
@ -12,10 +12,267 @@ import (
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNSQStats(t *testing.T) {
|
func TestNSQStatsV1(t *testing.T) {
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
fmt.Fprintln(w, response)
|
fmt.Fprintln(w, responseV1)
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
n := &NSQ{
|
||||||
|
Endpoints: []string{ts.URL},
|
||||||
|
}
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := acc.GatherError(n.Gather)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
u, err := url.Parse(ts.URL)
|
||||||
|
require.NoError(t, err)
|
||||||
|
host := u.Host
|
||||||
|
|
||||||
|
// actually validate the tests
|
||||||
|
tests := []struct {
|
||||||
|
m string
|
||||||
|
f map[string]interface{}
|
||||||
|
g map[string]string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"nsq_server",
|
||||||
|
map[string]interface{}{
|
||||||
|
"server_count": int64(1),
|
||||||
|
"topic_count": int64(2),
|
||||||
|
},
|
||||||
|
map[string]string{
|
||||||
|
"server_host": host,
|
||||||
|
"server_version": "1.0.0-compat",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nsq_topic",
|
||||||
|
map[string]interface{}{
|
||||||
|
"depth": int64(12),
|
||||||
|
"backend_depth": int64(13),
|
||||||
|
"message_count": int64(14),
|
||||||
|
"channel_count": int64(1),
|
||||||
|
},
|
||||||
|
map[string]string{
|
||||||
|
"server_host": host,
|
||||||
|
"server_version": "1.0.0-compat",
|
||||||
|
"topic": "t1"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nsq_channel",
|
||||||
|
map[string]interface{}{
|
||||||
|
"depth": int64(0),
|
||||||
|
"backend_depth": int64(1),
|
||||||
|
"inflight_count": int64(2),
|
||||||
|
"deferred_count": int64(3),
|
||||||
|
"message_count": int64(4),
|
||||||
|
"requeue_count": int64(5),
|
||||||
|
"timeout_count": int64(6),
|
||||||
|
"client_count": int64(1),
|
||||||
|
},
|
||||||
|
map[string]string{
|
||||||
|
"server_host": host,
|
||||||
|
"server_version": "1.0.0-compat",
|
||||||
|
"topic": "t1",
|
||||||
|
"channel": "c1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nsq_client",
|
||||||
|
map[string]interface{}{
|
||||||
|
"ready_count": int64(200),
|
||||||
|
"inflight_count": int64(7),
|
||||||
|
"message_count": int64(8),
|
||||||
|
"finish_count": int64(9),
|
||||||
|
"requeue_count": int64(10),
|
||||||
|
},
|
||||||
|
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
|
||||||
|
"topic": "t1", "channel": "c1",
|
||||||
|
"client_id": "373a715cd990", "client_hostname": "373a715cd990",
|
||||||
|
"client_version": "V2", "client_address": "172.17.0.11:35560",
|
||||||
|
"client_tls": "false", "client_snappy": "false",
|
||||||
|
"client_deflate": "false",
|
||||||
|
"client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nsq_topic",
|
||||||
|
map[string]interface{}{
|
||||||
|
"depth": int64(28),
|
||||||
|
"backend_depth": int64(29),
|
||||||
|
"message_count": int64(30),
|
||||||
|
"channel_count": int64(1),
|
||||||
|
},
|
||||||
|
map[string]string{
|
||||||
|
"server_host": host,
|
||||||
|
"server_version": "1.0.0-compat",
|
||||||
|
"topic": "t2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nsq_channel",
|
||||||
|
map[string]interface{}{
|
||||||
|
"depth": int64(15),
|
||||||
|
"backend_depth": int64(16),
|
||||||
|
"inflight_count": int64(17),
|
||||||
|
"deferred_count": int64(18),
|
||||||
|
"message_count": int64(19),
|
||||||
|
"requeue_count": int64(20),
|
||||||
|
"timeout_count": int64(21),
|
||||||
|
"client_count": int64(1),
|
||||||
|
},
|
||||||
|
map[string]string{
|
||||||
|
"server_host": host,
|
||||||
|
"server_version": "1.0.0-compat",
|
||||||
|
"topic": "t2",
|
||||||
|
"channel": "c2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"nsq_client",
|
||||||
|
map[string]interface{}{
|
||||||
|
"ready_count": int64(22),
|
||||||
|
"inflight_count": int64(23),
|
||||||
|
"message_count": int64(24),
|
||||||
|
"finish_count": int64(25),
|
||||||
|
"requeue_count": int64(26),
|
||||||
|
},
|
||||||
|
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
|
||||||
|
"topic": "t2", "channel": "c2",
|
||||||
|
"client_id": "377569bd462b", "client_hostname": "377569bd462b",
|
||||||
|
"client_version": "V2", "client_address": "172.17.0.8:48145",
|
||||||
|
"client_user_agent": "go-nsq/1.0.5", "client_tls": "true",
|
||||||
|
"client_snappy": "true", "client_deflate": "true"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
acc.AssertContainsTaggedFields(t, test.m, test.f, test.g)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// v1 version of localhost/stats?format=json reesponse body
|
||||||
|
var responseV1 = `
|
||||||
|
{
|
||||||
|
"version": "1.0.0-compat",
|
||||||
|
"health": "OK",
|
||||||
|
"start_time": 1452021674,
|
||||||
|
"topics": [
|
||||||
|
{
|
||||||
|
"topic_name": "t1",
|
||||||
|
"channels": [
|
||||||
|
{
|
||||||
|
"channel_name": "c1",
|
||||||
|
"depth": 0,
|
||||||
|
"backend_depth": 1,
|
||||||
|
"in_flight_count": 2,
|
||||||
|
"deferred_count": 3,
|
||||||
|
"message_count": 4,
|
||||||
|
"requeue_count": 5,
|
||||||
|
"timeout_count": 6,
|
||||||
|
"clients": [
|
||||||
|
{
|
||||||
|
"client_id": "373a715cd990",
|
||||||
|
"hostname": "373a715cd990",
|
||||||
|
"version": "V2",
|
||||||
|
"remote_address": "172.17.0.11:35560",
|
||||||
|
"state": 3,
|
||||||
|
"ready_count": 200,
|
||||||
|
"in_flight_count": 7,
|
||||||
|
"message_count": 8,
|
||||||
|
"finish_count": 9,
|
||||||
|
"requeue_count": 10,
|
||||||
|
"connect_ts": 1452021675,
|
||||||
|
"sample_rate": 11,
|
||||||
|
"deflate": false,
|
||||||
|
"snappy": false,
|
||||||
|
"user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5",
|
||||||
|
"tls": false,
|
||||||
|
"tls_cipher_suite": "",
|
||||||
|
"tls_version": "",
|
||||||
|
"tls_negotiated_protocol": "",
|
||||||
|
"tls_negotiated_protocol_is_mutual": false
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"paused": false,
|
||||||
|
"e2e_processing_latency": {
|
||||||
|
"count": 0,
|
||||||
|
"percentiles": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"depth": 12,
|
||||||
|
"backend_depth": 13,
|
||||||
|
"message_count": 14,
|
||||||
|
"paused": false,
|
||||||
|
"e2e_processing_latency": {
|
||||||
|
"count": 0,
|
||||||
|
"percentiles": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"topic_name": "t2",
|
||||||
|
"channels": [
|
||||||
|
{
|
||||||
|
"channel_name": "c2",
|
||||||
|
"depth": 15,
|
||||||
|
"backend_depth": 16,
|
||||||
|
"in_flight_count": 17,
|
||||||
|
"deferred_count": 18,
|
||||||
|
"message_count": 19,
|
||||||
|
"requeue_count": 20,
|
||||||
|
"timeout_count": 21,
|
||||||
|
"clients": [
|
||||||
|
{
|
||||||
|
"client_id": "377569bd462b",
|
||||||
|
"hostname": "377569bd462b",
|
||||||
|
"version": "V2",
|
||||||
|
"remote_address": "172.17.0.8:48145",
|
||||||
|
"state": 3,
|
||||||
|
"ready_count": 22,
|
||||||
|
"in_flight_count": 23,
|
||||||
|
"message_count": 24,
|
||||||
|
"finish_count": 25,
|
||||||
|
"requeue_count": 26,
|
||||||
|
"connect_ts": 1452021678,
|
||||||
|
"sample_rate": 27,
|
||||||
|
"deflate": true,
|
||||||
|
"snappy": true,
|
||||||
|
"user_agent": "go-nsq\/1.0.5",
|
||||||
|
"tls": true,
|
||||||
|
"tls_cipher_suite": "",
|
||||||
|
"tls_version": "",
|
||||||
|
"tls_negotiated_protocol": "",
|
||||||
|
"tls_negotiated_protocol_is_mutual": false
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"paused": false,
|
||||||
|
"e2e_processing_latency": {
|
||||||
|
"count": 0,
|
||||||
|
"percentiles": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"depth": 28,
|
||||||
|
"backend_depth": 29,
|
||||||
|
"message_count": 30,
|
||||||
|
"paused": false,
|
||||||
|
"e2e_processing_latency": {
|
||||||
|
"count": 0,
|
||||||
|
"percentiles": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
// TestNSQStatsPreV1 is for backwards compatibility with nsq versions < 1.0
|
||||||
|
func TestNSQStatsPreV1(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprintln(w, responsePreV1)
|
||||||
}))
|
}))
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
|
@ -152,7 +409,7 @@ func TestNSQStats(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var response = `
|
var responsePreV1 = `
|
||||||
{
|
{
|
||||||
"status_code": 200,
|
"status_code": 200,
|
||||||
"status_txt": "OK",
|
"status_txt": "OK",
|
||||||
|
|
|
@ -69,7 +69,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
|
||||||
// Due to problems with a parsing, we have to use regexp expression in order
|
// Due to problems with a parsing, we have to use regexp expression in order
|
||||||
// to remove string that starts from '(' and ends with space
|
// to remove string that starts from '(' and ends with space
|
||||||
// see: https://github.com/influxdata/telegraf/issues/2386
|
// see: https://github.com/influxdata/telegraf/issues/2386
|
||||||
reg, err := regexp.Compile("\\([\\S]*")
|
reg, err := regexp.Compile("\\s+\\([\\S]*")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -260,6 +260,57 @@ func TestParserNTPQ(t *testing.T) {
|
||||||
}
|
}
|
||||||
acc := testutil.Accumulator{}
|
acc := testutil.Accumulator{}
|
||||||
assert.NoError(t, acc.GatherError(n.Gather))
|
assert.NoError(t, acc.GatherError(n.Gather))
|
||||||
|
|
||||||
|
fields := map[string]interface{}{
|
||||||
|
"poll": int64(64),
|
||||||
|
"when": int64(60),
|
||||||
|
"reach": int64(377),
|
||||||
|
"delay": float64(0.0),
|
||||||
|
"offset": float64(0.045),
|
||||||
|
"jitter": float64(1.012),
|
||||||
|
}
|
||||||
|
tags := map[string]string{
|
||||||
|
"remote": "SHM(0)",
|
||||||
|
"state_prefix": "*",
|
||||||
|
"refid": ".PPS.",
|
||||||
|
"stratum": "1",
|
||||||
|
"type": "u",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
|
||||||
|
|
||||||
|
fields = map[string]interface{}{
|
||||||
|
"poll": int64(128),
|
||||||
|
"when": int64(121),
|
||||||
|
"reach": int64(377),
|
||||||
|
"delay": float64(0.0),
|
||||||
|
"offset": float64(10.105),
|
||||||
|
"jitter": float64(2.012),
|
||||||
|
}
|
||||||
|
tags = map[string]string{
|
||||||
|
"remote": "SHM(1)",
|
||||||
|
"state_prefix": "-",
|
||||||
|
"refid": ".GPS.",
|
||||||
|
"stratum": "1",
|
||||||
|
"type": "u",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
|
||||||
|
|
||||||
|
fields = map[string]interface{}{
|
||||||
|
"poll": int64(1024),
|
||||||
|
"when": int64(10),
|
||||||
|
"reach": int64(377),
|
||||||
|
"delay": float64(1.748),
|
||||||
|
"offset": float64(0.373),
|
||||||
|
"jitter": float64(0.101),
|
||||||
|
}
|
||||||
|
tags = map[string]string{
|
||||||
|
"remote": "37.58.57.238",
|
||||||
|
"state_prefix": "+",
|
||||||
|
"refid": "192.53.103.103",
|
||||||
|
"stratum": "2",
|
||||||
|
"type": "u",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiNTPQ(t *testing.T) {
|
func TestMultiNTPQ(t *testing.T) {
|
||||||
|
@ -480,7 +531,9 @@ var multiNTPQ = ` remote refid st t when poll reach delay
|
||||||
`
|
`
|
||||||
var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter
|
var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter
|
||||||
==============================================================================
|
==============================================================================
|
||||||
|
*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012
|
||||||
+37.58.57.238 (d 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
+37.58.57.238 (d 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
||||||
+37.58.57.238 (domain) 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
+37.58.57.238 (domain) 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
||||||
+37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
+37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
||||||
|
-SHM(1) .GPS. 1 u 121 128 377 0.000 10.105 2.012
|
||||||
`
|
`
|
||||||
|
|
|
@ -2,11 +2,12 @@ package sqlserver
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
|
||||||
// go-mssqldb initialization
|
// go-mssqldb initialization
|
||||||
_ "github.com/zensqlmonitor/go-mssqldb"
|
_ "github.com/zensqlmonitor/go-mssqldb"
|
||||||
)
|
)
|
||||||
|
@ -244,10 +245,10 @@ UNION ALL
|
||||||
SELECT 'Average pending disk IO', AveragePendingDiskIOCount = (SELECT AVG(pending_disk_io_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
|
SELECT 'Average pending disk IO', AveragePendingDiskIOCount = (SELECT AVG(pending_disk_io_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
|
||||||
UNION ALL
|
UNION ALL
|
||||||
SELECT 'Buffer pool rate (bytes/sec)', BufferPoolRate = (1.0*cntr_value * 8 * 1024) /
|
SELECT 'Buffer pool rate (bytes/sec)', BufferPoolRate = (1.0*cntr_value * 8 * 1024) /
|
||||||
(SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND lower(counter_name) = 'Page life expectancy')
|
(SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND counter_name = 'Page life expectancy')
|
||||||
FROM sys.dm_os_performance_counters
|
FROM sys.dm_os_performance_counters
|
||||||
WHERE object_name like '%Buffer Manager%'
|
WHERE object_name like '%Buffer Manager%'
|
||||||
AND counter_name = 'database pages'
|
AND counter_name = 'Database pages'
|
||||||
UNION ALL
|
UNION ALL
|
||||||
SELECT 'Memory grant pending', MemoryGrantPending = cntr_value
|
SELECT 'Memory grant pending', MemoryGrantPending = cntr_value
|
||||||
FROM sys.dm_os_performance_counters
|
FROM sys.dm_os_performance_counters
|
||||||
|
@ -1022,7 +1023,7 @@ CREATE TABLE #PCounters
|
||||||
Primary Key(object_name, counter_name, instance_name)
|
Primary Key(object_name, counter_name, instance_name)
|
||||||
);
|
);
|
||||||
INSERT #PCounters
|
INSERT #PCounters
|
||||||
SELECT RTrim(spi.object_name) object_name
|
SELECT DISTINCT RTrim(spi.object_name) object_name
|
||||||
, RTrim(spi.counter_name) counter_name
|
, RTrim(spi.counter_name) counter_name
|
||||||
, RTrim(spi.instance_name) instance_name
|
, RTrim(spi.instance_name) instance_name
|
||||||
, spi.cntr_value
|
, spi.cntr_value
|
||||||
|
@ -1044,7 +1045,7 @@ CREATE TABLE #CCounters
|
||||||
Primary Key(object_name, counter_name, instance_name)
|
Primary Key(object_name, counter_name, instance_name)
|
||||||
);
|
);
|
||||||
INSERT #CCounters
|
INSERT #CCounters
|
||||||
SELECT RTrim(spi.object_name) object_name
|
SELECT DISTINCT RTrim(spi.object_name) object_name
|
||||||
, RTrim(spi.counter_name) counter_name
|
, RTrim(spi.counter_name) counter_name
|
||||||
, RTrim(spi.instance_name) instance_name
|
, RTrim(spi.instance_name) instance_name
|
||||||
, spi.cntr_value
|
, spi.cntr_value
|
||||||
|
@ -1436,16 +1437,16 @@ SELECT
|
||||||
, type = 'Wait stats'
|
, type = 'Wait stats'
|
||||||
---- values
|
---- values
|
||||||
, [I/O] = SUM([I/O])
|
, [I/O] = SUM([I/O])
|
||||||
, [Latch] = SUM([Latch])
|
, [Latch] = SUM([LATCH])
|
||||||
, [Lock] = SUM([Lock])
|
, [Lock] = SUM([LOCK])
|
||||||
, [Network] = SUM([Network])
|
, [Network] = SUM([NETWORK])
|
||||||
, [Service broker] = SUM([Service broker])
|
, [Service broker] = SUM([SERVICE BROKER])
|
||||||
, [Memory] = SUM([Memory])
|
, [Memory] = SUM([MEMORY])
|
||||||
, [Buffer] = SUM([Buffer])
|
, [Buffer] = SUM([BUFFER])
|
||||||
, [CLR] = SUM([CLR])
|
, [CLR] = SUM([CLR])
|
||||||
, [SQLOS] = SUM([SQLOS])
|
, [SQLOS] = SUM([SQLOS])
|
||||||
, [XEvent] = SUM([XEvent])
|
, [XEvent] = SUM([XEVENT])
|
||||||
, [Other] = SUM([Other])
|
, [Other] = SUM([OTHER])
|
||||||
, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
|
, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
|
||||||
FROM
|
FROM
|
||||||
(
|
(
|
||||||
|
@ -1479,16 +1480,16 @@ SELECT
|
||||||
, type = 'Wait stats'
|
, type = 'Wait stats'
|
||||||
---- values
|
---- values
|
||||||
, [I/O] = SUM([I/O])
|
, [I/O] = SUM([I/O])
|
||||||
, [Latch] = SUM([Latch])
|
, [Latch] = SUM([LATCH])
|
||||||
, [Lock] = SUM([Lock])
|
, [Lock] = SUM([LOCK])
|
||||||
, [Network] = SUM([Network])
|
, [Network] = SUM([NETWORK])
|
||||||
, [Service broker] = SUM([Service broker])
|
, [Service broker] = SUM([SERVICE BROKER])
|
||||||
, [Memory] = SUM([Memory])
|
, [Memory] = SUM([MEMORY])
|
||||||
, [Buffer] = SUM([Buffer])
|
, [Buffer] = SUM([BUFFER])
|
||||||
, [CLR] = SUM([CLR])
|
, [CLR] = SUM([CLR])
|
||||||
, [SQLOS] = SUM([SQLOS])
|
, [SQLOS] = SUM([SQLOS])
|
||||||
, [XEvent] = SUM([XEvent])
|
, [XEvent] = SUM([XEVENT])
|
||||||
, [Other] = SUM([Other])
|
, [Other] = SUM([OTHER])
|
||||||
, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
|
, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
|
||||||
FROM
|
FROM
|
||||||
(
|
(
|
||||||
|
|
|
@ -101,8 +101,15 @@ func (rs *RunningStats) Percentile(n int) float64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
i := int(float64(len(rs.perc)) * float64(n) / float64(100))
|
i := int(float64(len(rs.perc)) * float64(n) / float64(100))
|
||||||
if i < 0 {
|
return rs.perc[clamp(i, 0, len(rs.perc)-1)]
|
||||||
i = 0
|
|
||||||
}
|
}
|
||||||
return rs.perc[i]
|
|
||||||
|
func clamp(i int, min int, max int) int {
|
||||||
|
if i < min {
|
||||||
|
return min
|
||||||
|
}
|
||||||
|
if i > max {
|
||||||
|
return max
|
||||||
|
}
|
||||||
|
return i
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,12 +23,18 @@ func TestRunningStats_Single(t *testing.T) {
|
||||||
if rs.Lower() != 10.1 {
|
if rs.Lower() != 10.1 {
|
||||||
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
|
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
|
||||||
}
|
}
|
||||||
|
if rs.Percentile(100) != 10.1 {
|
||||||
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
|
||||||
|
}
|
||||||
if rs.Percentile(90) != 10.1 {
|
if rs.Percentile(90) != 10.1 {
|
||||||
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
|
||||||
}
|
}
|
||||||
if rs.Percentile(50) != 10.1 {
|
if rs.Percentile(50) != 10.1 {
|
||||||
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
|
||||||
}
|
}
|
||||||
|
if rs.Percentile(0) != 10.1 {
|
||||||
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0))
|
||||||
|
}
|
||||||
if rs.Count() != 1 {
|
if rs.Count() != 1 {
|
||||||
t.Errorf("Expected %v, got %v", 1, rs.Count())
|
t.Errorf("Expected %v, got %v", 1, rs.Count())
|
||||||
}
|
}
|
||||||
|
@ -58,12 +64,18 @@ func TestRunningStats_Duplicate(t *testing.T) {
|
||||||
if rs.Lower() != 10.1 {
|
if rs.Lower() != 10.1 {
|
||||||
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
|
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
|
||||||
}
|
}
|
||||||
|
if rs.Percentile(100) != 10.1 {
|
||||||
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
|
||||||
|
}
|
||||||
if rs.Percentile(90) != 10.1 {
|
if rs.Percentile(90) != 10.1 {
|
||||||
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
|
||||||
}
|
}
|
||||||
if rs.Percentile(50) != 10.1 {
|
if rs.Percentile(50) != 10.1 {
|
||||||
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
|
||||||
}
|
}
|
||||||
|
if rs.Percentile(0) != 10.1 {
|
||||||
|
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0))
|
||||||
|
}
|
||||||
if rs.Count() != 4 {
|
if rs.Count() != 4 {
|
||||||
t.Errorf("Expected %v, got %v", 4, rs.Count())
|
t.Errorf("Expected %v, got %v", 4, rs.Count())
|
||||||
}
|
}
|
||||||
|
@ -93,12 +105,18 @@ func TestRunningStats(t *testing.T) {
|
||||||
if rs.Lower() != 5 {
|
if rs.Lower() != 5 {
|
||||||
t.Errorf("Expected %v, got %v", 5, rs.Lower())
|
t.Errorf("Expected %v, got %v", 5, rs.Lower())
|
||||||
}
|
}
|
||||||
|
if rs.Percentile(100) != 45 {
|
||||||
|
t.Errorf("Expected %v, got %v", 45, rs.Percentile(100))
|
||||||
|
}
|
||||||
if rs.Percentile(90) != 32 {
|
if rs.Percentile(90) != 32 {
|
||||||
t.Errorf("Expected %v, got %v", 32, rs.Percentile(90))
|
t.Errorf("Expected %v, got %v", 32, rs.Percentile(90))
|
||||||
}
|
}
|
||||||
if rs.Percentile(50) != 11 {
|
if rs.Percentile(50) != 11 {
|
||||||
t.Errorf("Expected %v, got %v", 11, rs.Percentile(50))
|
t.Errorf("Expected %v, got %v", 11, rs.Percentile(50))
|
||||||
}
|
}
|
||||||
|
if rs.Percentile(0) != 5 {
|
||||||
|
t.Errorf("Expected %v, got %v", 5, rs.Percentile(0))
|
||||||
|
}
|
||||||
if rs.Count() != 16 {
|
if rs.Count() != 16 {
|
||||||
t.Errorf("Expected %v, got %v", 4, rs.Count())
|
t.Errorf("Expected %v, got %v", 4, rs.Count())
|
||||||
}
|
}
|
||||||
|
|
|
@ -251,14 +251,14 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, metric := range s.gauges {
|
for _, metric := range s.gauges {
|
||||||
acc.AddFields(metric.name, metric.fields, metric.tags, now)
|
acc.AddGauge(metric.name, metric.fields, metric.tags, now)
|
||||||
}
|
}
|
||||||
if s.DeleteGauges {
|
if s.DeleteGauges {
|
||||||
s.gauges = make(map[string]cachedgauge)
|
s.gauges = make(map[string]cachedgauge)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, metric := range s.counters {
|
for _, metric := range s.counters {
|
||||||
acc.AddFields(metric.name, metric.fields, metric.tags, now)
|
acc.AddCounter(metric.name, metric.fields, metric.tags, now)
|
||||||
}
|
}
|
||||||
if s.DeleteCounters {
|
if s.DeleteCounters {
|
||||||
s.counters = make(map[string]cachedcounter)
|
s.counters = make(map[string]cachedcounter)
|
||||||
|
|
|
@ -11,7 +11,7 @@ import (
|
||||||
|
|
||||||
type CPUStats struct {
|
type CPUStats struct {
|
||||||
ps PS
|
ps PS
|
||||||
lastStats []cpu.TimesStat
|
lastStats map[string]cpu.TimesStat
|
||||||
|
|
||||||
PerCPU bool `toml:"percpu"`
|
PerCPU bool `toml:"percpu"`
|
||||||
TotalCPU bool `toml:"totalcpu"`
|
TotalCPU bool `toml:"totalcpu"`
|
||||||
|
@ -53,7 +53,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
|
||||||
}
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
for i, cts := range times {
|
for _, cts := range times {
|
||||||
tags := map[string]string{
|
tags := map[string]string{
|
||||||
"cpu": cts.CPU,
|
"cpu": cts.CPU,
|
||||||
}
|
}
|
||||||
|
@ -86,14 +86,18 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
|
||||||
// If it's the 1st gather, can't get CPU Usage stats yet
|
// If it's the 1st gather, can't get CPU Usage stats yet
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
lastCts := s.lastStats[i]
|
|
||||||
|
lastCts, ok := s.lastStats[cts.CPU]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
lastTotal := totalCpuTime(lastCts)
|
lastTotal := totalCpuTime(lastCts)
|
||||||
lastActive := activeCpuTime(lastCts)
|
lastActive := activeCpuTime(lastCts)
|
||||||
totalDelta := total - lastTotal
|
totalDelta := total - lastTotal
|
||||||
|
|
||||||
if totalDelta < 0 {
|
if totalDelta < 0 {
|
||||||
s.lastStats = times
|
err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time")
|
||||||
return fmt.Errorf("Error: current total CPU time is less than previous total CPU time")
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
if totalDelta == 0 {
|
if totalDelta == 0 {
|
||||||
|
@ -118,9 +122,12 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
|
||||||
acc.AddGauge("cpu", fieldsG, tags, now)
|
acc.AddGauge("cpu", fieldsG, tags, now)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.lastStats = times
|
s.lastStats = make(map[string]cpu.TimesStat)
|
||||||
|
for _, cts := range times {
|
||||||
|
s.lastStats[cts.CPU] = cts
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func totalCpuTime(t cpu.TimesStat) float64 {
|
func totalCpuTime(t cpu.TimesStat) float64 {
|
||||||
|
|
|
@ -149,3 +149,107 @@ func assertContainsTaggedFloat(
|
||||||
measurement, delta, expectedValue, actualValue)
|
measurement, delta, expectedValue, actualValue)
|
||||||
assert.Fail(t, msg)
|
assert.Fail(t, msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestCPUCountChange tests that no errors are encountered if the number of
|
||||||
|
// CPUs increases as reported with LXC.
|
||||||
|
func TestCPUCountIncrease(t *testing.T) {
|
||||||
|
var mps MockPS
|
||||||
|
var mps2 MockPS
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
var err error
|
||||||
|
|
||||||
|
cs := NewCPUStats(&mps)
|
||||||
|
|
||||||
|
mps.On("CPUTimes").Return(
|
||||||
|
[]cpu.TimesStat{
|
||||||
|
cpu.TimesStat{
|
||||||
|
CPU: "cpu0",
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
|
||||||
|
err = cs.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
mps2.On("CPUTimes").Return(
|
||||||
|
[]cpu.TimesStat{
|
||||||
|
cpu.TimesStat{
|
||||||
|
CPU: "cpu0",
|
||||||
|
},
|
||||||
|
cpu.TimesStat{
|
||||||
|
CPU: "cpu1",
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
cs.ps = &mps2
|
||||||
|
|
||||||
|
err = cs.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCPUTimesDecrease tests that telegraf continue to works after
|
||||||
|
// CPU times decrease, which seems to occur when Linux system is suspended.
|
||||||
|
func TestCPUTimesDecrease(t *testing.T) {
|
||||||
|
var mps MockPS
|
||||||
|
defer mps.AssertExpectations(t)
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
|
cts := cpu.TimesStat{
|
||||||
|
CPU: "cpu0",
|
||||||
|
User: 18,
|
||||||
|
Idle: 80,
|
||||||
|
Iowait: 2,
|
||||||
|
}
|
||||||
|
|
||||||
|
cts2 := cpu.TimesStat{
|
||||||
|
CPU: "cpu0",
|
||||||
|
User: 38, // increased by 20
|
||||||
|
Idle: 40, // decreased by 40
|
||||||
|
Iowait: 1, // decreased by 1
|
||||||
|
}
|
||||||
|
|
||||||
|
cts3 := cpu.TimesStat{
|
||||||
|
CPU: "cpu0",
|
||||||
|
User: 56, // increased by 18
|
||||||
|
Idle: 120, // increased by 80
|
||||||
|
Iowait: 3, // increased by 2
|
||||||
|
}
|
||||||
|
|
||||||
|
mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil)
|
||||||
|
|
||||||
|
cs := NewCPUStats(&mps)
|
||||||
|
|
||||||
|
cputags := map[string]string{
|
||||||
|
"cpu": "cpu0",
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cs.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Computed values are checked with delta > 0 becasue of floating point arithmatic
|
||||||
|
// imprecision
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags)
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags)
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags)
|
||||||
|
|
||||||
|
mps2 := MockPS{}
|
||||||
|
mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil)
|
||||||
|
cs.ps = &mps2
|
||||||
|
|
||||||
|
// CPU times decreased. An error should be raised
|
||||||
|
err = cs.Gather(&acc)
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
mps3 := MockPS{}
|
||||||
|
mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil)
|
||||||
|
cs.ps = &mps3
|
||||||
|
|
||||||
|
err = cs.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 56, 0, cputags)
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 120, 0, cputags)
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 3, 0, cputags)
|
||||||
|
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 18, 0.0005, cputags)
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 80, 0.0005, cputags)
|
||||||
|
assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 2, 0.0005, cputags)
|
||||||
|
}
|
||||||
|
|
|
@ -2,6 +2,7 @@ package system
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
@ -164,14 +165,13 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error {
|
||||||
var varRegex = regexp.MustCompile(`\$(?:\w+|\{\w+\})`)
|
var varRegex = regexp.MustCompile(`\$(?:\w+|\{\w+\})`)
|
||||||
|
|
||||||
func (s *DiskIOStats) diskName(devName string) string {
|
func (s *DiskIOStats) diskName(devName string) string {
|
||||||
di, err := s.diskInfo(devName)
|
if len(s.NameTemplates) == 0 {
|
||||||
if err != nil {
|
|
||||||
// discard error :-(
|
|
||||||
// We can't return error because it's non-fatal to the Gather().
|
|
||||||
// And we have no logger, so we can't log it.
|
|
||||||
return devName
|
return devName
|
||||||
}
|
}
|
||||||
if di == nil {
|
|
||||||
|
di, err := s.diskInfo(devName)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("W! Error gathering disk info: %s", err)
|
||||||
return devName
|
return devName
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -198,14 +198,13 @@ func (s *DiskIOStats) diskName(devName string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DiskIOStats) diskTags(devName string) map[string]string {
|
func (s *DiskIOStats) diskTags(devName string) map[string]string {
|
||||||
di, err := s.diskInfo(devName)
|
if len(s.DeviceTags) == 0 {
|
||||||
if err != nil {
|
|
||||||
// discard error :-(
|
|
||||||
// We can't return error because it's non-fatal to the Gather().
|
|
||||||
// And we have no logger, so we can't log it.
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if di == nil {
|
|
||||||
|
di, err := s.diskInfo(devName)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("W! Error gathering disk info: %s", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -5,25 +5,26 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
type diskInfoCache struct {
|
type diskInfoCache struct {
|
||||||
stat syscall.Stat_t
|
udevDataPath string
|
||||||
values map[string]string
|
values map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
var udevPath = "/run/udev/data"
|
var udevPath = "/run/udev/data"
|
||||||
|
|
||||||
func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) {
|
func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) {
|
||||||
fi, err := os.Stat("/dev/" + devName)
|
var err error
|
||||||
|
var stat unix.Stat_t
|
||||||
|
|
||||||
|
path := "/dev/" + devName
|
||||||
|
err = unix.Stat(path, &stat)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
stat, ok := fi.Sys().(*syscall.Stat_t)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.infoCache == nil {
|
if s.infoCache == nil {
|
||||||
s.infoCache = map[string]diskInfoCache{}
|
s.infoCache = map[string]diskInfoCache{}
|
||||||
|
@ -31,25 +32,26 @@ func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) {
|
||||||
ic, ok := s.infoCache[devName]
|
ic, ok := s.infoCache[devName]
|
||||||
if ok {
|
if ok {
|
||||||
return ic.values, nil
|
return ic.values, nil
|
||||||
} else {
|
|
||||||
ic = diskInfoCache{
|
|
||||||
stat: *stat,
|
|
||||||
values: map[string]string{},
|
|
||||||
}
|
}
|
||||||
s.infoCache[devName] = ic
|
|
||||||
}
|
|
||||||
di := ic.values
|
|
||||||
|
|
||||||
major := stat.Rdev >> 8 & 0xff
|
major := stat.Rdev >> 8 & 0xff
|
||||||
minor := stat.Rdev & 0xff
|
minor := stat.Rdev & 0xff
|
||||||
|
udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor)
|
||||||
|
|
||||||
f, err := os.Open(fmt.Sprintf("%s/b%d:%d", udevPath, major, minor))
|
di := map[string]string{}
|
||||||
|
|
||||||
|
s.infoCache[devName] = diskInfoCache{
|
||||||
|
udevDataPath: udevDataPath,
|
||||||
|
values: di,
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(udevDataPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
scnr := bufio.NewScanner(f)
|
|
||||||
|
|
||||||
|
scnr := bufio.NewScanner(f)
|
||||||
for scnr.Scan() {
|
for scnr.Scan() {
|
||||||
l := scnr.Text()
|
l := scnr.Text()
|
||||||
if len(l) < 4 || l[:2] != "E:" {
|
if len(l) < 4 || l[:2] != "E:" {
|
||||||
|
|
|
@ -41,7 +41,7 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error {
|
||||||
// We only want the even number index as that contain the stat name.
|
// We only want the even number index as that contain the stat name.
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
// Convert the stat value into an integer.
|
// Convert the stat value into an integer.
|
||||||
m, err := strconv.Atoi(string(dataFields[i+1]))
|
m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -48,7 +48,7 @@ func TestFullVmStatProcFile(t *testing.T) {
|
||||||
"nr_isolated_anon": int64(0),
|
"nr_isolated_anon": int64(0),
|
||||||
"nr_isolated_file": int64(0),
|
"nr_isolated_file": int64(0),
|
||||||
"nr_shmem": int64(541689),
|
"nr_shmem": int64(541689),
|
||||||
"numa_hit": int64(5113399878),
|
"numa_hit": int64(6690743595),
|
||||||
"numa_miss": int64(0),
|
"numa_miss": int64(0),
|
||||||
"numa_foreign": int64(0),
|
"numa_foreign": int64(0),
|
||||||
"numa_interleave": int64(35793),
|
"numa_interleave": int64(35793),
|
||||||
|
@ -200,7 +200,7 @@ nr_writeback_temp 0
|
||||||
nr_isolated_anon 0
|
nr_isolated_anon 0
|
||||||
nr_isolated_file 0
|
nr_isolated_file 0
|
||||||
nr_shmem 541689
|
nr_shmem 541689
|
||||||
numa_hit 5113399878
|
numa_hit 6690743595
|
||||||
numa_miss 0
|
numa_miss 0
|
||||||
numa_foreign 0
|
numa_foreign 0
|
||||||
numa_interleave 35793
|
numa_interleave 35793
|
||||||
|
|
|
@ -165,7 +165,7 @@ func (s *Tomcat) Gather(acc telegraf.Accumulator) error {
|
||||||
for _, c := range status.TomcatConnectors {
|
for _, c := range status.TomcatConnectors {
|
||||||
name, err := strconv.Unquote(c.Name)
|
name, err := strconv.Unquote(c.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Unable to unquote name '%s': %s", c.Name, err)
|
name = c.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
tccTags := map[string]string{
|
tccTags := map[string]string{
|
||||||
|
|
|
@ -11,7 +11,7 @@ import (
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
var tomcatStatus = `<?xml version="1.0" encoding="UTF-8"?>
|
var tomcatStatus8 = `<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<?xml-stylesheet type="text/xsl" href="/manager/xform.xsl" ?>
|
<?xml-stylesheet type="text/xsl" href="/manager/xform.xsl" ?>
|
||||||
<status>
|
<status>
|
||||||
<jvm>
|
<jvm>
|
||||||
|
@ -37,10 +37,10 @@ var tomcatStatus = `<?xml version="1.0" encoding="UTF-8"?>
|
||||||
</connector>
|
</connector>
|
||||||
</status>`
|
</status>`
|
||||||
|
|
||||||
func TestHTTPTomcat(t *testing.T) {
|
func TestHTTPTomcat8(t *testing.T) {
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
fmt.Fprintln(w, tomcatStatus)
|
fmt.Fprintln(w, tomcatStatus8)
|
||||||
}))
|
}))
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
|
@ -91,5 +91,63 @@ func TestHTTPTomcat(t *testing.T) {
|
||||||
"name": "http-apr-8080",
|
"name": "http-apr-8080",
|
||||||
}
|
}
|
||||||
acc.AssertContainsTaggedFields(t, "tomcat_connector", connectorFields, connectorTags)
|
acc.AssertContainsTaggedFields(t, "tomcat_connector", connectorFields, connectorTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
var tomcatStatus6 = `<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<?xml-stylesheet type="text/xsl" href="xform.xsl" ?>
|
||||||
|
<status>
|
||||||
|
<jvm>
|
||||||
|
<memory free="1942681600" total="2040070144" max="2040070144"/>
|
||||||
|
</jvm>
|
||||||
|
<connector name="http-8080">
|
||||||
|
<threadInfo maxThreads="150" currentThreadCount="2" currentThreadsBusy="2"/>
|
||||||
|
<requestInfo maxTime="1005" processingTime="2465" requestCount="436" errorCount="16" bytesReceived="0" bytesSent="550196"/>
|
||||||
|
<workers>
|
||||||
|
<worker stage="K" requestProcessingTime="526" requestBytesSent="0" requestBytesReceived="0" remoteAddr="127.0.0.1" virtualHost="?" method="?" currentUri="?" currentQueryString="?" protocol="?"/>
|
||||||
|
<worker stage="S" requestProcessingTime="1" requestBytesSent="0" requestBytesReceived="0" remoteAddr="127.0.0.1" virtualHost="127.0.0.1" method="GET" currentUri="/manager/status/all" currentQueryString="XML=true" protocol="HTTP/1.1"/>
|
||||||
|
</workers>
|
||||||
|
</connector>
|
||||||
|
</status>`
|
||||||
|
|
||||||
|
func TestHTTPTomcat6(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprintln(w, tomcatStatus6)
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
tc := Tomcat{
|
||||||
|
URL: ts.URL,
|
||||||
|
Username: "tomcat",
|
||||||
|
Password: "s3cret",
|
||||||
|
}
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
err := tc.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// tomcat_jvm_memory
|
||||||
|
jvmMemoryFields := map[string]interface{}{
|
||||||
|
"free": int64(1942681600),
|
||||||
|
"total": int64(2040070144),
|
||||||
|
"max": int64(2040070144),
|
||||||
|
}
|
||||||
|
acc.AssertContainsFields(t, "tomcat_jvm_memory", jvmMemoryFields)
|
||||||
|
|
||||||
|
// tomcat_connector
|
||||||
|
connectorFields := map[string]interface{}{
|
||||||
|
"bytes_received": int64(0),
|
||||||
|
"bytes_sent": int64(550196),
|
||||||
|
"current_thread_count": int64(2),
|
||||||
|
"current_threads_busy": int64(2),
|
||||||
|
"error_count": int(16),
|
||||||
|
"max_threads": int64(150),
|
||||||
|
"max_time": int(1005),
|
||||||
|
"processing_time": int(2465),
|
||||||
|
"request_count": int(436),
|
||||||
|
}
|
||||||
|
connectorTags := map[string]string{
|
||||||
|
"name": "http-8080",
|
||||||
|
}
|
||||||
|
acc.AssertContainsTaggedFields(t, "tomcat_connector", connectorFields, connectorTags)
|
||||||
}
|
}
|
||||||
|
|
|
@ -33,6 +33,12 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) {
|
||||||
tags := map[string]string{"pool": col[0], "health": col[8]}
|
tags := map[string]string{"pool": col[0], "health": col[8]}
|
||||||
fields := map[string]interface{}{}
|
fields := map[string]interface{}{}
|
||||||
|
|
||||||
|
if tags["health"] == "UNAVAIL" {
|
||||||
|
|
||||||
|
fields["size"] = int64(0)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
|
||||||
size, err := strconv.ParseInt(col[1], 10, 64)
|
size, err := strconv.ParseInt(col[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("Error parsing size: %s", err)
|
return "", fmt.Errorf("Error parsing size: %s", err)
|
||||||
|
@ -68,6 +74,7 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) {
|
||||||
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
|
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
|
||||||
}
|
}
|
||||||
fields["dedupratio"] = dedup
|
fields["dedupratio"] = dedup
|
||||||
|
}
|
||||||
|
|
||||||
acc.AddFields("zfs_pool", fields, tags)
|
acc.AddFields("zfs_pool", fields, tags)
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,6 +22,15 @@ func mock_zpool() ([]string, error) {
|
||||||
return zpool_output, nil
|
return zpool_output, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// $ zpool list -Hp
|
||||||
|
var zpool_output_unavail = []string{
|
||||||
|
"temp2 - - - - - - - UNAVAIL -",
|
||||||
|
}
|
||||||
|
|
||||||
|
func mock_zpool_unavail() ([]string, error) {
|
||||||
|
return zpool_output_unavail, nil
|
||||||
|
}
|
||||||
|
|
||||||
// sysctl -q kstat.zfs.misc.arcstats
|
// sysctl -q kstat.zfs.misc.arcstats
|
||||||
|
|
||||||
// sysctl -q kstat.zfs.misc.vdev_cache_stats
|
// sysctl -q kstat.zfs.misc.vdev_cache_stats
|
||||||
|
@ -82,6 +91,41 @@ func TestZfsPoolMetrics(t *testing.T) {
|
||||||
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
|
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestZfsPoolMetrics_unavail(t *testing.T) {
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
|
z := &Zfs{
|
||||||
|
KstatMetrics: []string{"vdev_cache_stats"},
|
||||||
|
sysctl: mock_sysctl,
|
||||||
|
zpool: mock_zpool_unavail,
|
||||||
|
}
|
||||||
|
err := z.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.False(t, acc.HasMeasurement("zfs_pool"))
|
||||||
|
acc.Metrics = nil
|
||||||
|
|
||||||
|
z = &Zfs{
|
||||||
|
KstatMetrics: []string{"vdev_cache_stats"},
|
||||||
|
PoolMetrics: true,
|
||||||
|
sysctl: mock_sysctl,
|
||||||
|
zpool: mock_zpool_unavail,
|
||||||
|
}
|
||||||
|
err = z.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
//one pool, UNAVAIL
|
||||||
|
tags := map[string]string{
|
||||||
|
"pool": "temp2",
|
||||||
|
"health": "UNAVAIL",
|
||||||
|
}
|
||||||
|
|
||||||
|
poolMetrics := getTemp2PoolMetrics()
|
||||||
|
|
||||||
|
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
|
||||||
|
}
|
||||||
|
|
||||||
func TestZfsGeneratesMetrics(t *testing.T) {
|
func TestZfsGeneratesMetrics(t *testing.T) {
|
||||||
var acc testutil.Accumulator
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
|
@ -128,6 +172,12 @@ func getFreeNasBootPoolMetrics() map[string]interface{} {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getTemp2PoolMetrics() map[string]interface{} {
|
||||||
|
return map[string]interface{}{
|
||||||
|
"size": int64(0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func getKstatMetricsVdevOnly() map[string]interface{} {
|
func getKstatMetricsVdevOnly() map[string]interface{} {
|
||||||
return map[string]interface{}{
|
return map[string]interface{}{
|
||||||
"vdev_cache_stats_misses": int64(87789),
|
"vdev_cache_stats_misses": int64(87789),
|
||||||
|
|
|
@ -12,6 +12,9 @@ based on its main usage cases and the evolution of the OpenTracing standard.*
|
||||||
port = 9411 # Port on which Telegraf listens
|
port = 9411 # Port on which Telegraf listens
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The plugin accepts spans in `JSON` or `thrift` if the `Content-Type` is `application/json` or `application/x-thrift`, respectively.
|
||||||
|
If `Content-Type` is not set, then the plugin assumes it is `JSON` format.
|
||||||
|
|
||||||
## Tracing:
|
## Tracing:
|
||||||
|
|
||||||
This plugin uses Annotations tags and fields to track data from spans
|
This plugin uses Annotations tags and fields to track data from spans
|
||||||
|
|
|
@ -62,13 +62,17 @@ func main() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("%v\n", err)
|
log.Fatalf("%v\n", err)
|
||||||
}
|
}
|
||||||
ioutil.WriteFile(outFileName, raw, 0644)
|
if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil {
|
||||||
|
log.Fatalf("%v", err)
|
||||||
|
}
|
||||||
case "thrift":
|
case "thrift":
|
||||||
raw, err := thriftToJSONSpans(contents)
|
raw, err := thriftToJSONSpans(contents)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("%v\n", err)
|
log.Fatalf("%v\n", err)
|
||||||
}
|
}
|
||||||
ioutil.WriteFile(outFileName, raw, 0644)
|
if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil {
|
||||||
|
log.Fatalf("%v", err)
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
log.Fatalf("Unsupported input type")
|
log.Fatalf("Unsupported input type")
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,210 @@
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||||
|
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||||
|
)
|
||||||
|
|
||||||
|
//now is a mockable time for now
|
||||||
|
var now = time.Now
|
||||||
|
|
||||||
|
// DefaultServiceName when the span does not have any serviceName
|
||||||
|
const DefaultServiceName = "unknown"
|
||||||
|
|
||||||
|
// Decoder decodes the bytes and returns a trace
|
||||||
|
type Decoder interface {
|
||||||
|
Decode(octets []byte) ([]Span, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Span are created by instrumentation in RPC clients or servers
|
||||||
|
type Span interface {
|
||||||
|
Trace() (string, error)
|
||||||
|
SpanID() (string, error)
|
||||||
|
Parent() (string, error)
|
||||||
|
Name() string
|
||||||
|
Annotations() []Annotation
|
||||||
|
BinaryAnnotations() ([]BinaryAnnotation, error)
|
||||||
|
Timestamp() time.Time
|
||||||
|
Duration() time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// Annotation represents an event that explains latency with a timestamp.
|
||||||
|
type Annotation interface {
|
||||||
|
Timestamp() time.Time
|
||||||
|
Value() string
|
||||||
|
Host() Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
// BinaryAnnotation represent tags applied to a Span to give it context
|
||||||
|
type BinaryAnnotation interface {
|
||||||
|
Key() string
|
||||||
|
Value() string
|
||||||
|
Host() Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
// Endpoint represents the network context of a service recording an annotation
|
||||||
|
type Endpoint interface {
|
||||||
|
Host() string
|
||||||
|
Name() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultEndpoint is used if the annotations have no endpoints
|
||||||
|
type DefaultEndpoint struct{}
|
||||||
|
|
||||||
|
// Host returns 0.0.0.0; used when the host is unknown
|
||||||
|
func (d *DefaultEndpoint) Host() string { return "0.0.0.0" }
|
||||||
|
|
||||||
|
// Name returns "unknown" when an endpoint doesn't exist
|
||||||
|
func (d *DefaultEndpoint) Name() string { return DefaultServiceName }
|
||||||
|
|
||||||
|
// MicroToTime converts zipkin's native time of microseconds into time.Time
|
||||||
|
func MicroToTime(micro int64) time.Time {
|
||||||
|
return time.Unix(0, micro*int64(time.Microsecond)).UTC()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTrace converts a slice of []Span into a new Trace
|
||||||
|
func NewTrace(spans []Span) (trace.Trace, error) {
|
||||||
|
tr := make(trace.Trace, len(spans))
|
||||||
|
for i, span := range spans {
|
||||||
|
bin, err := span.BinaryAnnotations()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
endpoint := serviceEndpoint(span.Annotations(), bin)
|
||||||
|
id, err := span.SpanID()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
tid, err := span.Trace()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
pid, err := parentID(span)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
tr[i] = trace.Span{
|
||||||
|
ID: id,
|
||||||
|
TraceID: tid,
|
||||||
|
Name: span.Name(),
|
||||||
|
Timestamp: guessTimestamp(span),
|
||||||
|
Duration: convertDuration(span),
|
||||||
|
ParentID: pid,
|
||||||
|
ServiceName: endpoint.Name(),
|
||||||
|
Annotations: NewAnnotations(span.Annotations(), endpoint),
|
||||||
|
BinaryAnnotations: NewBinaryAnnotations(bin, endpoint),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAnnotations converts a slice of Annotation into a slice of new Annotations
|
||||||
|
func NewAnnotations(annotations []Annotation, endpoint Endpoint) []trace.Annotation {
|
||||||
|
formatted := make([]trace.Annotation, len(annotations))
|
||||||
|
for i, annotation := range annotations {
|
||||||
|
formatted[i] = trace.Annotation{
|
||||||
|
Host: endpoint.Host(),
|
||||||
|
ServiceName: endpoint.Name(),
|
||||||
|
Timestamp: annotation.Timestamp(),
|
||||||
|
Value: annotation.Value(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return formatted
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBinaryAnnotations is very similar to NewAnnotations, but it
|
||||||
|
// converts BinaryAnnotations instead of the normal Annotation
|
||||||
|
func NewBinaryAnnotations(annotations []BinaryAnnotation, endpoint Endpoint) []trace.BinaryAnnotation {
|
||||||
|
formatted := make([]trace.BinaryAnnotation, len(annotations))
|
||||||
|
for i, annotation := range annotations {
|
||||||
|
formatted[i] = trace.BinaryAnnotation{
|
||||||
|
Host: endpoint.Host(),
|
||||||
|
ServiceName: endpoint.Name(),
|
||||||
|
Key: annotation.Key(),
|
||||||
|
Value: annotation.Value(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return formatted
|
||||||
|
}
|
||||||
|
|
||||||
|
func minMax(span Span) (time.Time, time.Time) {
|
||||||
|
min := now().UTC()
|
||||||
|
max := time.Time{}.UTC()
|
||||||
|
for _, annotation := range span.Annotations() {
|
||||||
|
ts := annotation.Timestamp()
|
||||||
|
if !ts.IsZero() && ts.Before(min) {
|
||||||
|
min = ts
|
||||||
|
}
|
||||||
|
if !ts.IsZero() && ts.After(max) {
|
||||||
|
max = ts
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if max.IsZero() {
|
||||||
|
max = min
|
||||||
|
}
|
||||||
|
return min, max
|
||||||
|
}
|
||||||
|
|
||||||
|
func guessTimestamp(span Span) time.Time {
|
||||||
|
ts := span.Timestamp()
|
||||||
|
if !ts.IsZero() {
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
min, _ := minMax(span)
|
||||||
|
return min
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertDuration(span Span) time.Duration {
|
||||||
|
duration := span.Duration()
|
||||||
|
if duration != 0 {
|
||||||
|
return duration
|
||||||
|
}
|
||||||
|
min, max := minMax(span)
|
||||||
|
return max.Sub(min)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parentID(span Span) (string, error) {
|
||||||
|
// A parent ID of "" means that this is a parent span. In this case,
|
||||||
|
// we set the parent ID of the span to be its own id, so it points to
|
||||||
|
// itself.
|
||||||
|
id, err := span.Parent()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if id != "" {
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
return span.SpanID()
|
||||||
|
}
|
||||||
|
|
||||||
|
func serviceEndpoint(ann []Annotation, bann []BinaryAnnotation) Endpoint {
|
||||||
|
for _, a := range ann {
|
||||||
|
switch a.Value() {
|
||||||
|
case zipkincore.SERVER_RECV, zipkincore.SERVER_SEND, zipkincore.CLIENT_RECV, zipkincore.CLIENT_SEND:
|
||||||
|
if a.Host() != nil && a.Host().Name() != "" {
|
||||||
|
return a.Host()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range bann {
|
||||||
|
if a.Key() == zipkincore.LOCAL_COMPONENT && a.Host() != nil && a.Host().Name() != "" {
|
||||||
|
return a.Host()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Unable to find any "standard" endpoint host, so, use any that exist in the regular annotations
|
||||||
|
for _, a := range ann {
|
||||||
|
if a.Host() != nil && a.Host().Name() != "" {
|
||||||
|
return a.Host()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &DefaultEndpoint{}
|
||||||
|
}
|
|
@ -0,0 +1,636 @@
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_MicroToTime(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
micro int64
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
micro int64
|
||||||
|
want time.Time
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "given zero micro seconds expected unix time zero",
|
||||||
|
micro: 0,
|
||||||
|
want: time.Unix(0, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "given a million micro seconds expected unix time one",
|
||||||
|
micro: 1000000,
|
||||||
|
want: time.Unix(1, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "given a million micro seconds expected unix time one",
|
||||||
|
micro: 1503031538791000,
|
||||||
|
want: time.Unix(0, 1503031538791000000).UTC(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := MicroToTime(tt.micro); !reflect.DeepEqual(got, tt.want) {
|
||||||
|
t.Errorf("microToTime() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_minMax(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
span *MockSpan
|
||||||
|
now func() time.Time
|
||||||
|
wantMin time.Time
|
||||||
|
wantMax time.Time
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Single annotation",
|
||||||
|
span: &MockSpan{
|
||||||
|
Anno: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
wantMin: time.Unix(1, 0).UTC(),
|
||||||
|
wantMax: time.Unix(1, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Three annotations",
|
||||||
|
span: &MockSpan{
|
||||||
|
Anno: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(1 * time.Second),
|
||||||
|
},
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(2 * time.Second),
|
||||||
|
},
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
wantMin: time.Unix(1, 0).UTC(),
|
||||||
|
wantMax: time.Unix(3, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Annotations are in the future",
|
||||||
|
span: &MockSpan{
|
||||||
|
Anno: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
wantMin: time.Unix(2, 0).UTC(),
|
||||||
|
wantMax: time.Unix(3, 0).UTC(),
|
||||||
|
now: func() time.Time {
|
||||||
|
return time.Unix(2, 0).UTC()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "No Annotations",
|
||||||
|
span: &MockSpan{
|
||||||
|
Anno: []Annotation{},
|
||||||
|
},
|
||||||
|
wantMin: time.Unix(2, 0).UTC(),
|
||||||
|
wantMax: time.Unix(2, 0).UTC(),
|
||||||
|
now: func() time.Time {
|
||||||
|
return time.Unix(2, 0).UTC()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if tt.now != nil {
|
||||||
|
now = tt.now
|
||||||
|
}
|
||||||
|
got, got1 := minMax(tt.span)
|
||||||
|
if !reflect.DeepEqual(got, tt.wantMin) {
|
||||||
|
t.Errorf("minMax() got = %v, want %v", got, tt.wantMin)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(got1, tt.wantMax) {
|
||||||
|
t.Errorf("minMax() got1 = %v, want %v", got1, tt.wantMax)
|
||||||
|
}
|
||||||
|
now = time.Now
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_guessTimestamp(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
span Span
|
||||||
|
now func() time.Time
|
||||||
|
want time.Time
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "simple timestamp",
|
||||||
|
span: &MockSpan{
|
||||||
|
Time: time.Unix(2, 0).UTC(),
|
||||||
|
},
|
||||||
|
want: time.Unix(2, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "zero timestamp",
|
||||||
|
span: &MockSpan{
|
||||||
|
Time: time.Time{},
|
||||||
|
},
|
||||||
|
now: func() time.Time {
|
||||||
|
return time.Unix(2, 0).UTC()
|
||||||
|
},
|
||||||
|
want: time.Unix(2, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "zero timestamp with single annotation",
|
||||||
|
span: &MockSpan{
|
||||||
|
Time: time.Time{},
|
||||||
|
Anno: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: time.Unix(0, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "zero timestamp with two annotations",
|
||||||
|
span: &MockSpan{
|
||||||
|
Time: time.Time{},
|
||||||
|
Anno: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC(),
|
||||||
|
},
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(2, 0).UTC(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: time.Unix(0, 0).UTC(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if tt.now != nil {
|
||||||
|
now = tt.now
|
||||||
|
}
|
||||||
|
if got := guessTimestamp(tt.span); !reflect.DeepEqual(got, tt.want) {
|
||||||
|
t.Errorf("guessTimestamp() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
now = time.Now
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_convertDuration(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
span Span
|
||||||
|
want time.Duration
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "simple duration",
|
||||||
|
span: &MockSpan{
|
||||||
|
Dur: time.Hour,
|
||||||
|
},
|
||||||
|
want: time.Hour,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no timestamp, but, 2 seconds between annotations",
|
||||||
|
span: &MockSpan{
|
||||||
|
Anno: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(1 * time.Second),
|
||||||
|
},
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(2 * time.Second),
|
||||||
|
},
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: 2 * time.Second,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := convertDuration(tt.span); got != tt.want {
|
||||||
|
t.Errorf("convertDuration() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_parentID(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
span Span
|
||||||
|
want string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "has parent id",
|
||||||
|
span: &MockSpan{
|
||||||
|
ParentID: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
want: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no parent, so use id",
|
||||||
|
span: &MockSpan{
|
||||||
|
ID: "abceasyas123",
|
||||||
|
},
|
||||||
|
want: "abceasyas123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "bad parent value",
|
||||||
|
span: &MockSpan{
|
||||||
|
Error: fmt.Errorf("Mommie Dearest"),
|
||||||
|
},
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := parentID(tt.span)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("parentID() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("parentID() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_serviceEndpoint(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ann []Annotation
|
||||||
|
bann []BinaryAnnotation
|
||||||
|
want Endpoint
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Annotation with server receive",
|
||||||
|
ann: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Val: "battery",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
name: "aa",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&MockAnnotation{
|
||||||
|
Val: "sr",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
name: "me",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: &MockEndpoint{
|
||||||
|
name: "me",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Annotation with no standard values",
|
||||||
|
ann: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Val: "noop",
|
||||||
|
},
|
||||||
|
&MockAnnotation{
|
||||||
|
Val: "aa",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
name: "battery",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: &MockEndpoint{
|
||||||
|
name: "battery",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Annotation with no endpoints",
|
||||||
|
ann: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Val: "noop",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: &DefaultEndpoint{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Binary annotation with local component",
|
||||||
|
bann: []BinaryAnnotation{
|
||||||
|
&MockBinaryAnnotation{
|
||||||
|
K: "noop",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
name: "aa",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&MockBinaryAnnotation{
|
||||||
|
K: "lc",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
name: "me",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: &MockEndpoint{
|
||||||
|
name: "me",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := serviceEndpoint(tt.ann, tt.bann); !reflect.DeepEqual(got, tt.want) {
|
||||||
|
t.Errorf("serviceEndpoint() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewBinaryAnnotations(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
annotations []BinaryAnnotation
|
||||||
|
endpoint Endpoint
|
||||||
|
want []trace.BinaryAnnotation
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Should override annotation with endpoint",
|
||||||
|
annotations: []BinaryAnnotation{
|
||||||
|
&MockBinaryAnnotation{
|
||||||
|
K: "mykey",
|
||||||
|
V: "myvalue",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
host: "noop",
|
||||||
|
name: "noop",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
endpoint: &MockEndpoint{
|
||||||
|
host: "myhost",
|
||||||
|
name: "myservice",
|
||||||
|
},
|
||||||
|
want: []trace.BinaryAnnotation{
|
||||||
|
trace.BinaryAnnotation{
|
||||||
|
Host: "myhost",
|
||||||
|
ServiceName: "myservice",
|
||||||
|
Key: "mykey",
|
||||||
|
Value: "myvalue",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := NewBinaryAnnotations(tt.annotations, tt.endpoint); !reflect.DeepEqual(got, tt.want) {
|
||||||
|
t.Errorf("NewBinaryAnnotations() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewAnnotations(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
annotations []Annotation
|
||||||
|
endpoint Endpoint
|
||||||
|
want []trace.Annotation
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Should override annotation with endpoint",
|
||||||
|
annotations: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(0, 0).UTC(),
|
||||||
|
Val: "myvalue",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
host: "noop",
|
||||||
|
name: "noop",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
endpoint: &MockEndpoint{
|
||||||
|
host: "myhost",
|
||||||
|
name: "myservice",
|
||||||
|
},
|
||||||
|
want: []trace.Annotation{
|
||||||
|
trace.Annotation{
|
||||||
|
Host: "myhost",
|
||||||
|
ServiceName: "myservice",
|
||||||
|
Timestamp: time.Unix(0, 0).UTC(),
|
||||||
|
Value: "myvalue",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := NewAnnotations(tt.annotations, tt.endpoint); !reflect.DeepEqual(got, tt.want) {
|
||||||
|
t.Errorf("NewAnnotations() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewTrace(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
spans []Span
|
||||||
|
now func() time.Time
|
||||||
|
want trace.Trace
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty span",
|
||||||
|
spans: []Span{
|
||||||
|
&MockSpan{},
|
||||||
|
},
|
||||||
|
now: func() time.Time {
|
||||||
|
return time.Unix(0, 0).UTC()
|
||||||
|
},
|
||||||
|
want: trace.Trace{
|
||||||
|
trace.Span{
|
||||||
|
ServiceName: "unknown",
|
||||||
|
Timestamp: time.Unix(0, 0).UTC(),
|
||||||
|
Annotations: []trace.Annotation{},
|
||||||
|
BinaryAnnotations: []trace.BinaryAnnotation{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "span has no id",
|
||||||
|
spans: []Span{
|
||||||
|
&MockSpan{
|
||||||
|
Error: fmt.Errorf("Span has no id"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "complete span",
|
||||||
|
spans: []Span{
|
||||||
|
&MockSpan{
|
||||||
|
TraceID: "tid",
|
||||||
|
ID: "id",
|
||||||
|
ParentID: "",
|
||||||
|
ServiceName: "me",
|
||||||
|
Anno: []Annotation{
|
||||||
|
&MockAnnotation{
|
||||||
|
Time: time.Unix(1, 0).UTC(),
|
||||||
|
Val: "myval",
|
||||||
|
H: &MockEndpoint{
|
||||||
|
host: "myhost",
|
||||||
|
name: "myname",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 0).UTC(),
|
||||||
|
Dur: 2 * time.Second,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
now: func() time.Time {
|
||||||
|
return time.Unix(0, 0).UTC()
|
||||||
|
},
|
||||||
|
want: trace.Trace{
|
||||||
|
trace.Span{
|
||||||
|
ID: "id",
|
||||||
|
ParentID: "id",
|
||||||
|
TraceID: "tid",
|
||||||
|
Name: "me",
|
||||||
|
ServiceName: "myname",
|
||||||
|
Timestamp: time.Unix(0, 0).UTC(),
|
||||||
|
Duration: 2 * time.Second,
|
||||||
|
Annotations: []trace.Annotation{
|
||||||
|
{
|
||||||
|
Timestamp: time.Unix(1, 0).UTC(),
|
||||||
|
Value: "myval",
|
||||||
|
Host: "myhost",
|
||||||
|
ServiceName: "myname",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
BinaryAnnotations: []trace.BinaryAnnotation{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if tt.now != nil {
|
||||||
|
now = tt.now
|
||||||
|
}
|
||||||
|
got, err := NewTrace(tt.spans)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("NewTrace() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !cmp.Equal(tt.want, got) {
|
||||||
|
t.Errorf("NewTrace() = %s", cmp.Diff(tt.want, got))
|
||||||
|
}
|
||||||
|
now = time.Now
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type MockSpan struct {
|
||||||
|
TraceID string
|
||||||
|
ID string
|
||||||
|
ParentID string
|
||||||
|
ServiceName string
|
||||||
|
Anno []Annotation
|
||||||
|
BinAnno []BinaryAnnotation
|
||||||
|
Time time.Time
|
||||||
|
Dur time.Duration
|
||||||
|
Error error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) Trace() (string, error) {
|
||||||
|
return m.TraceID, m.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) SpanID() (string, error) {
|
||||||
|
return m.ID, m.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) Parent() (string, error) {
|
||||||
|
return m.ParentID, m.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) Name() string {
|
||||||
|
return m.ServiceName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) Annotations() []Annotation {
|
||||||
|
return m.Anno
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) BinaryAnnotations() ([]BinaryAnnotation, error) {
|
||||||
|
return m.BinAnno, m.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) Timestamp() time.Time {
|
||||||
|
return m.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockSpan) Duration() time.Duration {
|
||||||
|
return m.Dur
|
||||||
|
}
|
||||||
|
|
||||||
|
type MockAnnotation struct {
|
||||||
|
Time time.Time
|
||||||
|
Val string
|
||||||
|
H Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockAnnotation) Timestamp() time.Time {
|
||||||
|
return m.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockAnnotation) Value() string {
|
||||||
|
return m.Val
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockAnnotation) Host() Endpoint {
|
||||||
|
return m.H
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockEndpoint is a fixed-value Endpoint implementation for tests.
type MockEndpoint struct {
	host string
	name string
}

// Host returns the configured host string.
func (ep *MockEndpoint) Host() string { return ep.host }

// Name returns the configured service name.
func (ep *MockEndpoint) Name() string { return ep.name }
|
||||||
|
|
||||||
|
type MockBinaryAnnotation struct {
|
||||||
|
Time time.Time
|
||||||
|
K string
|
||||||
|
V string
|
||||||
|
H Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *MockBinaryAnnotation) Key() string {
|
||||||
|
return b.K
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *MockBinaryAnnotation) Value() string {
|
||||||
|
return b.V
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *MockBinaryAnnotation) Host() Endpoint {
|
||||||
|
return b.H
|
||||||
|
}
|
|
@ -0,0 +1,252 @@
|
||||||
|
package jsonV1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||||
|
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSON decodes spans from bodies `POST`ed to the spans endpoint
|
||||||
|
type JSON struct{}
|
||||||
|
|
||||||
|
// Decode unmarshals and validates the JSON body
|
||||||
|
func (j *JSON) Decode(octets []byte) ([]codec.Span, error) {
|
||||||
|
var spans []span
|
||||||
|
err := json.Unmarshal(octets, &spans)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res := make([]codec.Span, len(spans))
|
||||||
|
for i := range spans {
|
||||||
|
if err := spans[i].Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
res[i] = &spans[i]
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type span struct {
|
||||||
|
TraceID string `json:"traceId"`
|
||||||
|
SpanName string `json:"name"`
|
||||||
|
ParentID string `json:"parentId,omitempty"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
Time *int64 `json:"timestamp,omitempty"`
|
||||||
|
Dur *int64 `json:"duration,omitempty"`
|
||||||
|
Debug bool `json:"debug,omitempty"`
|
||||||
|
Anno []annotation `json:"annotations"`
|
||||||
|
BAnno []binaryAnnotation `json:"binaryAnnotations"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Validate() error {
|
||||||
|
var err error
|
||||||
|
check := func(f func() (string, error)) {
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_, err = f()
|
||||||
|
}
|
||||||
|
|
||||||
|
check(s.Trace)
|
||||||
|
check(s.SpanID)
|
||||||
|
check(s.Parent)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = s.BinaryAnnotations()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Trace() (string, error) {
|
||||||
|
if s.TraceID == "" {
|
||||||
|
return "", fmt.Errorf("Trace ID cannot be null")
|
||||||
|
}
|
||||||
|
return TraceIDFromString(s.TraceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) SpanID() (string, error) {
|
||||||
|
if s.ID == "" {
|
||||||
|
return "", fmt.Errorf("Span ID cannot be null")
|
||||||
|
}
|
||||||
|
return IDFromString(s.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Parent() (string, error) {
|
||||||
|
if s.ParentID == "" {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
return IDFromString(s.ParentID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Name() string {
|
||||||
|
return s.SpanName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Annotations() []codec.Annotation {
|
||||||
|
res := make([]codec.Annotation, len(s.Anno))
|
||||||
|
for i := range s.Anno {
|
||||||
|
res[i] = &s.Anno[i]
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) BinaryAnnotations() ([]codec.BinaryAnnotation, error) {
|
||||||
|
res := make([]codec.BinaryAnnotation, len(s.BAnno))
|
||||||
|
for i, a := range s.BAnno {
|
||||||
|
if a.Key() != "" && a.Value() == "" {
|
||||||
|
return nil, fmt.Errorf("No value for key %s at binaryAnnotations[%d]", a.K, i)
|
||||||
|
}
|
||||||
|
if a.Value() != "" && a.Key() == "" {
|
||||||
|
return nil, fmt.Errorf("No key at binaryAnnotations[%d]", i)
|
||||||
|
}
|
||||||
|
res[i] = &s.BAnno[i]
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Timestamp() time.Time {
|
||||||
|
if s.Time == nil {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
return codec.MicroToTime(*s.Time)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Duration() time.Duration {
|
||||||
|
if s.Dur == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return time.Duration(*s.Dur) * time.Microsecond
|
||||||
|
}
|
||||||
|
|
||||||
|
type annotation struct {
|
||||||
|
Endpoint *endpoint `json:"endpoint,omitempty"`
|
||||||
|
Time int64 `json:"timestamp"`
|
||||||
|
Val string `json:"value,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *annotation) Timestamp() time.Time {
|
||||||
|
return codec.MicroToTime(a.Time)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *annotation) Value() string {
|
||||||
|
return a.Val
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *annotation) Host() codec.Endpoint {
|
||||||
|
return a.Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
type binaryAnnotation struct {
|
||||||
|
K string `json:"key"`
|
||||||
|
V json.RawMessage `json:"value"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Endpoint *endpoint `json:"endpoint,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *binaryAnnotation) Key() string {
|
||||||
|
return b.K
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *binaryAnnotation) Value() string {
|
||||||
|
t, err := zipkincore.AnnotationTypeFromString(b.Type)
|
||||||
|
// Assume this is a string if we cannot tell the type
|
||||||
|
if err != nil {
|
||||||
|
t = zipkincore.AnnotationType_STRING
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case zipkincore.AnnotationType_BOOL:
|
||||||
|
var v bool
|
||||||
|
err := json.Unmarshal(b.V, &v)
|
||||||
|
if err == nil {
|
||||||
|
return strconv.FormatBool(v)
|
||||||
|
}
|
||||||
|
case zipkincore.AnnotationType_BYTES:
|
||||||
|
return string(b.V)
|
||||||
|
case zipkincore.AnnotationType_I16, zipkincore.AnnotationType_I32, zipkincore.AnnotationType_I64:
|
||||||
|
var v int64
|
||||||
|
err := json.Unmarshal(b.V, &v)
|
||||||
|
if err == nil {
|
||||||
|
return strconv.FormatInt(v, 10)
|
||||||
|
}
|
||||||
|
case zipkincore.AnnotationType_DOUBLE:
|
||||||
|
var v float64
|
||||||
|
err := json.Unmarshal(b.V, &v)
|
||||||
|
if err == nil {
|
||||||
|
return strconv.FormatFloat(v, 'f', -1, 64)
|
||||||
|
}
|
||||||
|
case zipkincore.AnnotationType_STRING:
|
||||||
|
var v string
|
||||||
|
err := json.Unmarshal(b.V, &v)
|
||||||
|
if err == nil {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *binaryAnnotation) Host() codec.Endpoint {
|
||||||
|
return b.Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
// endpoint is the JSON v1 wire representation of a zipkin endpoint.
type endpoint struct {
	ServiceName string `json:"serviceName"`
	Ipv4        string `json:"ipv4"`
	Ipv6        string `json:"ipv6,omitempty"`
	Port        int    `json:"port"`
}

// Host renders the endpoint address as "ipv4:port"; a zero port is omitted.
func (e *endpoint) Host() string {
	if e.Port == 0 {
		return e.Ipv4
	}
	return fmt.Sprintf("%s:%d", e.Ipv4, e.Port)
}

// Name returns the endpoint's service name.
func (e *endpoint) Name() string {
	return e.ServiceName
}
|
||||||
|
|
||||||
|
// TraceIDFromString creates a TraceID from a hexadecimal string
|
||||||
|
func TraceIDFromString(s string) (string, error) {
|
||||||
|
var hi, lo uint64
|
||||||
|
var err error
|
||||||
|
if len(s) > 32 {
|
||||||
|
return "", fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
|
||||||
|
} else if len(s) > 16 {
|
||||||
|
hiLen := len(s) - 16
|
||||||
|
if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hi == 0 {
|
||||||
|
return fmt.Sprintf("%x", lo), nil
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%x%016x", hi, lo), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDFromString creates a decimal id from a hexadecimal string
|
||||||
|
func IDFromString(s string) (string, error) {
|
||||||
|
if len(s) > 16 {
|
||||||
|
return "", fmt.Errorf("ID cannot be longer than 16 hex characters: %s", s)
|
||||||
|
}
|
||||||
|
id, err := strconv.ParseUint(s, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strconv.FormatUint(id, 10), nil
|
||||||
|
}
|
|
@ -0,0 +1,920 @@
|
||||||
|
package jsonV1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestJSON_Decode(t *testing.T) {
|
||||||
|
addr := func(i int64) *int64 { return &i }
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
octets []byte
|
||||||
|
want []codec.Span
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "bad json is error",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
]`),
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Decodes simple trace",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "6b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c"
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "6b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Decodes two spans",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "6b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"traceId": "6b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "c6946e9cb5d122b6",
|
||||||
|
"parentId": "6b221d5bc9e6496c",
|
||||||
|
"duration": 10000
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "6b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
&span{
|
||||||
|
TraceID: "6b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "c6946e9cb5d122b6",
|
||||||
|
ParentID: "6b221d5bc9e6496c",
|
||||||
|
Dur: addr(10000),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Decodes trace with timestamp",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "6b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"timestamp": 1503031538791000
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "6b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
Time: addr(1503031538791000),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Decodes simple trace with high and low trace id",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c"
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Error when trace id is null",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": null,
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c"
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ignore null parentId",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"parentId": null
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ignore null timestamp",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"timestamp": null
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ignore null duration",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"duration": null
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ignore null annotation endpoint",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"annotations": [
|
||||||
|
{
|
||||||
|
"timestamp": 1461750491274000,
|
||||||
|
"value": "cs",
|
||||||
|
"endpoint": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
Anno: []annotation{
|
||||||
|
{
|
||||||
|
Time: 1461750491274000,
|
||||||
|
Val: "cs",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ignore null binary annotation endpoint",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "lc",
|
||||||
|
"value": "JDBCSpanStore",
|
||||||
|
"endpoint": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
BAnno: []binaryAnnotation{
|
||||||
|
{
|
||||||
|
K: "lc",
|
||||||
|
V: json.RawMessage(`"JDBCSpanStore"`),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Error when binary annotation has no key",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"value": "JDBCSpanStore",
|
||||||
|
"endpoint": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Error when binary annotation has no value",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "lc",
|
||||||
|
"endpoint": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "binary annotation with endpoint",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "lc",
|
||||||
|
"value": "JDBCSpanStore",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "service",
|
||||||
|
"port": 65535
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
BAnno: []binaryAnnotation{
|
||||||
|
{
|
||||||
|
K: "lc",
|
||||||
|
V: json.RawMessage(`"JDBCSpanStore"`),
|
||||||
|
Endpoint: &endpoint{
|
||||||
|
ServiceName: "service",
|
||||||
|
Port: 65535,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "binary annotation with double value",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "num",
|
||||||
|
"value": 1.23456789,
|
||||||
|
"type": "DOUBLE"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
BAnno: []binaryAnnotation{
|
||||||
|
{
|
||||||
|
K: "num",
|
||||||
|
V: json.RawMessage{0x31, 0x2e, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39},
|
||||||
|
Type: "DOUBLE",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "binary annotation with integer value",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "num",
|
||||||
|
"value": 1,
|
||||||
|
"type": "I16"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
BAnno: []binaryAnnotation{
|
||||||
|
{
|
||||||
|
K: "num",
|
||||||
|
V: json.RawMessage{0x31},
|
||||||
|
Type: "I16",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "binary annotation with bool value",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "num",
|
||||||
|
"value": true,
|
||||||
|
"type": "BOOL"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
BAnno: []binaryAnnotation{
|
||||||
|
{
|
||||||
|
K: "num",
|
||||||
|
V: json.RawMessage(`true`),
|
||||||
|
Type: "BOOL",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "binary annotation with bytes value",
|
||||||
|
octets: []byte(`
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
"name": "get-traces",
|
||||||
|
"id": "6b221d5bc9e6496c",
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "num",
|
||||||
|
"value": "1",
|
||||||
|
"type": "BYTES"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]`),
|
||||||
|
want: []codec.Span{
|
||||||
|
&span{
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
SpanName: "get-traces",
|
||||||
|
ID: "6b221d5bc9e6496c",
|
||||||
|
BAnno: []binaryAnnotation{
|
||||||
|
{
|
||||||
|
K: "num",
|
||||||
|
V: json.RawMessage(`"1"`),
|
||||||
|
Type: "BYTES",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
j := &JSON{}
|
||||||
|
got, err := j.Decode(tt.octets)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("JSON.Decode() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !cmp.Equal(tt.want, got) {
|
||||||
|
t.Errorf("JSON.Decode() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_span_Trace(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
TraceID string
|
||||||
|
want string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Trace IDs cannot be null",
|
||||||
|
TraceID: "",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "converts hex string correctly",
|
||||||
|
TraceID: "deadbeef",
|
||||||
|
want: "deadbeef",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "converts high and low trace id correctly",
|
||||||
|
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
want: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors when string isn't hex",
|
||||||
|
TraceID: "oxdeadbeef",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors when id is too long",
|
||||||
|
TraceID: "1234567890abcdef1234567890abcdef1",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
s := &span{
|
||||||
|
TraceID: tt.TraceID,
|
||||||
|
}
|
||||||
|
got, err := s.Trace()
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("span.Trace() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !cmp.Equal(tt.want, got) {
|
||||||
|
t.Errorf("span.Trace() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_span_SpanID(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ID string
|
||||||
|
want string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Span IDs cannot be null",
|
||||||
|
ID: "",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "converts known id correctly",
|
||||||
|
ID: "b26412d1ac16767d",
|
||||||
|
want: "12854419928166856317",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "converts hex string correctly",
|
||||||
|
ID: "deadbeef",
|
||||||
|
want: "3735928559",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors when string isn't hex",
|
||||||
|
ID: "oxdeadbeef",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors when id is too long",
|
||||||
|
ID: "1234567890abcdef1",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
s := &span{
|
||||||
|
ID: tt.ID,
|
||||||
|
}
|
||||||
|
got, err := s.SpanID()
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("span.SpanID() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !cmp.Equal(tt.want, got) {
|
||||||
|
t.Errorf("span.SpanID() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_span_Parent(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ParentID string
|
||||||
|
want string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "when there is no parent return empty string",
|
||||||
|
ParentID: "",
|
||||||
|
want: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "converts hex string correctly",
|
||||||
|
ParentID: "deadbeef",
|
||||||
|
want: "3735928559",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors when string isn't hex",
|
||||||
|
ParentID: "oxdeadbeef",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors when parent id is too long",
|
||||||
|
ParentID: "1234567890abcdef1",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
s := &span{
|
||||||
|
ParentID: tt.ParentID,
|
||||||
|
}
|
||||||
|
got, err := s.Parent()
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("span.Parent() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !cmp.Equal(tt.want, got) {
|
||||||
|
t.Errorf("span.Parent() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_span_Timestamp(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
Time *int64
|
||||||
|
want time.Time
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "converts to microseconds",
|
||||||
|
Time: func(i int64) *int64 { return &i }(3000000),
|
||||||
|
want: time.Unix(3, 0).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "nil time should be zero time",
|
||||||
|
Time: nil,
|
||||||
|
want: time.Time{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
s := &span{
|
||||||
|
Time: tt.Time,
|
||||||
|
}
|
||||||
|
if got := s.Timestamp(); !cmp.Equal(tt.want, got) {
|
||||||
|
t.Errorf("span.Timestamp() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_span_Duration(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
dur *int64
|
||||||
|
want time.Duration
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "converts from 3 microseconds",
|
||||||
|
dur: func(i int64) *int64 { return &i }(3000000),
|
||||||
|
want: 3 * time.Second,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "nil time should be zero duration",
|
||||||
|
dur: nil,
|
||||||
|
want: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
s := &span{
|
||||||
|
Dur: tt.dur,
|
||||||
|
}
|
||||||
|
if got := s.Duration(); got != tt.want {
|
||||||
|
t.Errorf("span.Duration() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_annotation(t *testing.T) {
|
||||||
|
type fields struct {
|
||||||
|
Endpoint *endpoint
|
||||||
|
Time int64
|
||||||
|
Val string
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
fields fields
|
||||||
|
tm time.Time
|
||||||
|
val string
|
||||||
|
endpoint *endpoint
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "returns all fields",
|
||||||
|
fields: fields{
|
||||||
|
Time: 3000000,
|
||||||
|
Val: "myvalue",
|
||||||
|
Endpoint: &endpoint{
|
||||||
|
ServiceName: "myservice",
|
||||||
|
Ipv4: "127.0.0.1",
|
||||||
|
Port: 443,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
tm: time.Unix(3, 0).UTC(),
|
||||||
|
val: "myvalue",
|
||||||
|
endpoint: &endpoint{
|
||||||
|
ServiceName: "myservice",
|
||||||
|
Ipv4: "127.0.0.1",
|
||||||
|
Port: 443,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
an := annotation(tt.fields)
|
||||||
|
a := &an
|
||||||
|
if got := a.Timestamp(); got != tt.tm {
|
||||||
|
t.Errorf("annotation.Timestamp() = %v, want %v", got, tt.tm)
|
||||||
|
}
|
||||||
|
if got := a.Value(); got != tt.val {
|
||||||
|
t.Errorf("annotation.Value() = %v, want %v", got, tt.val)
|
||||||
|
}
|
||||||
|
if got := a.Host(); !cmp.Equal(tt.endpoint, got) {
|
||||||
|
t.Errorf("annotation.Endpoint() = %v, want %v", got, tt.endpoint)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_binaryAnnotation(t *testing.T) {
|
||||||
|
type fields struct {
|
||||||
|
K string
|
||||||
|
V json.RawMessage
|
||||||
|
Type string
|
||||||
|
Endpoint *endpoint
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
fields fields
|
||||||
|
key string
|
||||||
|
value string
|
||||||
|
endpoint *endpoint
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "returns all fields",
|
||||||
|
fields: fields{
|
||||||
|
K: "key",
|
||||||
|
V: json.RawMessage(`"value"`),
|
||||||
|
Endpoint: &endpoint{
|
||||||
|
ServiceName: "myservice",
|
||||||
|
Ipv4: "127.0.0.1",
|
||||||
|
Port: 443,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
key: "key",
|
||||||
|
value: "value",
|
||||||
|
endpoint: &endpoint{
|
||||||
|
ServiceName: "myservice",
|
||||||
|
Ipv4: "127.0.0.1",
|
||||||
|
Port: 443,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
bin := binaryAnnotation(tt.fields)
|
||||||
|
b := &bin
|
||||||
|
if got := b.Key(); got != tt.key {
|
||||||
|
t.Errorf("binaryAnnotation.Key() = %v, want %v", got, tt.key)
|
||||||
|
}
|
||||||
|
if got := b.Value(); got != tt.value {
|
||||||
|
t.Errorf("binaryAnnotation.Value() = %v, want %v", got, tt.value)
|
||||||
|
}
|
||||||
|
if got := b.Host(); !cmp.Equal(tt.endpoint, got) {
|
||||||
|
t.Errorf("binaryAnnotation.Endpoint() = %v, want %v", got, tt.endpoint)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_endpoint_Host(t *testing.T) {
|
||||||
|
type fields struct {
|
||||||
|
Ipv4 string
|
||||||
|
Port int
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
fields fields
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "with port",
|
||||||
|
fields: fields{
|
||||||
|
Ipv4: "127.0.0.1",
|
||||||
|
Port: 443,
|
||||||
|
},
|
||||||
|
want: "127.0.0.1:443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no port",
|
||||||
|
fields: fields{
|
||||||
|
Ipv4: "127.0.0.1",
|
||||||
|
},
|
||||||
|
want: "127.0.0.1",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
e := &endpoint{
|
||||||
|
Ipv4: tt.fields.Ipv4,
|
||||||
|
Port: tt.fields.Port,
|
||||||
|
}
|
||||||
|
if got := e.Host(); got != tt.want {
|
||||||
|
t.Errorf("endpoint.Host() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_endpoint_Name(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ServiceName string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "has service name",
|
||||||
|
ServiceName: "myservicename",
|
||||||
|
want: "myservicename",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
e := &endpoint{
|
||||||
|
ServiceName: tt.ServiceName,
|
||||||
|
}
|
||||||
|
if got := e.Name(); got != tt.want {
|
||||||
|
t.Errorf("endpoint.Name() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTraceIDFromString(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
s string
|
||||||
|
want string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Convert hex string id",
|
||||||
|
s: "6b221d5bc9e6496c",
|
||||||
|
want: "6b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "error : id too long",
|
||||||
|
s: "1234567890abcdef1234567890abcdef1",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "error : not parsable",
|
||||||
|
s: "howdyhowdyhowdy",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Convert hex string with high/low",
|
||||||
|
s: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
want: "48485a3953bb61246b221d5bc9e6496c",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors in high",
|
||||||
|
s: "ERR85a3953bb61246b221d5bc9e6496c",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "errors in low",
|
||||||
|
s: "48485a3953bb61246b221d5bc9e64ERR",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := TraceIDFromString(tt.s)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("TraceIDFromString() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("TraceIDFromString() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDFromString(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
s string
|
||||||
|
want string
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Convert hex string id",
|
||||||
|
s: "6b221d5bc9e6496c",
|
||||||
|
want: "7719764991332993388",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "error : id too long",
|
||||||
|
s: "1234567890abcdef1",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "error : not parsable",
|
||||||
|
s: "howdyhowdyhowdy",
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := IDFromString(tt.s)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("IDFromString() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("IDFromString() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,203 @@
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||||
|
|
||||||
|
"github.com/apache/thrift/lib/go/thrift"
|
||||||
|
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnmarshalThrift converts raw bytes in thrift format to a slice of spans
|
||||||
|
func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) {
|
||||||
|
buffer := thrift.NewTMemoryBuffer()
|
||||||
|
if _, err := buffer.Write(body); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
transport := thrift.NewTBinaryProtocolTransport(buffer)
|
||||||
|
_, size, err := transport.ReadListBegin()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
spans := make([]*zipkincore.Span, size)
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
zs := &zipkincore.Span{}
|
||||||
|
if err = zs.Read(transport); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
spans[i] = zs
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = transport.ReadListEnd(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return spans, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Thrift decodes binary data to create a Trace
|
||||||
|
type Thrift struct{}
|
||||||
|
|
||||||
|
// Decode unmarshals and validates bytes in thrift format
|
||||||
|
func (t *Thrift) Decode(octets []byte) ([]codec.Span, error) {
|
||||||
|
spans, err := UnmarshalThrift(octets)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res := make([]codec.Span, len(spans))
|
||||||
|
for i, s := range spans {
|
||||||
|
res[i] = &span{s}
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ codec.Endpoint = &endpoint{}
|
||||||
|
|
||||||
|
type endpoint struct {
|
||||||
|
*zipkincore.Endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *endpoint) Host() string {
|
||||||
|
ipv4 := func(addr int32) string {
|
||||||
|
buf := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(buf, uint32(addr))
|
||||||
|
return net.IP(buf).String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.Endpoint == nil {
|
||||||
|
return ipv4(int32(0))
|
||||||
|
}
|
||||||
|
if e.Endpoint.GetPort() == 0 {
|
||||||
|
return ipv4(e.Endpoint.GetIpv4())
|
||||||
|
}
|
||||||
|
// Zipkin uses a signed int16 for the port, but, warns us that they actually treat it
|
||||||
|
// as an unsigned int16. So, we convert from int16 to int32 followed by taking & 0xffff
|
||||||
|
// to convert from signed to unsigned
|
||||||
|
// https://github.com/openzipkin/zipkin/blob/57dc2ec9c65fe6144e401c0c933b4400463a69df/zipkin/src/main/java/zipkin/Endpoint.java#L44
|
||||||
|
return ipv4(e.Endpoint.GetIpv4()) + ":" + strconv.FormatInt(int64(int(e.Endpoint.GetPort())&0xffff), 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *endpoint) Name() string {
|
||||||
|
if e.Endpoint == nil {
|
||||||
|
return codec.DefaultServiceName
|
||||||
|
}
|
||||||
|
return e.Endpoint.GetServiceName()
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ codec.BinaryAnnotation = &binaryAnnotation{}
|
||||||
|
|
||||||
|
type binaryAnnotation struct {
|
||||||
|
*zipkincore.BinaryAnnotation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *binaryAnnotation) Key() string {
|
||||||
|
return b.BinaryAnnotation.GetKey()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *binaryAnnotation) Value() string {
|
||||||
|
return string(b.BinaryAnnotation.GetValue())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *binaryAnnotation) Host() codec.Endpoint {
|
||||||
|
if b.BinaryAnnotation.Host == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &endpoint{b.BinaryAnnotation.Host}
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ codec.Annotation = &annotation{}
|
||||||
|
|
||||||
|
type annotation struct {
|
||||||
|
*zipkincore.Annotation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *annotation) Timestamp() time.Time {
|
||||||
|
ts := a.Annotation.GetTimestamp()
|
||||||
|
if ts == 0 {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
return codec.MicroToTime(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *annotation) Value() string {
|
||||||
|
return a.Annotation.GetValue()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *annotation) Host() codec.Endpoint {
|
||||||
|
if a.Annotation.Host == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &endpoint{a.Annotation.Host}
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ codec.Span = &span{}
|
||||||
|
|
||||||
|
type span struct {
|
||||||
|
*zipkincore.Span
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Trace() (string, error) {
|
||||||
|
if s.Span.GetTraceIDHigh() == 0 && s.Span.GetTraceID() == 0 {
|
||||||
|
return "", fmt.Errorf("Span does not have a trace ID")
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.Span.GetTraceIDHigh() == 0 {
|
||||||
|
return fmt.Sprintf("%x", s.Span.GetTraceID()), nil
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%x%016x", s.Span.GetTraceIDHigh(), s.Span.GetTraceID()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) SpanID() (string, error) {
|
||||||
|
return formatID(s.Span.GetID()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Parent() (string, error) {
|
||||||
|
id := s.Span.GetParentID()
|
||||||
|
if id != 0 {
|
||||||
|
return formatID(id), nil
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Name() string {
|
||||||
|
return s.Span.GetName()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Annotations() []codec.Annotation {
|
||||||
|
res := make([]codec.Annotation, len(s.Span.Annotations))
|
||||||
|
for i := range s.Span.Annotations {
|
||||||
|
res[i] = &annotation{s.Span.Annotations[i]}
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) BinaryAnnotations() ([]codec.BinaryAnnotation, error) {
|
||||||
|
res := make([]codec.BinaryAnnotation, len(s.Span.BinaryAnnotations))
|
||||||
|
for i := range s.Span.BinaryAnnotations {
|
||||||
|
res[i] = &binaryAnnotation{s.Span.BinaryAnnotations[i]}
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Timestamp() time.Time {
|
||||||
|
ts := s.Span.GetTimestamp()
|
||||||
|
if ts == 0 {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
return codec.MicroToTime(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *span) Duration() time.Duration {
|
||||||
|
return time.Duration(s.Span.GetDuration()) * time.Microsecond
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatID renders a 64-bit zipkin span/parent ID in base 10.
//
// Thrift transports the ID in a signed int64, but zipkin treats the value as
// unsigned, so reinterpret the bits before formatting. Using FormatInt here
// would print IDs with the high bit set as negative numbers, diverging from
// the JSON codec (e.g. 0xb26412d1ac16767d must render as
// "12854419928166856317", not "-5592324145542695299").
func formatID(id int64) string {
	return strconv.FormatUint(uint64(id), 10)
}
|
|
@ -0,0 +1,211 @@
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
|
||||||
|
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_endpointHost(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
h *zipkincore.Endpoint
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Host Found",
|
||||||
|
args: args{
|
||||||
|
h: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 1234,
|
||||||
|
Port: 8888,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: "0.0.4.210:8888",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "No Host",
|
||||||
|
args: args{
|
||||||
|
h: nil,
|
||||||
|
},
|
||||||
|
want: "0.0.0.0",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "int overflow zipkin uses an int16 type as an unsigned int 16.",
|
||||||
|
args: args{
|
||||||
|
h: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 1234,
|
||||||
|
Port: -1,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: "0.0.4.210:65535",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
e := endpoint{tt.args.h}
|
||||||
|
if got := e.Host(); got != tt.want {
|
||||||
|
t.Errorf("host() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_endpointName(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
h *zipkincore.Endpoint
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Found ServiceName",
|
||||||
|
args: args{
|
||||||
|
h: &zipkincore.Endpoint{
|
||||||
|
ServiceName: "zipkin",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: "zipkin",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "No ServiceName",
|
||||||
|
args: args{
|
||||||
|
h: nil,
|
||||||
|
},
|
||||||
|
want: "unknown",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
e := endpoint{tt.args.h}
|
||||||
|
if got := e.Name(); got != tt.want {
|
||||||
|
t.Errorf("serviceName() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalThrift(t *testing.T) {
|
||||||
|
addr := func(i int64) *int64 { return &i }
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
filename string
|
||||||
|
want []*zipkincore.Span
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "threespans",
|
||||||
|
filename: "../../testdata/threespans.dat",
|
||||||
|
want: []*zipkincore.Span{
|
||||||
|
{
|
||||||
|
TraceID: 2505404965370368069,
|
||||||
|
Name: "Child",
|
||||||
|
ID: 8090652509916334619,
|
||||||
|
ParentID: addr(22964302721410078),
|
||||||
|
Timestamp: addr(1498688360851331),
|
||||||
|
Duration: addr(53106),
|
||||||
|
Annotations: []*zipkincore.Annotation{},
|
||||||
|
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
|
||||||
|
&zipkincore.BinaryAnnotation{
|
||||||
|
Key: "lc",
|
||||||
|
AnnotationType: zipkincore.AnnotationType_STRING,
|
||||||
|
Value: []byte("trivial"),
|
||||||
|
Host: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 2130706433,
|
||||||
|
ServiceName: "trivial",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
TraceID: 2505404965370368069,
|
||||||
|
Name: "Child",
|
||||||
|
ID: 103618986556047333,
|
||||||
|
ParentID: addr(22964302721410078),
|
||||||
|
Timestamp: addr(1498688360904552),
|
||||||
|
Duration: addr(50410),
|
||||||
|
Annotations: []*zipkincore.Annotation{},
|
||||||
|
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
|
||||||
|
&zipkincore.BinaryAnnotation{
|
||||||
|
Key: "lc",
|
||||||
|
AnnotationType: zipkincore.AnnotationType_STRING,
|
||||||
|
Value: []byte("trivial"),
|
||||||
|
Host: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 2130706433,
|
||||||
|
ServiceName: "trivial",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
TraceID: 2505404965370368069,
|
||||||
|
Name: "Parent",
|
||||||
|
ID: 22964302721410078,
|
||||||
|
Timestamp: addr(1498688360851318),
|
||||||
|
Duration: addr(103680),
|
||||||
|
Annotations: []*zipkincore.Annotation{
|
||||||
|
&zipkincore.Annotation{
|
||||||
|
Timestamp: 1498688360851325,
|
||||||
|
Value: "Starting child #0",
|
||||||
|
Host: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 2130706433,
|
||||||
|
ServiceName: "trivial",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&zipkincore.Annotation{
|
||||||
|
Timestamp: 1498688360904545,
|
||||||
|
Value: "Starting child #1",
|
||||||
|
Host: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 2130706433,
|
||||||
|
ServiceName: "trivial",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&zipkincore.Annotation{
|
||||||
|
Timestamp: 1498688360954992,
|
||||||
|
Value: "A Log",
|
||||||
|
Host: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 2130706433,
|
||||||
|
ServiceName: "trivial",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
|
||||||
|
&zipkincore.BinaryAnnotation{
|
||||||
|
Key: "lc",
|
||||||
|
AnnotationType: zipkincore.AnnotationType_STRING,
|
||||||
|
Value: []byte("trivial"),
|
||||||
|
Host: &zipkincore.Endpoint{
|
||||||
|
Ipv4: 2130706433,
|
||||||
|
ServiceName: "trivial",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
dat, err := ioutil.ReadFile(tt.filename)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Could not find file %s\n", tt.filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
got, err := UnmarshalThrift(dat)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("UnmarshalThrift() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !cmp.Equal(tt.want, got) {
|
||||||
|
t.Errorf("UnmarshalThrift() got(-)/want(+): %s", cmp.Diff(tt.want, got))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,22 +1,10 @@
|
||||||
package zipkin
|
package zipkin
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
// DefaultServiceName when the span does not have any serviceName
|
|
||||||
const DefaultServiceName = "unknown"
|
|
||||||
|
|
||||||
// now is a mockable source of the current time, overridable in tests
|
|
||||||
var now = time.Now
|
|
||||||
|
|
||||||
// LineProtocolConverter implements the Recorder interface; it is a
|
// LineProtocolConverter implements the Recorder interface; it is a
|
||||||
// type meant to encapsulate the storage of zipkin tracing data in
|
// type meant to encapsulate the storage of zipkin tracing data in
|
||||||
// telegraf as line protocol.
|
// telegraf as line protocol.
|
||||||
|
@ -35,7 +23,7 @@ func NewLineProtocolConverter(acc telegraf.Accumulator) *LineProtocolConverter {
|
||||||
// Record is LineProtocolConverter's implementation of the Record method of
|
// Record is LineProtocolConverter's implementation of the Record method of
|
||||||
// the Recorder interface; it takes a trace as input, and adds it to an internal
|
// the Recorder interface; it takes a trace as input, and adds it to an internal
|
||||||
// telegraf.Accumulator.
|
// telegraf.Accumulator.
|
||||||
func (l *LineProtocolConverter) Record(t Trace) error {
|
func (l *LineProtocolConverter) Record(t trace.Trace) error {
|
||||||
for _, s := range t {
|
for _, s := range t {
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"duration_ns": s.Duration.Nanoseconds(),
|
"duration_ns": s.Duration.Nanoseconds(),
|
||||||
|
@ -83,167 +71,3 @@ func (l *LineProtocolConverter) Record(t Trace) error {
|
||||||
func (l *LineProtocolConverter) Error(err error) {
|
func (l *LineProtocolConverter) Error(err error) {
|
||||||
l.acc.AddError(err)
|
l.acc.AddError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTrace converts a slice of []*zipkincore.Spans into a new Trace
|
|
||||||
func NewTrace(spans []*zipkincore.Span) Trace {
|
|
||||||
trace := make(Trace, len(spans))
|
|
||||||
for i, span := range spans {
|
|
||||||
endpoint := serviceEndpoint(span.GetAnnotations(), span.GetBinaryAnnotations())
|
|
||||||
trace[i] = Span{
|
|
||||||
ID: formatID(span.GetID()),
|
|
||||||
TraceID: formatTraceID(span.GetTraceIDHigh(), span.GetTraceID()),
|
|
||||||
Name: span.GetName(),
|
|
||||||
Timestamp: guessTimestamp(span),
|
|
||||||
Duration: convertDuration(span),
|
|
||||||
ParentID: parentID(span),
|
|
||||||
ServiceName: serviceName(endpoint),
|
|
||||||
Annotations: NewAnnotations(span.GetAnnotations(), endpoint),
|
|
||||||
BinaryAnnotations: NewBinaryAnnotations(span.GetBinaryAnnotations(), endpoint),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return trace
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAnnotations converts a slice of *zipkincore.Annotation into a slice
|
|
||||||
// of new Annotations
|
|
||||||
func NewAnnotations(annotations []*zipkincore.Annotation, endpoint *zipkincore.Endpoint) []Annotation {
|
|
||||||
formatted := make([]Annotation, len(annotations))
|
|
||||||
for i, annotation := range annotations {
|
|
||||||
formatted[i] = Annotation{
|
|
||||||
Host: host(endpoint),
|
|
||||||
ServiceName: serviceName(endpoint),
|
|
||||||
Timestamp: microToTime(annotation.GetTimestamp()),
|
|
||||||
Value: annotation.GetValue(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return formatted
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBinaryAnnotations is very similar to NewAnnotations, but it
|
|
||||||
// converts zipkincore.BinaryAnnotations instead of the normal zipkincore.Annotation
|
|
||||||
func NewBinaryAnnotations(annotations []*zipkincore.BinaryAnnotation, endpoint *zipkincore.Endpoint) []BinaryAnnotation {
|
|
||||||
formatted := make([]BinaryAnnotation, len(annotations))
|
|
||||||
for i, annotation := range annotations {
|
|
||||||
formatted[i] = BinaryAnnotation{
|
|
||||||
Host: host(endpoint),
|
|
||||||
ServiceName: serviceName(endpoint),
|
|
||||||
Key: annotation.GetKey(),
|
|
||||||
Value: string(annotation.GetValue()),
|
|
||||||
Type: annotation.GetAnnotationType().String(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return formatted
|
|
||||||
}
|
|
||||||
|
|
||||||
func microToTime(micro int64) time.Time {
|
|
||||||
return time.Unix(0, micro*int64(time.Microsecond)).UTC()
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatID(id int64) string {
|
|
||||||
return strconv.FormatInt(id, 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatTraceID(high, low int64) string {
|
|
||||||
if high == 0 {
|
|
||||||
return fmt.Sprintf("%x", low)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%x%016x", high, low)
|
|
||||||
}
|
|
||||||
|
|
||||||
func minMax(span *zipkincore.Span) (time.Time, time.Time) {
|
|
||||||
min := now().UTC()
|
|
||||||
max := time.Time{}.UTC()
|
|
||||||
for _, annotation := range span.Annotations {
|
|
||||||
ts := microToTime(annotation.GetTimestamp())
|
|
||||||
if !ts.IsZero() && ts.Before(min) {
|
|
||||||
min = ts
|
|
||||||
}
|
|
||||||
if !ts.IsZero() && ts.After(max) {
|
|
||||||
max = ts
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if max.IsZero() {
|
|
||||||
max = min
|
|
||||||
}
|
|
||||||
return min, max
|
|
||||||
}
|
|
||||||
|
|
||||||
func guessTimestamp(span *zipkincore.Span) time.Time {
|
|
||||||
if span.GetTimestamp() != 0 {
|
|
||||||
return microToTime(span.GetTimestamp())
|
|
||||||
}
|
|
||||||
min, _ := minMax(span)
|
|
||||||
return min
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertDuration(span *zipkincore.Span) time.Duration {
|
|
||||||
duration := time.Duration(span.GetDuration()) * time.Microsecond
|
|
||||||
if duration != 0 {
|
|
||||||
return duration
|
|
||||||
}
|
|
||||||
min, max := minMax(span)
|
|
||||||
return max.Sub(min)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parentID(span *zipkincore.Span) string {
|
|
||||||
// A parent ID of 0 means that this is a parent span. In this case,
|
|
||||||
// we set the parent ID of the span to be its own id, so it points to
|
|
||||||
// itself.
|
|
||||||
id := span.GetParentID()
|
|
||||||
if id != 0 {
|
|
||||||
return formatID(id)
|
|
||||||
}
|
|
||||||
return formatID(span.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func ipv4(addr int32) string {
|
|
||||||
buf := make([]byte, 4)
|
|
||||||
binary.BigEndian.PutUint32(buf, uint32(addr))
|
|
||||||
return net.IP(buf).String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func host(h *zipkincore.Endpoint) string {
|
|
||||||
if h == nil {
|
|
||||||
return ipv4(int32(0))
|
|
||||||
}
|
|
||||||
if h.GetPort() == 0 {
|
|
||||||
return ipv4(h.GetIpv4())
|
|
||||||
}
|
|
||||||
// Zipkin uses a signed int16 for the port, but, warns us that they actually treat it
|
|
||||||
// as an unsigned int16. So, we convert from int16 to int32 followed by taking & 0xffff
|
|
||||||
// to convert from signed to unsigned
|
|
||||||
// https://github.com/openzipkin/zipkin/blob/57dc2ec9c65fe6144e401c0c933b4400463a69df/zipkin/src/main/java/zipkin/Endpoint.java#L44
|
|
||||||
return ipv4(h.GetIpv4()) + ":" + strconv.FormatInt(int64(int(h.GetPort())&0xffff), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
func serviceName(h *zipkincore.Endpoint) string {
|
|
||||||
if h == nil {
|
|
||||||
return DefaultServiceName
|
|
||||||
}
|
|
||||||
return h.GetServiceName()
|
|
||||||
}
|
|
||||||
|
|
||||||
func serviceEndpoint(ann []*zipkincore.Annotation, bann []*zipkincore.BinaryAnnotation) *zipkincore.Endpoint {
|
|
||||||
for _, a := range ann {
|
|
||||||
switch a.Value {
|
|
||||||
case zipkincore.SERVER_RECV, zipkincore.SERVER_SEND, zipkincore.CLIENT_RECV, zipkincore.CLIENT_SEND:
|
|
||||||
if a.Host != nil && a.Host.ServiceName != "" {
|
|
||||||
return a.Host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, a := range bann {
|
|
||||||
if a.Key == zipkincore.LOCAL_COMPONENT && a.Host != nil && a.Host.ServiceName != "" {
|
|
||||||
return a.Host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Unable to find any "standard" endpoint host, so, use any that exist in the regular annotations
|
|
||||||
for _, a := range ann {
|
|
||||||
if a.Host != nil && a.Host.ServiceName != "" {
|
|
||||||
return a.Host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,14 +1,13 @@
|
||||||
package zipkin
|
package zipkin
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdata/telegraf/testutil"
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestLineProtocolConverter_Record(t *testing.T) {
|
func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
|
@ -17,7 +16,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
acc telegraf.Accumulator
|
acc telegraf.Accumulator
|
||||||
}
|
}
|
||||||
type args struct {
|
type args struct {
|
||||||
t Trace
|
t trace.Trace
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
|
@ -32,8 +31,8 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
acc: &mockAcc,
|
acc: &mockAcc,
|
||||||
},
|
},
|
||||||
args: args{
|
args: args{
|
||||||
t: Trace{
|
t: trace.Trace{
|
||||||
Span{
|
{
|
||||||
ID: "8090652509916334619",
|
ID: "8090652509916334619",
|
||||||
TraceID: "2505404965370368069",
|
TraceID: "2505404965370368069",
|
||||||
Name: "Child",
|
Name: "Child",
|
||||||
|
@ -41,18 +40,17 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
Timestamp: time.Unix(0, 1498688360851331000).UTC(),
|
Timestamp: time.Unix(0, 1498688360851331000).UTC(),
|
||||||
Duration: time.Duration(53106) * time.Microsecond,
|
Duration: time.Duration(53106) * time.Microsecond,
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Annotations: []Annotation{},
|
Annotations: []trace.Annotation{},
|
||||||
BinaryAnnotations: []BinaryAnnotation{
|
BinaryAnnotations: []trace.BinaryAnnotation{
|
||||||
BinaryAnnotation{
|
{
|
||||||
Key: "lc",
|
Key: "lc",
|
||||||
Value: "dHJpdmlhbA==",
|
Value: "dHJpdmlhbA==",
|
||||||
Host: "2130706433:0",
|
Host: "2130706433:0",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Type: "STRING",
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Span{
|
{
|
||||||
ID: "103618986556047333",
|
ID: "103618986556047333",
|
||||||
TraceID: "2505404965370368069",
|
TraceID: "2505404965370368069",
|
||||||
Name: "Child",
|
Name: "Child",
|
||||||
|
@ -60,18 +58,17 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
Timestamp: time.Unix(0, 1498688360904552000).UTC(),
|
Timestamp: time.Unix(0, 1498688360904552000).UTC(),
|
||||||
Duration: time.Duration(50410) * time.Microsecond,
|
Duration: time.Duration(50410) * time.Microsecond,
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Annotations: []Annotation{},
|
Annotations: []trace.Annotation{},
|
||||||
BinaryAnnotations: []BinaryAnnotation{
|
BinaryAnnotations: []trace.BinaryAnnotation{
|
||||||
BinaryAnnotation{
|
{
|
||||||
Key: "lc",
|
Key: "lc",
|
||||||
Value: "dHJpdmlhbA==",
|
Value: "dHJpdmlhbA==",
|
||||||
Host: "2130706433:0",
|
Host: "2130706433:0",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Type: "STRING",
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Span{
|
{
|
||||||
ID: "22964302721410078",
|
ID: "22964302721410078",
|
||||||
TraceID: "2505404965370368069",
|
TraceID: "2505404965370368069",
|
||||||
Name: "Parent",
|
Name: "Parent",
|
||||||
|
@ -79,33 +76,32 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
Timestamp: time.Unix(0, 1498688360851318000).UTC(),
|
Timestamp: time.Unix(0, 1498688360851318000).UTC(),
|
||||||
Duration: time.Duration(103680) * time.Microsecond,
|
Duration: time.Duration(103680) * time.Microsecond,
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Annotations: []Annotation{
|
Annotations: []trace.Annotation{
|
||||||
Annotation{
|
{
|
||||||
Timestamp: time.Unix(0, 1498688360851325000).UTC(),
|
Timestamp: time.Unix(0, 1498688360851325000).UTC(),
|
||||||
Value: "Starting child #0",
|
Value: "Starting child #0",
|
||||||
Host: "2130706433:0",
|
Host: "2130706433:0",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
},
|
},
|
||||||
Annotation{
|
{
|
||||||
Timestamp: time.Unix(0, 1498688360904545000).UTC(),
|
Timestamp: time.Unix(0, 1498688360904545000).UTC(),
|
||||||
Value: "Starting child #1",
|
Value: "Starting child #1",
|
||||||
Host: "2130706433:0",
|
Host: "2130706433:0",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
},
|
},
|
||||||
Annotation{
|
{
|
||||||
Timestamp: time.Unix(0, 1498688360954992000).UTC(),
|
Timestamp: time.Unix(0, 1498688360954992000).UTC(),
|
||||||
Value: "A Log",
|
Value: "A Log",
|
||||||
Host: "2130706433:0",
|
Host: "2130706433:0",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
BinaryAnnotations: []BinaryAnnotation{
|
BinaryAnnotations: []trace.BinaryAnnotation{
|
||||||
BinaryAnnotation{
|
{
|
||||||
Key: "lc",
|
Key: "lc",
|
||||||
Value: "dHJpdmlhbA==",
|
Value: "dHJpdmlhbA==",
|
||||||
Host: "2130706433:0",
|
Host: "2130706433:0",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Type: "STRING",
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -265,8 +261,8 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
acc: &mockAcc,
|
acc: &mockAcc,
|
||||||
},
|
},
|
||||||
args: args{
|
args: args{
|
||||||
t: Trace{
|
t: trace.Trace{
|
||||||
Span{
|
{
|
||||||
ID: "6802735349851856000",
|
ID: "6802735349851856000",
|
||||||
TraceID: "0:6802735349851856000",
|
TraceID: "0:6802735349851856000",
|
||||||
Name: "main.dud",
|
Name: "main.dud",
|
||||||
|
@ -274,15 +270,15 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
Timestamp: time.Unix(1, 0).UTC(),
|
Timestamp: time.Unix(1, 0).UTC(),
|
||||||
Duration: 1,
|
Duration: 1,
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Annotations: []Annotation{
|
Annotations: []trace.Annotation{
|
||||||
Annotation{
|
{
|
||||||
Timestamp: time.Unix(0, 1433330263415871000).UTC(),
|
Timestamp: time.Unix(0, 1433330263415871000).UTC(),
|
||||||
Value: "cs",
|
Value: "cs",
|
||||||
Host: "0:9410",
|
Host: "0:9410",
|
||||||
ServiceName: "go-zipkin-testclient",
|
ServiceName: "go-zipkin-testclient",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
BinaryAnnotations: []BinaryAnnotation{},
|
BinaryAnnotations: []trace.BinaryAnnotation{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -339,206 +335,3 @@ func TestLineProtocolConverter_Record(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_microToTime(t *testing.T) {
|
|
||||||
type args struct {
|
|
||||||
micro int64
|
|
||||||
}
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args args
|
|
||||||
want time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "given zero micro seconds expected unix time zero",
|
|
||||||
args: args{
|
|
||||||
micro: 0,
|
|
||||||
},
|
|
||||||
want: time.Unix(0, 0).UTC(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "given a million micro seconds expected unix time one",
|
|
||||||
args: args{
|
|
||||||
micro: 1000000,
|
|
||||||
},
|
|
||||||
want: time.Unix(1, 0).UTC(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := microToTime(tt.args.micro); !reflect.DeepEqual(got, tt.want) {
|
|
||||||
t.Errorf("microToTime() = %v, want %v", got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAnnotation(micro int64) *zipkincore.Annotation {
|
|
||||||
return &zipkincore.Annotation{
|
|
||||||
Timestamp: micro,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_minMax(t *testing.T) {
|
|
||||||
type args struct {
|
|
||||||
span *zipkincore.Span
|
|
||||||
}
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args args
|
|
||||||
now func() time.Time
|
|
||||||
wantMin time.Time
|
|
||||||
wantMax time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "Single annotation",
|
|
||||||
args: args{
|
|
||||||
span: &zipkincore.Span{
|
|
||||||
Annotations: []*zipkincore.Annotation{
|
|
||||||
newAnnotation(1000000),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantMin: time.Unix(1, 0).UTC(),
|
|
||||||
wantMax: time.Unix(1, 0).UTC(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Three annotations",
|
|
||||||
args: args{
|
|
||||||
span: &zipkincore.Span{
|
|
||||||
Annotations: []*zipkincore.Annotation{
|
|
||||||
newAnnotation(1000000),
|
|
||||||
newAnnotation(2000000),
|
|
||||||
newAnnotation(3000000),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantMin: time.Unix(1, 0).UTC(),
|
|
||||||
wantMax: time.Unix(3, 0).UTC(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Annotations are in the future",
|
|
||||||
args: args{
|
|
||||||
span: &zipkincore.Span{
|
|
||||||
Annotations: []*zipkincore.Annotation{
|
|
||||||
newAnnotation(3000000),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantMin: time.Unix(2, 0).UTC(),
|
|
||||||
wantMax: time.Unix(3, 0).UTC(),
|
|
||||||
now: func() time.Time {
|
|
||||||
return time.Unix(2, 0).UTC()
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "No Annotations",
|
|
||||||
args: args{
|
|
||||||
span: &zipkincore.Span{
|
|
||||||
Annotations: []*zipkincore.Annotation{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
wantMin: time.Unix(2, 0).UTC(),
|
|
||||||
wantMax: time.Unix(2, 0).UTC(),
|
|
||||||
now: func() time.Time {
|
|
||||||
return time.Unix(2, 0).UTC()
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if tt.now != nil {
|
|
||||||
now = tt.now
|
|
||||||
}
|
|
||||||
got, got1 := minMax(tt.args.span)
|
|
||||||
if !reflect.DeepEqual(got, tt.wantMin) {
|
|
||||||
t.Errorf("minMax() got = %v, want %v", got, tt.wantMin)
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(got1, tt.wantMax) {
|
|
||||||
t.Errorf("minMax() got1 = %v, want %v", got1, tt.wantMax)
|
|
||||||
}
|
|
||||||
now = time.Now
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_host(t *testing.T) {
|
|
||||||
type args struct {
|
|
||||||
h *zipkincore.Endpoint
|
|
||||||
}
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args args
|
|
||||||
want string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "Host Found",
|
|
||||||
args: args{
|
|
||||||
h: &zipkincore.Endpoint{
|
|
||||||
Ipv4: 1234,
|
|
||||||
Port: 8888,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
want: "0.0.4.210:8888",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "No Host",
|
|
||||||
args: args{
|
|
||||||
h: nil,
|
|
||||||
},
|
|
||||||
want: "0.0.0.0",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "int overflow zipkin uses an int16 type as an unsigned int 16.",
|
|
||||||
args: args{
|
|
||||||
h: &zipkincore.Endpoint{
|
|
||||||
Ipv4: 1234,
|
|
||||||
Port: -1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
want: "0.0.4.210:65535",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := host(tt.args.h); got != tt.want {
|
|
||||||
t.Errorf("host() = %v, want %v", got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_serviceName(t *testing.T) {
|
|
||||||
type args struct {
|
|
||||||
h *zipkincore.Endpoint
|
|
||||||
}
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
args args
|
|
||||||
want string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "Found ServiceName",
|
|
||||||
args: args{
|
|
||||||
h: &zipkincore.Endpoint{
|
|
||||||
ServiceName: "zipkin",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
want: "zipkin",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "No ServiceName",
|
|
||||||
args: args{
|
|
||||||
h: nil,
|
|
||||||
},
|
|
||||||
want: "unknown",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := serviceName(tt.args.h); got != tt.want {
|
|
||||||
t.Errorf("serviceName() = %v, want %v", got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -2,14 +2,16 @@ package zipkin
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"mime"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/apache/thrift/lib/go/thrift"
|
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/jsonV1"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SpanHandler is an implementation of a Handler which accepts zipkin thrift
|
// SpanHandler is an implementation of a Handler which accepts zipkin thrift
|
||||||
|
@ -17,7 +19,6 @@ import (
|
||||||
type SpanHandler struct {
|
type SpanHandler struct {
|
||||||
Path string
|
Path string
|
||||||
recorder Recorder
|
recorder Recorder
|
||||||
waitGroup *sync.WaitGroup
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSpanHandler returns a new server instance given path to handle
|
// NewSpanHandler returns a new server instance given path to handle
|
||||||
|
@ -81,6 +82,12 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
|
||||||
defer body.Close()
|
defer body.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
decoder, err := ContentDecoder(r)
|
||||||
|
if err != nil {
|
||||||
|
s.recorder.Error(err)
|
||||||
|
w.WriteHeader(http.StatusUnsupportedMediaType)
|
||||||
|
}
|
||||||
|
|
||||||
octets, err := ioutil.ReadAll(body)
|
octets, err := ioutil.ReadAll(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.recorder.Error(err)
|
s.recorder.Error(err)
|
||||||
|
@ -88,14 +95,19 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
spans, err := unmarshalThrift(octets)
|
spans, err := decoder.Decode(octets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.recorder.Error(err)
|
s.recorder.Error(err)
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
w.WriteHeader(http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
trace := NewTrace(spans)
|
trace, err := codec.NewTrace(spans)
|
||||||
|
if err != nil {
|
||||||
|
s.recorder.Error(err)
|
||||||
|
w.WriteHeader(http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if err = s.recorder.Record(trace); err != nil {
|
if err = s.recorder.Record(trace); err != nil {
|
||||||
s.recorder.Error(err)
|
s.recorder.Error(err)
|
||||||
|
@ -106,30 +118,25 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmarshalThrift(body []byte) ([]*zipkincore.Span, error) {
|
// ContentDecoer returns a Decoder that is able to produce Traces from bytes.
|
||||||
buffer := thrift.NewTMemoryBuffer()
|
// Failure should yield an HTTP 415 (`http.StatusUnsupportedMediaType`)
|
||||||
if _, err := buffer.Write(body); err != nil {
|
// If a Content-Type is not set, zipkin assumes application/json
|
||||||
return nil, err
|
func ContentDecoder(r *http.Request) (codec.Decoder, error) {
|
||||||
|
contentType := r.Header.Get("Content-Type")
|
||||||
|
if contentType == "" {
|
||||||
|
return &jsonV1.JSON{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
transport := thrift.NewTBinaryProtocolTransport(buffer)
|
for _, v := range strings.Split(contentType, ",") {
|
||||||
_, size, err := transport.ReadListBegin()
|
t, _, err := mime.ParseMediaType(v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
break
|
||||||
}
|
}
|
||||||
|
if t == "application/json" {
|
||||||
spans := make([]*zipkincore.Span, size)
|
return &jsonV1.JSON{}, nil
|
||||||
for i := 0; i < size; i++ {
|
} else if t == "application/x-thrift" {
|
||||||
zs := &zipkincore.Span{}
|
return &thrift.Thrift{}, nil
|
||||||
if err = zs.Read(transport); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
spans[i] = zs
|
|
||||||
}
|
}
|
||||||
|
return nil, fmt.Errorf("Unknown Content-Type: %s", contentType)
|
||||||
if err = transport.ReadListEnd(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return spans, nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -10,14 +10,15 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
type MockRecorder struct {
|
type MockRecorder struct {
|
||||||
Data Trace
|
Data trace.Trace
|
||||||
Err error
|
Err error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MockRecorder) Record(t Trace) error {
|
func (m *MockRecorder) Record(t trace.Trace) error {
|
||||||
m.Data = t
|
m.Data = t
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -39,6 +40,7 @@ func TestSpanHandler(t *testing.T) {
|
||||||
ioutil.NopCloser(
|
ioutil.NopCloser(
|
||||||
bytes.NewReader(dat)))
|
bytes.NewReader(dat)))
|
||||||
|
|
||||||
|
r.Header.Set("Content-Type", "application/x-thrift")
|
||||||
handler := NewSpanHandler("/api/v1/spans")
|
handler := NewSpanHandler("/api/v1/spans")
|
||||||
mockRecorder := &MockRecorder{}
|
mockRecorder := &MockRecorder{}
|
||||||
handler.recorder = mockRecorder
|
handler.recorder = mockRecorder
|
||||||
|
@ -51,8 +53,8 @@ func TestSpanHandler(t *testing.T) {
|
||||||
got := mockRecorder.Data
|
got := mockRecorder.Data
|
||||||
|
|
||||||
parentID := strconv.FormatInt(22964302721410078, 10)
|
parentID := strconv.FormatInt(22964302721410078, 10)
|
||||||
want := Trace{
|
want := trace.Trace{
|
||||||
Span{
|
{
|
||||||
Name: "Child",
|
Name: "Child",
|
||||||
ID: "8090652509916334619",
|
ID: "8090652509916334619",
|
||||||
TraceID: "22c4fc8ab3669045",
|
TraceID: "22c4fc8ab3669045",
|
||||||
|
@ -60,18 +62,17 @@ func TestSpanHandler(t *testing.T) {
|
||||||
Timestamp: time.Unix(0, 1498688360851331*int64(time.Microsecond)).UTC(),
|
Timestamp: time.Unix(0, 1498688360851331*int64(time.Microsecond)).UTC(),
|
||||||
Duration: time.Duration(53106) * time.Microsecond,
|
Duration: time.Duration(53106) * time.Microsecond,
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Annotations: []Annotation{},
|
Annotations: []trace.Annotation{},
|
||||||
BinaryAnnotations: []BinaryAnnotation{
|
BinaryAnnotations: []trace.BinaryAnnotation{
|
||||||
BinaryAnnotation{
|
{
|
||||||
Key: "lc",
|
Key: "lc",
|
||||||
Value: "trivial",
|
Value: "trivial",
|
||||||
Host: "127.0.0.1",
|
Host: "127.0.0.1",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Type: "STRING",
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Span{
|
{
|
||||||
Name: "Child",
|
Name: "Child",
|
||||||
ID: "103618986556047333",
|
ID: "103618986556047333",
|
||||||
TraceID: "22c4fc8ab3669045",
|
TraceID: "22c4fc8ab3669045",
|
||||||
|
@ -79,18 +80,17 @@ func TestSpanHandler(t *testing.T) {
|
||||||
Timestamp: time.Unix(0, 1498688360904552*int64(time.Microsecond)).UTC(),
|
Timestamp: time.Unix(0, 1498688360904552*int64(time.Microsecond)).UTC(),
|
||||||
Duration: time.Duration(50410) * time.Microsecond,
|
Duration: time.Duration(50410) * time.Microsecond,
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Annotations: []Annotation{},
|
Annotations: []trace.Annotation{},
|
||||||
BinaryAnnotations: []BinaryAnnotation{
|
BinaryAnnotations: []trace.BinaryAnnotation{
|
||||||
BinaryAnnotation{
|
{
|
||||||
Key: "lc",
|
Key: "lc",
|
||||||
Value: "trivial",
|
Value: "trivial",
|
||||||
Host: "127.0.0.1",
|
Host: "127.0.0.1",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Type: "STRING",
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Span{
|
{
|
||||||
Name: "Parent",
|
Name: "Parent",
|
||||||
ID: "22964302721410078",
|
ID: "22964302721410078",
|
||||||
TraceID: "22c4fc8ab3669045",
|
TraceID: "22c4fc8ab3669045",
|
||||||
|
@ -98,33 +98,32 @@ func TestSpanHandler(t *testing.T) {
|
||||||
Timestamp: time.Unix(0, 1498688360851318*int64(time.Microsecond)).UTC(),
|
Timestamp: time.Unix(0, 1498688360851318*int64(time.Microsecond)).UTC(),
|
||||||
Duration: time.Duration(103680) * time.Microsecond,
|
Duration: time.Duration(103680) * time.Microsecond,
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Annotations: []Annotation{
|
Annotations: []trace.Annotation{
|
||||||
Annotation{
|
{
|
||||||
Timestamp: time.Unix(0, 1498688360851325*int64(time.Microsecond)).UTC(),
|
Timestamp: time.Unix(0, 1498688360851325*int64(time.Microsecond)).UTC(),
|
||||||
Value: "Starting child #0",
|
Value: "Starting child #0",
|
||||||
Host: "127.0.0.1",
|
Host: "127.0.0.1",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
},
|
},
|
||||||
Annotation{
|
{
|
||||||
Timestamp: time.Unix(0, 1498688360904545*int64(time.Microsecond)).UTC(),
|
Timestamp: time.Unix(0, 1498688360904545*int64(time.Microsecond)).UTC(),
|
||||||
Value: "Starting child #1",
|
Value: "Starting child #1",
|
||||||
Host: "127.0.0.1",
|
Host: "127.0.0.1",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
},
|
},
|
||||||
Annotation{
|
{
|
||||||
Timestamp: time.Unix(0, 1498688360954992*int64(time.Microsecond)).UTC(),
|
Timestamp: time.Unix(0, 1498688360954992*int64(time.Microsecond)).UTC(),
|
||||||
Value: "A Log",
|
Value: "A Log",
|
||||||
Host: "127.0.0.1",
|
Host: "127.0.0.1",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
BinaryAnnotations: []BinaryAnnotation{
|
BinaryAnnotations: []trace.BinaryAnnotation{
|
||||||
BinaryAnnotation{
|
{
|
||||||
Key: "lc",
|
Key: "lc",
|
||||||
Value: "trivial",
|
Value: "trivial",
|
||||||
Host: "127.0.0.1",
|
Host: "127.0.0.1",
|
||||||
ServiceName: "trivial",
|
ServiceName: "trivial",
|
||||||
Type: "STRING",
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
|
@ -0,0 +1,188 @@
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"traceId": "7312f822d43d0fd8",
|
||||||
|
"id": "b26412d1ac16767d",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parentId": "7312f822d43d0fd8",
|
||||||
|
"annotations": [
|
||||||
|
{
|
||||||
|
"timestamp": 1503031538791000,
|
||||||
|
"value": "sr",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1503031538794000,
|
||||||
|
"value": "ss",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "mvc.controller.class",
|
||||||
|
"value": "Demo2Application",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "mvc.controller.method",
|
||||||
|
"value": "hi2",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "spring.instance_id",
|
||||||
|
"value": "192.168.0.8:test:8010",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"traceId": "7312f822d43d0fd8",
|
||||||
|
"id": "b26412d1ac16767d",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parentId": "7312f822d43d0fd8",
|
||||||
|
"timestamp": 1503031538786000,
|
||||||
|
"duration": 10000,
|
||||||
|
"annotations": [
|
||||||
|
{
|
||||||
|
"timestamp": 1503031538786000,
|
||||||
|
"value": "cs",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1503031538796000,
|
||||||
|
"value": "cr",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "http.host",
|
||||||
|
"value": "localhost",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "http.method",
|
||||||
|
"value": "GET",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "http.path",
|
||||||
|
"value": "/hi2",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "http.url",
|
||||||
|
"value": "http://localhost:8010/hi2",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "spring.instance_id",
|
||||||
|
"value": "192.168.0.8:test:8010",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"traceId": "7312f822d43d0fd8",
|
||||||
|
"id": "7312f822d43d0fd8",
|
||||||
|
"name": "http:/hi",
|
||||||
|
"timestamp": 1503031538778000,
|
||||||
|
"duration": 23393,
|
||||||
|
"annotations": [
|
||||||
|
{
|
||||||
|
"timestamp": 1503031538778000,
|
||||||
|
"value": "sr",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1503031538801000,
|
||||||
|
"value": "ss",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"binaryAnnotations": [
|
||||||
|
{
|
||||||
|
"key": "mvc.controller.class",
|
||||||
|
"value": "Demo2Application",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "mvc.controller.method",
|
||||||
|
"value": "hi",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "spring.instance_id",
|
||||||
|
"value": "192.168.0.8:test:8010",
|
||||||
|
"endpoint": {
|
||||||
|
"serviceName": "test",
|
||||||
|
"ipv4": "192.168.0.8",
|
||||||
|
"port": 8010
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
|
@ -0,0 +1,41 @@
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Trace is an array (or a series) of spans
|
||||||
|
type Trace []Span
|
||||||
|
|
||||||
|
//Span represents a specific zipkin span. It holds the majority of the same
|
||||||
|
// data as a zipkin span sent via the thrift protocol, but is presented in a
|
||||||
|
// format which is more straightforward for storage purposes.
|
||||||
|
type Span struct {
|
||||||
|
ID string
|
||||||
|
TraceID string // zipkin traceid high concat with traceid
|
||||||
|
Name string
|
||||||
|
ParentID string
|
||||||
|
ServiceName string
|
||||||
|
Timestamp time.Time // If zipkin input is nil then time.Now()
|
||||||
|
Duration time.Duration
|
||||||
|
Annotations []Annotation
|
||||||
|
BinaryAnnotations []BinaryAnnotation
|
||||||
|
}
|
||||||
|
|
||||||
|
// BinaryAnnotation represents a zipkin binary annotation. It contains
|
||||||
|
// all of the same fields as might be found in its zipkin counterpart.
|
||||||
|
type BinaryAnnotation struct {
|
||||||
|
Key string
|
||||||
|
Value string
|
||||||
|
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
|
||||||
|
ServiceName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Annotation represents an ordinary zipkin annotation. It contains the data fields
|
||||||
|
// which will become fields/tags in influxdb
|
||||||
|
type Annotation struct {
|
||||||
|
Timestamp time.Time
|
||||||
|
Value string
|
||||||
|
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
|
||||||
|
ServiceName string
|
||||||
|
}
|
|
@ -8,11 +8,11 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -32,7 +32,7 @@ const (
|
||||||
// Recorder represents a type which can record zipkin trace data as well as
|
// Recorder represents a type which can record zipkin trace data as well as
|
||||||
// any accompanying errors, and process that data.
|
// any accompanying errors, and process that data.
|
||||||
type Recorder interface {
|
type Recorder interface {
|
||||||
Record(Trace) error
|
Record(trace.Trace) error
|
||||||
Error(error)
|
Error(error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -42,43 +42,6 @@ type Handler interface {
|
||||||
Register(router *mux.Router, recorder Recorder) error
|
Register(router *mux.Router, recorder Recorder) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// BinaryAnnotation represents a zipkin binary annotation. It contains
|
|
||||||
// all of the same fields as might be found in its zipkin counterpart.
|
|
||||||
type BinaryAnnotation struct {
|
|
||||||
Key string
|
|
||||||
Value string
|
|
||||||
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
|
|
||||||
ServiceName string
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Annotation represents an ordinary zipkin annotation. It contains the data fields
|
|
||||||
// which will become fields/tags in influxdb
|
|
||||||
type Annotation struct {
|
|
||||||
Timestamp time.Time
|
|
||||||
Value string
|
|
||||||
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
|
|
||||||
ServiceName string
|
|
||||||
}
|
|
||||||
|
|
||||||
//Span represents a specific zipkin span. It holds the majority of the same
|
|
||||||
// data as a zipkin span sent via the thrift protocol, but is presented in a
|
|
||||||
// format which is more straightforward for storage purposes.
|
|
||||||
type Span struct {
|
|
||||||
ID string
|
|
||||||
TraceID string // zipkin traceid high concat with traceid
|
|
||||||
Name string
|
|
||||||
ParentID string
|
|
||||||
ServiceName string
|
|
||||||
Timestamp time.Time // If zipkin input is nil then time.Now()
|
|
||||||
Duration time.Duration
|
|
||||||
Annotations []Annotation
|
|
||||||
BinaryAnnotations []BinaryAnnotation
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trace is an array (or a series) of spans
|
|
||||||
type Trace []Span
|
|
||||||
|
|
||||||
const sampleConfig = `
|
const sampleConfig = `
|
||||||
# path = "/api/v1/spans" # URL path for span data
|
# path = "/api/v1/spans" # URL path for span data
|
||||||
# port = 9411 # Port on which Telegraf listens
|
# port = 9411 # Port on which Telegraf listens
|
||||||
|
@ -122,7 +85,9 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error {
|
||||||
|
|
||||||
router := mux.NewRouter()
|
router := mux.NewRouter()
|
||||||
converter := NewLineProtocolConverter(acc)
|
converter := NewLineProtocolConverter(acc)
|
||||||
z.handler.Register(router, converter)
|
if err := z.handler.Register(router, converter); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
z.server = &http.Server{
|
z.server = &http.Server{
|
||||||
Handler: router,
|
Handler: router,
|
||||||
|
|
|
@ -17,13 +17,15 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
thriftDataFile string //path name to a binary thrift data file which contains test data
|
datafile string // data file which contains test data
|
||||||
|
contentType string
|
||||||
wantErr bool
|
wantErr bool
|
||||||
want []testutil.Metric
|
want []testutil.Metric
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "threespan",
|
name: "threespan",
|
||||||
thriftDataFile: "testdata/threespans.dat",
|
datafile: "testdata/threespans.dat",
|
||||||
|
contentType: "application/x-thrift",
|
||||||
want: []testutil.Metric{
|
want: []testutil.Metric{
|
||||||
testutil.Metric{
|
testutil.Metric{
|
||||||
Measurement: "zipkin",
|
Measurement: "zipkin",
|
||||||
|
@ -171,7 +173,8 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "distributed_trace_sample",
|
name: "distributed_trace_sample",
|
||||||
thriftDataFile: "testdata/distributed_trace_sample.dat",
|
datafile: "testdata/distributed_trace_sample.dat",
|
||||||
|
contentType: "application/x-thrift",
|
||||||
want: []testutil.Metric{
|
want: []testutil.Metric{
|
||||||
testutil.Metric{
|
testutil.Metric{
|
||||||
Measurement: "zipkin",
|
Measurement: "zipkin",
|
||||||
|
@ -185,7 +188,6 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
Fields: map[string]interface{}{
|
Fields: map[string]interface{}{
|
||||||
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
|
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
|
||||||
},
|
},
|
||||||
//Time: time.Unix(1, 0).UTC(),
|
|
||||||
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
|
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
|
||||||
},
|
},
|
||||||
testutil.Metric{
|
testutil.Metric{
|
||||||
|
@ -202,7 +204,6 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
Fields: map[string]interface{}{
|
Fields: map[string]interface{}{
|
||||||
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
|
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
|
||||||
},
|
},
|
||||||
//Time: time.Unix(1, 0).UTC(),
|
|
||||||
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
|
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
|
||||||
},
|
},
|
||||||
testutil.Metric{
|
testutil.Metric{
|
||||||
|
@ -223,6 +224,337 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "JSON rather than thrift",
|
||||||
|
datafile: "testdata/json/brave-tracer-example.json",
|
||||||
|
contentType: "application/json",
|
||||||
|
want: []testutil.Metric{
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(3000000),
|
||||||
|
}, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "sr",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(3000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "ss",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(3000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "Demo2Application",
|
||||||
|
"annotation_key": "mvc.controller.class",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(3000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "hi2",
|
||||||
|
"annotation_key": "mvc.controller.method",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(3000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "192.168.0.8:test:8010",
|
||||||
|
"annotation_key": "spring.instance_id",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(3000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "cs",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "cr",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "localhost",
|
||||||
|
"annotation_key": "http.host",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "GET",
|
||||||
|
"annotation_key": "http.method",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "/hi2",
|
||||||
|
"annotation_key": "http.path",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "http://localhost:8010/hi2",
|
||||||
|
"annotation_key": "http.url",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "192.168.0.8:test:8010",
|
||||||
|
"annotation_key": "spring.instance_id",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "12854419928166856317",
|
||||||
|
"name": "http:/hi2",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(10000000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"id": "8291962692415852504",
|
||||||
|
"name": "http:/hi",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(23393000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "sr",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "8291962692415852504",
|
||||||
|
"name": "http:/hi",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(23393000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
testutil.Metric{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "ss",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "8291962692415852504",
|
||||||
|
"name": "http:/hi",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(23393000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
testutil.Metric{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "Demo2Application",
|
||||||
|
"annotation_key": "mvc.controller.class",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "8291962692415852504",
|
||||||
|
"name": "http:/hi",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(23393000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
testutil.Metric{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "hi",
|
||||||
|
"annotation_key": "mvc.controller.method",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "8291962692415852504",
|
||||||
|
"name": "http:/hi",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(23393000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
testutil.Metric{
|
||||||
|
Measurement: "zipkin",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"annotation": "192.168.0.8:test:8010",
|
||||||
|
"annotation_key": "spring.instance_id",
|
||||||
|
"endpoint_host": "192.168.0.8:8010",
|
||||||
|
"id": "8291962692415852504",
|
||||||
|
"name": "http:/hi",
|
||||||
|
"parent_id": "8291962692415852504",
|
||||||
|
"service_name": "test",
|
||||||
|
"trace_id": "7312f822d43d0fd8",
|
||||||
|
},
|
||||||
|
Fields: map[string]interface{}{
|
||||||
|
"duration_ns": int64(23393000),
|
||||||
|
},
|
||||||
|
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
z := &Zipkin{
|
z := &Zipkin{
|
||||||
|
@ -240,7 +572,7 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
mockAcc.ClearMetrics()
|
mockAcc.ClearMetrics()
|
||||||
if err := postThriftData(tt.thriftDataFile, z.address); err != nil {
|
if err := postThriftData(tt.datafile, z.address, tt.contentType); err != nil {
|
||||||
t.Fatalf("Posting data to http endpoint /api/v1/spans failed. Error: %s\n", err)
|
t.Fatalf("Posting data to http endpoint /api/v1/spans failed. Error: %s\n", err)
|
||||||
}
|
}
|
||||||
mockAcc.Wait(len(tt.want)) //Since the server is running concurrently, we need to wait for the number of data points we want to test to be added to the Accumulator.
|
mockAcc.Wait(len(tt.want)) //Since the server is running concurrently, we need to wait for the number of data points we want to test to be added to the Accumulator.
|
||||||
|
@ -252,7 +584,6 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
for _, m := range mockAcc.Metrics {
|
for _, m := range mockAcc.Metrics {
|
||||||
got = append(got, *m)
|
got = append(got, *m)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !cmp.Equal(tt.want, got) {
|
if !cmp.Equal(tt.want, got) {
|
||||||
t.Fatalf("Got != Want\n %s", cmp.Diff(tt.want, got))
|
t.Fatalf("Got != Want\n %s", cmp.Diff(tt.want, got))
|
||||||
}
|
}
|
||||||
|
@ -266,19 +597,18 @@ func TestZipkinPlugin(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func postThriftData(datafile, address string) error {
|
func postThriftData(datafile, address, contentType string) error {
|
||||||
dat, err := ioutil.ReadFile(datafile)
|
dat, err := ioutil.ReadFile(datafile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("could not read from data file %s", datafile)
|
return fmt.Errorf("could not read from data file %s", datafile)
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/v1/spans", address), bytes.NewReader(dat))
|
req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/v1/spans", address), bytes.NewReader(dat))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("HTTP request creation failed")
|
return fmt.Errorf("HTTP request creation failed")
|
||||||
}
|
}
|
||||||
|
|
||||||
req.Header.Set("Content-Type", "application/x-thrift")
|
req.Header.Set("Content-Type", contentType)
|
||||||
client := &http.Client{}
|
client := &http.Client{}
|
||||||
_, err = client.Do(req)
|
_, err = client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -48,6 +48,9 @@ func (a *Amon) Connect() error {
|
||||||
return fmt.Errorf("serverkey and amon_instance are required fields for amon output")
|
return fmt.Errorf("serverkey and amon_instance are required fields for amon output")
|
||||||
}
|
}
|
||||||
a.client = &http.Client{
|
a.client = &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
},
|
||||||
Timeout: a.Timeout.Duration,
|
Timeout: a.Timeout.Duration,
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -193,6 +193,25 @@ func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Do CloudWatch boundary checking
|
||||||
|
// Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html
|
||||||
|
if math.IsNaN(value) {
|
||||||
|
datums = datums[:len(datums)-1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if math.IsInf(value, 0) {
|
||||||
|
datums = datums[:len(datums)-1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if value > 0 && value < float64(8.515920e-109) {
|
||||||
|
datums = datums[:len(datums)-1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if value > float64(1.174271e+108) {
|
||||||
|
datums = datums[:len(datums)-1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
datums[i] = &cloudwatch.MetricDatum{
|
datums[i] = &cloudwatch.MetricDatum{
|
||||||
MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
|
MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
|
||||||
Value: aws.Float64(value),
|
Value: aws.Float64(value),
|
||||||
|
|
|
@ -1,6 +1,8 @@
|
||||||
package cloudwatch
|
package cloudwatch
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
@ -51,22 +53,32 @@ func TestBuildDimensions(t *testing.T) {
|
||||||
func TestBuildMetricDatums(t *testing.T) {
|
func TestBuildMetricDatums(t *testing.T) {
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
zero := 0.0
|
||||||
validMetrics := []telegraf.Metric{
|
validMetrics := []telegraf.Metric{
|
||||||
testutil.TestMetric(1),
|
testutil.TestMetric(1),
|
||||||
testutil.TestMetric(int32(1)),
|
testutil.TestMetric(int32(1)),
|
||||||
testutil.TestMetric(int64(1)),
|
testutil.TestMetric(int64(1)),
|
||||||
testutil.TestMetric(float64(1)),
|
testutil.TestMetric(float64(1)),
|
||||||
|
testutil.TestMetric(float64(0)),
|
||||||
|
testutil.TestMetric(math.Copysign(zero, -1)), // the CW documentation does not call out -0 as rejected
|
||||||
|
testutil.TestMetric(float64(8.515920e-109)),
|
||||||
|
testutil.TestMetric(float64(1.174271e+108)), // largest should be 1.174271e+108
|
||||||
testutil.TestMetric(true),
|
testutil.TestMetric(true),
|
||||||
}
|
}
|
||||||
|
invalidMetrics := []telegraf.Metric{
|
||||||
|
testutil.TestMetric("Foo"),
|
||||||
|
testutil.TestMetric(math.Log(-1.0)),
|
||||||
|
testutil.TestMetric(float64(8.515919e-109)), // smallest should be 8.515920e-109
|
||||||
|
testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108
|
||||||
|
}
|
||||||
for _, point := range validMetrics {
|
for _, point := range validMetrics {
|
||||||
datums := BuildMetricDatum(point)
|
datums := BuildMetricDatum(point)
|
||||||
assert.Equal(1, len(datums), "Valid type should create a Datum")
|
assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point))
|
||||||
|
}
|
||||||
|
for _, point := range invalidMetrics {
|
||||||
|
datums := BuildMetricDatum(point)
|
||||||
|
assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point))
|
||||||
}
|
}
|
||||||
|
|
||||||
nonValidPoint := testutil.TestMetric("Foo")
|
|
||||||
|
|
||||||
assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPartitionDatums(t *testing.T) {
|
func TestPartitionDatums(t *testing.T) {
|
||||||
|
@ -78,10 +90,13 @@ func TestPartitionDatums(t *testing.T) {
|
||||||
Value: aws.Float64(1),
|
Value: aws.Float64(1),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
zeroDatum := []*cloudwatch.MetricDatum{}
|
||||||
oneDatum := []*cloudwatch.MetricDatum{&testDatum}
|
oneDatum := []*cloudwatch.MetricDatum{&testDatum}
|
||||||
twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum}
|
twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum}
|
||||||
threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum}
|
threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum}
|
||||||
|
|
||||||
|
assert.Equal([][]*cloudwatch.MetricDatum{}, PartitionDatums(2, zeroDatum))
|
||||||
|
assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
|
||||||
assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
|
assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
|
||||||
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum))
|
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum))
|
||||||
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum))
|
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum))
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdata/telegraf/internal"
|
||||||
|
@ -55,7 +56,11 @@ func (d *Datadog) Connect() error {
|
||||||
if d.Apikey == "" {
|
if d.Apikey == "" {
|
||||||
return fmt.Errorf("apikey is a required field for datadog output")
|
return fmt.Errorf("apikey is a required field for datadog output")
|
||||||
}
|
}
|
||||||
|
|
||||||
d.client = &http.Client{
|
d.client = &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
},
|
||||||
Timeout: d.Timeout.Duration,
|
Timeout: d.Timeout.Duration,
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -96,6 +101,7 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
redactedApiKey := "****************"
|
||||||
ts.Series = make([]*Metric, metricCounter)
|
ts.Series = make([]*Metric, metricCounter)
|
||||||
copy(ts.Series, tempSeries[0:])
|
copy(ts.Series, tempSeries[0:])
|
||||||
tsBytes, err := json.Marshal(ts)
|
tsBytes, err := json.Marshal(ts)
|
||||||
|
@ -104,13 +110,13 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
|
||||||
}
|
}
|
||||||
req, err := http.NewRequest("POST", d.authenticatedUrl(), bytes.NewBuffer(tsBytes))
|
req, err := http.NewRequest("POST", d.authenticatedUrl(), bytes.NewBuffer(tsBytes))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
|
return fmt.Errorf("unable to create http.Request, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1))
|
||||||
}
|
}
|
||||||
req.Header.Add("Content-Type", "application/json")
|
req.Header.Add("Content-Type", "application/json")
|
||||||
|
|
||||||
resp, err := d.client.Do(req)
|
resp, err := d.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
|
return fmt.Errorf("error POSTing metrics, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1))
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
|
|
@ -7,9 +7,7 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
|
||||||
```toml
|
```toml
|
||||||
# Configuration for influxdb server to send metrics to
|
# Configuration for influxdb server to send metrics to
|
||||||
[[outputs.influxdb]]
|
[[outputs.influxdb]]
|
||||||
## The HTTP or UDP URL for your InfluxDB instance. Each item should be
|
## The full HTTP or UDP URL for your InfluxDB instance.
|
||||||
## of the form:
|
|
||||||
## scheme "://" host [ ":" port]
|
|
||||||
##
|
##
|
||||||
## Multiple urls can be specified as part of the same cluster,
|
## Multiple urls can be specified as part of the same cluster,
|
||||||
## this means that only ONE of the urls will be written to each interval.
|
## this means that only ONE of the urls will be written to each interval.
|
||||||
|
@ -44,6 +42,9 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
|
||||||
## HTTP Proxy Config
|
## HTTP Proxy Config
|
||||||
# http_proxy = "http://corporate.proxy:3128"
|
# http_proxy = "http://corporate.proxy:3128"
|
||||||
|
|
||||||
|
## Optional HTTP headers
|
||||||
|
# http_headers = {"X-Special-Header" = "Special-Value"}
|
||||||
|
|
||||||
## Compress each HTTP request payload using GZIP.
|
## Compress each HTTP request payload using GZIP.
|
||||||
# content_encoding = "gzip"
|
# content_encoding = "gzip"
|
||||||
```
|
```
|
||||||
|
@ -70,4 +71,5 @@ to write to. Each URL should start with either `http://` or `udp://`
|
||||||
* `ssl_key`: SSL key
|
* `ssl_key`: SSL key
|
||||||
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
|
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
|
||||||
* `http_proxy`: HTTP Proxy URI
|
* `http_proxy`: HTTP Proxy URI
|
||||||
|
* `http_headers`: HTTP headers to add to each HTTP request
|
||||||
* `content_encoding`: Compress each HTTP request payload using gzip if set to: "gzip"
|
* `content_encoding`: Compress each HTTP request payload using gzip if set to: "gzip"
|
||||||
|
|
|
@ -4,13 +4,7 @@ import "io"
|
||||||
|
|
||||||
type Client interface {
|
type Client interface {
|
||||||
Query(command string) error
|
Query(command string) error
|
||||||
|
WriteStream(b io.Reader) error
|
||||||
Write(b []byte) (int, error)
|
|
||||||
WriteWithParams(b []byte, params WriteParams) (int, error)
|
|
||||||
|
|
||||||
WriteStream(b io.Reader, contentLength int) (int, error)
|
|
||||||
WriteStreamWithParams(b io.Reader, contentLength int, params WriteParams) (int, error)
|
|
||||||
|
|
||||||
Close() error
|
Close() error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -10,6 +10,7 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"path"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -53,6 +54,7 @@ func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
transport = http.Transport{
|
transport = http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
TLSClientConfig: config.TLSConfig,
|
TLSClientConfig: config.TLSConfig,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -68,6 +70,8 @@ func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type HTTPHeaders map[string]string
|
||||||
|
|
||||||
type HTTPConfig struct {
|
type HTTPConfig struct {
|
||||||
// URL should be of the form "http://host:port" (REQUIRED)
|
// URL should be of the form "http://host:port" (REQUIRED)
|
||||||
URL string
|
URL string
|
||||||
|
@ -95,6 +99,9 @@ type HTTPConfig struct {
|
||||||
// Proxy URL should be of the form "http://host:port"
|
// Proxy URL should be of the form "http://host:port"
|
||||||
HTTPProxy string
|
HTTPProxy string
|
||||||
|
|
||||||
|
// HTTP headers to append to HTTP requests.
|
||||||
|
HTTPHeaders HTTPHeaders
|
||||||
|
|
||||||
// The content encoding mechanism to use for each request.
|
// The content encoding mechanism to use for each request.
|
||||||
ContentEncoding string
|
ContentEncoding string
|
||||||
}
|
}
|
||||||
|
@ -130,60 +137,13 @@ func (c *httpClient) Query(command string) error {
|
||||||
return c.doRequest(req, http.StatusOK)
|
return c.doRequest(req, http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpClient) Write(b []byte) (int, error) {
|
func (c *httpClient) WriteStream(r io.Reader) error {
|
||||||
req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), c.writeURL)
|
req, err := c.makeWriteRequest(r, c.writeURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, nil
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.doRequest(req, http.StatusNoContent)
|
return c.doRequest(req, http.StatusNoContent)
|
||||||
if err == nil {
|
|
||||||
return len(b), nil
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *httpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) {
|
|
||||||
req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), writeURL(c.url, wp))
|
|
||||||
if err != nil {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err = c.doRequest(req, http.StatusNoContent)
|
|
||||||
if err == nil {
|
|
||||||
return len(b), nil
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *httpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
|
|
||||||
req, err := c.makeWriteRequest(r, contentLength, c.writeURL)
|
|
||||||
if err != nil {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err = c.doRequest(req, http.StatusNoContent)
|
|
||||||
if err == nil {
|
|
||||||
return contentLength, nil
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *httpClient) WriteStreamWithParams(
|
|
||||||
r io.Reader,
|
|
||||||
contentLength int,
|
|
||||||
wp WriteParams,
|
|
||||||
) (int, error) {
|
|
||||||
req, err := c.makeWriteRequest(r, contentLength, writeURL(c.url, wp))
|
|
||||||
if err != nil {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err = c.doRequest(req, http.StatusNoContent)
|
|
||||||
if err == nil {
|
|
||||||
return contentLength, nil
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpClient) doRequest(
|
func (c *httpClient) doRequest(
|
||||||
|
@ -225,7 +185,6 @@ func (c *httpClient) doRequest(
|
||||||
|
|
||||||
func (c *httpClient) makeWriteRequest(
|
func (c *httpClient) makeWriteRequest(
|
||||||
body io.Reader,
|
body io.Reader,
|
||||||
contentLength int,
|
|
||||||
writeURL string,
|
writeURL string,
|
||||||
) (*http.Request, error) {
|
) (*http.Request, error) {
|
||||||
req, err := c.makeRequest(writeURL, body)
|
req, err := c.makeRequest(writeURL, body)
|
||||||
|
@ -234,8 +193,6 @@ func (c *httpClient) makeWriteRequest(
|
||||||
}
|
}
|
||||||
if c.config.ContentEncoding == "gzip" {
|
if c.config.ContentEncoding == "gzip" {
|
||||||
req.Header.Set("Content-Encoding", "gzip")
|
req.Header.Set("Content-Encoding", "gzip")
|
||||||
} else {
|
|
||||||
req.Header.Set("Content-Length", fmt.Sprint(contentLength))
|
|
||||||
}
|
}
|
||||||
return req, nil
|
return req, nil
|
||||||
}
|
}
|
||||||
|
@ -253,6 +210,11 @@ func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, err
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for header, value := range c.config.HTTPHeaders {
|
||||||
|
req.Header.Set(header, value)
|
||||||
|
}
|
||||||
|
|
||||||
req.Header.Set("Content-Type", "text/plain")
|
req.Header.Set("Content-Type", "text/plain")
|
||||||
req.Header.Set("User-Agent", c.config.UserAgent)
|
req.Header.Set("User-Agent", c.config.UserAgent)
|
||||||
if c.config.Username != "" && c.config.Password != "" {
|
if c.config.Username != "" && c.config.Password != "" {
|
||||||
|
@ -294,8 +256,11 @@ func writeURL(u *url.URL, wp WriteParams) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
u.RawQuery = params.Encode()
|
u.RawQuery = params.Encode()
|
||||||
u.Path = "write"
|
p := u.Path
|
||||||
return u.String()
|
u.Path = path.Join(p, "write")
|
||||||
|
s := u.String()
|
||||||
|
u.Path = p
|
||||||
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func queryURL(u *url.URL, command string) string {
|
func queryURL(u *url.URL, command string) string {
|
||||||
|
@ -303,6 +268,9 @@ func queryURL(u *url.URL, command string) string {
|
||||||
params.Set("q", command)
|
params.Set("q", command)
|
||||||
|
|
||||||
u.RawQuery = params.Encode()
|
u.RawQuery = params.Encode()
|
||||||
u.Path = "query"
|
p := u.Path
|
||||||
return u.String()
|
u.Path = path.Join(p, "query")
|
||||||
|
s := u.String()
|
||||||
|
u.Path = p
|
||||||
|
return s
|
||||||
}
|
}
|
||||||
|
|
|
@ -55,6 +55,13 @@ func TestHTTPClient_Write(t *testing.T) {
|
||||||
fmt.Fprintln(w, `{"results":[{}],"error":"basic auth incorrect"}`)
|
fmt.Fprintln(w, `{"results":[{}],"error":"basic auth incorrect"}`)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// test that user-specified http header is set properly
|
||||||
|
if r.Header.Get("X-Test-Header") != "Test-Value" {
|
||||||
|
w.WriteHeader(http.StatusTeapot)
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
fmt.Fprintln(w, `{"results":[{}],"error":"wrong http header value"}`)
|
||||||
|
}
|
||||||
|
|
||||||
// Validate Content-Length Header
|
// Validate Content-Length Header
|
||||||
if r.ContentLength != 13 {
|
if r.ContentLength != 13 {
|
||||||
w.WriteHeader(http.StatusTeapot)
|
w.WriteHeader(http.StatusTeapot)
|
||||||
|
@ -90,6 +97,9 @@ func TestHTTPClient_Write(t *testing.T) {
|
||||||
UserAgent: "test-agent",
|
UserAgent: "test-agent",
|
||||||
Username: "test-user",
|
Username: "test-user",
|
||||||
Password: "test-password",
|
Password: "test-password",
|
||||||
|
HTTPHeaders: HTTPHeaders{
|
||||||
|
"X-Test-Header": "Test-Value",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
wp := WriteParams{
|
wp := WriteParams{
|
||||||
Database: "test",
|
Database: "test",
|
||||||
|
@ -100,66 +110,8 @@ func TestHTTPClient_Write(t *testing.T) {
|
||||||
client, err := NewHTTP(config, wp)
|
client, err := NewHTTP(config, wp)
|
||||||
defer client.Close()
|
defer client.Close()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
n, err := client.Write([]byte("cpu value=99\n"))
|
|
||||||
assert.Equal(t, 13, n)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")), 13)
|
err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")))
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHTTPClient_WriteParamsOverride(t *testing.T) {
|
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
switch r.URL.Path {
|
|
||||||
case "/write":
|
|
||||||
// test that database is set properly
|
|
||||||
if r.FormValue("db") != "override" {
|
|
||||||
w.WriteHeader(http.StatusTeapot)
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
fmt.Fprintln(w, `{"results":[{}],"error":"wrong db name"}`)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate the request body:
|
|
||||||
buf := make([]byte, 100)
|
|
||||||
n, _ := r.Body.Read(buf)
|
|
||||||
expected := "cpu value=99"
|
|
||||||
got := string(buf[0 : n-1])
|
|
||||||
if expected != got {
|
|
||||||
w.WriteHeader(http.StatusTeapot)
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
msg := fmt.Sprintf(`{"results":[{}],"error":"expected [%s], got [%s]"}`, expected, got)
|
|
||||||
fmt.Fprintln(w, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
case "/query":
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
fmt.Fprintln(w, `{"results":[{}]}`)
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
defer ts.Close()
|
|
||||||
|
|
||||||
config := HTTPConfig{
|
|
||||||
URL: ts.URL,
|
|
||||||
}
|
|
||||||
defaultWP := WriteParams{
|
|
||||||
Database: "test",
|
|
||||||
}
|
|
||||||
client, err := NewHTTP(config, defaultWP)
|
|
||||||
defer client.Close()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// test that WriteWithParams overrides the default write params
|
|
||||||
wp := WriteParams{
|
|
||||||
Database: "override",
|
|
||||||
}
|
|
||||||
n, err := client.WriteWithParams([]byte("cpu value=99\n"), wp)
|
|
||||||
assert.Equal(t, 13, n)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = client.WriteStreamWithParams(bytes.NewReader([]byte("cpu value=99\n")), 13, wp)
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -187,23 +139,7 @@ func TestHTTPClient_Write_Errors(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
lp := []byte("cpu value=99\n")
|
lp := []byte("cpu value=99\n")
|
||||||
n, err := client.Write(lp)
|
err = client.WriteStream(bytes.NewReader(lp))
|
||||||
assert.Equal(t, 0, n)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
n, err = client.WriteStream(bytes.NewReader(lp), 13)
|
|
||||||
assert.Equal(t, 0, n)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
wp := WriteParams{
|
|
||||||
Database: "override",
|
|
||||||
}
|
|
||||||
n, err = client.WriteWithParams(lp, wp)
|
|
||||||
assert.Equal(t, 0, n)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
n, err = client.WriteStreamWithParams(bytes.NewReader(lp), 13, wp)
|
|
||||||
assert.Equal(t, 0, n)
|
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -363,3 +299,37 @@ func TestGzipCompression(t *testing.T) {
|
||||||
|
|
||||||
assert.Equal(t, []byte(influxLine), uncompressed.Bytes())
|
assert.Equal(t, []byte(influxLine), uncompressed.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHTTPClient_PathPrefix(t *testing.T) {
|
||||||
|
prefix := "/some/random/prefix"
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
switch r.URL.Path {
|
||||||
|
case prefix + "/write":
|
||||||
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
case prefix + "/query":
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
fmt.Fprintln(w, `{"results":[{}]}`)
|
||||||
|
default:
|
||||||
|
w.WriteHeader(http.StatusNotFound)
|
||||||
|
msg := fmt.Sprintf("Path not found: %s", r.URL.Path)
|
||||||
|
fmt.Fprintln(w, msg)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
config := HTTPConfig{
|
||||||
|
URL: ts.URL + prefix,
|
||||||
|
}
|
||||||
|
wp := WriteParams{
|
||||||
|
Database: "test",
|
||||||
|
}
|
||||||
|
client, err := NewHTTP(config, wp)
|
||||||
|
defer client.Close()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
err = client.Query("CREATE DATABASE test")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
package client
|
package client
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
|
@ -62,18 +61,8 @@ func (c *udpClient) Query(command string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write will send the byte stream to the given UDP client endpoint
|
|
||||||
func (c *udpClient) Write(b []byte) (int, error) {
|
|
||||||
return c.WriteStream(bytes.NewReader(b), -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteWithParams are ignored by the UDP client, will forward to WriteStream
|
|
||||||
func (c *udpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) {
|
|
||||||
return c.WriteStream(bytes.NewReader(b), -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteStream will send the provided data through to the client, contentLength is ignored by the UDP client
|
// WriteStream will send the provided data through to the client, contentLength is ignored by the UDP client
|
||||||
func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
|
func (c *udpClient) WriteStream(r io.Reader) error {
|
||||||
var totaln int
|
var totaln int
|
||||||
for {
|
for {
|
||||||
nR, err := r.Read(c.buffer)
|
nR, err := r.Read(c.buffer)
|
||||||
|
@ -81,14 +70,14 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if err != io.EOF && err != nil {
|
if err != io.EOF && err != nil {
|
||||||
return totaln, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.buffer[nR-1] == uint8('\n') {
|
if c.buffer[nR-1] == uint8('\n') {
|
||||||
nW, err := c.conn.Write(c.buffer[0:nR])
|
nW, err := c.conn.Write(c.buffer[0:nR])
|
||||||
totaln += nW
|
totaln += nW
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return totaln, err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("E! Could not fit point into UDP payload; dropping")
|
log.Printf("E! Could not fit point into UDP payload; dropping")
|
||||||
|
@ -99,7 +88,7 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if err != io.EOF && err != nil {
|
if err != io.EOF && err != nil {
|
||||||
return totaln, err
|
return err
|
||||||
}
|
}
|
||||||
if c.buffer[nR-1] == uint8('\n') {
|
if c.buffer[nR-1] == uint8('\n') {
|
||||||
break
|
break
|
||||||
|
@ -107,13 +96,7 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return totaln, nil
|
return nil
|
||||||
}
|
|
||||||
|
|
||||||
// WriteStreamWithParams will forward the stream to the client backend, contentLength is ignored by the UDP client
|
|
||||||
// write params are ignored by the UDP client
|
|
||||||
func (c *udpClient) WriteStreamWithParams(r io.Reader, contentLength int, wp WriteParams) (int, error) {
|
|
||||||
return c.WriteStream(r, -1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close will terminate the provided client connection
|
// Close will terminate the provided client connection
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"github.com/influxdata/telegraf/metric"
|
"github.com/influxdata/telegraf/metric"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestUDPClient(t *testing.T) {
|
func TestUDPClient(t *testing.T) {
|
||||||
|
@ -65,43 +64,6 @@ func TestUDPClient_Write(t *testing.T) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// test sending simple metric
|
|
||||||
n, err := client.Write([]byte("cpu value=99\n"))
|
|
||||||
assert.Equal(t, n, 13)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
pkt := <-packets
|
|
||||||
assert.Equal(t, "cpu value=99\n", pkt)
|
|
||||||
|
|
||||||
wp := WriteParams{}
|
|
||||||
//
|
|
||||||
// Using WriteStream() & a metric.Reader:
|
|
||||||
config3 := UDPConfig{
|
|
||||||
URL: "udp://localhost:8199",
|
|
||||||
PayloadSize: 40,
|
|
||||||
}
|
|
||||||
client3, err := NewUDP(config3)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
now := time.Unix(1484142942, 0)
|
|
||||||
m1, _ := metric.New("test", map[string]string{},
|
|
||||||
map[string]interface{}{"value": 1.1}, now)
|
|
||||||
m2, _ := metric.New("test", map[string]string{},
|
|
||||||
map[string]interface{}{"value": 1.1}, now)
|
|
||||||
m3, _ := metric.New("test", map[string]string{},
|
|
||||||
map[string]interface{}{"value": 1.1}, now)
|
|
||||||
ms := []telegraf.Metric{m1, m2, m3}
|
|
||||||
mReader := metric.NewReader(ms)
|
|
||||||
n, err = client3.WriteStreamWithParams(mReader, 10, wp)
|
|
||||||
// 3 metrics at 35 bytes each (including the newline)
|
|
||||||
assert.Equal(t, 105, n)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
pkt = <-packets
|
|
||||||
assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt)
|
|
||||||
pkt = <-packets
|
|
||||||
assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt)
|
|
||||||
pkt = <-packets
|
|
||||||
assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt)
|
|
||||||
|
|
||||||
assert.NoError(t, client.Close())
|
assert.NoError(t, client.Close())
|
||||||
|
|
||||||
config = UDPConfig{
|
config = UDPConfig{
|
||||||
|
@ -112,17 +74,15 @@ func TestUDPClient_Write(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
ts := time.Unix(1484142943, 0)
|
ts := time.Unix(1484142943, 0)
|
||||||
m1, _ = metric.New("test", map[string]string{},
|
m1, _ := metric.New("test", map[string]string{},
|
||||||
map[string]interface{}{"this_is_a_very_long_field_name": 1.1}, ts)
|
map[string]interface{}{"this_is_a_very_long_field_name": 1.1}, ts)
|
||||||
m2, _ = metric.New("test", map[string]string{},
|
m2, _ := metric.New("test", map[string]string{},
|
||||||
map[string]interface{}{"value": 1.1}, ts)
|
map[string]interface{}{"value": 1.1}, ts)
|
||||||
ms = []telegraf.Metric{m1, m2}
|
ms := []telegraf.Metric{m1, m2}
|
||||||
reader := metric.NewReader(ms)
|
reader := metric.NewReader(ms)
|
||||||
n, err = client4.WriteStream(reader, 0)
|
err = client4.WriteStream(reader)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
require.Equal(t, 35, n)
|
pkt := <-packets
|
||||||
assert.NoError(t, err)
|
|
||||||
pkt = <-packets
|
|
||||||
assert.Equal(t, "test value=1.1 1484142943000000000\n", pkt)
|
assert.Equal(t, "test value=1.1 1484142943000000000\n", pkt)
|
||||||
|
|
||||||
assert.NoError(t, client4.Close())
|
assert.NoError(t, client4.Close())
|
||||||
|
|
|
@ -34,6 +34,7 @@ type InfluxDB struct {
|
||||||
Timeout internal.Duration
|
Timeout internal.Duration
|
||||||
UDPPayload int `toml:"udp_payload"`
|
UDPPayload int `toml:"udp_payload"`
|
||||||
HTTPProxy string `toml:"http_proxy"`
|
HTTPProxy string `toml:"http_proxy"`
|
||||||
|
HTTPHeaders map[string]string `toml:"http_headers"`
|
||||||
ContentEncoding string `toml:"content_encoding"`
|
ContentEncoding string `toml:"content_encoding"`
|
||||||
|
|
||||||
// Path to CA file
|
// Path to CA file
|
||||||
|
@ -52,9 +53,7 @@ type InfluxDB struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## The HTTP or UDP URL for your InfluxDB instance. Each item should be
|
## The full HTTP or UDP URL for your InfluxDB instance.
|
||||||
## of the form:
|
|
||||||
## scheme "://" host [ ":" port]
|
|
||||||
##
|
##
|
||||||
## Multiple urls can be specified as part of the same cluster,
|
## Multiple urls can be specified as part of the same cluster,
|
||||||
## this means that only ONE of the urls will be written to each interval.
|
## this means that only ONE of the urls will be written to each interval.
|
||||||
|
@ -89,6 +88,9 @@ var sampleConfig = `
|
||||||
## HTTP Proxy Config
|
## HTTP Proxy Config
|
||||||
# http_proxy = "http://corporate.proxy:3128"
|
# http_proxy = "http://corporate.proxy:3128"
|
||||||
|
|
||||||
|
## Optional HTTP headers
|
||||||
|
# http_headers = {"X-Special-Header" = "Special-Value"}
|
||||||
|
|
||||||
## Compress each HTTP request payload using GZIP.
|
## Compress each HTTP request payload using GZIP.
|
||||||
# content_encoding = "gzip"
|
# content_encoding = "gzip"
|
||||||
`
|
`
|
||||||
|
@ -132,8 +134,12 @@ func (i *InfluxDB) Connect() error {
|
||||||
Username: i.Username,
|
Username: i.Username,
|
||||||
Password: i.Password,
|
Password: i.Password,
|
||||||
HTTPProxy: i.HTTPProxy,
|
HTTPProxy: i.HTTPProxy,
|
||||||
|
HTTPHeaders: client.HTTPHeaders{},
|
||||||
ContentEncoding: i.ContentEncoding,
|
ContentEncoding: i.ContentEncoding,
|
||||||
}
|
}
|
||||||
|
for header, value := range i.HTTPHeaders {
|
||||||
|
config.HTTPHeaders[header] = value
|
||||||
|
}
|
||||||
wp := client.WriteParams{
|
wp := client.WriteParams{
|
||||||
Database: i.Database,
|
Database: i.Database,
|
||||||
RetentionPolicy: i.RetentionPolicy,
|
RetentionPolicy: i.RetentionPolicy,
|
||||||
|
@ -177,12 +183,6 @@ func (i *InfluxDB) Description() string {
|
||||||
// Write will choose a random server in the cluster to write to until a successful write
|
// Write will choose a random server in the cluster to write to until a successful write
|
||||||
// occurs, logging each unsuccessful. If all servers fail, return error.
|
// occurs, logging each unsuccessful. If all servers fail, return error.
|
||||||
func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
|
func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
|
||||||
|
|
||||||
bufsize := 0
|
|
||||||
for _, m := range metrics {
|
|
||||||
bufsize += m.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
r := metric.NewReader(metrics)
|
r := metric.NewReader(metrics)
|
||||||
|
|
||||||
// This will get set to nil if a successful write occurs
|
// This will get set to nil if a successful write occurs
|
||||||
|
@ -190,7 +190,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
|
||||||
|
|
||||||
p := rand.Perm(len(i.clients))
|
p := rand.Perm(len(i.clients))
|
||||||
for _, n := range p {
|
for _, n := range p {
|
||||||
if _, e := i.clients[n].WriteStream(r, bufsize); e != nil {
|
if e := i.clients[n].WriteStream(r); e != nil {
|
||||||
// If the database was not found, try to recreate it:
|
// If the database was not found, try to recreate it:
|
||||||
if strings.Contains(e.Error(), "database not found") {
|
if strings.Contains(e.Error(), "database not found") {
|
||||||
errc := i.clients[n].Query(fmt.Sprintf(`CREATE DATABASE "%s"`, qiReplacer.Replace(i.Database)))
|
errc := i.clients[n].Query(fmt.Sprintf(`CREATE DATABASE "%s"`, qiReplacer.Replace(i.Database)))
|
||||||
|
@ -199,6 +199,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
|
||||||
i.Database)
|
i.Database)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.Contains(e.Error(), "field type conflict") {
|
if strings.Contains(e.Error(), "field type conflict") {
|
||||||
log.Printf("E! Field type conflict, dropping conflicted points: %s", e)
|
log.Printf("E! Field type conflict, dropping conflicted points: %s", e)
|
||||||
// setting err to nil, otherwise we will keep retrying and points
|
// setting err to nil, otherwise we will keep retrying and points
|
||||||
|
@ -206,6 +207,31 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
|
||||||
err = nil
|
err = nil
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if strings.Contains(e.Error(), "points beyond retention policy") {
|
||||||
|
log.Printf("W! Points beyond retention policy: %s", e)
|
||||||
|
// This error is indicates the point is older than the
|
||||||
|
// retention policy permits, and is probably not a cause for
|
||||||
|
// concern. Retrying will not help unless the retention
|
||||||
|
// policy is modified.
|
||||||
|
err = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(e.Error(), "unable to parse") {
|
||||||
|
log.Printf("E! Parse error; dropping points: %s", e)
|
||||||
|
// This error indicates a bug in Telegraf or InfluxDB parsing
|
||||||
|
// of line protocol. Retries will not be successful.
|
||||||
|
err = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(e.Error(), "hinted handoff queue not empty") {
|
||||||
|
// This is an informational message
|
||||||
|
err = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
// Log write failure
|
// Log write failure
|
||||||
log.Printf("E! InfluxDB Output Error: %s", e)
|
log.Printf("E! InfluxDB Output Error: %s", e)
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -178,28 +178,107 @@ func TestHTTPError_DatabaseNotFound(t *testing.T) {
|
||||||
require.NoError(t, i.Close())
|
require.NoError(t, i.Close())
|
||||||
}
|
}
|
||||||
|
|
||||||
// field type conflict does not return an error, instead we
|
func TestHTTPError_WriteErrors(t *testing.T) {
|
||||||
func TestHTTPError_FieldTypeConflict(t *testing.T) {
|
var testCases = []struct {
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
name string
|
||||||
switch r.URL.Path {
|
status int
|
||||||
case "/write":
|
contentType string
|
||||||
w.WriteHeader(http.StatusNotFound)
|
body string
|
||||||
w.Header().Set("Content-Type", "application/json")
|
err error
|
||||||
fmt.Fprintln(w, `{"results":[{}],"error":"field type conflict: input field \"value\" on measurement \"test\" is type integer, already exists as type float dropped=1"}`)
|
}{
|
||||||
|
{
|
||||||
|
// HTTP/1.1 400 Bad Request
|
||||||
|
// Content-Type: application/json
|
||||||
|
// X-Influxdb-Version: 1.3.3
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "error": "partial write: points beyond retention policy dropped=1"
|
||||||
|
// }
|
||||||
|
name: "beyond retention policy is not an error",
|
||||||
|
status: http.StatusBadRequest,
|
||||||
|
contentType: "application/json",
|
||||||
|
body: `{"error":"partial write: points beyond retention policy dropped=1"}`,
|
||||||
|
err: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// HTTP/1.1 400 Bad Request
|
||||||
|
// Content-Type: application/json
|
||||||
|
// X-Influxdb-Version: 1.3.3
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "error": "unable to parse 'foo bar=': missing field value"
|
||||||
|
// }
|
||||||
|
name: "unable to parse is not an error",
|
||||||
|
status: http.StatusBadRequest,
|
||||||
|
contentType: "application/json",
|
||||||
|
body: `{"error":"unable to parse 'foo bar=': missing field value"}`,
|
||||||
|
err: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// HTTP/1.1 400 Bad Request
|
||||||
|
// Content-Type: application/json
|
||||||
|
// X-Influxdb-Version: 1.3.3
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"
|
||||||
|
// }
|
||||||
|
name: "field type conflict is not an error",
|
||||||
|
status: http.StatusBadRequest,
|
||||||
|
contentType: "application/json",
|
||||||
|
body: `{"error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"}`,
|
||||||
|
err: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// HTTP/1.1 500 Internal Server Error
|
||||||
|
// Content-Type: application/json
|
||||||
|
// X-Influxdb-Version: 1.3.3-c1.3.3
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "error": "write failed: hinted handoff queue not empty"
|
||||||
|
// }
|
||||||
|
name: "hinted handoff queue not empty is not an error",
|
||||||
|
status: http.StatusInternalServerError,
|
||||||
|
contentType: "application/json",
|
||||||
|
body: `{"error":"write failed: hinted handoff queue not empty"}`,
|
||||||
|
err: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// HTTP/1.1 500 Internal Server Error
|
||||||
|
// Content-Type: application/json
|
||||||
|
// X-Influxdb-Version: 1.3.3-c1.3.3
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "error": "partial write"
|
||||||
|
// }
|
||||||
|
name: "plain partial write is an error",
|
||||||
|
status: http.StatusInternalServerError,
|
||||||
|
contentType: "application/json",
|
||||||
|
body: `{"error":"partial write"}`,
|
||||||
|
err: fmt.Errorf("Could not write to any InfluxDB server in cluster"),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, tt := range testCases {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
rw.WriteHeader(tt.status)
|
||||||
|
rw.Header().Set("Content-Type", tt.contentType)
|
||||||
|
fmt.Fprintln(rw, tt.body)
|
||||||
}))
|
}))
|
||||||
defer ts.Close()
|
defer ts.Close()
|
||||||
|
|
||||||
i := InfluxDB{
|
influx := InfluxDB{
|
||||||
URLs: []string{ts.URL},
|
URLs: []string{ts.URL},
|
||||||
Database: "test",
|
Database: "test",
|
||||||
}
|
}
|
||||||
|
|
||||||
err := i.Connect()
|
err := influx.Connect()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = i.Write(testutil.MockMetrics())
|
err = influx.Write(testutil.MockMetrics())
|
||||||
require.NoError(t, err)
|
require.Equal(t, tt.err, err)
|
||||||
require.NoError(t, i.Close())
|
require.NoError(t, influx.Close())
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type MockClient struct {
|
type MockClient struct {
|
||||||
|
|
|
@ -80,6 +80,9 @@ func (l *Librato) Connect() error {
|
||||||
"api_user and api_token are required fields for librato output")
|
"api_user and api_token are required fields for librato output")
|
||||||
}
|
}
|
||||||
l.client = &http.Client{
|
l.client = &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
},
|
||||||
Timeout: l.Timeout.Duration,
|
Timeout: l.Timeout.Duration,
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -13,6 +14,16 @@ import (
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
"github.com/influxdata/telegraf/plugins/outputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
|
||||||
|
hypenChars = strings.NewReplacer(
|
||||||
|
"@", "-",
|
||||||
|
"*", "-",
|
||||||
|
`%`, "-",
|
||||||
|
"#", "-",
|
||||||
|
"$", "-")
|
||||||
|
)
|
||||||
|
|
||||||
type OpenTSDB struct {
|
type OpenTSDB struct {
|
||||||
Prefix string
|
Prefix string
|
||||||
|
|
||||||
|
@ -24,9 +35,6 @@ type OpenTSDB struct {
|
||||||
Debug bool
|
Debug bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var sanitizedChars = strings.NewReplacer("@", "-", "*", "-", " ", "_",
|
|
||||||
`%`, "-", "#", "-", "$", "-", ":", "_")
|
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## prefix for metrics keys
|
## prefix for metrics keys
|
||||||
prefix = "my.specific.prefix."
|
prefix = "my.specific.prefix."
|
||||||
|
@ -125,8 +133,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
metric := &HttpMetric{
|
metric := &HttpMetric{
|
||||||
Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s",
|
Metric: sanitize(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
|
||||||
o.Prefix, m.Name(), fieldName)),
|
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
Timestamp: now,
|
Timestamp: now,
|
||||||
Value: value,
|
Value: value,
|
||||||
|
@ -176,7 +183,7 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
messageLine := fmt.Sprintf("put %s %v %s %s\n",
|
messageLine := fmt.Sprintf("put %s %v %s %s\n",
|
||||||
sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
|
sanitize(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
|
||||||
now, metricValue, tags)
|
now, metricValue, tags)
|
||||||
|
|
||||||
_, err := connection.Write([]byte(messageLine))
|
_, err := connection.Write([]byte(messageLine))
|
||||||
|
@ -192,7 +199,7 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
|
||||||
func cleanTags(tags map[string]string) map[string]string {
|
func cleanTags(tags map[string]string) map[string]string {
|
||||||
tagSet := make(map[string]string, len(tags))
|
tagSet := make(map[string]string, len(tags))
|
||||||
for k, v := range tags {
|
for k, v := range tags {
|
||||||
tagSet[sanitizedChars.Replace(k)] = sanitizedChars.Replace(v)
|
tagSet[sanitize(k)] = sanitize(v)
|
||||||
}
|
}
|
||||||
return tagSet
|
return tagSet
|
||||||
}
|
}
|
||||||
|
@ -236,6 +243,13 @@ func (o *OpenTSDB) Close() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func sanitize(value string) string {
|
||||||
|
// Apply special hypenation rules to preserve backwards compatibility
|
||||||
|
value = hypenChars.Replace(value)
|
||||||
|
// Replace any remaining illegal chars
|
||||||
|
return allowedChars.ReplaceAllLiteralString(value, "_")
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
outputs.Add("opentsdb", func() telegraf.Output {
|
outputs.Add("opentsdb", func() telegraf.Output {
|
||||||
return &OpenTSDB{}
|
return &OpenTSDB{}
|
||||||
|
|
|
@ -10,9 +10,10 @@ import (
|
||||||
"strconv"
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdata/telegraf/testutil"
|
||||||
//"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCleanTags(t *testing.T) {
|
func TestCleanTags(t *testing.T) {
|
||||||
|
@ -29,8 +30,16 @@ func TestCleanTags(t *testing.T) {
|
||||||
map[string]string{"aaa": "bbb"},
|
map[string]string{"aaa": "bbb"},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
map[string]string{"Sp%ci@l Chars": "g$t repl#ced"},
|
map[string]string{"Sp%ci@l Chars[": "g$t repl#ce)d"},
|
||||||
map[string]string{"Sp-ci-l_Chars": "g-t_repl-ced"},
|
map[string]string{"Sp-ci-l_Chars_": "g-t_repl-ce_d"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]string{"μnicodε_letters": "okαy"},
|
||||||
|
map[string]string{"μnicodε_letters": "okαy"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]string{"n☺": "emojies☠"},
|
||||||
|
map[string]string{"n_": "emojies_"},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
map[string]string{},
|
map[string]string{},
|
||||||
|
@ -75,6 +84,47 @@ func TestBuildTagsTelnet(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSanitize(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
value string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Ascii letters and numbers allowed",
|
||||||
|
value: "ascii 123",
|
||||||
|
expected: "ascii_123",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Allowed punct",
|
||||||
|
value: "-_./",
|
||||||
|
expected: "-_./",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Special conversions to hyphen",
|
||||||
|
value: "@*%#$!",
|
||||||
|
expected: "-----_",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Unicode Letters allowed",
|
||||||
|
value: "μnicodε_letters",
|
||||||
|
expected: "μnicodε_letters",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Other Unicode not allowed",
|
||||||
|
value: "“☢”",
|
||||||
|
expected: "___",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
actual := sanitize(tt.value)
|
||||||
|
require.Equal(t, tt.expected, actual)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkHttpSend(b *testing.B) {
|
func BenchmarkHttpSend(b *testing.B) {
|
||||||
const BatchSize = 50
|
const BatchSize = 50
|
||||||
const MetricsCount = 4 * BatchSize
|
const MetricsCount = 4 * BatchSize
|
||||||
|
|
|
@ -67,6 +67,10 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
||||||
|
buf = bytes.TrimSpace(buf)
|
||||||
|
if len(buf) == 0 {
|
||||||
|
return make([]telegraf.Metric, 0), nil
|
||||||
|
}
|
||||||
|
|
||||||
if !isarray(buf) {
|
if !isarray(buf) {
|
||||||
metrics := make([]telegraf.Metric, 0)
|
metrics := make([]telegraf.Metric, 0)
|
||||||
|
@ -155,8 +159,6 @@ func (f *JSONFlattener) FullFlattenJSON(
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
case nil:
|
case nil:
|
||||||
// ignored types
|
|
||||||
fmt.Println("json parser ignoring " + fieldname)
|
|
||||||
return nil
|
return nil
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
|
return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
|
||||||
|
|
|
@ -84,6 +84,16 @@ func TestParseValidJSON(t *testing.T) {
|
||||||
"b_c": float64(6),
|
"b_c": float64(6),
|
||||||
}, metrics[0].Fields())
|
}, metrics[0].Fields())
|
||||||
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
assert.Equal(t, map[string]string{}, metrics[0].Tags())
|
||||||
|
|
||||||
|
// Test that whitespace only will parse as an empty list of metrics
|
||||||
|
metrics, err = parser.Parse([]byte("\n\t"))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, metrics, 0)
|
||||||
|
|
||||||
|
// Test that an empty string will parse as an empty list of metrics
|
||||||
|
metrics, err = parser.Parse([]byte(""))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, metrics, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseLineValidJSON(t *testing.T) {
|
func TestParseLineValidJSON(t *testing.T) {
|
||||||
|
|
|
@ -32,13 +32,22 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for fieldName, value := range metric.Fields() {
|
for fieldName, value := range metric.Fields() {
|
||||||
// Convert value to string
|
switch v := value.(type) {
|
||||||
valueS := fmt.Sprintf("%#v", value)
|
case string:
|
||||||
point := []byte(fmt.Sprintf("%s %s %d\n",
|
continue
|
||||||
|
case bool:
|
||||||
|
if v {
|
||||||
|
value = 1
|
||||||
|
} else {
|
||||||
|
value = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
metricString := fmt.Sprintf("%s %#v %d\n",
|
||||||
// insert "field" section of template
|
// insert "field" section of template
|
||||||
sanitizedChars.Replace(InsertField(bucket, fieldName)),
|
sanitizedChars.Replace(InsertField(bucket, fieldName)),
|
||||||
sanitizedChars.Replace(valueS),
|
value,
|
||||||
timestamp))
|
timestamp)
|
||||||
|
point := []byte(metricString)
|
||||||
out = append(out, point...)
|
out = append(out, point...)
|
||||||
}
|
}
|
||||||
return out, nil
|
return out, nil
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue