Compare commits
32 commits: ga-azure-m...1.4.0-rc3
| Author | SHA1 | Date |
|---|---|---|
|  | b2b2bd8a27 |  |
|  | f96cbb48c7 |  |
|  | 9077cb83bc |  |
|  | 0f188f280f |  |
|  | b9420e73bd |  |
|  | 1e43e5e7ae |  |
|  | 5e104ad974 |  |
|  | cc9d8c700c |  |
|  | b15ec21ba7 |  |
|  | a9abfe8f08 |  |
|  | 307210242c |  |
|  | 0a41db16f1 |  |
|  | 7480267fd2 |  |
|  | 30949c4596 |  |
|  | 47264bc860 |  |
|  | 67e693e9a8 |  |
|  | 851352bc8a |  |
|  | c807452c14 |  |
|  | 48e00f7ea0 |  |
|  | 8ce901aaa4 |  |
|  | 78d1715601 |  |
|  | 1b0a18897d |  |
|  | 257b6a09d9 |  |
|  | e6feac735c |  |
|  | 6616065acf |  |
|  | 98774d60e2 |  |
|  | d4cd1b7eb4 |  |
|  | 7254111d37 |  |
|  | 4551efb459 |  |
|  | 2610eba0e3 |  |
|  | c277dc27a6 |  |
|  | a4f5c6fbc3 |  |
CHANGELOG.md (10 changes)
@@ -62,6 +62,7 @@
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.

### Bugfixes

@@ -93,6 +94,15 @@
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilcation of logparser and tail on solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3153](https://github.com/influxdata/telegraf/issues/3053): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.

## v1.3.5 [2017-07-26]

Makefile (2 changes)
@@ -25,7 +25,7 @@ deps:
	gdm restore

telegraf:
	go build -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
	go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go

go-install:
	go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf

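For context on the Makefile hunk above: in the Go toolchain of this era, `go build -i` also installed the packages the target imports, so repeated builds could reuse the compiled dependencies instead of recompiling them (later Go releases deprecated `-i` in favor of the build cache). A minimal illustration of the invocation outside the Makefile, where `$LDFLAGS` stands in for the version flags the Makefile computes:

```
# Build telegraf and install its dependency packages for reuse on the next build.
go build -i -o telegraf -ldflags "$LDFLAGS" ./cmd/telegraf/telegraf.go
```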
@@ -172,7 +172,8 @@ configuration options.
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
  * cpu

@@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
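As an illustration of those two rules (the `enabled` and `status` fields here are hypothetical, using the same template as the example above): a metric with fields `usage_idle=98.09`, `enabled=true`, and `status="ok"` would serialize as

```
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
tars.cpu-total.us-east-1.cpu.enabled 1 1455320690
```

with no line emitted for the string field `status`.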

### Graphite Configuration:

```toml
@@ -118,6 +118,12 @@
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"

## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
@@ -272,11 +278,11 @@
# timeout = 2
#
# ## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# insecure_skip_verify = false
# # insecure_skip_verify = false


# # Send telegraf metrics to graylog(s)
@@ -596,6 +602,32 @@
# AGGREGATOR PLUGINS #
###############################################################################

# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# # ## The name of metric.
# # measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# # ## The name of metric.
# # measurement_name = "diskio"
# # ## The concrete fields of metric
# # fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
@@ -606,32 +638,6 @@
# drop_original = false


# # Configuration for aggregate histogram metrics
# [[aggregators.histogram]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## The example of config to aggregate histogram for all fields of specified metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# metric_name = "cpu"
#
# ## The example of config to aggregate for specified fields of metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# metric_name = "diskio"
# ## The concrete fields of metric
# metric_fields = ["io_time", "read_time", "write_time"]



###############################################################################
# INPUT PLUGINS #
@@ -645,6 +651,8 @@
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false


# Read metrics about disk usage by mount point
@@ -720,15 +728,17 @@

# # Read Apache status information (mod_status)
# [[inputs.apache]]
# ## An array of Apache status URI to gather stats.
# ## An array of URLs to gather from, must be directed at the machine
# ## readable version of the mod_status page including the auto query string.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
# ## user credentials for basic HTTP authentication
# username = "myuser"
# password = "mypassword"
#
# ## Timeout to the complete conection and reponse time in seconds
# response_timeout = "25s" ## default to 5 seconds
# ## Credentials for basic HTTP authentication.
# # username = "myuser"
# # password = "mypassword"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@@ -846,7 +856,7 @@
#
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # Note that if a period is configured that is smaller than the minimum for a
# # particular metric, that metric will not be returned by the Cloudwatch API
# # and will not be collected by Telegraf.
@@ -958,20 +968,23 @@
# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"] # required
# servers = ["8.8.8.8"]
#
# ## Domains or subdomains to query. "."(root) is default
# domains = ["."] # optional
# ## Network is the network protocol name.
# # network = "udp"
#
# ## Query record type. Default is "A"
# ## Domains or subdomains to query.
# # domains = ["."]
#
# ## Query record type.
# ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A" # optional
# # record_type = "A"
#
# ## Dns server port. 53 is default
# port = 53 # optional
# ## Dns server port.
# # port = 53
#
# ## Query timeout in seconds. Default is 2 seconds
# timeout = 2 # optional
# ## Query timeout in seconds.
# # timeout = 2


# # Read metrics about docker containers
@@ -980,8 +993,15 @@
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Containers to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
#
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
@@ -990,11 +1010,20 @@
# perdevice = true
# ## Whether to report for each container total blkio and network stats or not
# total = false
# ## Which environment variables should we use as a tag
# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false


# # Read statistics from one or many dovecot servers
@@ -1064,6 +1093,15 @@
# data_format = "influx"


# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
# ## fail2ban-client require root access.
# ## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
# ## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
# ## This plugin run only "fail2ban-client status".
# use_sudo = false


# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
@@ -1080,6 +1118,22 @@
# md5 = false


# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
# ##
# ## Endpoint:
# ## - only one URI is allowed
# ## - https is not supported
# endpoint = "http://localhost:24220/api/plugins.json"
#
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
# exclude = [
# "monitor_agent",
# "dummy",
# ]


# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
@@ -1161,25 +1215,26 @@
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# address = "http://github.com"
# # address = "http://localhost"
#
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
# # response_timeout = "5s"
#
# ## HTTP Request Method
# method = "GET"
# # method = "GET"
#
# ## Whether to follow redirects from the server (defaults to false)
# follow_redirects = true
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# # follow_redirects = false
#
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional substring or regex match in body of the response
# ## response_string_match = "\"service_status\": \"up\""
# ## response_string_match = "ok"
# ## response_string_match = "\".*_status\".?:.?\"up\""
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@@ -1187,6 +1242,10 @@
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"


# # Read flattened metrics from one or more JSON HTTP endpoints
@@ -1249,6 +1308,13 @@
# "http://localhost:8086/debug/vars"
# ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
# timeout = "5s"

@@ -1279,6 +1345,13 @@
# ## if no servers are specified, local machine sensor stats will be queried
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## gaps or overlap in pulled data
# interval = "30s"
#
# ## Timeout for the ipmitool command to complete
# timeout = "20s"


# # Gather packets and bytes throughput from iptables
@@ -1398,9 +1471,9 @@

# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URI to gather stats about LeoFS.
# ## Specify an ip or hostname with port. ie 127.0.0.1:4020
# servers = ["127.0.0.1:4021"]
# ## An array of URLs of the form:
# ## "udp://" host [ ":" port]
# servers = ["udp://127.0.0.1:4020"]


# # Provides Linux sysctl fs metrics
@@ -1475,14 +1548,24 @@
# # ]


# # Collects scores from a minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
# ## server address for minecraft
# # server = "localhost"
# ## port for RCON
# # port = "25575"
# ## password RCON for mincraft server
# # password = ""


# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port add password. ie,
# ## An array of URLs of the form:
# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
# ## For example:
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:27017"]
# servers = ["mongodb://127.0.0.1:27017"]
# gather_perdb_stats = false
#
# ## Optional SSL Config
@@ -1496,7 +1579,7 @@
# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
@@ -1553,7 +1636,7 @@
# #
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"

#
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
@@ -1599,8 +1682,17 @@

# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# ## An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/status"]
# # An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/server_status"]
#
# # TLS/SSL configuration
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.cer"
# ssl_key = "/etc/telegraf/key.key"
# insecure_skip_verify = false
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"


# # Read NSQ topic and channel statistics.
@@ -1627,6 +1719,27 @@
# dns_lookup = true


# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
# host = "localhost"
# port = 389
#
# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
# # note that port will likely need to be changed to 636 for ldaps
# # valid options: "" | "starttls" | "ldaps"
# ssl = ""
#
# # skip peer certificate verification. Default is false.
# insecure_skip_verify = false
#
# # Path to PEM-encoded Root certificate to use to verify server certificate
# ssl_ca = "/etc/ssl/certs.pem"
#
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
# bind_dn = ""
# bind_password = ""


# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
@@ -1820,10 +1933,13 @@
# location = "/var/lib/puppet/state/last_run_summary.yaml"


# # Read metrics from one or many RabbitMQ servers via the management API
# # Reads metrics from RabbitMQ servers via the Management Plugin
# [[inputs.rabbitmq]]
# ## Management Plugin url. (default: http://localhost:15672)
# # url = "http://localhost:15672"
# # name = "rmq-server-1" # optional tag
# ## Tag added to rabbitmq_overview series; deprecated: use tags
# # name = "rmq-server-1"
# ## Credentials
# # username = "guest"
# # password = "guest"
#
@@ -1844,9 +1960,13 @@
# ## Includes connection time, any redirects, and reading the response body.
# # client_timeout = "4s"
#
# ## A list of nodes to pull metrics about. If not specified, metrics for
# ## all nodes are gathered.
# ## A list of nodes to gather as the rabbitmq_node measurement. If not
# ## specified, metrics for all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
#
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
# ## specified, metrics for all queues are gathered.
# # queues = ["telegraf"]


# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
@@ -1880,14 +2000,11 @@
# ##
# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
# ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
# servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
# ##
# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
# ## have to be named "rethinkdb".
# servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]


# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]


# # Read metrics one or many Riak servers
@@ -1896,6 +2013,26 @@
# servers = ["http://localhost:8098"]


# # Read API usage and limits for a Salesforce organisation
# [[inputs.salesforce]]
# ## specify your credentials
# ##
# username = "your_username"
# password = "your_password"
# ##
# ## (optional) security token
# # security_token = "your_security_token"
# ##
# ## (optional) environment type (sandbox or production)
# ## default is: production
# ##
# # environment = "production"
# ##
# ## (optional) API version (default: "39.0")
# ##
# # version = "39.0"


# # Monitor sensors, requires lm-sensors package
# [[inputs.sensors]]
# ## Remove numbers from field names.
@@ -2141,6 +2278,26 @@
# # vg = "rootvg"


# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
# ## URL of the Tomcat server status
# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
#
# ## HTTP Basic Auth Credentials
# # username = "tomcat"
# # password = "s3cret"
#
# ## Request timeout
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false


# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
# ## Set the amplitude
@@ -2157,6 +2314,9 @@

# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
@@ -2247,16 +2407,13 @@
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0

# # Read metrics from Kafka 0.9+ topic(s)

# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## topic(s) to consume
# topics = ["telegraf"]
# ## kafka servers
# brokers = ["localhost:9092"]
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
# ## topic(s) to consume
# topics = ["telegraf"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@@ -2269,6 +2426,11 @@
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -2279,7 +2441,8 @@
# ## larger messages are dropped
# max_message_len = 65536

# # Read metrics from Kafka (0.8 or less) topic(s)

# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
# topics = ["telegraf"]
@@ -2312,6 +2475,7 @@
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/apache/access.log"]
#
# ## Read files that currently exist from the beginning. Files that are created
# ## while telegraf is running (and that match the "files" globs) will always
# ## be read from the beginning.
@@ -2327,12 +2491,26 @@
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{COMBINED_LOG_FORMAT}"]
#
# ## Name of the outputted measurement name.
# measurement = "apache_access_log"
#
# ## Full path(s) to custom pattern files.
# custom_pattern_files = []
#
# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
#
# ## Timezone allows you to provide an override for timestamps that
# ## don't already include an offset
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
# ##
# ## Default: "" which renders UTC
# ## Options are as follows:
# ## 1. Local -- interpret based on machine localtime
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
# timezone = "Canada/Eastern"
# '''


@@ -2431,6 +2609,11 @@
# ## 0 (default) is unlimited.
# # max_connections = 1024
#
# ## Read timeout.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # read_timeout = "30s"
#
# ## Maximum socket buffer size in bytes.
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
@@ -2450,12 +2633,14 @@
# # data_format = "influx"


# # Statsd Server
# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp" or "udp"
# ## Protocol, must be "tcp" or "udp" (default=udp)
# protocol = "udp"
# ## Maximum number of concurrent TCP connections to allow
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Address and port to host UDP listener on
# service_address = ":8125"
#
@@ -2556,3 +2741,9 @@
# [inputs.webhooks.papertrail]
# path = "/papertrail"


# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411 # Port on which Telegraf listens


@@ -20,8 +20,14 @@ var (

// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
stringFieldUnEscaper = strings.NewReplacer(
`\"`, `"`,
`\\`, `\`,
)
)

func escape(s string, t string) string {

@@ -77,16 +77,10 @@ func New(

// pre-allocate capacity of the fields slice
fieldlen := 0
for k, v := range fields {
for k, _ := range fields {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
}
switch val := v.(type) {
case string:
if strings.HasSuffix(val, `\`) {
return nil, fmt.Errorf("Metric cannot have field value ending with a backslash")
}
}

// 10 bytes is completely arbitrary, but will at least prevent some
// amount of allocations. There's a small possibility this will create
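The two metric.go hunks above swap the old trailing-backslash rejection for real escaping: backslashes in string field values are now escaped alongside quotes. A minimal standalone sketch of the new replacer pairs (an illustration, not the plugin's full code path):

```go
package main

import (
	"fmt"
	"strings"
)

// The same pairs the diff introduces: escape quotes and backslashes
// in string field values in a single pass.
var stringFieldEscaper = strings.NewReplacer(
	`"`, `\"`,
	`\`, `\\`,
)

func main() {
	for _, v := range []string{`test\`, `test"`, `test\"`} {
		fmt.Printf("%-7s -> %s\n", v, stringFieldEscaper.Replace(v))
	}
	// test\   -> test\\
	// test"   -> test\"
	// test\"  -> test\\\"
}
```

These are exactly the values the new TestReader_Read cases below assert against.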
@@ -257,6 +257,7 @@ func TestNewMetric_Fields(t *testing.T) {
"string": "test",
"quote_string": `x"y`,
"backslash_quote_string": `x\"y`,
"backslash": `x\y`,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
@@ -708,12 +709,6 @@ func TestNewMetric_TrailingSlash(t *testing.T) {
`value\`: "x",
},
},
{
name: "cpu",
fields: map[string]interface{}{
"value": `x\`,
},
},
{
name: "cpu",
tags: map[string]string{

@@ -4,6 +4,7 @@ import (
"io"
"io/ioutil"
"regexp"
"strings"
"testing"
"time"

@@ -620,6 +621,83 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
}
}

func TestReader_Read(t *testing.T) {
epoch := time.Unix(0, 0)

type args struct {
name string
tags map[string]string
fields map[string]interface{}
t time.Time
mType []telegraf.ValueType
}
tests := []struct {
name string
args args
expected []byte
}{
{
name: "escape backslashes in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\`},
t: epoch,
},
expected: []byte(`cpu value="test\\" 0`),
},
{
name: "escape quote in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test"`},
t: epoch,
},
expected: []byte(`cpu value="test\"" 0`),
},
{
name: "escape quote and backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\"`},
t: epoch,
},
expected: []byte(`cpu value="test\\\"" 0`),
},
{
name: "escape multiple backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\\`},
t: epoch,
},
expected: []byte(`cpu value="test\\\\" 0`),
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf := make([]byte, 512)
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
require.NoError(t, err)

r := NewReader([]telegraf.Metric{m})
num, err := r.Read(buf)
if err != io.EOF {
require.NoError(t, err)
}
line := string(buf[:num])
// This is done so that we can use raw strings in the test spec
noeol := strings.TrimRight(line, "\n")
require.Equal(t, string(tt.expected), noeol)
require.Equal(t, len(tt.expected)+1, num)
})
}
}

func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=
24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
@@ -1,38 +1,25 @@
# Histogram Aggregator Plugin

#### Goal
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.

This plugin was added for ability to build histograms.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).

#### Description
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts however are not reset between periods and will be non-strictly
increasing while Telegraf is running.

The histogram aggregator plugin aggregates values of specified metric's
fields. The metric is emitted every `period` seconds. All you need to do
is to specify borders of histogram buckets and fields, for which you want
to aggregate histogram.
#### Design

#### How it works

The each metric is passed to the aggregator and this aggregator searches
Each metric is passed to the aggregator and this aggregator searches
histogram buckets for those fields, which have been specified in the
config. If buckets are found, the aggregator will put +1 to appropriate
bucket. Otherwise, nothing will happen. Every `period` seconds these data
will be pushed to output.
config. If buckets are found, the aggregator will increment +1 to the appropriate
bucket otherwise it will be added to the `+Inf` bucket. Every `period`
seconds this data will be forwarded to the outputs.

Note, that the all hits of current bucket will be also added to all next
buckets in final result of distribution. Why does it work this way? In
configuration you define right borders for each bucket in a ascending
sequence. Internally buckets are presented as ranges with borders
(0..bucketBorder]: 0..1, 0..10, 0..50, …, 0..+Inf. So the value "+1" will be
put into those buckets, in which the metric value fell with such ranges of
buckets.

This plugin creates cumulative histograms. It means, that the hits in the
buckets will always increase from the moment of telegraf start. But if you
restart telegraf, all hits in the buckets will be reset to 0.

Also, the algorithm of hit counting to buckets was implemented on the base
of the algorithm, which is implemented in the Prometheus
The algorithm of hit counting to buckets was implemented on the base
of the algorithm which is implemented in the Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).

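A minimal Go sketch of the cumulative counting rule described above (an illustration of the scheme, not the plugin's actual code): an observed value increments every bucket whose right border it does not exceed, and always increments the implicit `+Inf` bucket.

```go
package main

import "fmt"

// borders are the configured right bucket borders, in ascending order;
// counts has one extra slot at the end for the implicit +Inf bucket.
func observe(borders []float64, counts []int64, v float64) {
	for i, b := range borders {
		if v <= b {
			counts[i]++
		}
	}
	counts[len(borders)]++ // +Inf catches everything
}

func main() {
	borders := []float64{5, 10, 30, 70, 100}
	counts := make([]int64, len(borders)+1)
	observe(borders, counts, 10)
	fmt.Println(counts) // [0 1 1 1 1 1]: the value lands in le=10 and all larger buckets
}
```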
### Configuration
@@ -40,61 +27,44 @@ of the algorithm, which is implemented in the Prometheus
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
## The period in which to flush the aggregator.
period = "30s"

## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false

## The example of config to aggregate histogram for all fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
## The name of metric.
metric_name = "cpu"
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"

## The example of config to aggregate histogram for concrete fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
## The name of metric.
metric_name = "diskio"
## The concrete fields of metric.
metric_fields = ["io_time", "read_time", "write_time"]
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
```

#### Explanation
The user is responsible for defining the bounds of the histogram bucket as
well as the measurement name and fields to aggregate.

The field `metric_fields` is the list of metric fields. For example, the
metric `cpu` has the following fields: usage_user, usage_system,
usage_idle, usage_nice, usage_iowait, usage_irq, usage_softirq, usage_steal,
usage_guest, usage_guest_nice.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.

Note that histogram metrics will be pushed every `period` seconds.
As you know telegraf calls aggregator `Reset()` func each `period` seconds.
Histogram aggregator ignores `Reset()` and continues to count hits.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.

#### Use cases

You can specify fields using two cases:

1. The specifying only metric name. In this case all fields of metric
will be aggregated.
2. The specifying metric name and concrete field.

#### Some rules

- The setting of each histogram must be in separate section with title
`aggregators.histogram.config`.

- The each value of bucket must be float value.

- Don\`t include the border bucket `+Inf`. It will be done automatically.

### Measurements & Fields:

The postfix `bucket` will be added to each field.
The postfix `bucket` will be added to each field key.

- measurement1
- field1_bucket
@@ -102,16 +72,15 @@ The postfix `bucket` will be added to each field.

### Tags:

All measurements have tag `le`. This tag has the border value of bucket. It
means that the metric value is less or equal to the value of this tag. For
example, let assume that we have the metric value 10 and the following
buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value 10,
because the metrics value is passed into bucket with right border value `10`.
All measurements are given the tag `le`. This tag has the border value of
bucket. It means that the metric value is less than or equal to the value of
this tag. For example, let assume that we have the metric value 10 and the
following buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value
10, because the metrics value is passed into bucket with right border value
`10`.

### Example Output:

The following output will return to the Prometheus client.

```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000

@@ -24,8 +24,8 @@ type HistogramAggregator struct {

// config is the config, which contains name, field of metric and histogram buckets.
type config struct {
Metric string `toml:"metric_name"`
Fields []string `toml:"metric_fields"`
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}

@@ -65,28 +65,28 @@ func NewHistogramAggregator() telegraf.Aggregator {
}

var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
## The period in which to flush the aggregator.
period = "30s"

## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false

## The example of config to aggregate histogram for all fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
## The name of metric.
metric_name = "cpu"
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"

## The example of config to aggregate for specified fields of metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
## The name of metric.
metric_name = "diskio"
## The concrete fields of metric
metric_fields = ["io_time", "read_time", "write_time"]
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
`

// SampleConfig returns sample of config
@@ -96,7 +96,7 @@ func (h *HistogramAggregator) SampleConfig() string {

// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Keep the aggregate histogram of each metric passing through."
return "Create aggregate histograms."
}

// Add adds new hit to the buckets

@@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

@@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
## Use SSL but skip chain & host verification
# insecure_skip_verify = false

## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}

@@ -1,5 +1,3 @@
// +build linux

package chrony

import (

@@ -1,3 +0,0 @@
// +build !linux

package chrony
@@ -1,5 +1,3 @@
// +build linux

package chrony

import (

@@ -1,3 +1,5 @@
// +build linux

package dmcache

import (
@@ -1,19 +1,19 @@
# Fail2ban Plugin
# Fail2ban Input Plugin

The fail2ban plugin gathers counts of failed and banned ip addresses from fail2ban.
The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).

This plugin run fail2ban-client command, and fail2ban-client require root access.
You have to grant telegraf to run fail2ban-client:
This plugin runs the `fail2ban-client` command which generally requires root access.
Acquiring the required permissions can be done using several methods:

- Run telegraf as root. (deprecate)
- Configure sudo to grant telegraf to fail2ban-client.
- Use sudo run fail2ban-client.
- Run telegraf as root. (not recommended)

### Using sudo

You may edit your sudo configuration with the following:

``` sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
```

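If you adopt the sudoers rule above, a quick sanity check (illustrative commands, not part of this diff) is:

```
# as root: list what sudo will let the telegraf user run
sudo -l -U telegraf
# as the telegraf user: -n makes sudo fail instead of prompting,
# so a broken rule surfaces immediately
sudo -n /usr/bin/fail2ban-client status
```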
### Configuration:
@@ -21,10 +21,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
``` toml
# Read metrics from fail2ban.
[[inputs.fail2ban]]
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
```

@@ -38,7 +35,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *

- All measurements have the following tags:
- jail


### Example Output:

```
@@ -55,6 +52,5 @@ Status for the jail: sshd
```

```
$ ./telegraf --config telegraf.conf --input-filter fail2ban --test
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
```

@@ -1,5 +1,3 @@
// +build linux

package fail2ban

import (
@@ -8,9 +6,10 @@ import (
"os/exec"
"strings"

"strconv"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"strconv"
)

var (
@@ -23,10 +22,7 @@ type Fail2ban struct {
}

var sampleConfig = `
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
`


@@ -1,3 +0,0 @@
// +build !linux

package fail2ban
@@ -1,5 +1,3 @@
// +build linux

package hddtemp

import (

@@ -1,3 +0,0 @@
// +build !linux

package hddtemp
@@ -25,6 +25,7 @@ package nsq
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -101,28 +102,42 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error {
return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status)
}

s := &NSQStats{}
err = json.NewDecoder(r.Body).Decode(s)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return fmt.Errorf(`Error reading body: %s`, err)
}

data := &NSQStatsData{}
err = json.Unmarshal(body, data)
if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err)
}
// Data was not parsed correctly attempt to use old format.
if len(data.Version) < 1 {
wrapper := &NSQStats{}
err = json.Unmarshal(body, wrapper)
if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err)
}
data = &wrapper.Data
}

tags := map[string]string{
`server_host`: u.Host,
`server_version`: s.Data.Version,
`server_version`: data.Version,
}

fields := make(map[string]interface{})
if s.Data.Health == `OK` {
if data.Health == `OK` {
fields["server_count"] = int64(1)
} else {
fields["server_count"] = int64(0)
}
fields["topic_count"] = int64(len(s.Data.Topics))
fields["topic_count"] = int64(len(data.Topics))

acc.AddFields("nsq_server", fields, tags)
for _, t := range s.Data.Topics {
topicStats(t, acc, u.Host, s.Data.Version)
for _, t := range data.Topics {
topicStats(t, acc, u.Host, data.Version)
}

return nil
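The fallback logic above distinguishes two response shapes of the nsqd `/stats` endpoint. Roughly (illustrative, trimmed payloads): newer daemons return the stats object at the top level, while older ones wrap it in a `data` envelope, which is why an empty top-level `version` triggers the second parse:

```
{"version": "1.0.0-compat", "health": "OK", "topics": []}

{"status_code": 200, "status_txt": "OK",
 "data": {"version": "0.3.8", "health": "OK", "topics": []}}
```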
@@ -189,7 +204,6 @@ func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic,
"server_version": version,
"topic": topic,
"channel": channel,
"client_name": c.Name,
"client_id": c.ID,
"client_hostname": c.Hostname,
"client_version": c.Version,
@@ -199,6 +213,9 @@
"client_snappy": strconv.FormatBool(c.Snappy),
"client_deflate": strconv.FormatBool(c.Deflate),
}
if len(c.Name) > 0 {
tags["client_name"] = c.Name
}

fields := map[string]interface{}{
"ready_count": c.ReadyCount,
@@ -248,7 +265,7 @@ type ChannelStats struct {
}

type ClientStats struct {
Name string `json:"name"`
Name string `json:"name"` // DEPRECATED 1.x+, still here as the structs are currently being shared for parsing v3.x and 1.x
ID string `json:"client_id"`
Hostname string `json:"hostname"`
Version string `json:"version"`

@@ -12,10 +12,267 @@ import (
"github.com/stretchr/testify/require"
)

func TestNSQStats(t *testing.T) {
func TestNSQStatsV1(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, response)
fmt.Fprintln(w, responseV1)
}))
defer ts.Close()

n := &NSQ{
Endpoints: []string{ts.URL},
}

var acc testutil.Accumulator
err := acc.GatherError(n.Gather)
require.NoError(t, err)

u, err := url.Parse(ts.URL)
require.NoError(t, err)
host := u.Host

// actually validate the tests
tests := []struct {
m string
f map[string]interface{}
g map[string]string
}{
{
"nsq_server",
map[string]interface{}{
"server_count": int64(1),
"topic_count": int64(2),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
},
},
{
"nsq_topic",
map[string]interface{}{
"depth": int64(12),
"backend_depth": int64(13),
"message_count": int64(14),
"channel_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t1"},
},
{
"nsq_channel",
map[string]interface{}{
"depth": int64(0),
"backend_depth": int64(1),
"inflight_count": int64(2),
"deferred_count": int64(3),
"message_count": int64(4),
"requeue_count": int64(5),
"timeout_count": int64(6),
"client_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t1",
"channel": "c1",
},
},
{
"nsq_client",
map[string]interface{}{
"ready_count": int64(200),
"inflight_count": int64(7),
"message_count": int64(8),
"finish_count": int64(9),
"requeue_count": int64(10),
},
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
"topic": "t1", "channel": "c1",
"client_id": "373a715cd990", "client_hostname": "373a715cd990",
"client_version": "V2", "client_address": "172.17.0.11:35560",
"client_tls": "false", "client_snappy": "false",
"client_deflate": "false",
"client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"},
},
{
"nsq_topic",
map[string]interface{}{
"depth": int64(28),
"backend_depth": int64(29),
"message_count": int64(30),
"channel_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t2"},
},
{
"nsq_channel",
map[string]interface{}{
"depth": int64(15),
"backend_depth": int64(16),
"inflight_count": int64(17),
"deferred_count": int64(18),
"message_count": int64(19),
"requeue_count": int64(20),
"timeout_count": int64(21),
|
||||
"client_count": int64(1),
|
||||
},
|
||||
map[string]string{
|
||||
"server_host": host,
|
||||
"server_version": "1.0.0-compat",
|
||||
"topic": "t2",
|
||||
"channel": "c2",
|
||||
},
|
||||
},
|
||||
{
|
||||
"nsq_client",
|
||||
map[string]interface{}{
|
||||
"ready_count": int64(22),
|
||||
"inflight_count": int64(23),
|
||||
"message_count": int64(24),
|
||||
"finish_count": int64(25),
|
||||
"requeue_count": int64(26),
|
||||
},
|
||||
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
|
||||
"topic": "t2", "channel": "c2",
|
||||
"client_id": "377569bd462b", "client_hostname": "377569bd462b",
|
||||
"client_version": "V2", "client_address": "172.17.0.8:48145",
|
||||
"client_user_agent": "go-nsq/1.0.5", "client_tls": "true",
|
||||
"client_snappy": "true", "client_deflate": "true"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
acc.AssertContainsTaggedFields(t, test.m, test.f, test.g)
|
||||
}
|
||||
}
|
||||
|
||||
// v1 version of the localhost/stats?format=json response body
|
||||
var responseV1 = `
|
||||
{
|
||||
"version": "1.0.0-compat",
|
||||
"health": "OK",
|
||||
"start_time": 1452021674,
|
||||
"topics": [
|
||||
{
|
||||
"topic_name": "t1",
|
||||
"channels": [
|
||||
{
|
||||
"channel_name": "c1",
|
||||
"depth": 0,
|
||||
"backend_depth": 1,
|
||||
"in_flight_count": 2,
|
||||
"deferred_count": 3,
|
||||
"message_count": 4,
|
||||
"requeue_count": 5,
|
||||
"timeout_count": 6,
|
||||
"clients": [
|
||||
{
|
||||
"client_id": "373a715cd990",
|
||||
"hostname": "373a715cd990",
|
||||
"version": "V2",
|
||||
"remote_address": "172.17.0.11:35560",
|
||||
"state": 3,
|
||||
"ready_count": 200,
|
||||
"in_flight_count": 7,
|
||||
"message_count": 8,
|
||||
"finish_count": 9,
|
||||
"requeue_count": 10,
|
||||
"connect_ts": 1452021675,
|
||||
"sample_rate": 11,
|
||||
"deflate": false,
|
||||
"snappy": false,
|
||||
"user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5",
|
||||
"tls": false,
|
||||
"tls_cipher_suite": "",
|
||||
"tls_version": "",
|
||||
"tls_negotiated_protocol": "",
|
||||
"tls_negotiated_protocol_is_mutual": false
|
||||
}
|
||||
],
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"depth": 12,
|
||||
"backend_depth": 13,
|
||||
"message_count": 14,
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"topic_name": "t2",
|
||||
"channels": [
|
||||
{
|
||||
"channel_name": "c2",
|
||||
"depth": 15,
|
||||
"backend_depth": 16,
|
||||
"in_flight_count": 17,
|
||||
"deferred_count": 18,
|
||||
"message_count": 19,
|
||||
"requeue_count": 20,
|
||||
"timeout_count": 21,
|
||||
"clients": [
|
||||
{
|
||||
"client_id": "377569bd462b",
|
||||
"hostname": "377569bd462b",
|
||||
"version": "V2",
|
||||
"remote_address": "172.17.0.8:48145",
|
||||
"state": 3,
|
||||
"ready_count": 22,
|
||||
"in_flight_count": 23,
|
||||
"message_count": 24,
|
||||
"finish_count": 25,
|
||||
"requeue_count": 26,
|
||||
"connect_ts": 1452021678,
|
||||
"sample_rate": 27,
|
||||
"deflate": true,
|
||||
"snappy": true,
|
||||
"user_agent": "go-nsq\/1.0.5",
|
||||
"tls": true,
|
||||
"tls_cipher_suite": "",
|
||||
"tls_version": "",
|
||||
"tls_negotiated_protocol": "",
|
||||
"tls_negotiated_protocol_is_mutual": false
|
||||
}
|
||||
],
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"depth": 28,
|
||||
"backend_depth": 29,
|
||||
"message_count": 30,
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
`
|
||||
|
||||
// TestNSQStatsPreV1 is for backwards compatibility with nsq versions < 1.0
|
||||
func TestNSQStatsPreV1(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, responsePreV1)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
@@ -152,7 +409,7 @@ func TestNSQStats(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
var response = `
|
||||
var responsePreV1 = `
|
||||
{
|
||||
"status_code": 200,
|
||||
"status_txt": "OK",
|
||||
|
||||
@@ -69,7 +69,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
|
||||
// Due to problems with parsing, we have to use a regular expression in order
|
||||
// to remove strings that start with '(' and end with a space
|
||||
// see: https://github.com/influxdata/telegraf/issues/2386
|
||||
reg, err := regexp.Compile("\\([\\S]*")
|
||||
reg, err := regexp.Compile("\\s+\\([\\S]*")
|
||||
if err != nil {
|
||||
return err
|
||||
}
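To see why the added `\s+` anchor matters, here is a small standalone Go sketch (separate from the plugin's parsing path) that applies the new expression to two sample `ntpq` lines: an SHM remote, which must be left intact, and a remote followed by a truncated `(domain` fragment, which should be stripped.

```go
// Standalone demo of the regexp change above: the leading \s+ means only
// parenthesized fragments preceded by whitespace are removed, so remotes
// such as "SHM(0)" survive untouched.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	reg := regexp.MustCompile(`\s+\([\S]*`)
	lines := []string{
		"*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012",
		"+37.58.57.238 (d 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101",
	}
	for _, line := range lines {
		fmt.Println(reg.ReplaceAllString(line, ""))
	}
	// Output:
	// *SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012
	// +37.58.57.238 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
}
```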
|
||||
|
||||
@@ -260,6 +260,57 @@ func TestParserNTPQ(t *testing.T) {
|
||||
}
|
||||
acc := testutil.Accumulator{}
|
||||
assert.NoError(t, acc.GatherError(n.Gather))
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"poll": int64(64),
|
||||
"when": int64(60),
|
||||
"reach": int64(377),
|
||||
"delay": float64(0.0),
|
||||
"offset": float64(0.045),
|
||||
"jitter": float64(1.012),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"remote": "SHM(0)",
|
||||
"state_prefix": "*",
|
||||
"refid": ".PPS.",
|
||||
"stratum": "1",
|
||||
"type": "u",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
|
||||
|
||||
fields = map[string]interface{}{
|
||||
"poll": int64(128),
|
||||
"when": int64(121),
|
||||
"reach": int64(377),
|
||||
"delay": float64(0.0),
|
||||
"offset": float64(10.105),
|
||||
"jitter": float64(2.012),
|
||||
}
|
||||
tags = map[string]string{
|
||||
"remote": "SHM(1)",
|
||||
"state_prefix": "-",
|
||||
"refid": ".GPS.",
|
||||
"stratum": "1",
|
||||
"type": "u",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
|
||||
|
||||
fields = map[string]interface{}{
|
||||
"poll": int64(1024),
|
||||
"when": int64(10),
|
||||
"reach": int64(377),
|
||||
"delay": float64(1.748),
|
||||
"offset": float64(0.373),
|
||||
"jitter": float64(0.101),
|
||||
}
|
||||
tags = map[string]string{
|
||||
"remote": "37.58.57.238",
|
||||
"state_prefix": "+",
|
||||
"refid": "192.53.103.103",
|
||||
"stratum": "2",
|
||||
"type": "u",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
|
||||
}
|
||||
|
||||
func TestMultiNTPQ(t *testing.T) {
|
||||
@@ -480,7 +531,9 @@ var multiNTPQ = ` remote refid st t when poll reach delay
|
||||
`
|
||||
var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter
|
||||
==============================================================================
|
||||
*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012
|
||||
+37.58.57.238 (d 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
||||
+37.58.57.238 (domain) 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
||||
+37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
|
||||
-SHM(1) .GPS. 1 u 121 128 377 0.000 10.105 2.012
|
||||
`
|
||||
|
||||
@@ -33,41 +33,48 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) {
|
||||
tags := map[string]string{"pool": col[0], "health": col[8]}
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
size, err := strconv.ParseInt(col[1], 10, 64)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing size: %s", err)
|
||||
}
|
||||
fields["size"] = size
|
||||
if tags["health"] == "UNAVAIL" {
|
||||
|
||||
alloc, err := strconv.ParseInt(col[2], 10, 64)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing allocation: %s", err)
|
||||
}
|
||||
fields["allocated"] = alloc
|
||||
fields["size"] = int64(0)
|
||||
|
||||
free, err := strconv.ParseInt(col[3], 10, 64)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing free: %s", err)
|
||||
}
|
||||
fields["free"] = free
|
||||
} else {
|
||||
|
||||
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
|
||||
if err != nil { // This might be - for RO devs
|
||||
frag = 0
|
||||
}
|
||||
fields["fragmentation"] = frag
|
||||
size, err := strconv.ParseInt(col[1], 10, 64)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing size: %s", err)
|
||||
}
|
||||
fields["size"] = size
|
||||
|
||||
capval, err := strconv.ParseInt(col[6], 10, 0)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing capacity: %s", err)
|
||||
}
|
||||
fields["capacity"] = capval
|
||||
alloc, err := strconv.ParseInt(col[2], 10, 64)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing allocation: %s", err)
|
||||
}
|
||||
fields["allocated"] = alloc
|
||||
|
||||
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
|
||||
free, err := strconv.ParseInt(col[3], 10, 64)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing free: %s", err)
|
||||
}
|
||||
fields["free"] = free
|
||||
|
||||
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
|
||||
if err != nil { // This might be - for RO devs
|
||||
frag = 0
|
||||
}
|
||||
fields["fragmentation"] = frag
|
||||
|
||||
capval, err := strconv.ParseInt(col[6], 10, 0)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing capacity: %s", err)
|
||||
}
|
||||
fields["capacity"] = capval
|
||||
|
||||
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
|
||||
}
|
||||
fields["dedupratio"] = dedup
|
||||
}
|
||||
fields["dedupratio"] = dedup
|
||||
|
||||
acc.AddFields("zfs_pool", fields, tags)
|
||||
}
|
||||
|
||||
@@ -22,6 +22,15 @@ func mock_zpool() ([]string, error) {
|
||||
return zpool_output, nil
|
||||
}
|
||||
|
||||
// $ zpool list -Hp
|
||||
var zpool_output_unavail = []string{
|
||||
"temp2 - - - - - - - UNAVAIL -",
|
||||
}
|
||||
|
||||
func mock_zpool_unavail() ([]string, error) {
|
||||
return zpool_output_unavail, nil
|
||||
}
|
||||
|
||||
// sysctl -q kstat.zfs.misc.arcstats
|
||||
|
||||
// sysctl -q kstat.zfs.misc.vdev_cache_stats
|
||||
@@ -82,6 +91,41 @@ func TestZfsPoolMetrics(t *testing.T) {
|
||||
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
|
||||
}
|
||||
|
||||
func TestZfsPoolMetrics_unavail(t *testing.T) {
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
z := &Zfs{
|
||||
KstatMetrics: []string{"vdev_cache_stats"},
|
||||
sysctl: mock_sysctl,
|
||||
zpool: mock_zpool_unavail,
|
||||
}
|
||||
err := z.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.False(t, acc.HasMeasurement("zfs_pool"))
|
||||
acc.Metrics = nil
|
||||
|
||||
z = &Zfs{
|
||||
KstatMetrics: []string{"vdev_cache_stats"},
|
||||
PoolMetrics: true,
|
||||
sysctl: mock_sysctl,
|
||||
zpool: mock_zpool_unavail,
|
||||
}
|
||||
err = z.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
// one pool, UNAVAIL
|
||||
tags := map[string]string{
|
||||
"pool": "temp2",
|
||||
"health": "UNAVAIL",
|
||||
}
|
||||
|
||||
poolMetrics := getTemp2PoolMetrics()
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
|
||||
}
|
||||
|
||||
func TestZfsGeneratesMetrics(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
|
||||
@@ -128,6 +172,12 @@ func getFreeNasBootPoolMetrics() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func getTemp2PoolMetrics() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"size": int64(0),
|
||||
}
|
||||
}
|
||||
|
||||
func getKstatMetricsVdevOnly() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"vdev_cache_stats_misses": int64(87789),
|
||||
|
||||
@@ -12,6 +12,9 @@ based on its main usage cases and the evolution of the OpenTracing standard.*
|
||||
port = 9411 # Port on which Telegraf listens
|
||||
```
|
||||
|
||||
The plugin accepts spans in `JSON` or `thrift` if the `Content-Type` is `application/json` or `application/x-thrift`, respectively.
|
||||
If `Content-Type` is not set, the plugin assumes the body is in `JSON` format.
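As a quick illustration of the paragraph above, here is a minimal Go client that posts a JSON span batch to the listener; the `/api/v1/spans` path is an assumption based on the Zipkin v1 API, since the excerpt only states the port and the accepted `Content-Type` values.

```go
// Minimal sketch of a client posting one JSON span to the Telegraf zipkin
// listener on port 9411. The span body reuses the shape from the tests;
// the /api/v1/spans path is an assumption, not stated in this excerpt.
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	spans := []byte(`[{"traceId":"6b221d5bc9e6496c","name":"get-traces","id":"6b221d5bc9e6496c"}]`)
	resp, err := http.Post("http://localhost:9411/api/v1/spans", "application/json", bytes.NewReader(spans))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```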
|
||||
|
||||
## Tracing:
|
||||
|
||||
This plugin uses Annotations tags and fields to track data from spans
|
||||
|
||||
@@ -62,13 +62,17 @@ func main() {
|
||||
if err != nil {
|
||||
log.Fatalf("%v\n", err)
|
||||
}
|
||||
ioutil.WriteFile(outFileName, raw, 0644)
|
||||
if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil {
|
||||
log.Fatalf("%v", err)
|
||||
}
|
||||
case "thrift":
|
||||
raw, err := thriftToJSONSpans(contents)
|
||||
if err != nil {
|
||||
log.Fatalf("%v\n", err)
|
||||
}
|
||||
ioutil.WriteFile(outFileName, raw, 0644)
|
||||
if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil {
|
||||
log.Fatalf("%v", err)
|
||||
}
|
||||
default:
|
||||
log.Fatalf("Unsupported input type")
|
||||
}
|
||||
|
||||
210
plugins/inputs/zipkin/codec/codec.go
Normal file
@@ -0,0 +1,210 @@
|
||||
package codec
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
// now is a mockable stand-in for time.Now, so tests can control the clock
|
||||
var now = time.Now
|
||||
|
||||
// DefaultServiceName is used when the span does not have a serviceName
|
||||
const DefaultServiceName = "unknown"
|
||||
|
||||
// Decoder decodes the bytes and returns a trace
|
||||
type Decoder interface {
|
||||
Decode(octets []byte) ([]Span, error)
|
||||
}
|
||||
|
||||
// Span represents a unit of work created by instrumentation in RPC clients or servers
|
||||
type Span interface {
|
||||
Trace() (string, error)
|
||||
SpanID() (string, error)
|
||||
Parent() (string, error)
|
||||
Name() string
|
||||
Annotations() []Annotation
|
||||
BinaryAnnotations() ([]BinaryAnnotation, error)
|
||||
Timestamp() time.Time
|
||||
Duration() time.Duration
|
||||
}
|
||||
|
||||
// Annotation represents an event that explains latency with a timestamp.
|
||||
type Annotation interface {
|
||||
Timestamp() time.Time
|
||||
Value() string
|
||||
Host() Endpoint
|
||||
}
|
||||
|
||||
// BinaryAnnotation represent tags applied to a Span to give it context
|
||||
type BinaryAnnotation interface {
|
||||
Key() string
|
||||
Value() string
|
||||
Host() Endpoint
|
||||
}
|
||||
|
||||
// Endpoint represents the network context of a service recording an annotation
|
||||
type Endpoint interface {
|
||||
Host() string
|
||||
Name() string
|
||||
}
|
||||
|
||||
// DefaultEndpoint is used if the annotations have no endpoints
|
||||
type DefaultEndpoint struct{}
|
||||
|
||||
// Host returns 0.0.0.0; used when the host is unknown
|
||||
func (d *DefaultEndpoint) Host() string { return "0.0.0.0" }
|
||||
|
||||
// Name returns "unknown" when an endpoint doesn't exist
|
||||
func (d *DefaultEndpoint) Name() string { return DefaultServiceName }
|
||||
|
||||
// MicroToTime converts Zipkin's native microsecond timestamps into time.Time
|
||||
func MicroToTime(micro int64) time.Time {
|
||||
return time.Unix(0, micro*int64(time.Microsecond)).UTC()
|
||||
}
|
||||
|
||||
// NewTrace converts a slice of []Span into a new Trace
|
||||
func NewTrace(spans []Span) (trace.Trace, error) {
|
||||
tr := make(trace.Trace, len(spans))
|
||||
for i, span := range spans {
|
||||
bin, err := span.BinaryAnnotations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
endpoint := serviceEndpoint(span.Annotations(), bin)
|
||||
id, err := span.SpanID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tid, err := span.Trace()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pid, err := parentID(span)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tr[i] = trace.Span{
|
||||
ID: id,
|
||||
TraceID: tid,
|
||||
Name: span.Name(),
|
||||
Timestamp: guessTimestamp(span),
|
||||
Duration: convertDuration(span),
|
||||
ParentID: pid,
|
||||
ServiceName: endpoint.Name(),
|
||||
Annotations: NewAnnotations(span.Annotations(), endpoint),
|
||||
BinaryAnnotations: NewBinaryAnnotations(bin, endpoint),
|
||||
}
|
||||
}
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
// NewAnnotations converts a slice of Annotation into a slice of new Annotations
|
||||
func NewAnnotations(annotations []Annotation, endpoint Endpoint) []trace.Annotation {
|
||||
formatted := make([]trace.Annotation, len(annotations))
|
||||
for i, annotation := range annotations {
|
||||
formatted[i] = trace.Annotation{
|
||||
Host: endpoint.Host(),
|
||||
ServiceName: endpoint.Name(),
|
||||
Timestamp: annotation.Timestamp(),
|
||||
Value: annotation.Value(),
|
||||
}
|
||||
}
|
||||
|
||||
return formatted
|
||||
}
|
||||
|
||||
// NewBinaryAnnotations is very similar to NewAnnotations, but it
|
||||
// converts BinaryAnnotations instead of the normal Annotation
|
||||
func NewBinaryAnnotations(annotations []BinaryAnnotation, endpoint Endpoint) []trace.BinaryAnnotation {
|
||||
formatted := make([]trace.BinaryAnnotation, len(annotations))
|
||||
for i, annotation := range annotations {
|
||||
formatted[i] = trace.BinaryAnnotation{
|
||||
Host: endpoint.Host(),
|
||||
ServiceName: endpoint.Name(),
|
||||
Key: annotation.Key(),
|
||||
Value: annotation.Value(),
|
||||
}
|
||||
}
|
||||
return formatted
|
||||
}
|
||||
|
||||
func minMax(span Span) (time.Time, time.Time) {
|
||||
min := now().UTC()
|
||||
max := time.Time{}.UTC()
|
||||
for _, annotation := range span.Annotations() {
|
||||
ts := annotation.Timestamp()
|
||||
if !ts.IsZero() && ts.Before(min) {
|
||||
min = ts
|
||||
}
|
||||
if !ts.IsZero() && ts.After(max) {
|
||||
max = ts
|
||||
}
|
||||
}
|
||||
if max.IsZero() {
|
||||
max = min
|
||||
}
|
||||
return min, max
|
||||
}
|
||||
|
||||
func guessTimestamp(span Span) time.Time {
|
||||
ts := span.Timestamp()
|
||||
if !ts.IsZero() {
|
||||
return ts
|
||||
}
|
||||
|
||||
min, _ := minMax(span)
|
||||
return min
|
||||
}
|
||||
|
||||
func convertDuration(span Span) time.Duration {
|
||||
duration := span.Duration()
|
||||
if duration != 0 {
|
||||
return duration
|
||||
}
|
||||
min, max := minMax(span)
|
||||
return max.Sub(min)
|
||||
}
|
||||
|
||||
func parentID(span Span) (string, error) {
|
||||
// A parent ID of "" means that this is a parent span. In this case,
|
||||
// we set the parent ID of the span to be its own id, so it points to
|
||||
// itself.
|
||||
id, err := span.Parent()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if id != "" {
|
||||
return id, nil
|
||||
}
|
||||
return span.SpanID()
|
||||
}
|
||||
|
||||
func serviceEndpoint(ann []Annotation, bann []BinaryAnnotation) Endpoint {
|
||||
for _, a := range ann {
|
||||
switch a.Value() {
|
||||
case zipkincore.SERVER_RECV, zipkincore.SERVER_SEND, zipkincore.CLIENT_RECV, zipkincore.CLIENT_SEND:
|
||||
if a.Host() != nil && a.Host().Name() != "" {
|
||||
return a.Host()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, a := range bann {
|
||||
if a.Key() == zipkincore.LOCAL_COMPONENT && a.Host() != nil && a.Host().Name() != "" {
|
||||
return a.Host()
|
||||
}
|
||||
}
|
||||
// Unable to find any "standard" endpoint host, so use any that exists in the regular annotations
|
||||
for _, a := range ann {
|
||||
if a.Host() != nil && a.Host().Name() != "" {
|
||||
return a.Host()
|
||||
}
|
||||
}
|
||||
return &DefaultEndpoint{}
|
||||
}
|
||||
636
plugins/inputs/zipkin/codec/codec_test.go
Normal file
@@ -0,0 +1,636 @@
|
||||
package codec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
|
||||
)
|
||||
|
||||
func Test_MicroToTime(t *testing.T) {
|
||||
type args struct {
|
||||
micro int64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
micro int64
|
||||
want time.Time
|
||||
}{
|
||||
{
|
||||
name: "given zero micro seconds expected unix time zero",
|
||||
micro: 0,
|
||||
want: time.Unix(0, 0).UTC(),
|
||||
},
|
||||
{
|
||||
name: "given a million micro seconds expected unix time one",
|
||||
micro: 1000000,
|
||||
want: time.Unix(1, 0).UTC(),
|
||||
},
|
||||
{
|
||||
name: "given a million micro seconds expected unix time one",
|
||||
micro: 1503031538791000,
|
||||
want: time.Unix(0, 1503031538791000000).UTC(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := MicroToTime(tt.micro); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("microToTime() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_minMax(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
span *MockSpan
|
||||
now func() time.Time
|
||||
wantMin time.Time
|
||||
wantMax time.Time
|
||||
}{
|
||||
{
|
||||
name: "Single annotation",
|
||||
span: &MockSpan{
|
||||
Anno: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
wantMin: time.Unix(1, 0).UTC(),
|
||||
wantMax: time.Unix(1, 0).UTC(),
|
||||
},
|
||||
{
|
||||
name: "Three annotations",
|
||||
span: &MockSpan{
|
||||
Anno: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(1 * time.Second),
|
||||
},
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(2 * time.Second),
|
||||
},
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
wantMin: time.Unix(1, 0).UTC(),
|
||||
wantMax: time.Unix(3, 0).UTC(),
|
||||
},
|
||||
{
|
||||
name: "Annotations are in the future",
|
||||
span: &MockSpan{
|
||||
Anno: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
wantMin: time.Unix(2, 0).UTC(),
|
||||
wantMax: time.Unix(3, 0).UTC(),
|
||||
now: func() time.Time {
|
||||
return time.Unix(2, 0).UTC()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "No Annotations",
|
||||
span: &MockSpan{
|
||||
Anno: []Annotation{},
|
||||
},
|
||||
wantMin: time.Unix(2, 0).UTC(),
|
||||
wantMax: time.Unix(2, 0).UTC(),
|
||||
now: func() time.Time {
|
||||
return time.Unix(2, 0).UTC()
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.now != nil {
|
||||
now = tt.now
|
||||
}
|
||||
got, got1 := minMax(tt.span)
|
||||
if !reflect.DeepEqual(got, tt.wantMin) {
|
||||
t.Errorf("minMax() got = %v, want %v", got, tt.wantMin)
|
||||
}
|
||||
if !reflect.DeepEqual(got1, tt.wantMax) {
|
||||
t.Errorf("minMax() got1 = %v, want %v", got1, tt.wantMax)
|
||||
}
|
||||
now = time.Now
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_guessTimestamp(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
span Span
|
||||
now func() time.Time
|
||||
want time.Time
|
||||
}{
|
||||
{
|
||||
name: "simple timestamp",
|
||||
span: &MockSpan{
|
||||
Time: time.Unix(2, 0).UTC(),
|
||||
},
|
||||
want: time.Unix(2, 0).UTC(),
|
||||
},
|
||||
{
|
||||
name: "zero timestamp",
|
||||
span: &MockSpan{
|
||||
Time: time.Time{},
|
||||
},
|
||||
now: func() time.Time {
|
||||
return time.Unix(2, 0).UTC()
|
||||
},
|
||||
want: time.Unix(2, 0).UTC(),
|
||||
},
|
||||
{
|
||||
name: "zero timestamp with single annotation",
|
||||
span: &MockSpan{
|
||||
Time: time.Time{},
|
||||
Anno: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC(),
|
||||
},
|
||||
},
|
||||
},
|
||||
want: time.Unix(0, 0).UTC(),
|
||||
},
|
||||
{
|
||||
name: "zero timestamp with two annotations",
|
||||
span: &MockSpan{
|
||||
Time: time.Time{},
|
||||
Anno: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC(),
|
||||
},
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(2, 0).UTC(),
|
||||
},
|
||||
},
|
||||
},
|
||||
want: time.Unix(0, 0).UTC(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.now != nil {
|
||||
now = tt.now
|
||||
}
|
||||
if got := guessTimestamp(tt.span); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("guessTimestamp() = %v, want %v", got, tt.want)
|
||||
}
|
||||
now = time.Now
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_convertDuration(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
span Span
|
||||
want time.Duration
|
||||
}{
|
||||
{
|
||||
name: "simple duration",
|
||||
span: &MockSpan{
|
||||
Dur: time.Hour,
|
||||
},
|
||||
want: time.Hour,
|
||||
},
|
||||
{
|
||||
name: "no timestamp, but, 2 seconds between annotations",
|
||||
span: &MockSpan{
|
||||
Anno: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(1 * time.Second),
|
||||
},
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(2 * time.Second),
|
||||
},
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
want: 2 * time.Second,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := convertDuration(tt.span); got != tt.want {
|
||||
t.Errorf("convertDuration() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parentID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
span Span
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "has parent id",
|
||||
span: &MockSpan{
|
||||
ParentID: "6b221d5bc9e6496c",
|
||||
},
|
||||
want: "6b221d5bc9e6496c",
|
||||
},
|
||||
{
|
||||
name: "no parent, so use id",
|
||||
span: &MockSpan{
|
||||
ID: "abceasyas123",
|
||||
},
|
||||
want: "abceasyas123",
|
||||
},
|
||||
{
|
||||
name: "bad parent value",
|
||||
span: &MockSpan{
|
||||
Error: fmt.Errorf("Mommie Dearest"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := parentID(tt.span)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parentID() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("parentID() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_serviceEndpoint(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ann []Annotation
|
||||
bann []BinaryAnnotation
|
||||
want Endpoint
|
||||
}{
|
||||
{
|
||||
name: "Annotation with server receive",
|
||||
ann: []Annotation{
|
||||
&MockAnnotation{
|
||||
Val: "battery",
|
||||
H: &MockEndpoint{
|
||||
name: "aa",
|
||||
},
|
||||
},
|
||||
&MockAnnotation{
|
||||
Val: "sr",
|
||||
H: &MockEndpoint{
|
||||
name: "me",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &MockEndpoint{
|
||||
name: "me",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Annotation with no standard values",
|
||||
ann: []Annotation{
|
||||
&MockAnnotation{
|
||||
Val: "noop",
|
||||
},
|
||||
&MockAnnotation{
|
||||
Val: "aa",
|
||||
H: &MockEndpoint{
|
||||
name: "battery",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &MockEndpoint{
|
||||
name: "battery",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Annotation with no endpoints",
|
||||
ann: []Annotation{
|
||||
&MockAnnotation{
|
||||
Val: "noop",
|
||||
},
|
||||
},
|
||||
want: &DefaultEndpoint{},
|
||||
},
|
||||
{
|
||||
name: "Binary annotation with local component",
|
||||
bann: []BinaryAnnotation{
|
||||
&MockBinaryAnnotation{
|
||||
K: "noop",
|
||||
H: &MockEndpoint{
|
||||
name: "aa",
|
||||
},
|
||||
},
|
||||
&MockBinaryAnnotation{
|
||||
K: "lc",
|
||||
H: &MockEndpoint{
|
||||
name: "me",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &MockEndpoint{
|
||||
name: "me",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := serviceEndpoint(tt.ann, tt.bann); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("serviceEndpoint() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewBinaryAnnotations(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
annotations []BinaryAnnotation
|
||||
endpoint Endpoint
|
||||
want []trace.BinaryAnnotation
|
||||
}{
|
||||
{
|
||||
name: "Should override annotation with endpoint",
|
||||
annotations: []BinaryAnnotation{
|
||||
&MockBinaryAnnotation{
|
||||
K: "mykey",
|
||||
V: "myvalue",
|
||||
H: &MockEndpoint{
|
||||
host: "noop",
|
||||
name: "noop",
|
||||
},
|
||||
},
|
||||
},
|
||||
endpoint: &MockEndpoint{
|
||||
host: "myhost",
|
||||
name: "myservice",
|
||||
},
|
||||
want: []trace.BinaryAnnotation{
|
||||
trace.BinaryAnnotation{
|
||||
Host: "myhost",
|
||||
ServiceName: "myservice",
|
||||
Key: "mykey",
|
||||
Value: "myvalue",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := NewBinaryAnnotations(tt.annotations, tt.endpoint); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("NewBinaryAnnotations() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewAnnotations(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
annotations []Annotation
|
||||
endpoint Endpoint
|
||||
want []trace.Annotation
|
||||
}{
|
||||
{
|
||||
name: "Should override annotation with endpoint",
|
||||
annotations: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(0, 0).UTC(),
|
||||
Val: "myvalue",
|
||||
H: &MockEndpoint{
|
||||
host: "noop",
|
||||
name: "noop",
|
||||
},
|
||||
},
|
||||
},
|
||||
endpoint: &MockEndpoint{
|
||||
host: "myhost",
|
||||
name: "myservice",
|
||||
},
|
||||
want: []trace.Annotation{
|
||||
trace.Annotation{
|
||||
Host: "myhost",
|
||||
ServiceName: "myservice",
|
||||
Timestamp: time.Unix(0, 0).UTC(),
|
||||
Value: "myvalue",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := NewAnnotations(tt.annotations, tt.endpoint); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("NewAnnotations() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTrace(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
spans []Span
|
||||
now func() time.Time
|
||||
want trace.Trace
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty span",
|
||||
spans: []Span{
|
||||
&MockSpan{},
|
||||
},
|
||||
now: func() time.Time {
|
||||
return time.Unix(0, 0).UTC()
|
||||
},
|
||||
want: trace.Trace{
|
||||
trace.Span{
|
||||
ServiceName: "unknown",
|
||||
Timestamp: time.Unix(0, 0).UTC(),
|
||||
Annotations: []trace.Annotation{},
|
||||
BinaryAnnotations: []trace.BinaryAnnotation{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "span has no id",
|
||||
spans: []Span{
|
||||
&MockSpan{
|
||||
Error: fmt.Errorf("Span has no id"),
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "complete span",
|
||||
spans: []Span{
|
||||
&MockSpan{
|
||||
TraceID: "tid",
|
||||
ID: "id",
|
||||
ParentID: "",
|
||||
ServiceName: "me",
|
||||
Anno: []Annotation{
|
||||
&MockAnnotation{
|
||||
Time: time.Unix(1, 0).UTC(),
|
||||
Val: "myval",
|
||||
H: &MockEndpoint{
|
||||
host: "myhost",
|
||||
name: "myname",
|
||||
},
|
||||
},
|
||||
},
|
||||
Time: time.Unix(0, 0).UTC(),
|
||||
Dur: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
now: func() time.Time {
|
||||
return time.Unix(0, 0).UTC()
|
||||
},
|
||||
want: trace.Trace{
|
||||
trace.Span{
|
||||
ID: "id",
|
||||
ParentID: "id",
|
||||
TraceID: "tid",
|
||||
Name: "me",
|
||||
ServiceName: "myname",
|
||||
Timestamp: time.Unix(0, 0).UTC(),
|
||||
Duration: 2 * time.Second,
|
||||
Annotations: []trace.Annotation{
|
||||
{
|
||||
Timestamp: time.Unix(1, 0).UTC(),
|
||||
Value: "myval",
|
||||
Host: "myhost",
|
||||
ServiceName: "myname",
|
||||
},
|
||||
},
|
||||
BinaryAnnotations: []trace.BinaryAnnotation{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.now != nil {
|
||||
now = tt.now
|
||||
}
|
||||
got, err := NewTrace(tt.spans)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("NewTrace() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !cmp.Equal(tt.want, got) {
|
||||
t.Errorf("NewTrace() = %s", cmp.Diff(tt.want, got))
|
||||
}
|
||||
now = time.Now
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type MockSpan struct {
|
||||
TraceID string
|
||||
ID string
|
||||
ParentID string
|
||||
ServiceName string
|
||||
Anno []Annotation
|
||||
BinAnno []BinaryAnnotation
|
||||
Time time.Time
|
||||
Dur time.Duration
|
||||
Error error
|
||||
}
|
||||
|
||||
func (m *MockSpan) Trace() (string, error) {
|
||||
return m.TraceID, m.Error
|
||||
}
|
||||
|
||||
func (m *MockSpan) SpanID() (string, error) {
|
||||
return m.ID, m.Error
|
||||
}
|
||||
|
||||
func (m *MockSpan) Parent() (string, error) {
|
||||
return m.ParentID, m.Error
|
||||
}
|
||||
|
||||
func (m *MockSpan) Name() string {
|
||||
return m.ServiceName
|
||||
}
|
||||
|
||||
func (m *MockSpan) Annotations() []Annotation {
|
||||
return m.Anno
|
||||
}
|
||||
|
||||
func (m *MockSpan) BinaryAnnotations() ([]BinaryAnnotation, error) {
|
||||
return m.BinAnno, m.Error
|
||||
}
|
||||
|
||||
func (m *MockSpan) Timestamp() time.Time {
|
||||
return m.Time
|
||||
}
|
||||
|
||||
func (m *MockSpan) Duration() time.Duration {
|
||||
return m.Dur
|
||||
}
|
||||
|
||||
type MockAnnotation struct {
|
||||
Time time.Time
|
||||
Val string
|
||||
H Endpoint
|
||||
}
|
||||
|
||||
func (m *MockAnnotation) Timestamp() time.Time {
|
||||
return m.Time
|
||||
}
|
||||
|
||||
func (m *MockAnnotation) Value() string {
|
||||
return m.Val
|
||||
}
|
||||
|
||||
func (m *MockAnnotation) Host() Endpoint {
|
||||
return m.H
|
||||
}
|
||||
|
||||
type MockEndpoint struct {
|
||||
host string
|
||||
name string
|
||||
}
|
||||
|
||||
func (e *MockEndpoint) Host() string {
|
||||
return e.host
|
||||
}
|
||||
|
||||
func (e *MockEndpoint) Name() string {
|
||||
return e.name
|
||||
}
|
||||
|
||||
type MockBinaryAnnotation struct {
|
||||
Time time.Time
|
||||
K string
|
||||
V string
|
||||
H Endpoint
|
||||
}
|
||||
|
||||
func (b *MockBinaryAnnotation) Key() string {
|
||||
return b.K
|
||||
}
|
||||
|
||||
func (b *MockBinaryAnnotation) Value() string {
|
||||
return b.V
|
||||
}
|
||||
|
||||
func (b *MockBinaryAnnotation) Host() Endpoint {
|
||||
return b.H
|
||||
}
|
||||
252
plugins/inputs/zipkin/codec/jsonV1/jsonV1.go
Normal file
@@ -0,0 +1,252 @@
|
||||
package jsonV1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
|
||||
)
|
||||
|
||||
// JSON decodes spans from bodies `POST`ed to the spans endpoint
|
||||
type JSON struct{}
|
||||
|
||||
// Decode unmarshals and validates the JSON body
|
||||
func (j *JSON) Decode(octets []byte) ([]codec.Span, error) {
|
||||
var spans []span
|
||||
err := json.Unmarshal(octets, &spans)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := make([]codec.Span, len(spans))
|
||||
for i := range spans {
|
||||
if err := spans[i].Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[i] = &spans[i]
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
type span struct {
|
||||
TraceID string `json:"traceId"`
|
||||
SpanName string `json:"name"`
|
||||
ParentID string `json:"parentId,omitempty"`
|
||||
ID string `json:"id"`
|
||||
Time *int64 `json:"timestamp,omitempty"`
|
||||
Dur *int64 `json:"duration,omitempty"`
|
||||
Debug bool `json:"debug,omitempty"`
|
||||
Anno []annotation `json:"annotations"`
|
||||
BAnno []binaryAnnotation `json:"binaryAnnotations"`
|
||||
}
|
||||
|
||||
func (s *span) Validate() error {
|
||||
var err error
|
||||
check := func(f func() (string, error)) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = f()
|
||||
}
|
||||
|
||||
check(s.Trace)
|
||||
check(s.SpanID)
|
||||
check(s.Parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = s.BinaryAnnotations()
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *span) Trace() (string, error) {
|
||||
if s.TraceID == "" {
|
||||
return "", fmt.Errorf("Trace ID cannot be null")
|
||||
}
|
||||
return TraceIDFromString(s.TraceID)
|
||||
}
|
||||
|
||||
func (s *span) SpanID() (string, error) {
|
||||
if s.ID == "" {
|
||||
return "", fmt.Errorf("Span ID cannot be null")
|
||||
}
|
||||
return IDFromString(s.ID)
|
||||
}
|
||||
|
||||
func (s *span) Parent() (string, error) {
|
||||
if s.ParentID == "" {
|
||||
return "", nil
|
||||
}
|
||||
return IDFromString(s.ParentID)
|
||||
}
|
||||
|
||||
func (s *span) Name() string {
|
||||
return s.SpanName
|
||||
}
|
||||
|
||||
func (s *span) Annotations() []codec.Annotation {
|
||||
res := make([]codec.Annotation, len(s.Anno))
|
||||
for i := range s.Anno {
|
||||
res[i] = &s.Anno[i]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *span) BinaryAnnotations() ([]codec.BinaryAnnotation, error) {
|
||||
res := make([]codec.BinaryAnnotation, len(s.BAnno))
|
||||
for i, a := range s.BAnno {
|
||||
if a.Key() != "" && a.Value() == "" {
|
||||
return nil, fmt.Errorf("No value for key %s at binaryAnnotations[%d]", a.K, i)
|
||||
}
|
||||
if a.Value() != "" && a.Key() == "" {
|
||||
return nil, fmt.Errorf("No key at binaryAnnotations[%d]", i)
|
||||
}
|
||||
res[i] = &s.BAnno[i]
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *span) Timestamp() time.Time {
|
||||
if s.Time == nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return codec.MicroToTime(*s.Time)
|
||||
}
|
||||
|
||||
func (s *span) Duration() time.Duration {
|
||||
if s.Dur == nil {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(*s.Dur) * time.Microsecond
|
||||
}
|
||||
|
||||
type annotation struct {
|
||||
Endpoint *endpoint `json:"endpoint,omitempty"`
|
||||
Time int64 `json:"timestamp"`
|
||||
Val string `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (a *annotation) Timestamp() time.Time {
|
||||
return codec.MicroToTime(a.Time)
|
||||
}
|
||||
|
||||
func (a *annotation) Value() string {
|
||||
return a.Val
|
||||
}
|
||||
|
||||
func (a *annotation) Host() codec.Endpoint {
|
||||
return a.Endpoint
|
||||
}
|
||||
|
||||
type binaryAnnotation struct {
|
||||
K string `json:"key"`
|
||||
V json.RawMessage `json:"value"`
|
||||
Type string `json:"type"`
|
||||
Endpoint *endpoint `json:"endpoint,omitempty"`
|
||||
}
|
||||
|
||||
func (b *binaryAnnotation) Key() string {
|
||||
return b.K
|
||||
}
|
||||
|
||||
func (b *binaryAnnotation) Value() string {
|
||||
t, err := zipkincore.AnnotationTypeFromString(b.Type)
|
||||
// Assume this is a string if we cannot tell the type
|
||||
if err != nil {
|
||||
t = zipkincore.AnnotationType_STRING
|
||||
}
|
||||
|
||||
switch t {
|
||||
case zipkincore.AnnotationType_BOOL:
|
||||
var v bool
|
||||
err := json.Unmarshal(b.V, &v)
|
||||
if err == nil {
|
||||
return strconv.FormatBool(v)
|
||||
}
|
||||
case zipkincore.AnnotationType_BYTES:
|
||||
return string(b.V)
|
||||
case zipkincore.AnnotationType_I16, zipkincore.AnnotationType_I32, zipkincore.AnnotationType_I64:
|
||||
var v int64
|
||||
err := json.Unmarshal(b.V, &v)
|
||||
if err == nil {
|
||||
return strconv.FormatInt(v, 10)
|
||||
}
|
||||
case zipkincore.AnnotationType_DOUBLE:
|
||||
var v float64
|
||||
err := json.Unmarshal(b.V, &v)
|
||||
if err == nil {
|
||||
return strconv.FormatFloat(v, 'f', -1, 64)
|
||||
}
|
||||
case zipkincore.AnnotationType_STRING:
|
||||
var v string
|
||||
err := json.Unmarshal(b.V, &v)
|
||||
if err == nil {
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (b *binaryAnnotation) Host() codec.Endpoint {
|
||||
return b.Endpoint
|
||||
}
|
||||
|
||||
type endpoint struct {
|
||||
ServiceName string `json:"serviceName"`
|
||||
Ipv4 string `json:"ipv4"`
|
||||
Ipv6 string `json:"ipv6,omitempty"`
|
||||
Port int `json:"port"`
|
||||
}
|
||||
|
||||
func (e *endpoint) Host() string {
|
||||
if e.Port != 0 {
|
||||
return fmt.Sprintf("%s:%d", e.Ipv4, e.Port)
|
||||
}
|
||||
return e.Ipv4
|
||||
}
|
||||
|
||||
func (e *endpoint) Name() string {
|
||||
return e.ServiceName
|
||||
}
|
||||
|
||||
// TraceIDFromString creates a TraceID from a hexadecimal string
|
||||
func TraceIDFromString(s string) (string, error) {
|
||||
var hi, lo uint64
|
||||
var err error
|
||||
if len(s) > 32 {
|
||||
return "", fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
|
||||
} else if len(s) > 16 {
|
||||
hiLen := len(s) - 16
|
||||
if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
if hi == 0 {
|
||||
return fmt.Sprintf("%x", lo), nil
|
||||
}
|
||||
return fmt.Sprintf("%x%016x", hi, lo), nil
|
||||
}
|
||||
|
||||
// IDFromString creates a decimal id from a hexadecimal string
|
||||
func IDFromString(s string) (string, error) {
|
||||
if len(s) > 16 {
|
||||
return "", fmt.Errorf("ID cannot be longer than 16 hex characters: %s", s)
|
||||
}
|
||||
id, err := strconv.ParseUint(s, 16, 64)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strconv.FormatUint(id, 10), nil
|
||||
}
|
||||
920
plugins/inputs/zipkin/codec/jsonV1/jsonV1_test.go
Normal file
@@ -0,0 +1,920 @@
|
||||
package jsonV1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
|
||||
)
|
||||
|
||||
func TestJSON_Decode(t *testing.T) {
|
||||
addr := func(i int64) *int64 { return &i }
|
||||
tests := []struct {
|
||||
name string
|
||||
octets []byte
|
||||
want []codec.Span
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "bad json is error",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
]`),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Decodes simple trace",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "6b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c"
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "6b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Decodes two spans",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "6b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c"
|
||||
},
|
||||
{
|
||||
"traceId": "6b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "c6946e9cb5d122b6",
|
||||
"parentId": "6b221d5bc9e6496c",
|
||||
"duration": 10000
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "6b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
},
|
||||
&span{
|
||||
TraceID: "6b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "c6946e9cb5d122b6",
|
||||
ParentID: "6b221d5bc9e6496c",
|
||||
Dur: addr(10000),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Decodes trace with timestamp",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "6b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"timestamp": 1503031538791000
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "6b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
Time: addr(1503031538791000),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Decodes simple trace with high and low trace id",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c"
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Error when trace id is null",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": null,
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c"
|
||||
}
|
||||
]`),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ignore null parentId",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"parentId": null
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ignore null timestamp",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"timestamp": null
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ignore null duration",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"duration": null
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ignore null annotation endpoint",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"annotations": [
|
||||
{
|
||||
"timestamp": 1461750491274000,
|
||||
"value": "cs",
|
||||
"endpoint": null
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
Anno: []annotation{
|
||||
{
|
||||
Time: 1461750491274000,
|
||||
Val: "cs",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ignore null binary annotation endpoint",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"key": "lc",
|
||||
"value": "JDBCSpanStore",
|
||||
"endpoint": null
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
BAnno: []binaryAnnotation{
|
||||
{
|
||||
K: "lc",
|
||||
V: json.RawMessage(`"JDBCSpanStore"`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Error when binary annotation has no key",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"value": "JDBCSpanStore",
|
||||
"endpoint": null
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Error when binary annotation has no value",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"key": "lc",
|
||||
"endpoint": null
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "binary annotation with endpoint",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"key": "lc",
|
||||
"value": "JDBCSpanStore",
|
||||
"endpoint": {
|
||||
"serviceName": "service",
|
||||
"port": 65535
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
BAnno: []binaryAnnotation{
|
||||
{
|
||||
K: "lc",
|
||||
V: json.RawMessage(`"JDBCSpanStore"`),
|
||||
Endpoint: &endpoint{
|
||||
ServiceName: "service",
|
||||
Port: 65535,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "binary annotation with double value",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"key": "num",
|
||||
"value": 1.23456789,
|
||||
"type": "DOUBLE"
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
BAnno: []binaryAnnotation{
|
||||
{
|
||||
K: "num",
|
||||
V: json.RawMessage{0x31, 0x2e, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39},
|
||||
Type: "DOUBLE",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "binary annotation with integer value",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"key": "num",
|
||||
"value": 1,
|
||||
"type": "I16"
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
BAnno: []binaryAnnotation{
|
||||
{
|
||||
K: "num",
|
||||
V: json.RawMessage{0x31},
|
||||
Type: "I16",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "binary annotation with bool value",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"key": "num",
|
||||
"value": true,
|
||||
"type": "BOOL"
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
BAnno: []binaryAnnotation{
|
||||
{
|
||||
K: "num",
|
||||
V: json.RawMessage(`true`),
|
||||
Type: "BOOL",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "binary annotation with bytes value",
|
||||
octets: []byte(`
|
||||
[
|
||||
{
|
||||
"traceId": "48485a3953bb61246b221d5bc9e6496c",
|
||||
"name": "get-traces",
|
||||
"id": "6b221d5bc9e6496c",
|
||||
"binaryAnnotations": [
|
||||
{
|
||||
"key": "num",
|
||||
"value": "1",
|
||||
"type": "BYTES"
|
||||
}
|
||||
]
|
||||
}
|
||||
]`),
|
||||
want: []codec.Span{
|
||||
&span{
|
||||
TraceID: "48485a3953bb61246b221d5bc9e6496c",
|
||||
SpanName: "get-traces",
|
||||
ID: "6b221d5bc9e6496c",
|
||||
BAnno: []binaryAnnotation{
|
||||
{
|
||||
K: "num",
|
||||
V: json.RawMessage(`"1"`),
|
||||
Type: "BYTES",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
j := &JSON{}
|
||||
got, err := j.Decode(tt.octets)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("JSON.Decode() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !cmp.Equal(tt.want, got) {
|
||||
t.Errorf("JSON.Decode() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}

func Test_span_Trace(t *testing.T) {
    tests := []struct {
        name    string
        TraceID string
        want    string
        wantErr bool
    }{
        {
            name:    "Trace IDs cannot be null",
            TraceID: "",
            wantErr: true,
        },
        {
            name:    "converts hex string correctly",
            TraceID: "deadbeef",
            want:    "deadbeef",
        },
        {
            name:    "converts high and low trace id correctly",
            TraceID: "48485a3953bb61246b221d5bc9e6496c",
            want:    "48485a3953bb61246b221d5bc9e6496c",
        },
        {
            name:    "errors when string isn't hex",
            TraceID: "oxdeadbeef",
            wantErr: true,
        },
        {
            name:    "errors when id is too long",
            TraceID: "1234567890abcdef1234567890abcdef1",
            wantErr: true,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := &span{
                TraceID: tt.TraceID,
            }
            got, err := s.Trace()
            if (err != nil) != tt.wantErr {
                t.Errorf("span.Trace() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !cmp.Equal(tt.want, got) {
                t.Errorf("span.Trace() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
            }
        })
    }
}

func Test_span_SpanID(t *testing.T) {
    tests := []struct {
        name    string
        ID      string
        want    string
        wantErr bool
    }{
        {
            name:    "Span IDs cannot be null",
            ID:      "",
            wantErr: true,
        },
        {
            name: "converts known id correctly",
            ID:   "b26412d1ac16767d",
            want: "12854419928166856317",
        },
        {
            name: "converts hex string correctly",
            ID:   "deadbeef",
            want: "3735928559",
        },
        {
            name:    "errors when string isn't hex",
            ID:      "oxdeadbeef",
            wantErr: true,
        },
        {
            name:    "errors when id is too long",
            ID:      "1234567890abcdef1",
            wantErr: true,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := &span{
                ID: tt.ID,
            }
            got, err := s.SpanID()
            if (err != nil) != tt.wantErr {
                t.Errorf("span.SpanID() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !cmp.Equal(tt.want, got) {
                t.Errorf("span.SpanID() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
            }
        })
    }
}

func Test_span_Parent(t *testing.T) {
    tests := []struct {
        name     string
        ParentID string
        want     string
        wantErr  bool
    }{
        {
            name:     "when there is no parent return empty string",
            ParentID: "",
            want:     "",
        },
        {
            name:     "converts hex string correctly",
            ParentID: "deadbeef",
            want:     "3735928559",
        },
        {
            name:     "errors when string isn't hex",
            ParentID: "oxdeadbeef",
            wantErr:  true,
        },
        {
            name:     "errors when parent id is too long",
            ParentID: "1234567890abcdef1",
            wantErr:  true,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := &span{
                ParentID: tt.ParentID,
            }
            got, err := s.Parent()
            if (err != nil) != tt.wantErr {
                t.Errorf("span.Parent() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !cmp.Equal(tt.want, got) {
                t.Errorf("span.Parent() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
            }
        })
    }
}

func Test_span_Timestamp(t *testing.T) {
    tests := []struct {
        name string
        Time *int64
        want time.Time
    }{
        {
            name: "converts to microseconds",
            Time: func(i int64) *int64 { return &i }(3000000),
            want: time.Unix(3, 0).UTC(),
        },
        {
            name: "nil time should be zero time",
            Time: nil,
            want: time.Time{},
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := &span{
                Time: tt.Time,
            }
            if got := s.Timestamp(); !cmp.Equal(tt.want, got) {
                t.Errorf("span.Timestamp() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
            }
        })
    }
}

func Test_span_Duration(t *testing.T) {
    tests := []struct {
        name string
        dur  *int64
        want time.Duration
    }{
        {
            name: "converts from 3 microseconds",
            dur:  func(i int64) *int64 { return &i }(3000000),
            want: 3 * time.Second,
        },
        {
            name: "nil time should be zero duration",
            dur:  nil,
            want: 0,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := &span{
                Dur: tt.dur,
            }
            if got := s.Duration(); got != tt.want {
                t.Errorf("span.Duration() = %v, want %v", got, tt.want)
            }
        })
    }
}
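The arithmetic behind the first Duration case above: Zipkin reports durations in microseconds, so 3,000,000 µs scaled by `time.Microsecond` is exactly 3 s. A self-contained check:

package main

import (
    "fmt"
    "time"
)

func main() {
    // 3,000,000 µs * 1,000 ns/µs = 3,000,000,000 ns = 3 s.
    d := time.Duration(3000000) * time.Microsecond
    fmt.Println(d == 3*time.Second) // true
}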

func Test_annotation(t *testing.T) {
    type fields struct {
        Endpoint *endpoint
        Time     int64
        Val      string
    }
    tests := []struct {
        name     string
        fields   fields
        tm       time.Time
        val      string
        endpoint *endpoint
    }{
        {
            name: "returns all fields",
            fields: fields{
                Time: 3000000,
                Val:  "myvalue",
                Endpoint: &endpoint{
                    ServiceName: "myservice",
                    Ipv4:        "127.0.0.1",
                    Port:        443,
                },
            },
            tm:  time.Unix(3, 0).UTC(),
            val: "myvalue",
            endpoint: &endpoint{
                ServiceName: "myservice",
                Ipv4:        "127.0.0.1",
                Port:        443,
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            an := annotation(tt.fields)
            a := &an
            if got := a.Timestamp(); got != tt.tm {
                t.Errorf("annotation.Timestamp() = %v, want %v", got, tt.tm)
            }
            if got := a.Value(); got != tt.val {
                t.Errorf("annotation.Value() = %v, want %v", got, tt.val)
            }
            if got := a.Host(); !cmp.Equal(tt.endpoint, got) {
                t.Errorf("annotation.Endpoint() = %v, want %v", got, tt.endpoint)
            }
        })
    }
}

func Test_binaryAnnotation(t *testing.T) {
    type fields struct {
        K        string
        V        json.RawMessage
        Type     string
        Endpoint *endpoint
    }
    tests := []struct {
        name     string
        fields   fields
        key      string
        value    string
        endpoint *endpoint
    }{
        {
            name: "returns all fields",
            fields: fields{
                K: "key",
                V: json.RawMessage(`"value"`),
                Endpoint: &endpoint{
                    ServiceName: "myservice",
                    Ipv4:        "127.0.0.1",
                    Port:        443,
                },
            },
            key:   "key",
            value: "value",
            endpoint: &endpoint{
                ServiceName: "myservice",
                Ipv4:        "127.0.0.1",
                Port:        443,
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            bin := binaryAnnotation(tt.fields)
            b := &bin
            if got := b.Key(); got != tt.key {
                t.Errorf("binaryAnnotation.Key() = %v, want %v", got, tt.key)
            }
            if got := b.Value(); got != tt.value {
                t.Errorf("binaryAnnotation.Value() = %v, want %v", got, tt.value)
            }
            if got := b.Host(); !cmp.Equal(tt.endpoint, got) {
                t.Errorf("binaryAnnotation.Endpoint() = %v, want %v", got, tt.endpoint)
            }
        })
    }
}

func Test_endpoint_Host(t *testing.T) {
    type fields struct {
        Ipv4 string
        Port int
    }
    tests := []struct {
        name   string
        fields fields
        want   string
    }{
        {
            name: "with port",
            fields: fields{
                Ipv4: "127.0.0.1",
                Port: 443,
            },
            want: "127.0.0.1:443",
        },
        {
            name: "no port",
            fields: fields{
                Ipv4: "127.0.0.1",
            },
            want: "127.0.0.1",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            e := &endpoint{
                Ipv4: tt.fields.Ipv4,
                Port: tt.fields.Port,
            }
            if got := e.Host(); got != tt.want {
                t.Errorf("endpoint.Host() = %v, want %v", got, tt.want)
            }
        })
    }
}

func Test_endpoint_Name(t *testing.T) {
    tests := []struct {
        name        string
        ServiceName string
        want        string
    }{
        {
            name:        "has service name",
            ServiceName: "myservicename",
            want:        "myservicename",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            e := &endpoint{
                ServiceName: tt.ServiceName,
            }
            if got := e.Name(); got != tt.want {
                t.Errorf("endpoint.Name() = %v, want %v", got, tt.want)
            }
        })
    }
}

func TestTraceIDFromString(t *testing.T) {
    tests := []struct {
        name    string
        s       string
        want    string
        wantErr bool
    }{
        {
            name: "Convert hex string id",
            s:    "6b221d5bc9e6496c",
            want: "6b221d5bc9e6496c",
        },
        {
            name:    "error : id too long",
            s:       "1234567890abcdef1234567890abcdef1",
            wantErr: true,
        },
        {
            name:    "error : not parsable",
            s:       "howdyhowdyhowdy",
            wantErr: true,
        },
        {
            name: "Convert hex string with high/low",
            s:    "48485a3953bb61246b221d5bc9e6496c",
            want: "48485a3953bb61246b221d5bc9e6496c",
        },
        {
            name:    "errors in high",
            s:       "ERR85a3953bb61246b221d5bc9e6496c",
            wantErr: true,
        },
        {
            name:    "errors in low",
            s:       "48485a3953bb61246b221d5bc9e64ERR",
            wantErr: true,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := TraceIDFromString(tt.s)
            if (err != nil) != tt.wantErr {
                t.Errorf("TraceIDFromString() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if got != tt.want {
                t.Errorf("TraceIDFromString() = %v, want %v", got, tt.want)
            }
        })
    }
}
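The length limits these cases encode follow from parsing IDs as unsigned 64-bit hex values: 16 hex digits fill one uint64, 17 to 32 digits are split into high and low halves, and longer or non-hex input fails. A rough sketch of that rule, assuming Go's strconv; it mirrors the behavior asserted here, not necessarily the plugin's exact source:

package main

import (
    "fmt"
    "strconv"
)

// traceIDFromString sketches the high/low split: a uint64 holds at
// most 16 hex digits, so longer IDs are parsed as two halves.
func traceIDFromString(s string) (string, error) {
    if len(s) > 32 {
        return "", fmt.Errorf("id %q too long", s)
    }
    if len(s) > 16 {
        high, err := strconv.ParseUint(s[:len(s)-16], 16, 64)
        if err != nil {
            return "", err
        }
        low, err := strconv.ParseUint(s[len(s)-16:], 16, 64)
        if err != nil {
            return "", err
        }
        return fmt.Sprintf("%x%016x", high, low), nil
    }
    id, err := strconv.ParseUint(s, 16, 64)
    if err != nil {
        return "", err
    }
    return fmt.Sprintf("%x", id), nil
}

func main() {
    fmt.Println(traceIDFromString("48485a3953bb61246b221d5bc9e6496c"))
}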

func TestIDFromString(t *testing.T) {
    tests := []struct {
        name    string
        s       string
        want    string
        wantErr bool
    }{
        {
            name: "Convert hex string id",
            s:    "6b221d5bc9e6496c",
            want: "7719764991332993388",
        },
        {
            name:    "error : id too long",
            s:       "1234567890abcdef1",
            wantErr: true,
        },
        {
            name:    "error : not parsable",
            s:       "howdyhowdyhowdy",
            wantErr: true,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := IDFromString(tt.s)
            if (err != nil) != tt.wantErr {
                t.Errorf("IDFromString() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if got != tt.want {
                t.Errorf("IDFromString() = %v, want %v", got, tt.want)
            }
        })
    }
}
203
plugins/inputs/zipkin/codec/thrift/thrift.go
Normal file
@@ -0,0 +1,203 @@
package thrift

import (
    "encoding/binary"
    "fmt"
    "net"
    "strconv"
    "time"

    "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"

    "github.com/apache/thrift/lib/go/thrift"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)

// UnmarshalThrift converts raw bytes in thrift format to a slice of spans
func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) {
    buffer := thrift.NewTMemoryBuffer()
    if _, err := buffer.Write(body); err != nil {
        return nil, err
    }

    transport := thrift.NewTBinaryProtocolTransport(buffer)
    _, size, err := transport.ReadListBegin()
    if err != nil {
        return nil, err
    }

    spans := make([]*zipkincore.Span, size)
    for i := 0; i < size; i++ {
        zs := &zipkincore.Span{}
        if err = zs.Read(transport); err != nil {
            return nil, err
        }
        spans[i] = zs
    }

    if err = transport.ReadListEnd(); err != nil {
        return nil, err
    }
    return spans, nil
}

// Thrift decodes binary data to create a Trace
type Thrift struct{}

// Decode unmarshals and validates bytes in thrift format
func (t *Thrift) Decode(octets []byte) ([]codec.Span, error) {
    spans, err := UnmarshalThrift(octets)
    if err != nil {
        return nil, err
    }

    res := make([]codec.Span, len(spans))
    for i, s := range spans {
        res[i] = &span{s}
    }
    return res, nil
}

var _ codec.Endpoint = &endpoint{}

type endpoint struct {
    *zipkincore.Endpoint
}

func (e *endpoint) Host() string {
    ipv4 := func(addr int32) string {
        buf := make([]byte, 4)
        binary.BigEndian.PutUint32(buf, uint32(addr))
        return net.IP(buf).String()
    }

    if e.Endpoint == nil {
        return ipv4(int32(0))
    }
    if e.Endpoint.GetPort() == 0 {
        return ipv4(e.Endpoint.GetIpv4())
    }
    // Zipkin uses a signed int16 for the port, but warns that it actually
    // treats it as an unsigned int16. So we convert from int16 to int32 and
    // then mask with 0xffff to convert from signed to unsigned.
    // https://github.com/openzipkin/zipkin/blob/57dc2ec9c65fe6144e401c0c933b4400463a69df/zipkin/src/main/java/zipkin/Endpoint.java#L44
    return ipv4(e.Endpoint.GetIpv4()) + ":" + strconv.FormatInt(int64(int(e.Endpoint.GetPort())&0xffff), 10)
}

func (e *endpoint) Name() string {
    if e.Endpoint == nil {
        return codec.DefaultServiceName
    }
    return e.Endpoint.GetServiceName()
}

var _ codec.BinaryAnnotation = &binaryAnnotation{}

type binaryAnnotation struct {
    *zipkincore.BinaryAnnotation
}

func (b *binaryAnnotation) Key() string {
    return b.BinaryAnnotation.GetKey()
}

func (b *binaryAnnotation) Value() string {
    return string(b.BinaryAnnotation.GetValue())
}

func (b *binaryAnnotation) Host() codec.Endpoint {
    if b.BinaryAnnotation.Host == nil {
        return nil
    }
    return &endpoint{b.BinaryAnnotation.Host}
}

var _ codec.Annotation = &annotation{}

type annotation struct {
    *zipkincore.Annotation
}

func (a *annotation) Timestamp() time.Time {
    ts := a.Annotation.GetTimestamp()
    if ts == 0 {
        return time.Time{}
    }
    return codec.MicroToTime(ts)
}

func (a *annotation) Value() string {
    return a.Annotation.GetValue()
}

func (a *annotation) Host() codec.Endpoint {
    if a.Annotation.Host == nil {
        return nil
    }
    return &endpoint{a.Annotation.Host}
}

var _ codec.Span = &span{}

type span struct {
    *zipkincore.Span
}

func (s *span) Trace() (string, error) {
    if s.Span.GetTraceIDHigh() == 0 && s.Span.GetTraceID() == 0 {
        return "", fmt.Errorf("Span does not have a trace ID")
    }

    if s.Span.GetTraceIDHigh() == 0 {
        return fmt.Sprintf("%x", s.Span.GetTraceID()), nil
    }
    return fmt.Sprintf("%x%016x", s.Span.GetTraceIDHigh(), s.Span.GetTraceID()), nil
}

func (s *span) SpanID() (string, error) {
    return formatID(s.Span.GetID()), nil
}

func (s *span) Parent() (string, error) {
    id := s.Span.GetParentID()
    if id != 0 {
        return formatID(id), nil
    }
    return "", nil
}

func (s *span) Name() string {
    return s.Span.GetName()
}

func (s *span) Annotations() []codec.Annotation {
    res := make([]codec.Annotation, len(s.Span.Annotations))
    for i := range s.Span.Annotations {
        res[i] = &annotation{s.Span.Annotations[i]}
    }
    return res
}

func (s *span) BinaryAnnotations() ([]codec.BinaryAnnotation, error) {
    res := make([]codec.BinaryAnnotation, len(s.Span.BinaryAnnotations))
    for i := range s.Span.BinaryAnnotations {
        res[i] = &binaryAnnotation{s.Span.BinaryAnnotations[i]}
    }
    return res, nil
}

func (s *span) Timestamp() time.Time {
    ts := s.Span.GetTimestamp()
    if ts == 0 {
        return time.Time{}
    }
    return codec.MicroToTime(ts)
}

func (s *span) Duration() time.Duration {
    return time.Duration(s.Span.GetDuration()) * time.Microsecond
}

func formatID(id int64) string {
    return strconv.FormatInt(id, 10)
}
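The `& 0xffff` mask in `Host()` above is what turns Thrift's signed int16 port back into the unsigned value Zipkin intends, so a wire value of -1 comes out as 65535; the `Test_endpointHost` cases in the test file below assert exactly this. A tiny illustration:

package main

import "fmt"

func main() {
    // Thrift carries the port as a signed int16, but Zipkin treats it as
    // unsigned: widen to int, then mask off all but the low 16 bits.
    port := int16(-1)
    fmt.Println(int(port) & 0xffff) // 65535
}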
211
plugins/inputs/zipkin/codec/thrift/thrift_test.go
Normal file
@@ -0,0 +1,211 @@
package thrift

import (
    "io/ioutil"
    "testing"

    "github.com/google/go-cmp/cmp"

    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)

func Test_endpointHost(t *testing.T) {
    type args struct {
        h *zipkincore.Endpoint
    }
    tests := []struct {
        name string
        args args
        want string
    }{
        {
            name: "Host Found",
            args: args{
                h: &zipkincore.Endpoint{
                    Ipv4: 1234,
                    Port: 8888,
                },
            },
            want: "0.0.4.210:8888",
        },
        {
            name: "No Host",
            args: args{
                h: nil,
            },
            want: "0.0.0.0",
        },
        {
            name: "int overflow zipkin uses an int16 type as an unsigned int 16.",
            args: args{
                h: &zipkincore.Endpoint{
                    Ipv4: 1234,
                    Port: -1,
                },
            },
            want: "0.0.4.210:65535",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            e := endpoint{tt.args.h}
            if got := e.Host(); got != tt.want {
                t.Errorf("host() = %v, want %v", got, tt.want)
            }
        })
    }
}

func Test_endpointName(t *testing.T) {
    type args struct {
        h *zipkincore.Endpoint
    }
    tests := []struct {
        name string
        args args
        want string
    }{
        {
            name: "Found ServiceName",
            args: args{
                h: &zipkincore.Endpoint{
                    ServiceName: "zipkin",
                },
            },
            want: "zipkin",
        },
        {
            name: "No ServiceName",
            args: args{
                h: nil,
            },
            want: "unknown",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            e := endpoint{tt.args.h}
            if got := e.Name(); got != tt.want {
                t.Errorf("serviceName() = %v, want %v", got, tt.want)
            }
        })
    }
}

func TestUnmarshalThrift(t *testing.T) {
    addr := func(i int64) *int64 { return &i }
    tests := []struct {
        name     string
        filename string
        want     []*zipkincore.Span
        wantErr  bool
    }{
        {
            name:     "threespans",
            filename: "../../testdata/threespans.dat",
            want: []*zipkincore.Span{
                {
                    TraceID:     2505404965370368069,
                    Name:        "Child",
                    ID:          8090652509916334619,
                    ParentID:    addr(22964302721410078),
                    Timestamp:   addr(1498688360851331),
                    Duration:    addr(53106),
                    Annotations: []*zipkincore.Annotation{},
                    BinaryAnnotations: []*zipkincore.BinaryAnnotation{
                        &zipkincore.BinaryAnnotation{
                            Key:            "lc",
                            AnnotationType: zipkincore.AnnotationType_STRING,
                            Value:          []byte("trivial"),
                            Host: &zipkincore.Endpoint{
                                Ipv4:        2130706433,
                                ServiceName: "trivial",
                            },
                        },
                    },
                },
                {
                    TraceID:     2505404965370368069,
                    Name:        "Child",
                    ID:          103618986556047333,
                    ParentID:    addr(22964302721410078),
                    Timestamp:   addr(1498688360904552),
                    Duration:    addr(50410),
                    Annotations: []*zipkincore.Annotation{},
                    BinaryAnnotations: []*zipkincore.BinaryAnnotation{
                        &zipkincore.BinaryAnnotation{
                            Key:            "lc",
                            AnnotationType: zipkincore.AnnotationType_STRING,
                            Value:          []byte("trivial"),
                            Host: &zipkincore.Endpoint{
                                Ipv4:        2130706433,
                                ServiceName: "trivial",
                            },
                        },
                    },
                },
                {
                    TraceID:   2505404965370368069,
                    Name:      "Parent",
                    ID:        22964302721410078,
                    Timestamp: addr(1498688360851318),
                    Duration:  addr(103680),
                    Annotations: []*zipkincore.Annotation{
                        &zipkincore.Annotation{
                            Timestamp: 1498688360851325,
                            Value:     "Starting child #0",
                            Host: &zipkincore.Endpoint{
                                Ipv4:        2130706433,
                                ServiceName: "trivial",
                            },
                        },
                        &zipkincore.Annotation{
                            Timestamp: 1498688360904545,
                            Value:     "Starting child #1",
                            Host: &zipkincore.Endpoint{
                                Ipv4:        2130706433,
                                ServiceName: "trivial",
                            },
                        },
                        &zipkincore.Annotation{
                            Timestamp: 1498688360954992,
                            Value:     "A Log",
                            Host: &zipkincore.Endpoint{
                                Ipv4:        2130706433,
                                ServiceName: "trivial",
                            },
                        },
                    },
                    BinaryAnnotations: []*zipkincore.BinaryAnnotation{
                        &zipkincore.BinaryAnnotation{
                            Key:            "lc",
                            AnnotationType: zipkincore.AnnotationType_STRING,
                            Value:          []byte("trivial"),
                            Host: &zipkincore.Endpoint{
                                Ipv4:        2130706433,
                                ServiceName: "trivial",
                            },
                        },
                    },
                },
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            dat, err := ioutil.ReadFile(tt.filename)
            if err != nil {
                t.Fatalf("Could not find file %s\n", tt.filename)
            }

            got, err := UnmarshalThrift(dat)
            if (err != nil) != tt.wantErr {
                t.Errorf("UnmarshalThrift() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !cmp.Equal(tt.want, got) {
                t.Errorf("UnmarshalThrift() got(-)/want(+): %s", cmp.Diff(tt.want, got))
            }
        })
    }
}
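Putting the codec together, decoding a captured Thrift payload into the codec's span interface looks roughly like the sketch below; the fixture path is an assumption (it presumes running from the repository root), and only methods defined above (`Decode`, `Trace`, `Name`) are used:

package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift"
)

func main() {
    // threespans.dat is the binary Thrift fixture the tests above read.
    octets, err := ioutil.ReadFile("plugins/inputs/zipkin/testdata/threespans.dat")
    if err != nil {
        log.Fatal(err)
    }
    decoder := &thrift.Thrift{}
    spans, err := decoder.Decode(octets)
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range spans {
        traceID, _ := s.Trace()
        fmt.Println(traceID, s.Name())
    }
}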
@@ -1,22 +1,10 @@
package zipkin

import (
    "encoding/binary"
    "fmt"
    "net"
    "strconv"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
)

// DefaultServiceName when the span does not have any serviceName
const DefaultServiceName = "unknown"

// now is a mockable time.Now, for testing
var now = time.Now

// LineProtocolConverter implements the Recorder interface; it is a
// type meant to encapsulate the storage of zipkin tracing data in
// telegraf as line protocol.
@@ -35,7 +23,7 @@ func NewLineProtocolConverter(acc telegraf.Accumulator) *LineProtocolConverter {
// Record is LineProtocolConverter's implementation of the Record method of
// the Recorder interface; it takes a trace as input, and adds it to an internal
// telegraf.Accumulator.
func (l *LineProtocolConverter) Record(t Trace) error {
func (l *LineProtocolConverter) Record(t trace.Trace) error {
    for _, s := range t {
        fields := map[string]interface{}{
            "duration_ns": s.Duration.Nanoseconds(),
@@ -83,167 +71,3 @@ func (l *LineProtocolConverter) Record(t Trace) error {
func (l *LineProtocolConverter) Error(err error) {
    l.acc.AddError(err)
}

// NewTrace converts a slice of *zipkincore.Span into a new Trace
func NewTrace(spans []*zipkincore.Span) Trace {
    trace := make(Trace, len(spans))
    for i, span := range spans {
        endpoint := serviceEndpoint(span.GetAnnotations(), span.GetBinaryAnnotations())
        trace[i] = Span{
            ID:                formatID(span.GetID()),
            TraceID:           formatTraceID(span.GetTraceIDHigh(), span.GetTraceID()),
            Name:              span.GetName(),
            Timestamp:         guessTimestamp(span),
            Duration:          convertDuration(span),
            ParentID:          parentID(span),
            ServiceName:       serviceName(endpoint),
            Annotations:       NewAnnotations(span.GetAnnotations(), endpoint),
            BinaryAnnotations: NewBinaryAnnotations(span.GetBinaryAnnotations(), endpoint),
        }
    }
    return trace
}

// NewAnnotations converts a slice of *zipkincore.Annotation into a slice
// of new Annotations
func NewAnnotations(annotations []*zipkincore.Annotation, endpoint *zipkincore.Endpoint) []Annotation {
    formatted := make([]Annotation, len(annotations))
    for i, annotation := range annotations {
        formatted[i] = Annotation{
            Host:        host(endpoint),
            ServiceName: serviceName(endpoint),
            Timestamp:   microToTime(annotation.GetTimestamp()),
            Value:       annotation.GetValue(),
        }
    }

    return formatted
}

// NewBinaryAnnotations is very similar to NewAnnotations, but it
// converts zipkincore.BinaryAnnotations instead of the normal zipkincore.Annotation
func NewBinaryAnnotations(annotations []*zipkincore.BinaryAnnotation, endpoint *zipkincore.Endpoint) []BinaryAnnotation {
    formatted := make([]BinaryAnnotation, len(annotations))
    for i, annotation := range annotations {
        formatted[i] = BinaryAnnotation{
            Host:        host(endpoint),
            ServiceName: serviceName(endpoint),
            Key:         annotation.GetKey(),
            Value:       string(annotation.GetValue()),
            Type:        annotation.GetAnnotationType().String(),
        }
    }
    return formatted
}

func microToTime(micro int64) time.Time {
    return time.Unix(0, micro*int64(time.Microsecond)).UTC()
}

func formatID(id int64) string {
    return strconv.FormatInt(id, 10)
}

func formatTraceID(high, low int64) string {
    if high == 0 {
        return fmt.Sprintf("%x", low)
    }
    return fmt.Sprintf("%x%016x", high, low)
}

func minMax(span *zipkincore.Span) (time.Time, time.Time) {
    min := now().UTC()
    max := time.Time{}.UTC()
    for _, annotation := range span.Annotations {
        ts := microToTime(annotation.GetTimestamp())
        if !ts.IsZero() && ts.Before(min) {
            min = ts
        }
        if !ts.IsZero() && ts.After(max) {
            max = ts
        }
    }
    if max.IsZero() {
        max = min
    }
    return min, max
}

func guessTimestamp(span *zipkincore.Span) time.Time {
    if span.GetTimestamp() != 0 {
        return microToTime(span.GetTimestamp())
    }
    min, _ := minMax(span)
    return min
}

func convertDuration(span *zipkincore.Span) time.Duration {
    duration := time.Duration(span.GetDuration()) * time.Microsecond
    if duration != 0 {
        return duration
    }
    min, max := minMax(span)
    return max.Sub(min)
}

func parentID(span *zipkincore.Span) string {
    // A parent ID of 0 means that this is a parent span. In this case,
    // we set the parent ID of the span to be its own id, so it points to
    // itself.
    id := span.GetParentID()
    if id != 0 {
        return formatID(id)
    }
    return formatID(span.ID)
}

func ipv4(addr int32) string {
    buf := make([]byte, 4)
    binary.BigEndian.PutUint32(buf, uint32(addr))
    return net.IP(buf).String()
}

func host(h *zipkincore.Endpoint) string {
    if h == nil {
        return ipv4(int32(0))
    }
    if h.GetPort() == 0 {
        return ipv4(h.GetIpv4())
    }
    // Zipkin uses a signed int16 for the port, but warns that it actually
    // treats it as an unsigned int16. So we convert from int16 to int32 and
    // then mask with 0xffff to convert from signed to unsigned.
    // https://github.com/openzipkin/zipkin/blob/57dc2ec9c65fe6144e401c0c933b4400463a69df/zipkin/src/main/java/zipkin/Endpoint.java#L44
    return ipv4(h.GetIpv4()) + ":" + strconv.FormatInt(int64(int(h.GetPort())&0xffff), 10)
}

func serviceName(h *zipkincore.Endpoint) string {
    if h == nil {
        return DefaultServiceName
    }
    return h.GetServiceName()
}

func serviceEndpoint(ann []*zipkincore.Annotation, bann []*zipkincore.BinaryAnnotation) *zipkincore.Endpoint {
    for _, a := range ann {
        switch a.Value {
        case zipkincore.SERVER_RECV, zipkincore.SERVER_SEND, zipkincore.CLIENT_RECV, zipkincore.CLIENT_SEND:
            if a.Host != nil && a.Host.ServiceName != "" {
                return a.Host
            }
        }
    }

    for _, a := range bann {
        if a.Key == zipkincore.LOCAL_COMPONENT && a.Host != nil && a.Host.ServiceName != "" {
            return a.Host
        }
    }
    // Unable to find any "standard" endpoint host, so use any that exist in the regular annotations
    for _, a := range ann {
        if a.Host != nil && a.Host.ServiceName != "" {
            return a.Host
        }
    }
    return nil
}
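As the helpers above show, Zipkin timestamps arrive as microseconds since the epoch, so `microToTime` scales by `time.Microsecond` before handing the value to `time.Unix`; and when a span carries no duration, `convertDuration` falls back to the spread between the earliest and latest annotation timestamps from `minMax`. A quick illustration of the conversion:

package main

import (
    "fmt"
    "time"
)

// microToTime mirrors the helper above: microseconds since the epoch,
// scaled to nanoseconds for time.Unix.
func microToTime(micro int64) time.Time {
    return time.Unix(0, micro*int64(time.Microsecond)).UTC()
}

func main() {
    fmt.Println(microToTime(1498688360851331)) // 2017-06-28 22:19:20.851331 +0000 UTC
}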
@@ -1,14 +1,13 @@
package zipkin

import (
    "reflect"
    "testing"
    "time"

    "github.com/google/go-cmp/cmp"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
    "github.com/influxdata/telegraf/testutil"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)

func TestLineProtocolConverter_Record(t *testing.T) {
@@ -17,7 +16,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
        acc telegraf.Accumulator
    }
    type args struct {
        t Trace
        t trace.Trace
    }
    tests := []struct {
        name string
@@ -32,8 +31,8 @@ func TestLineProtocolConverter_Record(t *testing.T) {
                acc: &mockAcc,
            },
            args: args{
                t: Trace{
                    Span{
                t: trace.Trace{
                    {
                        ID:      "8090652509916334619",
                        TraceID: "2505404965370368069",
                        Name:    "Child",
@@ -41,18 +40,17 @@ func TestLineProtocolConverter_Record(t *testing.T) {
                        Timestamp:   time.Unix(0, 1498688360851331000).UTC(),
                        Duration:    time.Duration(53106) * time.Microsecond,
                        ServiceName: "trivial",
                        Annotations: []Annotation{},
                        BinaryAnnotations: []BinaryAnnotation{
                            BinaryAnnotation{
                        Annotations: []trace.Annotation{},
                        BinaryAnnotations: []trace.BinaryAnnotation{
                            {
                                Key:         "lc",
                                Value:       "dHJpdmlhbA==",
                                Host:        "2130706433:0",
                                ServiceName: "trivial",
                                Type:        "STRING",
                            },
                        },
                    },
                    Span{
                    {
                        ID:      "103618986556047333",
                        TraceID: "2505404965370368069",
                        Name:    "Child",
@@ -60,18 +58,17 @@ func TestLineProtocolConverter_Record(t *testing.T) {
                        Timestamp:   time.Unix(0, 1498688360904552000).UTC(),
                        Duration:    time.Duration(50410) * time.Microsecond,
                        ServiceName: "trivial",
                        Annotations: []Annotation{},
                        BinaryAnnotations: []BinaryAnnotation{
                            BinaryAnnotation{
                        Annotations: []trace.Annotation{},
                        BinaryAnnotations: []trace.BinaryAnnotation{
                            {
                                Key:         "lc",
                                Value:       "dHJpdmlhbA==",
                                Host:        "2130706433:0",
                                ServiceName: "trivial",
                                Type:        "STRING",
                            },
                        },
                    },
                    Span{
                    {
                        ID:      "22964302721410078",
                        TraceID: "2505404965370368069",
                        Name:    "Parent",
@@ -79,33 +76,32 @@ func TestLineProtocolConverter_Record(t *testing.T) {
                        Timestamp:   time.Unix(0, 1498688360851318000).UTC(),
                        Duration:    time.Duration(103680) * time.Microsecond,
                        ServiceName: "trivial",
                        Annotations: []Annotation{
                            Annotation{
                        Annotations: []trace.Annotation{
                            {
                                Timestamp:   time.Unix(0, 1498688360851325000).UTC(),
                                Value:       "Starting child #0",
                                Host:        "2130706433:0",
                                ServiceName: "trivial",
                            },
                            Annotation{
                            {
                                Timestamp:   time.Unix(0, 1498688360904545000).UTC(),
                                Value:       "Starting child #1",
                                Host:        "2130706433:0",
                                ServiceName: "trivial",
                            },
                            Annotation{
                            {
                                Timestamp:   time.Unix(0, 1498688360954992000).UTC(),
                                Value:       "A Log",
                                Host:        "2130706433:0",
                                ServiceName: "trivial",
                            },
                        },
                        BinaryAnnotations: []BinaryAnnotation{
                            BinaryAnnotation{
                        BinaryAnnotations: []trace.BinaryAnnotation{
                            {
                                Key:         "lc",
                                Value:       "dHJpdmlhbA==",
                                Host:        "2130706433:0",
                                ServiceName: "trivial",
                                Type:        "STRING",
                            },
                        },
                    },
@@ -265,8 +261,8 @@ func TestLineProtocolConverter_Record(t *testing.T) {
                acc: &mockAcc,
            },
            args: args{
                t: Trace{
                    Span{
                t: trace.Trace{
                    {
                        ID:      "6802735349851856000",
                        TraceID: "0:6802735349851856000",
                        Name:    "main.dud",
@@ -274,15 +270,15 @@ func TestLineProtocolConverter_Record(t *testing.T) {
                        Timestamp:   time.Unix(1, 0).UTC(),
                        Duration:    1,
                        ServiceName: "trivial",
                        Annotations: []Annotation{
                            Annotation{
                        Annotations: []trace.Annotation{
                            {
                                Timestamp:   time.Unix(0, 1433330263415871000).UTC(),
                                Value:       "cs",
                                Host:        "0:9410",
                                ServiceName: "go-zipkin-testclient",
                            },
                        },
                        BinaryAnnotations: []BinaryAnnotation{},
                        BinaryAnnotations: []trace.BinaryAnnotation{},
                    },
                },
            },
@@ -339,206 +335,3 @@ func TestLineProtocolConverter_Record(t *testing.T) {
        })
    }
}

func Test_microToTime(t *testing.T) {
    type args struct {
        micro int64
    }
    tests := []struct {
        name string
        args args
        want time.Time
    }{
        {
            name: "given zero micro seconds expected unix time zero",
            args: args{
                micro: 0,
            },
            want: time.Unix(0, 0).UTC(),
        },
        {
            name: "given a million micro seconds expected unix time one",
            args: args{
                micro: 1000000,
            },
            want: time.Unix(1, 0).UTC(),
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := microToTime(tt.args.micro); !reflect.DeepEqual(got, tt.want) {
                t.Errorf("microToTime() = %v, want %v", got, tt.want)
            }
        })
    }
}

func newAnnotation(micro int64) *zipkincore.Annotation {
    return &zipkincore.Annotation{
        Timestamp: micro,
    }
}

func Test_minMax(t *testing.T) {
    type args struct {
        span *zipkincore.Span
    }
    tests := []struct {
        name    string
        args    args
        now     func() time.Time
        wantMin time.Time
        wantMax time.Time
    }{
        {
            name: "Single annotation",
            args: args{
                span: &zipkincore.Span{
                    Annotations: []*zipkincore.Annotation{
                        newAnnotation(1000000),
                    },
                },
            },
            wantMin: time.Unix(1, 0).UTC(),
            wantMax: time.Unix(1, 0).UTC(),
        },
        {
            name: "Three annotations",
            args: args{
                span: &zipkincore.Span{
                    Annotations: []*zipkincore.Annotation{
                        newAnnotation(1000000),
                        newAnnotation(2000000),
                        newAnnotation(3000000),
                    },
                },
            },
            wantMin: time.Unix(1, 0).UTC(),
            wantMax: time.Unix(3, 0).UTC(),
        },
        {
            name: "Annotations are in the future",
            args: args{
                span: &zipkincore.Span{
                    Annotations: []*zipkincore.Annotation{
                        newAnnotation(3000000),
                    },
                },
            },
            wantMin: time.Unix(2, 0).UTC(),
            wantMax: time.Unix(3, 0).UTC(),
            now: func() time.Time {
                return time.Unix(2, 0).UTC()
            },
        },
        {
            name: "No Annotations",
            args: args{
                span: &zipkincore.Span{
                    Annotations: []*zipkincore.Annotation{},
                },
            },
            wantMin: time.Unix(2, 0).UTC(),
            wantMax: time.Unix(2, 0).UTC(),
            now: func() time.Time {
                return time.Unix(2, 0).UTC()
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if tt.now != nil {
                now = tt.now
            }
            got, got1 := minMax(tt.args.span)
            if !reflect.DeepEqual(got, tt.wantMin) {
                t.Errorf("minMax() got = %v, want %v", got, tt.wantMin)
            }
            if !reflect.DeepEqual(got1, tt.wantMax) {
                t.Errorf("minMax() got1 = %v, want %v", got1, tt.wantMax)
            }
            now = time.Now
        })
    }
}

func Test_host(t *testing.T) {
    type args struct {
        h *zipkincore.Endpoint
    }
    tests := []struct {
        name string
        args args
        want string
    }{
        {
            name: "Host Found",
            args: args{
                h: &zipkincore.Endpoint{
                    Ipv4: 1234,
                    Port: 8888,
                },
            },
            want: "0.0.4.210:8888",
        },
        {
            name: "No Host",
            args: args{
                h: nil,
            },
            want: "0.0.0.0",
        },
        {
            name: "int overflow zipkin uses an int16 type as an unsigned int 16.",
            args: args{
                h: &zipkincore.Endpoint{
                    Ipv4: 1234,
                    Port: -1,
                },
            },
            want: "0.0.4.210:65535",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := host(tt.args.h); got != tt.want {
                t.Errorf("host() = %v, want %v", got, tt.want)
            }
        })
    }
}

func Test_serviceName(t *testing.T) {
    type args struct {
        h *zipkincore.Endpoint
    }
    tests := []struct {
        name string
        args args
        want string
    }{
        {
            name: "Found ServiceName",
            args: args{
                h: &zipkincore.Endpoint{
                    ServiceName: "zipkin",
                },
            },
            want: "zipkin",
        },
        {
            name: "No ServiceName",
            args: args{
                h: nil,
            },
            want: "unknown",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := serviceName(tt.args.h); got != tt.want {
                t.Errorf("serviceName() = %v, want %v", got, tt.want)
            }
        })
    }
}
@@ -2,22 +2,23 @@ package zipkin

import (
    "compress/gzip"
    "fmt"
    "io/ioutil"
    "mime"
    "net/http"
    "strings"
    "sync"

    "github.com/apache/thrift/lib/go/thrift"
    "github.com/gorilla/mux"
    "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/jsonV1"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift"
)

// SpanHandler is an implementation of a Handler which accepts zipkin thrift
// span data and sends it to the recorder
type SpanHandler struct {
    Path      string
    recorder  Recorder
    waitGroup *sync.WaitGroup
    Path     string
    recorder Recorder
}

// NewSpanHandler returns a new server instance given path to handle
@@ -81,6 +82,12 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
        defer body.Close()
    }

    decoder, err := ContentDecoder(r)
    if err != nil {
        s.recorder.Error(err)
        w.WriteHeader(http.StatusUnsupportedMediaType)
    }

    octets, err := ioutil.ReadAll(body)
    if err != nil {
        s.recorder.Error(err)
@@ -88,14 +95,19 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
        return
    }

    spans, err := unmarshalThrift(octets)
    spans, err := decoder.Decode(octets)
    if err != nil {
        s.recorder.Error(err)
        w.WriteHeader(http.StatusInternalServerError)
        w.WriteHeader(http.StatusBadRequest)
        return
    }

    trace := NewTrace(spans)
    trace, err := codec.NewTrace(spans)
    if err != nil {
        s.recorder.Error(err)
        w.WriteHeader(http.StatusBadRequest)
        return
    }

    if err = s.recorder.Record(trace); err != nil {
        s.recorder.Error(err)
@@ -106,30 +118,25 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
    w.WriteHeader(http.StatusNoContent)
}

func unmarshalThrift(body []byte) ([]*zipkincore.Span, error) {
    buffer := thrift.NewTMemoryBuffer()
    if _, err := buffer.Write(body); err != nil {
        return nil, err
// ContentDecoder returns a Decoder that is able to produce Traces from bytes.
// Failure should yield an HTTP 415 (`http.StatusUnsupportedMediaType`).
// If a Content-Type is not set, zipkin assumes application/json.
func ContentDecoder(r *http.Request) (codec.Decoder, error) {
    contentType := r.Header.Get("Content-Type")
    if contentType == "" {
        return &jsonV1.JSON{}, nil
    }

    transport := thrift.NewTBinaryProtocolTransport(buffer)
    _, size, err := transport.ReadListBegin()
    if err != nil {
        return nil, err
    }

    spans := make([]*zipkincore.Span, size)
    for i := 0; i < size; i++ {
        zs := &zipkincore.Span{}
        if err = zs.Read(transport); err != nil {
            return nil, err
    for _, v := range strings.Split(contentType, ",") {
        t, _, err := mime.ParseMediaType(v)
        if err != nil {
            break
        }
        if t == "application/json" {
            return &jsonV1.JSON{}, nil
        } else if t == "application/x-thrift" {
            return &thrift.Thrift{}, nil
        }
        spans[i] = zs
    }

    if err = transport.ReadListEnd(); err != nil {
        return nil, err
    }

    return spans, nil
    return nil, fmt.Errorf("Unknown Content-Type: %s", contentType)
}
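The content-type dispatch in `ContentDecoder` can be exercised directly; a sketch using `net/http/httptest`, where the request target is illustrative:

package main

import (
    "fmt"
    "net/http/httptest"

    "github.com/influxdata/telegraf/plugins/inputs/zipkin"
)

func main() {
    // No Content-Type header: ContentDecoder falls back to JSON.
    r := httptest.NewRequest("POST", "/api/v1/spans", nil)
    d, err := zipkin.ContentDecoder(r)
    fmt.Printf("%T %v\n", d, err)

    // application/x-thrift selects the Thrift decoder instead.
    r = httptest.NewRequest("POST", "/api/v1/spans", nil)
    r.Header.Set("Content-Type", "application/x-thrift")
    d, err = zipkin.ContentDecoder(r)
    fmt.Printf("%T %v\n", d, err)
}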
@@ -10,14 +10,15 @@ import (
    "time"

    "github.com/google/go-cmp/cmp"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
)

type MockRecorder struct {
    Data Trace
    Data trace.Trace
    Err  error
}

func (m *MockRecorder) Record(t Trace) error {
func (m *MockRecorder) Record(t trace.Trace) error {
    m.Data = t
    return nil
}
@@ -39,6 +40,7 @@ func TestSpanHandler(t *testing.T) {
        ioutil.NopCloser(
            bytes.NewReader(dat)))

    r.Header.Set("Content-Type", "application/x-thrift")
    handler := NewSpanHandler("/api/v1/spans")
    mockRecorder := &MockRecorder{}
    handler.recorder = mockRecorder
@@ -51,8 +53,8 @@ func TestSpanHandler(t *testing.T) {
    got := mockRecorder.Data

    parentID := strconv.FormatInt(22964302721410078, 10)
    want := Trace{
        Span{
    want := trace.Trace{
        {
            Name:    "Child",
            ID:      "8090652509916334619",
            TraceID: "22c4fc8ab3669045",
@@ -60,18 +62,17 @@ func TestSpanHandler(t *testing.T) {
            Timestamp:   time.Unix(0, 1498688360851331*int64(time.Microsecond)).UTC(),
            Duration:    time.Duration(53106) * time.Microsecond,
            ServiceName: "trivial",
            Annotations: []Annotation{},
            BinaryAnnotations: []BinaryAnnotation{
                BinaryAnnotation{
            Annotations: []trace.Annotation{},
            BinaryAnnotations: []trace.BinaryAnnotation{
                {
                    Key:         "lc",
                    Value:       "trivial",
                    Host:        "127.0.0.1",
                    ServiceName: "trivial",
                    Type:        "STRING",
                },
            },
        },
        Span{
        {
            Name:    "Child",
            ID:      "103618986556047333",
            TraceID: "22c4fc8ab3669045",
@@ -79,18 +80,17 @@ func TestSpanHandler(t *testing.T) {
            Timestamp:   time.Unix(0, 1498688360904552*int64(time.Microsecond)).UTC(),
            Duration:    time.Duration(50410) * time.Microsecond,
            ServiceName: "trivial",
            Annotations: []Annotation{},
            BinaryAnnotations: []BinaryAnnotation{
                BinaryAnnotation{
            Annotations: []trace.Annotation{},
            BinaryAnnotations: []trace.BinaryAnnotation{
                {
                    Key:         "lc",
                    Value:       "trivial",
                    Host:        "127.0.0.1",
                    ServiceName: "trivial",
                    Type:        "STRING",
                },
            },
        },
        Span{
        {
            Name:    "Parent",
            ID:      "22964302721410078",
            TraceID: "22c4fc8ab3669045",
@@ -98,33 +98,32 @@ func TestSpanHandler(t *testing.T) {
            Timestamp:   time.Unix(0, 1498688360851318*int64(time.Microsecond)).UTC(),
            Duration:    time.Duration(103680) * time.Microsecond,
            ServiceName: "trivial",
            Annotations: []Annotation{
                Annotation{
            Annotations: []trace.Annotation{
                {
                    Timestamp:   time.Unix(0, 1498688360851325*int64(time.Microsecond)).UTC(),
                    Value:       "Starting child #0",
                    Host:        "127.0.0.1",
                    ServiceName: "trivial",
                },
                Annotation{
                {
                    Timestamp:   time.Unix(0, 1498688360904545*int64(time.Microsecond)).UTC(),
                    Value:       "Starting child #1",
                    Host:        "127.0.0.1",
                    ServiceName: "trivial",
                },
                Annotation{
                {
                    Timestamp:   time.Unix(0, 1498688360954992*int64(time.Microsecond)).UTC(),
                    Value:       "A Log",
                    Host:        "127.0.0.1",
                    ServiceName: "trivial",
                },
            },
            BinaryAnnotations: []BinaryAnnotation{
                BinaryAnnotation{
            BinaryAnnotations: []trace.BinaryAnnotation{
                {
                    Key:         "lc",
                    Value:       "trivial",
                    Host:        "127.0.0.1",
                    ServiceName: "trivial",
                    Type:        "STRING",
                },
            },
        },
188
plugins/inputs/zipkin/testdata/json/brave-tracer-example.json
vendored
Normal file
@@ -0,0 +1,188 @@
[
  {
    "traceId": "7312f822d43d0fd8",
    "id": "b26412d1ac16767d",
    "name": "http:/hi2",
    "parentId": "7312f822d43d0fd8",
    "annotations": [
      {
        "timestamp": 1503031538791000,
        "value": "sr",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "timestamp": 1503031538794000,
        "value": "ss",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      }
    ],
    "binaryAnnotations": [
      {
        "key": "mvc.controller.class",
        "value": "Demo2Application",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "mvc.controller.method",
        "value": "hi2",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "spring.instance_id",
        "value": "192.168.0.8:test:8010",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      }
    ]
  },
  {
    "traceId": "7312f822d43d0fd8",
    "id": "b26412d1ac16767d",
    "name": "http:/hi2",
    "parentId": "7312f822d43d0fd8",
    "timestamp": 1503031538786000,
    "duration": 10000,
    "annotations": [
      {
        "timestamp": 1503031538786000,
        "value": "cs",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "timestamp": 1503031538796000,
        "value": "cr",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      }
    ],
    "binaryAnnotations": [
      {
        "key": "http.host",
        "value": "localhost",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "http.method",
        "value": "GET",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "http.path",
        "value": "/hi2",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "http.url",
        "value": "http://localhost:8010/hi2",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "spring.instance_id",
        "value": "192.168.0.8:test:8010",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      }
    ]
  },
  {
    "traceId": "7312f822d43d0fd8",
    "id": "7312f822d43d0fd8",
    "name": "http:/hi",
    "timestamp": 1503031538778000,
    "duration": 23393,
    "annotations": [
      {
        "timestamp": 1503031538778000,
        "value": "sr",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "timestamp": 1503031538801000,
        "value": "ss",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      }
    ],
    "binaryAnnotations": [
      {
        "key": "mvc.controller.class",
        "value": "Demo2Application",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "mvc.controller.method",
        "value": "hi",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      },
      {
        "key": "spring.instance_id",
        "value": "192.168.0.8:test:8010",
        "endpoint": {
          "serviceName": "test",
          "ipv4": "192.168.0.8",
          "port": 8010
        }
      }
    ]
  }
]
41
plugins/inputs/zipkin/trace/trace.go
Normal file
@@ -0,0 +1,41 @@
package trace

import (
    "time"
)

// Trace is an array (or a series) of spans
type Trace []Span

// Span represents a specific zipkin span. It holds the majority of the same
// data as a zipkin span sent via the thrift protocol, but is presented in a
// format which is more straightforward for storage purposes.
type Span struct {
    ID                string
    TraceID           string // zipkin traceid high concat with traceid
    Name              string
    ParentID          string
    ServiceName       string
    Timestamp         time.Time // If zipkin input is nil then time.Now()
    Duration          time.Duration
    Annotations       []Annotation
    BinaryAnnotations []BinaryAnnotation
}

// BinaryAnnotation represents a zipkin binary annotation. It contains
// all of the same fields as might be found in its zipkin counterpart.
type BinaryAnnotation struct {
    Key         string
    Value       string
    Host        string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
    ServiceName string
}

// Annotation represents an ordinary zipkin annotation. It contains the data fields
// which will become fields/tags in influxdb
type Annotation struct {
    Timestamp   time.Time
    Value       string
    Host        string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
    ServiceName string
}
@@ -8,11 +8,11 @@ import (
    "net/http"
    "strconv"
    "sync"
    "time"

    "github.com/gorilla/mux"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
)

const (
@@ -32,7 +32,7 @@ const (
// Recorder represents a type which can record zipkin trace data as well as
// any accompanying errors, and process that data.
type Recorder interface {
    Record(Trace) error
    Record(trace.Trace) error
    Error(error)
}

@@ -42,43 +42,6 @@ type Handler interface {
    Register(router *mux.Router, recorder Recorder) error
}

// BinaryAnnotation represents a zipkin binary annotation. It contains
// all of the same fields as might be found in its zipkin counterpart.
type BinaryAnnotation struct {
    Key         string
    Value       string
    Host        string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
    ServiceName string
    Type        string
}

// Annotation represents an ordinary zipkin annotation. It contains the data fields
// which will become fields/tags in influxdb
type Annotation struct {
    Timestamp   time.Time
    Value       string
    Host        string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
    ServiceName string
}

// Span represents a specific zipkin span. It holds the majority of the same
// data as a zipkin span sent via the thrift protocol, but is presented in a
// format which is more straightforward for storage purposes.
type Span struct {
    ID                string
    TraceID           string // zipkin traceid high concat with traceid
    Name              string
    ParentID          string
    ServiceName       string
    Timestamp         time.Time // If zipkin input is nil then time.Now()
    Duration          time.Duration
    Annotations       []Annotation
    BinaryAnnotations []BinaryAnnotation
}

// Trace is an array (or a series) of spans
type Trace []Span

const sampleConfig = `
  # path = "/api/v1/spans" # URL path for span data
  # port = 9411 # Port on which Telegraf listens
@@ -122,7 +85,9 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error {

    router := mux.NewRouter()
    converter := NewLineProtocolConverter(acc)
    z.handler.Register(router, converter)
    if err := z.handler.Register(router, converter); err != nil {
        return err
    }

    z.server = &http.Server{
        Handler: router,
@@ -16,14 +16,16 @@ func TestZipkinPlugin(t *testing.T) {
|
||||
mockAcc := testutil.Accumulator{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
thriftDataFile string //path name to a binary thrift data file which contains test data
|
||||
wantErr bool
|
||||
want []testutil.Metric
|
||||
name string
|
||||
datafile string // data file which contains test data
|
||||
contentType string
|
||||
wantErr bool
|
||||
want []testutil.Metric
|
||||
}{
|
||||
{
|
||||
name: "threespan",
|
||||
thriftDataFile: "testdata/threespans.dat",
|
||||
name: "threespan",
|
||||
datafile: "testdata/threespans.dat",
|
||||
contentType: "application/x-thrift",
|
||||
want: []testutil.Metric{
|
||||
testutil.Metric{
|
||||
Measurement: "zipkin",
|
||||
@@ -170,8 +172,9 @@ func TestZipkinPlugin(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "distributed_trace_sample",
|
||||
thriftDataFile: "testdata/distributed_trace_sample.dat",
|
||||
name: "distributed_trace_sample",
|
||||
datafile: "testdata/distributed_trace_sample.dat",
|
||||
contentType: "application/x-thrift",
|
||||
want: []testutil.Metric{
|
||||
testutil.Metric{
|
||||
Measurement: "zipkin",
|
||||
@@ -185,7 +188,6 @@ func TestZipkinPlugin(t *testing.T) {
|
||||
Fields: map[string]interface{}{
|
||||
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
|
||||
},
|
||||
//Time: time.Unix(1, 0).UTC(),
|
||||
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
|
||||
},
|
||||
testutil.Metric{
|
||||
@@ -202,7 +204,6 @@ func TestZipkinPlugin(t *testing.T) {
|
||||
Fields: map[string]interface{}{
|
||||
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
|
||||
},
|
||||
//Time: time.Unix(1, 0).UTC(),
|
||||
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
|
||||
},
|
||||
testutil.Metric{
|
||||
@@ -223,6 +224,337 @@ func TestZipkinPlugin(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
+        {
+            name:        "JSON rather than thrift",
+            datafile:    "testdata/json/brave-tracer-example.json",
+            contentType: "application/json",
+            want: []testutil.Metric{
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "id":           "12854419928166856317",
+                        "name":         "http:/hi2",
+                        "parent_id":    "8291962692415852504",
+                        "service_name": "test",
+                        "trace_id":     "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(3000000),
+                    }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":    "sr",
+                        "endpoint_host": "192.168.0.8:8010",
+                        "id":            "12854419928166856317",
+                        "name":          "http:/hi2",
+                        "parent_id":     "8291962692415852504",
+                        "service_name":  "test",
+                        "trace_id":      "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(3000000),
+                    },
+                    Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":    "ss",
+                        "endpoint_host": "192.168.0.8:8010",
+                        "id":            "12854419928166856317",
+                        "name":          "http:/hi2",
+                        "parent_id":     "8291962692415852504",
+                        "service_name":  "test",
+                        "trace_id":      "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(3000000),
+                    },
+                    Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "Demo2Application",
+                        "annotation_key": "mvc.controller.class",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(3000000),
+                    },
+                    Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "hi2",
+                        "annotation_key": "mvc.controller.method",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(3000000),
+                    },
+                    Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "192.168.0.8:test:8010",
+                        "annotation_key": "spring.instance_id",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(3000000),
+                    },
+                    Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "id":           "12854419928166856317",
+                        "name":         "http:/hi2",
+                        "parent_id":    "8291962692415852504",
+                        "service_name": "test",
+                        "trace_id":     "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":    "cs",
+                        "endpoint_host": "192.168.0.8:8010",
+                        "id":            "12854419928166856317",
+                        "name":          "http:/hi2",
+                        "parent_id":     "8291962692415852504",
+                        "service_name":  "test",
+                        "trace_id":      "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":    "cr",
+                        "endpoint_host": "192.168.0.8:8010",
+                        "id":            "12854419928166856317",
+                        "name":          "http:/hi2",
+                        "parent_id":     "8291962692415852504",
+                        "service_name":  "test",
+                        "trace_id":      "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "localhost",
+                        "annotation_key": "http.host",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "GET",
+                        "annotation_key": "http.method",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "/hi2",
+                        "annotation_key": "http.path",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "http://localhost:8010/hi2",
+                        "annotation_key": "http.url",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "192.168.0.8:test:8010",
+                        "annotation_key": "spring.instance_id",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "12854419928166856317",
+                        "name":           "http:/hi2",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(10000000),
+                    },
+                    Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "id":           "8291962692415852504",
+                        "name":         "http:/hi",
+                        "parent_id":    "8291962692415852504",
+                        "service_name": "test",
+                        "trace_id":     "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(23393000),
+                    },
+                    Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+                },
+                {
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":    "sr",
+                        "endpoint_host": "192.168.0.8:8010",
+                        "id":            "8291962692415852504",
+                        "name":          "http:/hi",
+                        "parent_id":     "8291962692415852504",
+                        "service_name":  "test",
+                        "trace_id":      "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(23393000),
+                    },
+                    Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+                },
+                testutil.Metric{
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":    "ss",
+                        "endpoint_host": "192.168.0.8:8010",
+                        "id":            "8291962692415852504",
+                        "name":          "http:/hi",
+                        "parent_id":     "8291962692415852504",
+                        "service_name":  "test",
+                        "trace_id":      "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(23393000),
+                    },
+                    Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+                },
+                testutil.Metric{
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "Demo2Application",
+                        "annotation_key": "mvc.controller.class",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "8291962692415852504",
+                        "name":           "http:/hi",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(23393000),
+                    },
+                    Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+                },
+                testutil.Metric{
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "hi",
+                        "annotation_key": "mvc.controller.method",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "8291962692415852504",
+                        "name":           "http:/hi",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(23393000),
+                    },
+                    Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+                },
+                testutil.Metric{
+                    Measurement: "zipkin",
+                    Tags: map[string]string{
+                        "annotation":     "192.168.0.8:test:8010",
+                        "annotation_key": "spring.instance_id",
+                        "endpoint_host":  "192.168.0.8:8010",
+                        "id":             "8291962692415852504",
+                        "name":           "http:/hi",
+                        "parent_id":      "8291962692415852504",
+                        "service_name":   "test",
+                        "trace_id":       "7312f822d43d0fd8",
+                    },
+                    Fields: map[string]interface{}{
+                        "duration_ns": int64(23393000),
+                    },
+                    Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
+                },
+            },
+        },
     }

     z := &Zipkin{
@@ -240,7 +572,7 @@ func TestZipkinPlugin(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
             mockAcc.ClearMetrics()
-            if err := postThriftData(tt.thriftDataFile, z.address); err != nil {
+            if err := postThriftData(tt.datafile, z.address, tt.contentType); err != nil {
                 t.Fatalf("Posting data to http endpoint /api/v1/spans failed. Error: %s\n", err)
             }
             mockAcc.Wait(len(tt.want)) //Since the server is running concurrently, we need to wait for the number of data points we want to test to be added to the Accumulator.
@@ -252,7 +584,6 @@ func TestZipkinPlugin(t *testing.T) {
             for _, m := range mockAcc.Metrics {
                 got = append(got, *m)
             }

             if !cmp.Equal(tt.want, got) {
                 t.Fatalf("Got != Want\n %s", cmp.Diff(tt.want, got))
             }
@@ -266,19 +597,18 @@ func TestZipkinPlugin(t *testing.T) {
     }
 }

-func postThriftData(datafile, address string) error {
+func postThriftData(datafile, address, contentType string) error {
     dat, err := ioutil.ReadFile(datafile)
     if err != nil {
         return fmt.Errorf("could not read from data file %s", datafile)
     }

     req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/v1/spans", address), bytes.NewReader(dat))
     if err != nil {
         return fmt.Errorf("HTTP request creation failed")
     }

-    req.Header.Set("Content-Type", "application/x-thrift")
+    req.Header.Set("Content-Type", contentType)
     client := &http.Client{}
     _, err = client.Do(req)
     if err != nil {
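With the content type parameterized, the same `/api/v1/spans` endpoint now accepts both encodings, as the JSON test case above exercises. As a rough standalone illustration (not part of the patch), posting a JSON span batch by hand could look like the sketch below; the file name `spans.json` is a placeholder, and the port and path come from the sample config.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// spans.json is a hypothetical Zipkin v1 JSON span batch.
	dat, err := ioutil.ReadFile("spans.json")
	if err != nil {
		panic(err)
	}

	// Default listener from the sample config: port 9411, path /api/v1/spans.
	req, err := http.NewRequest("POST", "http://localhost:9411/api/v1/spans", bytes.NewReader(dat))
	if err != nil {
		panic(err)
	}
	// "application/x-thrift" would submit a binary thrift payload instead.
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```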
@@ -193,6 +193,25 @@ func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum {
             continue
         }

+        // Do CloudWatch boundary checking
+        // Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html
+        if math.IsNaN(value) {
+            datums = datums[:len(datums)-1]
+            continue
+        }
+        if math.IsInf(value, 0) {
+            datums = datums[:len(datums)-1]
+            continue
+        }
+        if value > 0 && value < float64(8.515920e-109) {
+            datums = datums[:len(datums)-1]
+            continue
+        }
+        if value > float64(1.174271e+108) {
+            datums = datums[:len(datums)-1]
+            continue
+        }
+
         datums[i] = &cloudwatch.MetricDatum{
             MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
             Value:      aws.Float64(value),
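The checks above can be read as a single predicate: NaN and infinities are rejected, and positive values must fall within CloudWatch's documented range of 8.515920e-109 to 1.174271e+108. A minimal standalone sketch (not part of the patch; the helper name is illustrative) mirroring that logic:

```go
package main

import (
	"fmt"
	"math"
)

// acceptedByCloudWatch mirrors the boundary checks in the diff above.
// Note that, as in the patch, only positive values are range-checked.
func acceptedByCloudWatch(v float64) bool {
	if math.IsNaN(v) || math.IsInf(v, 0) {
		return false
	}
	if v > 0 && v < 8.515920e-109 {
		return false // too small for the PutMetricData API
	}
	if v > 1.174271e+108 {
		return false // too large for the PutMetricData API
	}
	return true
}

func main() {
	for _, v := range []float64{1, 0, math.Log(-1.0), 8.515919e-109, 1.174272e+108} {
		fmt.Printf("%v -> %v\n", v, acceptedByCloudWatch(v))
	}
}
```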
@@ -1,6 +1,8 @@
 package cloudwatch

 import (
+    "fmt"
+    "math"
     "sort"
     "testing"

@@ -51,22 +53,32 @@ func TestBuildDimensions(t *testing.T) {
 func TestBuildMetricDatums(t *testing.T) {
     assert := assert.New(t)

+    zero := 0.0
     validMetrics := []telegraf.Metric{
         testutil.TestMetric(1),
         testutil.TestMetric(int32(1)),
         testutil.TestMetric(int64(1)),
         testutil.TestMetric(float64(1)),
+        testutil.TestMetric(float64(0)),
+        testutil.TestMetric(math.Copysign(zero, -1)), // the CW documentation does not call out -0 as rejected
+        testutil.TestMetric(float64(8.515920e-109)),
+        testutil.TestMetric(float64(1.174271e+108)), // largest should be 1.174271e+108
         testutil.TestMetric(true),
     }
+
+    invalidMetrics := []telegraf.Metric{
+        testutil.TestMetric("Foo"),
+        testutil.TestMetric(math.Log(-1.0)),
+        testutil.TestMetric(float64(8.515919e-109)), // smallest should be 8.515920e-109
+        testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108
+    }
+
     for _, point := range validMetrics {
         datums := BuildMetricDatum(point)
-        assert.Equal(1, len(datums), "Valid type should create a Datum")
+        assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point))
     }
+    for _, point := range invalidMetrics {
+        datums := BuildMetricDatum(point)
+        assert.Equal(0, len(datums), fmt.Sprintf("Invalid point should not create a Datum {value: %v}", point))
+    }

-    nonValidPoint := testutil.TestMetric("Foo")
-
-    assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum")
 }

 func TestPartitionDatums(t *testing.T) {
@@ -78,10 +90,13 @@ func TestPartitionDatums(t *testing.T) {
         Value: aws.Float64(1),
     }

+    zeroDatum := []*cloudwatch.MetricDatum{}
     oneDatum := []*cloudwatch.MetricDatum{&testDatum}
     twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum}
     threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum}

+    assert.Equal([][]*cloudwatch.MetricDatum{}, PartitionDatums(2, zeroDatum))
+    assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
     assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
     assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum))
     assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum))
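From the assertions, `PartitionDatums` evidently splits a batch into chunks of at most the given size, with a short final chunk (three datums at size two yield a pair plus a singleton), and an empty input yields an empty result. A generic sketch of that behavior, under the assumption that this is all the function does:

```go
package main

import "fmt"

// partition splits items into chunks of at most size elements; the last
// chunk may be shorter. An empty input yields an empty (non-nil) result,
// matching the zeroDatum assertion above.
func partition(size int, items []int) [][]int {
	out := [][]int{}
	for i := 0; i < len(items); i += size {
		end := i + size
		if end > len(items) {
			end = len(items)
		}
		out = append(out, items[i:end])
	}
	return out
}

func main() {
	fmt.Println(partition(2, []int{}))        // []
	fmt.Println(partition(2, []int{1}))       // [[1]]
	fmt.Println(partition(2, []int{1, 2, 3})) // [[1 2] [3]]
}
```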
@@ -44,6 +44,9 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
   ## HTTP Proxy Config
   # http_proxy = "http://corporate.proxy:3128"

+  ## Optional HTTP headers
+  # http_headers = {"X-Special-Header" = "Special-Value"}
+
   ## Compress each HTTP request payload using GZIP.
   # content_encoding = "gzip"
 ```
@@ -70,4 +73,5 @@ to write to. Each URL should start with either `http://` or `udp://`
 * `ssl_key`: SSL key
 * `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
 * `http_proxy`: HTTP Proxy URI
+* `http_headers`: HTTP headers to add to each HTTP request
 * `content_encoding`: Compress each HTTP request payload using gzip if set to: "gzip"
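Uncommented, the new option sits in the output section like this (a sketch; the header name and value are placeholders from the sample config):

```toml
[[outputs.influxdb]]
  urls = ["http://localhost:8086"]
  database = "telegraf"

  ## Extra headers sent with every write request
  http_headers = {"X-Special-Header" = "Special-Value"}
```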
@@ -68,6 +68,8 @@ func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) {
     }, nil
 }

+type HTTPHeaders map[string]string
+
 type HTTPConfig struct {
     // URL should be of the form "http://host:port" (REQUIRED)
     URL string
@@ -95,6 +97,9 @@ type HTTPConfig struct {
     // Proxy URL should be of the form "http://host:port"
     HTTPProxy string

+    // HTTP headers to append to HTTP requests.
+    HTTPHeaders HTTPHeaders
+
     // The content encoding mechanism to use for each request.
     ContentEncoding string
 }
@@ -253,6 +258,11 @@ func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, err
     if err != nil {
         return nil, err
     }

+    for header, value := range c.config.HTTPHeaders {
+        req.Header.Set(header, value)
+    }
+
     req.Header.Set("Content-Type", "text/plain")
     req.Header.Set("User-Agent", c.config.UserAgent)
     if c.config.Username != "" && c.config.Password != "" {
@@ -55,6 +55,13 @@ func TestHTTPClient_Write(t *testing.T) {
             fmt.Fprintln(w, `{"results":[{}],"error":"basic auth incorrect"}`)
         }

+        // test that user-specified http header is set properly
+        if r.Header.Get("X-Test-Header") != "Test-Value" {
+            w.WriteHeader(http.StatusTeapot)
+            w.Header().Set("Content-Type", "application/json")
+            fmt.Fprintln(w, `{"results":[{}],"error":"wrong http header value"}`)
+        }
+
         // Validate Content-Length Header
         if r.ContentLength != 13 {
             w.WriteHeader(http.StatusTeapot)
@@ -90,6 +97,9 @@ func TestHTTPClient_Write(t *testing.T) {
         UserAgent: "test-agent",
         Username:  "test-user",
         Password:  "test-password",
+        HTTPHeaders: HTTPHeaders{
+            "X-Test-Header": "Test-Value",
+        },
     }
     wp := WriteParams{
         Database: "test",
@@ -32,9 +32,10 @@ type InfluxDB struct {
     RetentionPolicy  string
     WriteConsistency string
     Timeout          internal.Duration
-    UDPPayload       int    `toml:"udp_payload"`
-    HTTPProxy        string `toml:"http_proxy"`
-    ContentEncoding  string `toml:"content_encoding"`
+    UDPPayload       int               `toml:"udp_payload"`
+    HTTPProxy        string            `toml:"http_proxy"`
+    HTTPHeaders      map[string]string `toml:"http_headers"`
+    ContentEncoding  string            `toml:"content_encoding"`

     // Path to CA file
     SSLCA string `toml:"ssl_ca"`
@@ -88,7 +89,10 @@ var sampleConfig = `

   ## HTTP Proxy Config
   # http_proxy = "http://corporate.proxy:3128"

+  ## Optional HTTP headers
+  # http_headers = {"X-Special-Header" = "Special-Value"}
+
   ## Compress each HTTP request payload using GZIP.
   # content_encoding = "gzip"
 `
@@ -132,8 +136,12 @@ func (i *InfluxDB) Connect() error {
                 Username:        i.Username,
                 Password:        i.Password,
                 HTTPProxy:       i.HTTPProxy,
+                HTTPHeaders:     client.HTTPHeaders{},
                 ContentEncoding: i.ContentEncoding,
             }
+            for header, value := range i.HTTPHeaders {
+                config.HTTPHeaders[header] = value
+            }
             wp := client.WriteParams{
                 Database:        i.Database,
                 RetentionPolicy: i.RetentionPolicy,
@@ -199,6 +207,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
                     i.Database)
             }
         }
+
         if strings.Contains(e.Error(), "field type conflict") {
             log.Printf("E! Field type conflict, dropping conflicted points: %s", e)
             // setting err to nil, otherwise we will keep retrying and points
@@ -206,6 +215,31 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
             err = nil
             break
         }
+
+        if strings.Contains(e.Error(), "points beyond retention policy") {
+            log.Printf("W! Points beyond retention policy: %s", e)
+            // This error indicates the point is older than the
+            // retention policy permits, and is probably not a cause for
+            // concern. Retrying will not help unless the retention
+            // policy is modified.
+            err = nil
+            break
+        }
+
+        if strings.Contains(e.Error(), "unable to parse") {
+            log.Printf("E! Parse error; dropping points: %s", e)
+            // This error indicates a bug in Telegraf or InfluxDB parsing
+            // of line protocol. Retries will not be successful.
+            err = nil
+            break
+        }
+
+        if strings.Contains(e.Error(), "hinted handoff queue not empty") {
+            // This is an informational message
+            err = nil
+            break
+        }
+
         // Log write failure
         log.Printf("E! InfluxDB Output Error: %s", e)
     } else {
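Taken together, the Write path now treats four error classes as terminal rather than retryable. A condensed sketch of that classification (illustrative only; the helper name is not in the plugin):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// nonRetryable reports whether an InfluxDB write error should be dropped
// instead of retried, mirroring the strings.Contains checks above.
func nonRetryable(err error) bool {
	for _, msg := range []string{
		"field type conflict",            // conflicting points already dropped server-side
		"points beyond retention policy", // data too old; retrying cannot help
		"unable to parse",                // malformed line protocol; retries will fail too
		"hinted handoff queue not empty", // informational, not a failure
	} {
		if strings.Contains(err.Error(), msg) {
			return true
		}
	}
	return false
}

func main() {
	err := errors.New("partial write: points beyond retention policy dropped=1")
	fmt.Println(nonRetryable(err)) // true
}
```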
@@ -178,28 +178,107 @@ func TestHTTPError_DatabaseNotFound(t *testing.T) {
     require.NoError(t, i.Close())
 }

-// field type conflict does not return an error, instead we
-func TestHTTPError_FieldTypeConflict(t *testing.T) {
-    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-        switch r.URL.Path {
-        case "/write":
-            w.WriteHeader(http.StatusNotFound)
-            w.Header().Set("Content-Type", "application/json")
-            fmt.Fprintln(w, `{"results":[{}],"error":"field type conflict: input field \"value\" on measurement \"test\" is type integer, already exists as type float dropped=1"}`)
-        }
-    }))
-    defer ts.Close()
-
-    i := InfluxDB{
-        URLs:     []string{ts.URL},
-        Database: "test",
+func TestHTTPError_WriteErrors(t *testing.T) {
+    var testCases = []struct {
+        name        string
+        status      int
+        contentType string
+        body        string
+        err         error
+    }{
+        {
+            // HTTP/1.1 400 Bad Request
+            // Content-Type: application/json
+            // X-Influxdb-Version: 1.3.3
+            //
+            // {
+            //   "error": "partial write: points beyond retention policy dropped=1"
+            // }
+            name:        "beyond retention policy is not an error",
+            status:      http.StatusBadRequest,
+            contentType: "application/json",
+            body:        `{"error":"partial write: points beyond retention policy dropped=1"}`,
+            err:         nil,
+        },
+        {
+            // HTTP/1.1 400 Bad Request
+            // Content-Type: application/json
+            // X-Influxdb-Version: 1.3.3
+            //
+            // {
+            //   "error": "unable to parse 'foo bar=': missing field value"
+            // }
+            name:        "unable to parse is not an error",
+            status:      http.StatusBadRequest,
+            contentType: "application/json",
+            body:        `{"error":"unable to parse 'foo bar=': missing field value"}`,
+            err:         nil,
+        },
+        {
+            // HTTP/1.1 400 Bad Request
+            // Content-Type: application/json
+            // X-Influxdb-Version: 1.3.3
+            //
+            // {
+            //   "error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"
+            // }
+            name:        "field type conflict is not an error",
+            status:      http.StatusBadRequest,
+            contentType: "application/json",
+            body:        `{"error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"}`,
+            err:         nil,
+        },
+        {
+            // HTTP/1.1 500 Internal Server Error
+            // Content-Type: application/json
+            // X-Influxdb-Version: 1.3.3-c1.3.3
+            //
+            // {
+            //   "error": "write failed: hinted handoff queue not empty"
+            // }
+            name:        "hinted handoff queue not empty is not an error",
+            status:      http.StatusInternalServerError,
+            contentType: "application/json",
+            body:        `{"error":"write failed: hinted handoff queue not empty"}`,
+            err:         nil,
+        },
+        {
+            // HTTP/1.1 500 Internal Server Error
+            // Content-Type: application/json
+            // X-Influxdb-Version: 1.3.3-c1.3.3
+            //
+            // {
+            //   "error": "partial write"
+            // }
+            name:        "plain partial write is an error",
+            status:      http.StatusInternalServerError,
+            contentType: "application/json",
+            body:        `{"error":"partial write"}`,
+            err:         fmt.Errorf("Could not write to any InfluxDB server in cluster"),
+        },
     }

-    err := i.Connect()
-    require.NoError(t, err)
-    err = i.Write(testutil.MockMetrics())
-    require.NoError(t, err)
-    require.NoError(t, i.Close())
+    for _, tt := range testCases {
+        t.Run(tt.name, func(t *testing.T) {
+            ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+                rw.WriteHeader(tt.status)
+                rw.Header().Set("Content-Type", tt.contentType)
+                fmt.Fprintln(rw, tt.body)
+            }))
+            defer ts.Close()
+
+            influx := InfluxDB{
+                URLs:     []string{ts.URL},
+                Database: "test",
+            }
+
+            err := influx.Connect()
+            require.NoError(t, err)
+            err = influx.Write(testutil.MockMetrics())
+            require.Equal(t, tt.err, err)
+            require.NoError(t, influx.Close())
+        })
+    }
 }

 type MockClient struct {
@@ -32,13 +32,22 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
     }

     for fieldName, value := range metric.Fields() {
-        // Convert value to string
-        valueS := fmt.Sprintf("%#v", value)
-        point := []byte(fmt.Sprintf("%s %s %d\n",
+        switch v := value.(type) {
+        case string:
+            continue
+        case bool:
+            if v {
+                value = 1
+            } else {
+                value = 0
+            }
+        }
+        metricString := fmt.Sprintf("%s %#v %d\n",
             // insert "field" section of template
             sanitizedChars.Replace(InsertField(bucket, fieldName)),
-            sanitizedChars.Replace(valueS),
-            timestamp))
+            value,
+            timestamp)
+        point := []byte(metricString)
         out = append(out, point...)
     }
     return out, nil
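In short, string fields are now skipped entirely (graphite only accepts numbers) and booleans are coerced to 1 or 0, exactly as the new tests below assert. A small standalone sketch of the field handling (illustrative; it uses a pre-resolved bucket name instead of the template machinery):

```go
package main

import "fmt"

// serializeField mirrors the switch added above: strings are dropped,
// booleans become 1/0, everything else is printed with %#v.
func serializeField(bucket string, value interface{}, ts int64) string {
	switch v := value.(type) {
	case string:
		return "" // non-numeric: skipped
	case bool:
		if v {
			value = 1
		} else {
			value = 0
		}
	}
	return fmt.Sprintf("%s %#v %d\n", bucket, value, ts)
}

func main() {
	fmt.Print(serializeField("localhost.enabled.cpu0.us-west-2.cpu", true, 1503031538))
	fmt.Print(serializeField("localhost.value.cpu0.us-west-2.cpu", "asdasd", 1503031538)) // prints nothing
	fmt.Print(serializeField("localhost.usage.cpu0.us-west-2.cpu", 91.5, 1503031538))
}
```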
@@ -165,6 +165,56 @@ func TestSerializeValueField2(t *testing.T) {
     assert.Equal(t, expS, mS)
 }

+func TestSerializeValueString(t *testing.T) {
+    now := time.Now()
+    tags := map[string]string{
+        "host":       "localhost",
+        "cpu":        "cpu0",
+        "datacenter": "us-west-2",
+    }
+    fields := map[string]interface{}{
+        "value": "asdasd",
+    }
+    m, err := metric.New("cpu", tags, fields, now)
+    assert.NoError(t, err)
+
+    s := GraphiteSerializer{
+        Template: "host.field.tags.measurement",
+    }
+    buf, _ := s.Serialize(m)
+    mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
+    assert.NoError(t, err)
+    assert.Equal(t, "", mS[0])
+}
+
+func TestSerializeValueBoolean(t *testing.T) {
+    now := time.Now()
+    tags := map[string]string{
+        "host":       "localhost",
+        "cpu":        "cpu0",
+        "datacenter": "us-west-2",
+    }
+    fields := map[string]interface{}{
+        "enabled":  true,
+        "disabled": false,
+    }
+    m, err := metric.New("cpu", tags, fields, now)
+    assert.NoError(t, err)
+
+    s := GraphiteSerializer{
+        Template: "host.field.tags.measurement",
+    }
+    buf, _ := s.Serialize(m)
+    mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
+    assert.NoError(t, err)
+
+    expS := []string{
+        fmt.Sprintf("localhost.enabled.cpu0.us-west-2.cpu 1 %d", now.Unix()),
+        fmt.Sprintf("localhost.disabled.cpu0.us-west-2.cpu 0 %d", now.Unix()),
+    }
+    assert.Equal(t, expS, mS)
+}
+
 // test that fields with spaces get fixed.
 func TestSerializeFieldWithSpaces(t *testing.T) {
     now := time.Now()
@@ -61,8 +61,6 @@ exit_if_fail sleep 60
 exit_if_fail go test -race ./...

 # Simple Integration Tests
-# check that version was properly set
-exit_if_fail "./telegraf -version | grep $VERSION"
 # check that one test cpu & mem output work
 tmpdir=$(mktemp -d)
 ./telegraf -sample-config > $tmpdir/config.toml
@@ -97,6 +97,5 @@ elif [[ -f /etc/os-release ]]; then
     else
         install_chkconfig
     fi
-    /etc/init.d/telegraf restart
     fi
 fi